mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-16 00:51:51 -04:00
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Paolo Bonzini:
"On top of a lot of Arm fixes, this includes a massive rename of types
and variables in tools/testing/selftests/kvm - these were
unnecessarily different from what the kernel uses, so they're being
made consistent.
arm64:
- Allow tracing for non-pKVM, which was accidentally disabled when
the series was merged
- Rationalise the way the pKVM hypercall ranges are defined by using
the same mechanism as already used for the vcpu_sysreg enum
- Enforce that SMCCC function numbers relayed by the pKVM proxy are
actually compliant with the specification
- Fix a couple of feature-to-idreg mappings which resulted in the
wrong sanitisation being applied
- Fix the GICD_IIDR revision number field that could never be
written correctly by userspace
- Make kvm_vcpu_initialized() correctly use its parameter instead of
relying on the surrounding context
- Enforce correct ordering in __pkvm_init_vcpu(), plugging a
potential pin leak at the same time
- Move __pkvm_init_finalise() to a less dangerous spot, avoiding
future problems
- Restore functional userspace irqchip support after a four-year
breakage (last functional kernel was 5.18...)
- Spelling fixes
Selftests:
- Rename types across all KVM selftests to more closely align with
types used in the kernel:
vm_vaddr_t -> gva_t
vm_paddr_t -> gpa_t
uint64_t -> u64
uint32_t -> u32
uint16_t -> u16
uint8_t -> u8
int64_t -> s64
int32_t -> s32
int16_t -> s16
int8_t -> s8
- Fix Loongarch compilation"
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (31 commits)
KVM: selftests: Add check_steal_time_uapi() implementation for LoongArch
KVM: arm64: Wake-up from WFI when irqchip is in userspace
KVM: arm64: Fix initialisation order in __pkvm_init_finalise()
KVM: arm64: Fix pin leak and publication ordering in __pkvm_init_vcpu()
KVM: arm64: Fix kvm_vcpu_initialized() macro parameter
KVM: arm64: Fix FEAT_SPE_FnE to use PMSIDR_EL1.FnE, not PMSVer
KVM: arm64: Fix typo in feature check comments
KVM: arm64: Fix FEAT_Debugv8p9 to check DebugVer, not PMUVer
KVM: arm64: Reject non compliant SMCCC function calls in pKVM
KVM: arm64: vgic: Fix IIDR revision field extracted from wrong value
KVM: selftests: Replace "paddr" with "gpa" throughout
KVM: selftests: Replace "u64 nested_paddr" with "gpa_t l2_gpa"
KVM: selftests: Replace "u64 gpa" with "gpa_t" throughout
KVM: selftests: Replace "vaddr" with "gva" throughout
KVM: selftests: Clarify that arm64's inject_uer() takes a host PA, not a guest PA
KVM: selftests: Rename translate_to_host_paddr() => translate_hva_to_hpa()
KVM: selftests: Rename vm_vaddr_populate_bitmap() => vm_populate_gva_bitmap()
KVM: selftests: Rename vm_vaddr_unused_gap() => vm_unused_gva_gap()
KVM: selftests: Drop "vaddr_" from APIs that allocate memory for a given VM
KVM: selftests: Use u8 instead of uint8_t
...
@@ -50,6 +50,9 @@
 #include <linux/mm.h>
 
+#define MARKER(m) \
+    m, __after_##m = m - 1
+
 enum __kvm_host_smccc_func {
     /* Hypercalls that are unavailable once pKVM has finalised. */
     /* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */
@@ -59,8 +62,10 @@ enum __kvm_host_smccc_func {
     __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs,
     __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs,
     __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config,
+
+    MARKER(__KVM_HOST_SMCCC_FUNC_MIN_PKVM),
+
     __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,
-    __KVM_HOST_SMCCC_FUNC_MIN_PKVM = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,
 
     /* Hypercalls that are always available and common to [nh]VHE/pKVM. */
     __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
@@ -72,11 +77,20 @@ enum __kvm_host_smccc_func {
     __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range,
     __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
     __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
+    __KVM_HOST_SMCCC_FUNC___tracing_load,
+    __KVM_HOST_SMCCC_FUNC___tracing_unload,
+    __KVM_HOST_SMCCC_FUNC___tracing_enable,
+    __KVM_HOST_SMCCC_FUNC___tracing_swap_reader,
+    __KVM_HOST_SMCCC_FUNC___tracing_update_clock,
+    __KVM_HOST_SMCCC_FUNC___tracing_reset,
+    __KVM_HOST_SMCCC_FUNC___tracing_enable_event,
+    __KVM_HOST_SMCCC_FUNC___tracing_write_event,
     __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
     __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
     __KVM_HOST_SMCCC_FUNC___vgic_v5_save_apr,
     __KVM_HOST_SMCCC_FUNC___vgic_v5_restore_vmcr_apr,
-    __KVM_HOST_SMCCC_FUNC_MAX_NO_PKVM = __KVM_HOST_SMCCC_FUNC___vgic_v5_restore_vmcr_apr,
+
+    MARKER(__KVM_HOST_SMCCC_FUNC_PKVM_ONLY),
 
     /* Hypercalls that are available only when pKVM has finalised. */
     __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
@@ -100,14 +114,8 @@ enum __kvm_host_smccc_func {
     __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load,
     __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put,
    __KVM_HOST_SMCCC_FUNC___pkvm_tlb_flush_vmid,
-    __KVM_HOST_SMCCC_FUNC___tracing_load,
-    __KVM_HOST_SMCCC_FUNC___tracing_unload,
-    __KVM_HOST_SMCCC_FUNC___tracing_enable,
-    __KVM_HOST_SMCCC_FUNC___tracing_swap_reader,
-    __KVM_HOST_SMCCC_FUNC___tracing_update_clock,
-    __KVM_HOST_SMCCC_FUNC___tracing_reset,
-    __KVM_HOST_SMCCC_FUNC___tracing_enable_event,
-    __KVM_HOST_SMCCC_FUNC___tracing_write_event,
 
     MARKER(__KVM_HOST_SMCCC_FUNC_MAX)
 };
 
 #define DECLARE_KVM_VHE_SYM(sym) extern char sym[]
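The MARKER() trick leans on the fact that C enumerators default to the previous value plus one: expanding to "m, __after_##m = m - 1" lets the marker take the next value while winding the implicit counter back, so the marker aliases the entry that follows it and consumes no slot of its own. A standalone sketch of the idea, with invented names (not kernel code):

    #include <stdio.h>

    #define MARKER(m)	m, __after_##m = m - 1

    enum demo_func {
    	FN_A,			/* 0 */
    	FN_B,			/* 1 */
    	MARKER(DEMO_PRIV_ONLY),	/* DEMO_PRIV_ONLY == 2; counter rewound to 1 */
    	FN_C,			/* 2, same value as the marker */
    	FN_D,			/* 3 */
    	MARKER(DEMO_MAX)	/* DEMO_MAX == 4 == total number of FN_* entries */
    };

    int main(void)
    {
    	/* An id is "privileged" iff it falls in [DEMO_PRIV_ONLY, DEMO_MAX). */
    	printf("%d %d\n", DEMO_PRIV_ONLY, DEMO_MAX);	/* prints "2 4" */
    	return 0;
    }

This is what lets __KVM_HOST_SMCCC_FUNC_MIN_PKVM and friends track their neighbouring hypercall IDs automatically instead of being assigned by hand.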
@@ -450,9 +450,6 @@ struct kvm_vcpu_fault_info {
     r = __VNCR_START__ + ((VNCR_ ## r) / 8), \
     __after_##r = __MAX__(__before_##r - 1, r)
 
-#define MARKER(m) \
-    m, __after_##m = m - 1
-
 enum vcpu_sysreg {
     __INVALID_SYSREG__,   /* 0 is reserved as an invalid value */
     MPIDR_EL1,            /* MultiProcessor Affinity Register */
@@ -1548,7 +1545,7 @@ static inline bool __vcpu_has_feature(const struct kvm_arch *ka, int feature)
 #define kvm_vcpu_has_feature(k, f)	__vcpu_has_feature(&(k)->arch, (f))
 #define vcpu_has_feature(v, f)	__vcpu_has_feature(&(v)->kvm->arch, (f))
 
-#define kvm_vcpu_initialized(v) vcpu_get_flag(vcpu, VCPU_INITIALIZED)
+#define kvm_vcpu_initialized(v) vcpu_get_flag(v, VCPU_INITIALIZED)
 
 int kvm_trng_call(struct kvm_vcpu *vcpu);
 #ifdef CONFIG_KVM
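The one-character fix above is a classic macro-hygiene bug: the old body named `vcpu` instead of its parameter `v`, so it only worked because every call site happened to pass a variable literally called `vcpu`; any other argument would fail to compile or, worse, silently evaluate an unrelated `vcpu` in scope. A minimal illustration with invented helpers:

    struct kvm_vcpu;
    bool flag_of(struct kvm_vcpu *v);	/* stand-in for vcpu_get_flag() */

    #define BAD_INIT(v)		flag_of(vcpu)	/* expands to the name "vcpu"... */
    #define GOOD_INIT(v)	flag_of(v)	/* ...instead of the macro argument */

    void example(struct kvm_vcpu *target, struct kvm_vcpu *vcpu)
    {
    	bool a = BAD_INIT(target);	/* compiles, but checks 'vcpu', not 'target' */
    	bool b = GOOD_INIT(target);	/* checks 'target' as intended */
    	(void)a; (void)b;
    }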
@@ -824,6 +824,10 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
     bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF | HCR_VSE);
 
+    irq_lines |= (!irqchip_in_kernel(v->kvm) &&
+                  (kvm_timer_should_notify_user(v) ||
+                   kvm_pmu_should_notify_user(v)));
+
     return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
             && !kvm_arm_vcpu_stopped(v) && !v->arch.pause);
 }
@@ -131,7 +131,6 @@ struct reg_feat_map_desc {
 }
 
 #define FEAT_SPE	ID_AA64DFR0_EL1, PMSVer, IMP
-#define FEAT_SPE_FnE	ID_AA64DFR0_EL1, PMSVer, V1P2
 #define FEAT_BRBE	ID_AA64DFR0_EL1, BRBE, IMP
 #define FEAT_TRC_SR	ID_AA64DFR0_EL1, TraceVer, IMP
 #define FEAT_PMUv3	ID_AA64DFR0_EL1, PMUVer, IMP
@@ -192,7 +191,7 @@ struct reg_feat_map_desc {
 #define FEAT_SRMASK	ID_AA64MMFR4_EL1, SRMASK, IMP
 #define FEAT_PoPS	ID_AA64MMFR4_EL1, PoPS, IMP
 #define FEAT_PFAR	ID_AA64PFR1_EL1, PFAR, IMP
-#define FEAT_Debugv8p9	ID_AA64DFR0_EL1, PMUVer, V3P9
+#define FEAT_Debugv8p9	ID_AA64DFR0_EL1, DebugVer, V8P9
 #define FEAT_PMUv3_SS	ID_AA64DFR0_EL1, PMSS, IMP
 #define FEAT_SEBEP	ID_AA64DFR0_EL1, SEBEP, IMP
 #define FEAT_EBEP	ID_AA64DFR1_EL1, EBEP, IMP
@@ -283,7 +282,7 @@ static bool feat_anerr(struct kvm *kvm)
 static bool feat_sme_smps(struct kvm *kvm)
 {
     /*
-     * Revists this if KVM ever supports SME -- this really should
+     * Revisit this if KVM ever supports SME -- this really should
      * look at the guest's view of SMIDR_EL1. Funnily enough, this
      * is not captured in the JSON file, but only as a note in the
      * ARM ARM.
@@ -295,17 +294,27 @@ static bool feat_sme_smps(struct kvm *kvm)
 static bool feat_spe_fds(struct kvm *kvm)
 {
     /*
-     * Revists this if KVM ever supports SPE -- this really should
+     * Revisit this if KVM ever supports SPE -- this really should
      * look at the guest's view of PMSIDR_EL1.
      */
     return (kvm_has_feat(kvm, FEAT_SPEv1p4) &&
             (read_sysreg_s(SYS_PMSIDR_EL1) & PMSIDR_EL1_FDS));
 }
 
+static bool feat_spe_fne(struct kvm *kvm)
+{
+    /*
+     * Revisit this if KVM ever supports SPE -- this really should
+     * look at the guest's view of PMSIDR_EL1.
+     */
+    return (kvm_has_feat(kvm, FEAT_SPEv1p2) &&
+            (read_sysreg_s(SYS_PMSIDR_EL1) & PMSIDR_EL1_FnE));
+}
+
 static bool feat_trbe_mpam(struct kvm *kvm)
 {
     /*
-     * Revists this if KVM ever supports both MPAM and TRBE --
+     * Revisit this if KVM ever supports both MPAM and TRBE --
      * this really should look at the guest's view of TRBIDR_EL1.
      */
     return (kvm_has_feat(kvm, FEAT_TRBE) &&
@@ -537,7 +546,7 @@ static const struct reg_bits_to_feat_map hdfgrtr_feat_map[] = {
                HDFGRTR_EL2_PMBPTR_EL1 |
                HDFGRTR_EL2_PMBLIMITR_EL1,
                FEAT_SPE),
-    NEEDS_FEAT(HDFGRTR_EL2_nPMSNEVFR_EL1, FEAT_SPE_FnE),
+    NEEDS_FEAT(HDFGRTR_EL2_nPMSNEVFR_EL1, feat_spe_fne),
     NEEDS_FEAT(HDFGRTR_EL2_nBRBDATA |
                HDFGRTR_EL2_nBRBCTL |
                HDFGRTR_EL2_nBRBIDR,
@@ -605,7 +614,7 @@ static const struct reg_bits_to_feat_map hdfgwtr_feat_map[] = {
                HDFGWTR_EL2_PMBPTR_EL1 |
                HDFGWTR_EL2_PMBLIMITR_EL1,
                FEAT_SPE),
-    NEEDS_FEAT(HDFGWTR_EL2_nPMSNEVFR_EL1, FEAT_SPE_FnE),
+    NEEDS_FEAT(HDFGWTR_EL2_nPMSNEVFR_EL1, feat_spe_fne),
     NEEDS_FEAT(HDFGWTR_EL2_nBRBDATA |
                HDFGWTR_EL2_nBRBCTL,
                FEAT_BRBE),
@@ -709,6 +709,14 @@ static const hcall_t host_hcall[] = {
     HANDLE_FUNC(__kvm_tlb_flush_vmid_range),
     HANDLE_FUNC(__kvm_flush_cpu_context),
     HANDLE_FUNC(__kvm_timer_set_cntvoff),
+    HANDLE_FUNC(__tracing_load),
+    HANDLE_FUNC(__tracing_unload),
+    HANDLE_FUNC(__tracing_enable),
+    HANDLE_FUNC(__tracing_swap_reader),
+    HANDLE_FUNC(__tracing_update_clock),
+    HANDLE_FUNC(__tracing_reset),
+    HANDLE_FUNC(__tracing_enable_event),
+    HANDLE_FUNC(__tracing_write_event),
     HANDLE_FUNC(__vgic_v3_save_aprs),
     HANDLE_FUNC(__vgic_v3_restore_vmcr_aprs),
     HANDLE_FUNC(__vgic_v5_save_apr),
@@ -735,22 +743,16 @@ static const hcall_t host_hcall[] = {
     HANDLE_FUNC(__pkvm_vcpu_load),
     HANDLE_FUNC(__pkvm_vcpu_put),
     HANDLE_FUNC(__pkvm_tlb_flush_vmid),
-    HANDLE_FUNC(__tracing_load),
-    HANDLE_FUNC(__tracing_unload),
-    HANDLE_FUNC(__tracing_enable),
-    HANDLE_FUNC(__tracing_swap_reader),
-    HANDLE_FUNC(__tracing_update_clock),
-    HANDLE_FUNC(__tracing_reset),
-    HANDLE_FUNC(__tracing_enable_event),
-    HANDLE_FUNC(__tracing_write_event),
 };
 
 static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
 {
     DECLARE_REG(unsigned long, id, host_ctxt, 0);
-    unsigned long hcall_min = 0, hcall_max = -1;
+    unsigned long hcall_min = 0, hcall_max = __KVM_HOST_SMCCC_FUNC_MAX;
     hcall_t hfn;
 
     BUILD_BUG_ON(ARRAY_SIZE(host_hcall) != __KVM_HOST_SMCCC_FUNC_MAX);
 
     /*
      * If pKVM has been initialised then reject any calls to the
      * early "privileged" hypercalls. Note that we cannot reject
@@ -763,16 +765,14 @@ static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
     if (static_branch_unlikely(&kvm_protected_mode_initialized)) {
         hcall_min = __KVM_HOST_SMCCC_FUNC_MIN_PKVM;
     } else {
-        hcall_max = __KVM_HOST_SMCCC_FUNC_MAX_NO_PKVM;
+        hcall_max = __KVM_HOST_SMCCC_FUNC_PKVM_ONLY;
     }
 
     id &= ~ARM_SMCCC_CALL_HINTS;
     id -= KVM_HOST_SMCCC_ID(0);
 
-    if (unlikely(id < hcall_min || id > hcall_max ||
-                 id >= ARRAY_SIZE(host_hcall))) {
+    if (unlikely(id < hcall_min || id >= hcall_max))
         goto inval;
-    }
 
     hfn = host_hcall[id];
     if (unlikely(!hfn))
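With both bounds coming from enum markers, dispatch reduces to a single half-open interval test, [hcall_min, hcall_max), and the separate ARRAY_SIZE() comparison disappears (the BUILD_BUG_ON pins the array length to the same marker). A self-contained sketch of the pattern, constants invented for illustration:

    #include <stdbool.h>

    enum { HCALL_MIN_PKVM = 3, HCALL_PKVM_ONLY = 7, HCALL_MAX = 10 };

    /* Accept id iff it lies in the half-open range [min, max). */
    static bool hcall_in_range(unsigned long id, bool pkvm_finalised)
    {
    	unsigned long min = pkvm_finalised ? HCALL_MIN_PKVM : 0;
    	unsigned long max = pkvm_finalised ? HCALL_MAX : HCALL_PKVM_ONLY;

    	return id >= min && id < max;
    }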
@@ -805,6 +805,10 @@ static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
     }
 
     func_id &= ~ARM_SMCCC_CALL_HINTS;
+    if (upper_32_bits(func_id)) {
+        cpu_reg(host_ctxt, 0) = SMCCC_RET_NOT_SUPPORTED;
+        goto exit_skip_instr;
+    }
 
     handled = kvm_host_psci_handler(host_ctxt, func_id);
     if (!handled)
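The rationale for the new check: SMCCC function identifiers are 32-bit values carried in W0, so a caller that sets any of bits [63:32] is out of spec and must get SMCCC_RET_NOT_SUPPORTED back rather than having the ID silently truncated and relayed. A small sketch of the classification:

    #include <stdint.h>
    #include <stdbool.h>

    /* A compliant SMCCC function ID fits entirely in the low 32 bits of X0. */
    static bool smccc_func_id_compliant(uint64_t x0)
    {
    	return (x0 >> 32) == 0;
    }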
@@ -266,7 +266,8 @@ struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
     if (hyp_vm->kvm.created_vcpus <= vcpu_idx)
         goto unlock;
 
-    hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
+    /* Pairs with smp_store_release() in register_hyp_vcpu(). */
+    hyp_vcpu = smp_load_acquire(&hyp_vm->vcpus[vcpu_idx]);
     if (!hyp_vcpu)
         goto unlock;
@@ -860,12 +861,30 @@ int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
  * the page-aligned size of 'struct pkvm_hyp_vcpu'.
+ * Return 0 on success, negative error code on failure.
  */
+static int register_hyp_vcpu(struct pkvm_hyp_vm *hyp_vm,
+                             struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+    unsigned int idx = hyp_vcpu->vcpu.vcpu_idx;
+
+    if (idx >= hyp_vm->kvm.created_vcpus)
+        return -EINVAL;
+
+    if (hyp_vm->vcpus[idx])
+        return -EINVAL;
+
+    /*
+     * Ensure the hyp_vcpu is initialised before publishing it to
+     * the vCPU-load path via 'hyp_vm->vcpus[]'.
+     */
+    smp_store_release(&hyp_vm->vcpus[idx], hyp_vcpu);
+    return 0;
+}
+
 int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
                      unsigned long vcpu_hva)
 {
     struct pkvm_hyp_vcpu *hyp_vcpu;
     struct pkvm_hyp_vm *hyp_vm;
-    unsigned int idx;
     int ret;
 
     hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu));
@@ -884,18 +903,11 @@ int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
     if (ret)
         goto unlock;
 
-    idx = hyp_vcpu->vcpu.vcpu_idx;
-    if (idx >= hyp_vm->kvm.created_vcpus) {
-        ret = -EINVAL;
-        goto unlock;
+    ret = register_hyp_vcpu(hyp_vm, hyp_vcpu);
+    if (ret) {
+        unpin_host_vcpu(host_vcpu);
+        unpin_host_sve_state(hyp_vcpu);
     }
-
-    if (hyp_vm->vcpus[idx]) {
-        ret = -EINVAL;
-        goto unlock;
-    }
-
-    hyp_vm->vcpus[idx] = hyp_vcpu;
 unlock:
     hyp_spin_unlock(&vm_table_lock);
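The smp_store_release()/smp_load_acquire() pair introduced here is the standard lock-free publication idiom: the release store orders all initialising writes before the pointer becomes visible, and the paired acquire load guarantees that a reader who sees the pointer also sees the initialised contents. The same idiom in portable C11 atomics (illustrative, not the kernel primitives):

    #include <stdatomic.h>
    #include <stddef.h>

    struct obj {
    	int state;
    };

    static _Atomic(struct obj *) slot;

    /* Writer: fully initialise, then publish with release semantics. */
    void publish(struct obj *o)
    {
    	o->state = 42;			/* must be visible before the pointer is */
    	atomic_store_explicit(&slot, o, memory_order_release);
    }

    /* Reader: the acquire load pairs with the release store above. */
    int consume(void)
    {
    	struct obj *o = atomic_load_explicit(&slot, memory_order_acquire);

    	return o ? o->state : -1;	/* if o is seen, o->state == 42 is seen too */
    }

Folding the bounds and duplicate checks into register_hyp_vcpu() also closes the pin leak mentioned in the merge message: on failure, the host vCPU and SVE-state pins are now dropped instead of leaking.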
@@ -312,10 +312,6 @@ void __noreturn __pkvm_init_finalise(void)
     };
     pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;
 
-    ret = fix_host_ownership();
-    if (ret)
-        goto out;
-
     ret = fix_hyp_pgtable_refcnt();
     if (ret)
         goto out;
@@ -324,6 +320,10 @@ void __noreturn __pkvm_init_finalise(void)
     if (ret)
         goto out;
 
+    ret = fix_host_ownership();
+    if (ret)
+        goto out;
+
     ret = hyp_ffa_init(ffa_proxy_pages);
     if (ret)
         goto out;
@@ -91,7 +91,7 @@ static int vgic_mmio_uaccess_write_v2_misc(struct kvm_vcpu *vcpu,
      * migration from old kernels to new kernels with legacy
      * userspace.
      */
-    reg = FIELD_GET(GICD_IIDR_REVISION_MASK, reg);
+    reg = FIELD_GET(GICD_IIDR_REVISION_MASK, val);
     switch (reg) {
     case KVM_VGIC_IMP_REV_2:
     case KVM_VGIC_IMP_REV_3:
@@ -194,7 +194,7 @@ static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu,
     if ((reg ^ val) & ~GICD_IIDR_REVISION_MASK)
         return -EINVAL;
 
-    reg = FIELD_GET(GICD_IIDR_REVISION_MASK, reg);
+    reg = FIELD_GET(GICD_IIDR_REVISION_MASK, val);
     switch (reg) {
     case KVM_VGIC_IMP_REV_2:
     case KVM_VGIC_IMP_REV_3:
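The bug in both helpers was extracting the revision field from `reg` (the register's current value) rather than `val` (the value userspace is writing), so a new revision could never take effect. FIELD_GET() just masks a field and shifts it down to bit 0; a standalone sketch with a simplified mask placement:

    #include <stdint.h>
    #include <stdio.h>

    #define IIDR_REVISION_MASK	0x000f0000u	/* illustrative field placement */

    /* Minimal FIELD_GET-alike: mask, then divide by the mask's lowest set bit. */
    #define FIELD_GET_SIMPLE(mask, word) \
    	(((word) & (mask)) / ((mask) & ~((mask) << 1)))

    int main(void)
    {
    	uint32_t reg = 0x00020000;	/* current revision: 2 */
    	uint32_t val = 0x00030000;	/* userspace writes revision 3 */

    	printf("%u\n", FIELD_GET_SIMPLE(IIDR_REVISION_MASK, reg));	/* 2: the old code re-read this */
    	printf("%u\n", FIELD_GET_SIMPLE(IIDR_REVISION_MASK, val));	/* 3: the fix honours the write */
    	return 0;
    }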
@@ -101,15 +101,15 @@ struct test_params {
     enum vm_mem_backing_src_type backing_src;
 
     /* The amount of memory to allocate for each vCPU. */
-    uint64_t vcpu_memory_bytes;
+    u64 vcpu_memory_bytes;
 
     /* The number of vCPUs to create in the VM. */
     int nr_vcpus;
 };
 
-static uint64_t pread_uint64(int fd, const char *filename, uint64_t index)
+static u64 pread_u64(int fd, const char *filename, u64 index)
 {
-    uint64_t value;
+    u64 value;
     off_t offset = index * sizeof(value);
 
     TEST_ASSERT(pread(fd, &value, sizeof(value), offset) == sizeof(value),
@@ -123,13 +123,13 @@ static u64 pread_u64(int fd, const char *filename, u64 index)
 #define PAGEMAP_PRESENT		(1ULL << 63)
 #define PAGEMAP_PFN_MASK	((1ULL << 55) - 1)
 
-static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva)
+static u64 lookup_pfn(int pagemap_fd, struct kvm_vm *vm, gva_t gva)
 {
-    uint64_t hva = (uint64_t) addr_gva2hva(vm, gva);
-    uint64_t entry;
-    uint64_t pfn;
+    u64 hva = (u64)addr_gva2hva(vm, gva);
+    u64 entry;
+    u64 pfn;
 
-    entry = pread_uint64(pagemap_fd, "pagemap", hva / getpagesize());
+    entry = pread_u64(pagemap_fd, "pagemap", hva / getpagesize());
     if (!(entry & PAGEMAP_PRESENT))
         return 0;
 
@@ -139,16 +139,16 @@ static u64 lookup_pfn(int pagemap_fd, struct kvm_vm *vm, gva_t gva)
     return pfn;
 }
 
-static bool is_page_idle(int page_idle_fd, uint64_t pfn)
+static bool is_page_idle(int page_idle_fd, u64 pfn)
 {
-    uint64_t bits = pread_uint64(page_idle_fd, "page_idle", pfn / 64);
+    u64 bits = pread_u64(page_idle_fd, "page_idle", pfn / 64);
 
     return !!((bits >> (pfn % 64)) & 1);
 }
 
-static void mark_page_idle(int page_idle_fd, uint64_t pfn)
+static void mark_page_idle(int page_idle_fd, u64 pfn)
 {
-    uint64_t bits = 1ULL << (pfn % 64);
+    u64 bits = 1ULL << (pfn % 64);
 
     TEST_ASSERT(pwrite(page_idle_fd, &bits, 8, 8 * (pfn / 64)) == 8,
                 "Set page_idle bits for PFN 0x%" PRIx64, pfn);
@@ -174,11 +174,11 @@ static void pageidle_mark_vcpu_memory_idle(struct kvm_vm *vm,
                                            struct memstress_vcpu_args *vcpu_args)
 {
     int vcpu_idx = vcpu_args->vcpu_idx;
-    uint64_t base_gva = vcpu_args->gva;
-    uint64_t pages = vcpu_args->pages;
-    uint64_t page;
-    uint64_t still_idle = 0;
-    uint64_t no_pfn = 0;
+    gva_t base_gva = vcpu_args->gva;
+    u64 pages = vcpu_args->pages;
+    u64 page;
+    u64 still_idle = 0;
+    u64 no_pfn = 0;
     int page_idle_fd;
     int pagemap_fd;
 
@@ -193,8 +193,8 @@ static void pageidle_mark_vcpu_memory_idle(struct kvm_vm *vm,
     TEST_ASSERT(pagemap_fd > 0, "Failed to open pagemap.");
 
     for (page = 0; page < pages; page++) {
-        uint64_t gva = base_gva + page * memstress_args.guest_page_size;
-        uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva);
+        gva_t gva = base_gva + page * memstress_args.guest_page_size;
+        u64 pfn = lookup_pfn(pagemap_fd, vm, gva);
 
         if (!pfn) {
             no_pfn++;
@@ -297,10 +297,10 @@ static void lru_gen_mark_memory_idle(struct kvm_vm *vm)
     lru_gen_last_gen = new_gen;
 }
 
-static void assert_ucall(struct kvm_vcpu *vcpu, uint64_t expected_ucall)
+static void assert_ucall(struct kvm_vcpu *vcpu, u64 expected_ucall)
 {
     struct ucall uc;
-    uint64_t actual_ucall = get_ucall(vcpu, &uc);
+    u64 actual_ucall = get_ucall(vcpu, &uc);
 
     TEST_ASSERT(expected_ucall == actual_ucall,
                 "Guest exited unexpectedly (expected ucall %" PRIu64
@@ -417,7 +417,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
      */
     test_pages = params->nr_vcpus * params->vcpu_memory_bytes /
                  max(memstress_args.guest_page_size,
-                     (uint64_t)getpagesize());
+                     (u64)getpagesize());
 
     memstress_start_vcpu_threads(nr_vcpus, vcpu_thread_main);
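Everything from here down is the mechanical rename announced in the merge message: stdint-style names give way to the kernel's fixed-width spellings, plus gva_t/gpa_t for guest addresses. Under the hood these are plain typedefs; a sketch of the mapping, assuming the selftest headers define them along these lines:

    #include <stdint.h>

    /* Kernel-style fixed-width integer aliases. */
    typedef uint8_t  u8;
    typedef uint16_t u16;
    typedef uint32_t u32;
    typedef uint64_t u64;
    typedef int8_t   s8;
    typedef int16_t  s16;
    typedef int32_t  s32;
    typedef int64_t  s64;

    /* Address types named for what they address, not their width. */
    typedef u64 gva_t;	/* guest virtual address (was vm_vaddr_t) */
    typedef u64 gpa_t;	/* guest physical address (was vm_paddr_t) */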
@@ -78,9 +78,9 @@ static void *test_vcpu_run(void *arg)
     return NULL;
 }
 
-static uint32_t test_get_pcpu(void)
+static u32 test_get_pcpu(void)
 {
-    uint32_t pcpu;
+    u32 pcpu;
     unsigned int nproc_conf;
     cpu_set_t online_cpuset;
 
@@ -98,7 +98,7 @@ static u32 test_get_pcpu(void)
 static int test_migrate_vcpu(unsigned int vcpu_idx)
 {
     int ret;
-    uint32_t new_pcpu = test_get_pcpu();
+    u32 new_pcpu = test_get_pcpu();
 
     pr_debug("Migrating vCPU: %u to pCPU: %u\n", vcpu_idx, new_pcpu);
@@ -66,7 +66,7 @@ static void test_guest_raz(struct kvm_vcpu *vcpu)
     }
 }
 
-static uint64_t raz_wi_reg_ids[] = {
+static u64 raz_wi_reg_ids[] = {
     KVM_ARM64_SYS_REG(SYS_ID_PFR0_EL1),
     KVM_ARM64_SYS_REG(SYS_ID_PFR1_EL1),
     KVM_ARM64_SYS_REG(SYS_ID_DFR0_EL1),
@@ -94,8 +94,8 @@ static void test_user_raz_wi(struct kvm_vcpu *vcpu)
     int i;
 
     for (i = 0; i < ARRAY_SIZE(raz_wi_reg_ids); i++) {
-        uint64_t reg_id = raz_wi_reg_ids[i];
-        uint64_t val;
+        u64 reg_id = raz_wi_reg_ids[i];
+        u64 val;
 
         val = vcpu_get_reg(vcpu, reg_id);
         TEST_ASSERT_EQ(val, 0);
@@ -111,7 +111,7 @@ static void test_user_raz_wi(struct kvm_vcpu *vcpu)
     }
 }
 
-static uint64_t raz_invariant_reg_ids[] = {
+static u64 raz_invariant_reg_ids[] = {
     KVM_ARM64_SYS_REG(SYS_ID_AFR0_EL1),
     KVM_ARM64_SYS_REG(sys_reg(3, 0, 0, 3, 3)),
     KVM_ARM64_SYS_REG(SYS_ID_DFR1_EL1),
@@ -123,8 +123,8 @@ static void test_user_raz_invariant(struct kvm_vcpu *vcpu)
     int i, r;
 
     for (i = 0; i < ARRAY_SIZE(raz_invariant_reg_ids); i++) {
-        uint64_t reg_id = raz_invariant_reg_ids[i];
-        uint64_t val;
+        u64 reg_id = raz_invariant_reg_ids[i];
+        u64 val;
 
         val = vcpu_get_reg(vcpu, reg_id);
         TEST_ASSERT_EQ(val, 0);
@@ -142,7 +142,7 @@ static void test_user_raz_invariant(struct kvm_vcpu *vcpu)
 
 static bool vcpu_aarch64_only(struct kvm_vcpu *vcpu)
 {
-    uint64_t val, el0;
+    u64 val, el0;
 
     val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
@@ -56,7 +56,7 @@ static void guest_validate_irq(unsigned int intid,
                                struct test_vcpu_shared_data *shared_data)
 {
     enum guest_stage stage = shared_data->guest_stage;
-    uint64_t xcnt = 0, xcnt_diff_us, cval = 0;
+    u64 xcnt = 0, xcnt_diff_us, cval = 0;
     unsigned long xctl = 0;
     unsigned int timer_irq = 0;
     unsigned int accessor;
@@ -105,7 +105,7 @@ static void guest_validate_irq(unsigned int intid,
 static void guest_irq_handler(struct ex_regs *regs)
 {
     unsigned int intid = gic_get_and_ack_irq();
-    uint32_t cpu = guest_get_vcpuid();
+    u32 cpu = guest_get_vcpuid();
     struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
 
     guest_validate_irq(intid, shared_data);
@@ -116,7 +116,7 @@ static void guest_irq_handler(struct ex_regs *regs)
 static void guest_run_stage(struct test_vcpu_shared_data *shared_data,
                             enum guest_stage stage)
 {
-    uint32_t irq_iter, config_iter;
+    u32 irq_iter, config_iter;
 
     shared_data->guest_stage = stage;
     shared_data->nr_iter = 0;
@@ -140,7 +140,7 @@ static void guest_run_stage(struct test_vcpu_shared_data *shared_data,
 
 static void guest_code(void)
 {
-    uint32_t cpu = guest_get_vcpuid();
+    u32 cpu = guest_get_vcpuid();
     struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
 
     local_irq_disable();
@@ -23,25 +23,25 @@
 #include "vgic.h"
 
 /* Depends on counter width. */
-static uint64_t CVAL_MAX;
+static u64 CVAL_MAX;
 /* tval is a signed 32-bit int. */
-static const int32_t TVAL_MAX = INT32_MAX;
-static const int32_t TVAL_MIN = INT32_MIN;
+static const s32 TVAL_MAX = INT32_MAX;
+static const s32 TVAL_MIN = INT32_MIN;
 
 /* After how much time we say there is no IRQ. */
-static const uint32_t TIMEOUT_NO_IRQ_US = 50000;
+static const u32 TIMEOUT_NO_IRQ_US = 50000;
 
 /* Counter value to use as the starting one for most tests. Set to CVAL_MAX/2 */
-static uint64_t DEF_CNT;
+static u64 DEF_CNT;
 
 /* Number of runs. */
-static const uint32_t NR_TEST_ITERS_DEF = 5;
+static const u32 NR_TEST_ITERS_DEF = 5;
 
 /* Default wait test time in ms. */
-static const uint32_t WAIT_TEST_MS = 10;
+static const u32 WAIT_TEST_MS = 10;
 
 /* Default "long" wait test time in ms. */
-static const uint32_t LONG_WAIT_TEST_MS = 100;
+static const u32 LONG_WAIT_TEST_MS = 100;
 
 /* Shared with IRQ handler. */
 struct test_vcpu_shared_data {
@@ -53,9 +53,9 @@ struct test_args {
     /* Virtual or physical timer and counter tests. */
     enum arch_timer timer;
     /* Delay used for most timer tests. */
-    uint64_t wait_ms;
+    u64 wait_ms;
     /* Delay used in the test_long_timer_delays test. */
-    uint64_t long_wait_ms;
+    u64 long_wait_ms;
     /* Number of iterations. */
     int iterations;
     /* Whether to test the physical timer. */
@@ -82,12 +82,12 @@ enum sync_cmd {
     NO_USERSPACE_CMD,
 };
 
-typedef void (*sleep_method_t)(enum arch_timer timer, uint64_t usec);
+typedef void (*sleep_method_t)(enum arch_timer timer, u64 usec);
 
-static void sleep_poll(enum arch_timer timer, uint64_t usec);
-static void sleep_sched_poll(enum arch_timer timer, uint64_t usec);
-static void sleep_in_userspace(enum arch_timer timer, uint64_t usec);
-static void sleep_migrate(enum arch_timer timer, uint64_t usec);
+static void sleep_poll(enum arch_timer timer, u64 usec);
+static void sleep_sched_poll(enum arch_timer timer, u64 usec);
+static void sleep_in_userspace(enum arch_timer timer, u64 usec);
+static void sleep_migrate(enum arch_timer timer, u64 usec);
 
 sleep_method_t sleep_method[] = {
     sleep_poll,
@@ -115,14 +115,14 @@ enum timer_view {
     TIMER_TVAL,
 };
 
-static void assert_irqs_handled(uint32_t n)
+static void assert_irqs_handled(u32 n)
 {
     int h = atomic_read(&shared_data.handled);
 
     __GUEST_ASSERT(h == n, "Handled %d IRQS but expected %d", h, n);
 }
 
-static void userspace_cmd(uint64_t cmd)
+static void userspace_cmd(u64 cmd)
 {
     GUEST_SYNC_ARGS(cmd, 0, 0, 0, 0);
 }
@@ -132,12 +132,12 @@ static void userspace_migrate_vcpu(void)
     userspace_cmd(USERSPACE_MIGRATE_SELF);
 }
 
-static void userspace_sleep(uint64_t usecs)
+static void userspace_sleep(u64 usecs)
 {
     GUEST_SYNC_ARGS(USERSPACE_USLEEP, usecs, 0, 0, 0);
 }
 
-static void set_counter(enum arch_timer timer, uint64_t counter)
+static void set_counter(enum arch_timer timer, u64 counter)
 {
     GUEST_SYNC_ARGS(SET_COUNTER_VALUE, counter, timer, 0, 0);
 }
@@ -146,8 +146,8 @@ static void guest_irq_handler(struct ex_regs *regs)
 {
     unsigned int intid = gic_get_and_ack_irq();
     enum arch_timer timer;
-    uint64_t cnt, cval;
-    uint32_t ctl;
+    u64 cnt, cval;
+    u32 ctl;
     bool timer_condition, istatus;
 
     if (intid == IAR_SPURIOUS) {
@@ -178,8 +178,8 @@ static void guest_irq_handler(struct ex_regs *regs)
     gic_set_eoi(intid);
 }
 
-static void set_cval_irq(enum arch_timer timer, uint64_t cval_cycles,
-                         uint32_t ctl)
+static void set_cval_irq(enum arch_timer timer, u64 cval_cycles,
+                         u32 ctl)
 {
     atomic_set(&shared_data.handled, 0);
     atomic_set(&shared_data.spurious, 0);
@@ -187,8 +187,8 @@ static void set_cval_irq(enum arch_timer timer, u64 cval_cycles,
     timer_set_ctl(timer, ctl);
 }
 
-static void set_tval_irq(enum arch_timer timer, uint64_t tval_cycles,
-                         uint32_t ctl)
+static void set_tval_irq(enum arch_timer timer, u64 tval_cycles,
+                         u32 ctl)
 {
     atomic_set(&shared_data.handled, 0);
     atomic_set(&shared_data.spurious, 0);
@@ -196,7 +196,7 @@ static void set_tval_irq(enum arch_timer timer, u64 tval_cycles,
     timer_set_ctl(timer, ctl);
 }
 
-static void set_xval_irq(enum arch_timer timer, uint64_t xval, uint32_t ctl,
+static void set_xval_irq(enum arch_timer timer, u64 xval, u32 ctl,
                          enum timer_view tv)
 {
     switch (tv) {
@@ -275,13 +275,13 @@ static void wait_migrate_poll_for_irq(void)
 * Sleep for usec microseconds by polling in the guest or in
 * userspace (e.g. userspace_cmd=USERSPACE_SCHEDULE).
 */
-static void guest_poll(enum arch_timer test_timer, uint64_t usec,
+static void guest_poll(enum arch_timer test_timer, u64 usec,
                        enum sync_cmd usp_cmd)
 {
-    uint64_t cycles = usec_to_cycles(usec);
+    u64 cycles = usec_to_cycles(usec);
     /* Whichever timer we are testing with, sleep with the other. */
     enum arch_timer sleep_timer = 1 - test_timer;
-    uint64_t start = timer_get_cntct(sleep_timer);
+    u64 start = timer_get_cntct(sleep_timer);
 
     while ((timer_get_cntct(sleep_timer) - start) < cycles) {
         if (usp_cmd == NO_USERSPACE_CMD)
@@ -291,22 +291,22 @@ static void guest_poll(enum arch_timer test_timer, u64 usec,
     }
 }
 
-static void sleep_poll(enum arch_timer timer, uint64_t usec)
+static void sleep_poll(enum arch_timer timer, u64 usec)
 {
     guest_poll(timer, usec, NO_USERSPACE_CMD);
 }
 
-static void sleep_sched_poll(enum arch_timer timer, uint64_t usec)
+static void sleep_sched_poll(enum arch_timer timer, u64 usec)
 {
     guest_poll(timer, usec, USERSPACE_SCHED_YIELD);
 }
 
-static void sleep_migrate(enum arch_timer timer, uint64_t usec)
+static void sleep_migrate(enum arch_timer timer, u64 usec)
 {
     guest_poll(timer, usec, USERSPACE_MIGRATE_SELF);
 }
 
-static void sleep_in_userspace(enum arch_timer timer, uint64_t usec)
+static void sleep_in_userspace(enum arch_timer timer, u64 usec)
 {
     userspace_sleep(usec);
 }
@@ -315,15 +315,15 @@ static void sleep_in_userspace(enum arch_timer timer, u64 usec)
 * Reset the timer state to some nice values like the counter not being close
 * to the edge, and the control register masked and disabled.
 */
-static void reset_timer_state(enum arch_timer timer, uint64_t cnt)
+static void reset_timer_state(enum arch_timer timer, u64 cnt)
 {
     set_counter(timer, cnt);
     timer_set_ctl(timer, CTL_IMASK);
 }
 
-static void test_timer_xval(enum arch_timer timer, uint64_t xval,
+static void test_timer_xval(enum arch_timer timer, u64 xval,
                             enum timer_view tv, irq_wait_method_t wm, bool reset_state,
-                            uint64_t reset_cnt)
+                            u64 reset_cnt)
 {
     local_irq_disable();
 
@@ -348,23 +348,23 @@ static void test_timer_xval(enum arch_timer timer, u64 xval,
 * the "runner", like: tools/testing/selftests/kselftest/runner.sh.
 */
 
-static void test_timer_cval(enum arch_timer timer, uint64_t cval,
+static void test_timer_cval(enum arch_timer timer, u64 cval,
                             irq_wait_method_t wm, bool reset_state,
-                            uint64_t reset_cnt)
+                            u64 reset_cnt)
 {
     test_timer_xval(timer, cval, TIMER_CVAL, wm, reset_state, reset_cnt);
 }
 
-static void test_timer_tval(enum arch_timer timer, int32_t tval,
+static void test_timer_tval(enum arch_timer timer, s32 tval,
                             irq_wait_method_t wm, bool reset_state,
-                            uint64_t reset_cnt)
+                            u64 reset_cnt)
 {
-    test_timer_xval(timer, (uint64_t) tval, TIMER_TVAL, wm, reset_state,
+    test_timer_xval(timer, (u64)tval, TIMER_TVAL, wm, reset_state,
                     reset_cnt);
 }
 
-static void test_xval_check_no_irq(enum arch_timer timer, uint64_t xval,
-                                   uint64_t usec, enum timer_view timer_view,
+static void test_xval_check_no_irq(enum arch_timer timer, u64 xval,
+                                   u64 usec, enum timer_view timer_view,
                                    sleep_method_t guest_sleep)
 {
     local_irq_disable();
@@ -379,17 +379,17 @@ static void test_xval_check_no_irq(enum arch_timer timer, u64 xval,
     assert_irqs_handled(0);
 }
 
-static void test_cval_no_irq(enum arch_timer timer, uint64_t cval,
-                             uint64_t usec, sleep_method_t wm)
+static void test_cval_no_irq(enum arch_timer timer, u64 cval,
+                             u64 usec, sleep_method_t wm)
 {
     test_xval_check_no_irq(timer, cval, usec, TIMER_CVAL, wm);
 }
 
-static void test_tval_no_irq(enum arch_timer timer, int32_t tval, uint64_t usec,
+static void test_tval_no_irq(enum arch_timer timer, s32 tval, u64 usec,
                              sleep_method_t wm)
 {
-    /* tval will be cast to an int32_t in test_xval_check_no_irq */
-    test_xval_check_no_irq(timer, (uint64_t) tval, usec, TIMER_TVAL, wm);
+    /* tval will be cast to an s32 in test_xval_check_no_irq */
+    test_xval_check_no_irq(timer, (u64)tval, usec, TIMER_TVAL, wm);
 }
 
 /* Test masking/unmasking a timer using the timer mask (not the IRQ mask). */
@@ -463,7 +463,7 @@ static void test_timers_fired_multiple_times(enum arch_timer timer)
 * timeout for the wait: we use the wfi instruction.
 */
 static void test_reprogramming_timer(enum arch_timer timer, irq_wait_method_t wm,
-                                     int32_t delta_1_ms, int32_t delta_2_ms)
+                                     s32 delta_1_ms, s32 delta_2_ms)
 {
     local_irq_disable();
     reset_timer_state(timer, DEF_CNT);
@@ -488,7 +488,7 @@ static void test_reprogramming_timer(enum arch_timer timer, irq_wait_method_t wm
 static void test_reprogram_timers(enum arch_timer timer)
 {
     int i;
-    uint64_t base_wait = test_args.wait_ms;
+    u64 base_wait = test_args.wait_ms;
 
     for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
         /*
@@ -504,8 +504,8 @@ static void test_reprogram_timers(enum arch_timer timer)
 
 static void test_basic_functionality(enum arch_timer timer)
 {
-    int32_t tval = (int32_t) msec_to_cycles(test_args.wait_ms);
-    uint64_t cval = DEF_CNT + msec_to_cycles(test_args.wait_ms);
+    s32 tval = (s32)msec_to_cycles(test_args.wait_ms);
+    u64 cval = DEF_CNT + msec_to_cycles(test_args.wait_ms);
     int i;
 
     for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
@@ -593,7 +593,7 @@ static void test_set_cnt_after_tval_max(enum arch_timer timer, irq_wait_method_t
     reset_timer_state(timer, DEF_CNT);
 
     set_cval_irq(timer,
-                 (uint64_t) TVAL_MAX +
+                 (u64)TVAL_MAX +
                  msec_to_cycles(test_args.wait_ms) / 2, CTL_ENABLE);
 
     set_counter(timer, TVAL_MAX);
@@ -608,7 +608,7 @@ static void test_set_cnt_after_tval_max(enum arch_timer timer, irq_wait_method_t
 /* Test timers set for: cval = now + TVAL_MAX + wait_ms / 2 */
 static void test_timers_above_tval_max(enum arch_timer timer)
 {
-    uint64_t cval;
+    u64 cval;
     int i;
 
     /*
@@ -638,8 +638,8 @@ static void test_timers_above_tval_max(enum arch_timer timer)
 * sets the counter to cnt_1, the [c|t]val, the counter to cnt_2, and
 * then waits for an IRQ.
 */
-static void test_set_cnt_after_xval(enum arch_timer timer, uint64_t cnt_1,
-                                    uint64_t xval, uint64_t cnt_2,
+static void test_set_cnt_after_xval(enum arch_timer timer, u64 cnt_1,
+                                    u64 xval, u64 cnt_2,
                                     irq_wait_method_t wm, enum timer_view tv)
 {
     local_irq_disable();
@@ -662,8 +662,8 @@ static void test_set_cnt_after_xval(enum arch_timer timer, u64 cnt_1,
 * then waits for an IRQ.
 */
 static void test_set_cnt_after_xval_no_irq(enum arch_timer timer,
-                                           uint64_t cnt_1, uint64_t xval,
-                                           uint64_t cnt_2,
+                                           u64 cnt_1, u64 xval,
+                                           u64 cnt_2,
                                            sleep_method_t guest_sleep,
                                            enum timer_view tv)
 {
@@ -684,31 +684,31 @@ static void test_set_cnt_after_xval_no_irq(enum arch_timer timer,
     timer_set_ctl(timer, CTL_IMASK);
 }
 
-static void test_set_cnt_after_tval(enum arch_timer timer, uint64_t cnt_1,
-                                    int32_t tval, uint64_t cnt_2,
+static void test_set_cnt_after_tval(enum arch_timer timer, u64 cnt_1,
+                                    s32 tval, u64 cnt_2,
                                     irq_wait_method_t wm)
 {
     test_set_cnt_after_xval(timer, cnt_1, tval, cnt_2, wm, TIMER_TVAL);
 }
 
-static void test_set_cnt_after_cval(enum arch_timer timer, uint64_t cnt_1,
-                                    uint64_t cval, uint64_t cnt_2,
+static void test_set_cnt_after_cval(enum arch_timer timer, u64 cnt_1,
+                                    u64 cval, u64 cnt_2,
                                     irq_wait_method_t wm)
 {
     test_set_cnt_after_xval(timer, cnt_1, cval, cnt_2, wm, TIMER_CVAL);
 }
 
 static void test_set_cnt_after_tval_no_irq(enum arch_timer timer,
-                                           uint64_t cnt_1, int32_t tval,
-                                           uint64_t cnt_2, sleep_method_t wm)
+                                           u64 cnt_1, s32 tval,
+                                           u64 cnt_2, sleep_method_t wm)
 {
     test_set_cnt_after_xval_no_irq(timer, cnt_1, tval, cnt_2, wm,
                                    TIMER_TVAL);
 }
 
 static void test_set_cnt_after_cval_no_irq(enum arch_timer timer,
-                                           uint64_t cnt_1, uint64_t cval,
-                                           uint64_t cnt_2, sleep_method_t wm)
+                                           u64 cnt_1, u64 cval,
+                                           u64 cnt_2, sleep_method_t wm)
 {
     test_set_cnt_after_xval_no_irq(timer, cnt_1, cval, cnt_2, wm,
                                    TIMER_CVAL);
@@ -718,7 +718,7 @@ static void test_set_cnt_after_cval_no_irq(enum arch_timer timer,
 static void test_move_counters_ahead_of_timers(enum arch_timer timer)
 {
     int i;
-    int32_t tval;
+    s32 tval;
 
     for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
         irq_wait_method_t wm = irq_wait_method[i];
@@ -730,8 +730,7 @@ static void test_move_counters_ahead_of_timers(enum arch_timer timer)
         test_set_cnt_after_tval(timer, 0, -1, DEF_CNT + 1, wm);
         test_set_cnt_after_tval(timer, 0, -1, TVAL_MAX, wm);
         tval = TVAL_MAX;
-        test_set_cnt_after_tval(timer, 0, tval, (uint64_t) tval + 1,
-                                wm);
+        test_set_cnt_after_tval(timer, 0, tval, (u64)tval + 1, wm);
     }
 }
 
@@ -754,8 +753,8 @@ static void test_move_counters_behind_timers(enum arch_timer timer)
 
 static void test_timers_in_the_past(enum arch_timer timer)
 {
-    int32_t tval = -1 * (int32_t) msec_to_cycles(test_args.wait_ms);
-    uint64_t cval;
+    s32 tval = -1 * (s32)msec_to_cycles(test_args.wait_ms);
+    u64 cval;
     int i;
 
     for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
@@ -790,8 +789,8 @@ static void test_timers_in_the_past(enum arch_timer timer)
 
 static void test_long_timer_delays(enum arch_timer timer)
 {
-    int32_t tval = (int32_t) msec_to_cycles(test_args.long_wait_ms);
-    uint64_t cval = DEF_CNT + msec_to_cycles(test_args.long_wait_ms);
+    s32 tval = (s32)msec_to_cycles(test_args.long_wait_ms);
+    u64 cval = DEF_CNT + msec_to_cycles(test_args.long_wait_ms);
    int i;
 
     for (i = 0; i < ARRAY_SIZE(irq_wait_method); i++) {
@@ -846,11 +845,11 @@ static void guest_code(enum arch_timer timer)
 
 static cpu_set_t default_cpuset;
 
-static uint32_t next_pcpu(void)
+static u32 next_pcpu(void)
 {
-    uint32_t max = get_nprocs();
-    uint32_t cur = sched_getcpu();
-    uint32_t next = cur;
+    u32 max = get_nprocs();
+    u32 cur = sched_getcpu();
+    u32 next = cur;
     cpu_set_t cpuset = default_cpuset;
 
     TEST_ASSERT(max > 1, "Need at least two physical cpus");
@@ -862,7 +861,7 @@ static u32 next_pcpu(void)
     return next;
 }
 
-static void kvm_set_cntxct(struct kvm_vcpu *vcpu, uint64_t cnt,
+static void kvm_set_cntxct(struct kvm_vcpu *vcpu, u64 cnt,
                            enum arch_timer timer)
 {
     if (timer == PHYSICAL)
@@ -874,7 +873,7 @@ static void kvm_set_cntxct(struct kvm_vcpu *vcpu, u64 cnt,
 static void handle_sync(struct kvm_vcpu *vcpu, struct ucall *uc)
 {
     enum sync_cmd cmd = uc->args[1];
-    uint64_t val = uc->args[2];
+    u64 val = uc->args[2];
     enum arch_timer timer = uc->args[3];
 
     switch (cmd) {
@@ -1018,8 +1017,8 @@ static bool parse_args(int argc, char *argv[])
 
 static void set_counter_defaults(void)
 {
-    const uint64_t MIN_ROLLOVER_SECS = 40ULL * 365 * 24 * 3600;
-    uint64_t freq = read_sysreg(CNTFRQ_EL0);
+    const u64 MIN_ROLLOVER_SECS = 40ULL * 365 * 24 * 3600;
+    u64 freq = read_sysreg(CNTFRQ_EL0);
     int width = ilog2(MIN_ROLLOVER_SECS * freq);
 
     width = clamp(width, 56, 64);
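set_counter_defaults() above sizes the counter width from the smallest width that will not roll over for 40 years at the system counter frequency, clamped to the architectural 56..64 range; CVAL_MAX and DEF_CNT (CVAL_MAX/2, per the comment earlier in the file) then derive from that width. As a worked example, at a 1 GHz CNTFRQ_EL0, 40ULL * 365 * 24 * 3600 seconds is about 1.26e18 cycles, which is roughly 2^60.1, so ilog2() yields 60. The same computation in plain C, frequency hardcoded for illustration:

    #include <stdint.h>
    #include <stdio.h>

    static int ilog2_u64(uint64_t v)
    {
    	int l = -1;

    	while (v) {	/* index of the highest set bit */
    		v >>= 1;
    		l++;
    	}
    	return l;
    }

    int main(void)
    {
    	const uint64_t MIN_ROLLOVER_SECS = 40ULL * 365 * 24 * 3600;
    	uint64_t freq = 1000000000;	/* pretend CNTFRQ_EL0 reads 1 GHz */
    	int width = ilog2_u64(MIN_ROLLOVER_SECS * freq);

    	if (width < 56)			/* clamp(width, 56, 64) */
    		width = 56;
    	if (width > 64)
    		width = 64;

    	printf("width=%d\n", width);	/* prints "width=60" */
    	return 0;
    }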
@@ -31,14 +31,14 @@
 
 extern unsigned char sw_bp, sw_bp2, hw_bp, hw_bp2, bp_svc, bp_brk, hw_wp, ss_start, hw_bp_ctx;
 extern unsigned char iter_ss_begin, iter_ss_end;
-static volatile uint64_t sw_bp_addr, hw_bp_addr;
-static volatile uint64_t wp_addr, wp_data_addr;
-static volatile uint64_t svc_addr;
-static volatile uint64_t ss_addr[4], ss_idx;
-#define PC(v) ((uint64_t)&(v))
+static volatile u64 sw_bp_addr, hw_bp_addr;
+static volatile u64 wp_addr, wp_data_addr;
+static volatile u64 svc_addr;
+static volatile u64 ss_addr[4], ss_idx;
+#define PC(v) ((u64)&(v))
 
 #define GEN_DEBUG_WRITE_REG(reg_name) \
-static void write_##reg_name(int num, uint64_t val) \
+static void write_##reg_name(int num, u64 val) \
 { \
     switch (num) { \
     case 0: \
@@ -102,8 +102,8 @@ GEN_DEBUG_WRITE_REG(dbgwvr)
 
 static void reset_debug_state(void)
 {
-    uint8_t brps, wrps, i;
-    uint64_t dfr0;
+    u8 brps, wrps, i;
+    u64 dfr0;
 
     asm volatile("msr daifset, #8");
 
@@ -140,7 +140,7 @@ static void enable_os_lock(void)
 
 static void enable_monitor_debug_exceptions(void)
 {
-    uint64_t mdscr;
+    u64 mdscr;
 
     asm volatile("msr daifclr, #8");
 
@@ -149,9 +149,9 @@ static void enable_monitor_debug_exceptions(void)
     isb();
 }
 
-static void install_wp(uint8_t wpn, uint64_t addr)
+static void install_wp(u8 wpn, u64 addr)
 {
-    uint32_t wcr;
+    u32 wcr;
 
     wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E;
     write_dbgwcr(wpn, wcr);
@@ -162,9 +162,9 @@ static void install_wp(u8 wpn, u64 addr)
     enable_monitor_debug_exceptions();
 }
 
-static void install_hw_bp(uint8_t bpn, uint64_t addr)
+static void install_hw_bp(u8 bpn, u64 addr)
 {
-    uint32_t bcr;
+    u32 bcr;
 
     bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E;
     write_dbgbcr(bpn, bcr);
@@ -174,11 +174,10 @@ static void install_hw_bp(u8 bpn, u64 addr)
     enable_monitor_debug_exceptions();
 }
 
-static void install_wp_ctx(uint8_t addr_wp, uint8_t ctx_bp, uint64_t addr,
-                           uint64_t ctx)
+static void install_wp_ctx(u8 addr_wp, u8 ctx_bp, u64 addr, u64 ctx)
 {
-    uint32_t wcr;
-    uint64_t ctx_bcr;
+    u32 wcr;
+    u64 ctx_bcr;
 
     /* Setup a context-aware breakpoint for Linked Context ID Match */
     ctx_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E |
@@ -188,7 +187,7 @@ static void install_wp_ctx(u8 addr_wp, u8 ctx_bp, u64 addr, u64 ctx)
 
     /* Setup a linked watchpoint (linked to the context-aware breakpoint) */
     wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E |
-          DBGWCR_WT_LINK | ((uint32_t)ctx_bp << DBGWCR_LBN_SHIFT);
+          DBGWCR_WT_LINK | ((u32)ctx_bp << DBGWCR_LBN_SHIFT);
     write_dbgwcr(addr_wp, wcr);
     write_dbgwvr(addr_wp, addr);
     isb();
@@ -196,10 +195,9 @@ static void install_wp_ctx(u8 addr_wp, u8 ctx_bp, u64 addr, u64 ctx)
     enable_monitor_debug_exceptions();
 }
 
-void install_hw_bp_ctx(uint8_t addr_bp, uint8_t ctx_bp, uint64_t addr,
-                       uint64_t ctx)
+void install_hw_bp_ctx(u8 addr_bp, u8 ctx_bp, u64 addr, u64 ctx)
 {
-    uint32_t addr_bcr, ctx_bcr;
+    u32 addr_bcr, ctx_bcr;
 
     /* Setup a context-aware breakpoint for Linked Context ID Match */
     ctx_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E |
@@ -213,7 +211,7 @@ void install_hw_bp_ctx(u8 addr_bp, u8 ctx_bp, u64 addr, u64 ctx)
      */
     addr_bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E |
                DBGBCR_BT_ADDR_LINK_CTX |
-               ((uint32_t)ctx_bp << DBGBCR_LBN_SHIFT);
+               ((u32)ctx_bp << DBGBCR_LBN_SHIFT);
     write_dbgbcr(addr_bp, addr_bcr);
     write_dbgbvr(addr_bp, addr);
     isb();
@@ -223,7 +221,7 @@ void install_hw_bp_ctx(u8 addr_bp, u8 ctx_bp, u64 addr, u64 ctx)
 
 static void install_ss(void)
 {
-    uint64_t mdscr;
+    u64 mdscr;
 
     asm volatile("msr daifclr, #8");
 
@@ -234,9 +232,9 @@ static void install_ss(void)
 
 static volatile char write_data;
 
-static void guest_code(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn)
+static void guest_code(u8 bpn, u8 wpn, u8 ctx_bpn)
 {
-    uint64_t ctx = 0xabcdef; /* a random context number */
+    u64 ctx = 0xabcdef; /* a random context number */
 
     /* Software-breakpoint */
     reset_debug_state();
@@ -377,8 +375,8 @@ static void guest_svc_handler(struct ex_regs *regs)
 
 static void guest_code_ss(int test_cnt)
 {
-    uint64_t i;
-    uint64_t bvr, wvr, w_bvr, w_wvr;
+    u64 i;
+    u64 bvr, wvr, w_bvr, w_wvr;
 
     for (i = 0; i < test_cnt; i++) {
         /* Bits [1:0] of dbg{b,w}vr are RES0 */
@@ -416,12 +414,12 @@ static void guest_code_ss(int test_cnt)
     GUEST_DONE();
 }
 
-static int debug_version(uint64_t id_aa64dfr0)
+static int debug_version(u64 id_aa64dfr0)
 {
     return FIELD_GET(ID_AA64DFR0_EL1_DebugVer, id_aa64dfr0);
 }
 
-static void test_guest_debug_exceptions(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn)
+static void test_guest_debug_exceptions(u8 bpn, u8 wpn, u8 ctx_bpn)
 {
     struct kvm_vcpu *vcpu;
     struct kvm_vm *vm;
@@ -468,8 +466,8 @@ void test_single_step_from_userspace(int test_cnt)
     struct kvm_vm *vm;
     struct ucall uc;
     struct kvm_run *run;
-    uint64_t pc, cmd;
-    uint64_t test_pc = 0;
+    u64 pc, cmd;
+    u64 test_pc = 0;
     bool ss_enable = false;
     struct kvm_guest_debug debug = {};
 
@@ -506,7 +504,7 @@ void test_single_step_from_userspace(int test_cnt)
                     "Unexpected pc 0x%lx (expected 0x%lx)",
                     pc, test_pc);
 
-        if ((pc + 4) == (uint64_t)&iter_ss_end) {
+        if ((pc + 4) == (u64)&iter_ss_end) {
             test_pc = 0;
             debug.control = KVM_GUESTDBG_ENABLE;
             ss_enable = false;
@@ -519,8 +517,8 @@ void test_single_step_from_userspace(int test_cnt)
          * iter_ss_end, the pc for the next KVM_EXIT_DEBUG should
         * be the current pc + 4.
         */
-        if ((pc >= (uint64_t)&iter_ss_begin) &&
-            (pc < (uint64_t)&iter_ss_end))
+        if ((pc >= (u64)&iter_ss_begin) &&
+            (pc < (u64)&iter_ss_end))
             test_pc = pc + 4;
         else
             test_pc = 0;
@@ -533,9 +531,9 @@ void test_single_step_from_userspace(int test_cnt)
 * Run debug testing using the various breakpoint#, watchpoint# and
 * context-aware breakpoint# with the given ID_AA64DFR0_EL1 configuration.
 */
-void test_guest_debug_exceptions_all(uint64_t aa64dfr0)
+void test_guest_debug_exceptions_all(u64 aa64dfr0)
 {
-    uint8_t brp_num, wrp_num, ctx_brp_num, normal_brp_num, ctx_brp_base;
+    u8 brp_num, wrp_num, ctx_brp_num, normal_brp_num, ctx_brp_base;
     int b, w, c;
 
     /* Number of breakpoints */
@@ -580,7 +578,7 @@ int main(int argc, char *argv[])
     struct kvm_vm *vm;
     int opt;
     int ss_iteration = 10000;
-    uint64_t aa64dfr0;
+    u64 aa64dfr0;
 
     vm = vm_create_with_one_vcpu(&vcpu, guest_code);
     aa64dfr0 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1));
@@ -29,9 +29,9 @@
 #define KVM_REG_ARM_VENDOR_HYP_BMAP_2_RESET_VAL	0
 
 struct kvm_fw_reg_info {
-    uint64_t reg;          /* Register definition */
-    uint64_t max_feat_bit; /* Bit that represents the upper limit of the feature-map */
-    uint64_t reset_val;    /* Reset value for the register */
+    u64 reg;           /* Register definition */
+    u64 max_feat_bit;  /* Bit that represents the upper limit of the feature-map */
+    u64 reset_val;     /* Reset value for the register */
 };
 
 #define FW_REG_INFO(r) \
@@ -59,8 +59,8 @@ enum test_stage {
 static int stage = TEST_STAGE_REG_IFACE;
 
 struct test_hvc_info {
-    uint32_t func_id;
-    uint64_t arg1;
+    u32 func_id;
+    u64 arg1;
 };
 
 #define TEST_HVC_INFO(f, a1) \
@@ -152,9 +152,9 @@ static void guest_code(void)
 }
 
 struct st_time {
-    uint32_t rev;
-    uint32_t attr;
-    uint64_t st_time;
+    u32 rev;
+    u32 attr;
+    u64 st_time;
 };
 
 #define STEAL_TIME_SIZE		((sizeof(struct st_time) + 63) & ~63)
@@ -162,7 +162,7 @@ struct st_time {
 
 static void steal_time_init(struct kvm_vcpu *vcpu)
 {
-    uint64_t st_ipa = (ulong)ST_GPA_BASE;
+    u64 st_ipa = (ulong)ST_GPA_BASE;
     unsigned int gpages;
 
     gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE);
@@ -174,13 +174,13 @@ static void steal_time_init(struct kvm_vcpu *vcpu)
 
 static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu)
 {
-    uint64_t val;
+    u64 val;
     unsigned int i;
     int ret;
 
     for (i = 0; i < ARRAY_SIZE(fw_reg_info); i++) {
         const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i];
-        uint64_t set_val;
+        u64 set_val;
 
         /* First 'read' should be the reset value for the reg */
         val = vcpu_get_reg(vcpu, reg_info->reg);
@@ -229,7 +229,7 @@ static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu)
 
 static void test_fw_regs_after_vm_start(struct kvm_vcpu *vcpu)
 {
-    uint64_t val;
+    u64 val;
     unsigned int i;
     int ret;
@@ -13,7 +13,7 @@ static volatile bool sys64, undef;
 
 #define __check_sr_read(r) \
     ({ \
-        uint64_t val; \
+        u64 val; \
 \
         sys64 = false; \
         undef = false; \
@@ -101,7 +101,7 @@ int main(int argc, char *argv[])
 {
     struct kvm_vcpu *vcpu;
     struct kvm_vm *vm;
-    uint64_t mmfr2;
+    u64 mmfr2;
 
     test_disable_default_vgic();
@@ -15,7 +15,7 @@ static volatile bool handled;
 
 #define __check_sr_read(r) \
     ({ \
-        uint64_t val; \
+        u64 val; \
 \
         handled = false; \
         dsb(sy); \
@@ -33,7 +33,7 @@ static volatile bool handled;
 
 #define __check_gicv5_gicr_op(r) \
     ({ \
-        uint64_t val; \
+        u64 val; \
 \
         handled = false; \
         dsb(sy); \
@@ -82,7 +82,7 @@ static volatile bool handled;
 
 static void guest_code_gicv3(void)
 {
-    uint64_t val;
+    u64 val;
 
     /*
      * Check that we advertise that ID_AA64PFR0_EL1.GIC == 0, having
@@ -262,7 +262,7 @@ int main(int argc, char *argv[])
     struct kvm_vcpu *vcpu;
     struct kvm_vm *vm;
     bool has_v3, has_v5;
-    uint64_t pfr;
+    u64 pfr;
 
     test_disable_default_vgic();
@@ -23,7 +23,7 @@
 #define TEST_PTE_GVA		0xb0000000
 #define TEST_DATA		0x0123456789ABCDEF
 
-static uint64_t *guest_test_memory = (uint64_t *)TEST_GVA;
+static u64 *guest_test_memory = (u64 *)TEST_GVA;
 
 #define CMD_NONE		(0)
 #define CMD_SKIP_TEST		(1ULL << 1)
@@ -48,7 +48,7 @@ static struct event_cnt {
 
 struct test_desc {
     const char *name;
-    uint64_t mem_mark_cmd;
+    u64 mem_mark_cmd;
     /* Skip the test if any prepare function returns false */
     bool (*guest_prepare[PREPARE_FN_NR])(void);
     void (*guest_test)(void);
@@ -59,8 +59,8 @@ struct test_desc {
     void (*iabt_handler)(struct ex_regs *regs);
     void (*mmio_handler)(struct kvm_vm *vm, struct kvm_run *run);
     void (*fail_vcpu_run_handler)(int ret);
-    uint32_t pt_memslot_flags;
-    uint32_t data_memslot_flags;
+    u32 pt_memslot_flags;
+    u32 data_memslot_flags;
     bool skip;
     struct event_cnt expected_events;
 };
@@ -70,9 +70,9 @@ struct test_params {
     struct test_desc *test_desc;
 };
 
-static inline void flush_tlb_page(uint64_t vaddr)
+static inline void flush_tlb_page(gva_t gva)
 {
-    uint64_t page = vaddr >> 12;
+    gva_t page = gva >> 12;
 
     dsb(ishst);
     asm volatile("tlbi vaae1is, %0" :: "r" (page));
@@ -82,7 +82,7 @@ static inline void flush_tlb_page(gva_t gva)
 
 static void guest_write64(void)
 {
-    uint64_t val;
+    u64 val;
 
     WRITE_ONCE(*guest_test_memory, TEST_DATA);
     val = READ_ONCE(*guest_test_memory);
@@ -92,8 +92,8 @@ static void guest_write64(void)
 /* Check the system for atomic instructions. */
 static bool guest_check_lse(void)
 {
-    uint64_t isar0 = read_sysreg(id_aa64isar0_el1);
-    uint64_t atomic;
+    u64 isar0 = read_sysreg(id_aa64isar0_el1);
+    u64 atomic;
 
     atomic = FIELD_GET(ID_AA64ISAR0_EL1_ATOMIC, isar0);
     return atomic >= 2;
@@ -101,8 +101,8 @@ static bool guest_check_lse(void)
 
 static bool guest_check_dc_zva(void)
 {
-    uint64_t dczid = read_sysreg(dczid_el0);
-    uint64_t dzp = FIELD_GET(DCZID_EL0_DZP, dczid);
+    u64 dczid = read_sysreg(dczid_el0);
+    u64 dzp = FIELD_GET(DCZID_EL0_DZP, dczid);
 
     return dzp == 0;
 }
@@ -110,7 +110,7 @@ static bool guest_check_dc_zva(void)
 /* Compare and swap instruction. */
 static void guest_cas(void)
 {
-    uint64_t val;
+    u64 val;
 
     GUEST_ASSERT(guest_check_lse());
     asm volatile(".arch_extension lse\n"
@@ -122,7 +122,7 @@ static void guest_cas(void)
 
 static void guest_read64(void)
 {
-    uint64_t val;
+    u64 val;
 
     val = READ_ONCE(*guest_test_memory);
     GUEST_ASSERT_EQ(val, 0);
@@ -131,7 +131,7 @@ static void guest_read64(void)
 /* Address translation instruction */
 static void guest_at(void)
 {
-    uint64_t par;
+    u64 par;
 
     asm volatile("at s1e1r, %0" :: "r" (guest_test_memory));
     isb();
@@ -148,7 +148,7 @@ static void guest_at(void)
 */
 static void guest_dc_zva(void)
 {
-    uint16_t val;
+    u16 val;
 
     asm volatile("dc zva, %0" :: "r" (guest_test_memory));
     dsb(ish);
@@ -164,8 +164,8 @@ static void guest_dc_zva(void)
 */
 static void guest_ld_preidx(void)
 {
-    uint64_t val;
-    uint64_t addr = TEST_GVA - 8;
+    u64 val;
+    u64 addr = TEST_GVA - 8;
 
     /*
      * This ends up accessing "TEST_GVA + 8 - 8", where "TEST_GVA - 8" is
@@ -179,8 +179,8 @@ static void guest_ld_preidx(void)
 
 static void guest_st_preidx(void)
 {
-    uint64_t val = TEST_DATA;
-    uint64_t addr = TEST_GVA - 8;
+    u64 val = TEST_DATA;
+    u64 addr = TEST_GVA - 8;
 
     asm volatile("str %0, [%1, #8]!"
                  : "+r" (val), "+r" (addr));
@@ -191,8 +191,8 @@ static void guest_st_preidx(void)
 
 static bool guest_set_ha(void)
 {
-    uint64_t mmfr1 = read_sysreg(id_aa64mmfr1_el1);
-    uint64_t hadbs, tcr;
+    u64 mmfr1 = read_sysreg(id_aa64mmfr1_el1);
+    u64 hadbs, tcr;
 
     /* Skip if HA is not supported. */
     hadbs = FIELD_GET(ID_AA64MMFR1_EL1_HAFDBS, mmfr1);
@@ -208,7 +208,7 @@ static bool guest_set_ha(void)
 
 static bool guest_clear_pte_af(void)
 {
-    *((uint64_t *)TEST_PTE_GVA) &= ~PTE_AF;
+    *((u64 *)TEST_PTE_GVA) &= ~PTE_AF;
     flush_tlb_page(TEST_GVA);
 
     return true;
@@ -217,7 +217,7 @@ static bool guest_clear_pte_af(void)
 static void guest_check_pte_af(void)
 {
     dsb(ish);
-    GUEST_ASSERT_EQ(*((uint64_t *)TEST_PTE_GVA) & PTE_AF, PTE_AF);
+    GUEST_ASSERT_EQ(*((u64 *)TEST_PTE_GVA) & PTE_AF, PTE_AF);
 }
 
 static void guest_check_write_in_dirty_log(void)
@@ -302,26 +302,26 @@ static void no_iabt_handler(struct ex_regs *regs)
 static struct uffd_args {
     char *copy;
     void *hva;
-    uint64_t paging_size;
+    u64 paging_size;
 } pt_args, data_args;
 
 /* Returns true to continue the test, and false if it should be skipped. */
 static int uffd_generic_handler(int uffd_mode, int uffd, struct uffd_msg *msg,
                                 struct uffd_args *args)
 {
-    uint64_t addr = msg->arg.pagefault.address;
-    uint64_t flags = msg->arg.pagefault.flags;
+    u64 addr = msg->arg.pagefault.address;
+    u64 flags = msg->arg.pagefault.flags;
     struct uffdio_copy copy;
     int ret;
 
     TEST_ASSERT(uffd_mode == UFFDIO_REGISTER_MODE_MISSING,
                 "The only expected UFFD mode is MISSING");
-    TEST_ASSERT_EQ(addr, (uint64_t)args->hva);
+    TEST_ASSERT_EQ(addr, (u64)args->hva);
 
     pr_debug("uffd fault: addr=%p write=%d\n",
             (void *)addr, !!(flags & UFFD_PAGEFAULT_FLAG_WRITE));
 
-    copy.src = (uint64_t)args->copy;
+    copy.src = (u64)args->copy;
     copy.dst = addr;
     copy.len = args->paging_size;
     copy.mode = 0;
@@ -407,7 +407,7 @@ static bool punch_hole_in_backing_store(struct kvm_vm *vm,
                                         struct userspace_mem_region *region)
 {
     void *hva = (void *)region->region.userspace_addr;
-    uint64_t paging_size = region->region.memory_size;
+    u64 paging_size = region->region.memory_size;
     int ret, fd = region->fd;
 
     if (fd != -1) {
@@ -438,7 +438,7 @@ static void mmio_on_test_gpa_handler(struct kvm_vm *vm, struct kvm_run *run)
 
 static void mmio_no_handler(struct kvm_vm *vm, struct kvm_run *run)
 {
-    uint64_t data;
+    u64 data;
 
     memcpy(&data, run->mmio.data, sizeof(data));
     pr_debug("addr=%lld len=%d w=%d data=%lx\n",
@@ -449,11 +449,11 @@ static void mmio_no_handler(struct kvm_vm *vm, struct kvm_run *run)
 
 static bool check_write_in_dirty_log(struct kvm_vm *vm,
                                      struct userspace_mem_region *region,
-                                     uint64_t host_pg_nr)
+                                     u64 host_pg_nr)
|
||||
u64 host_pg_nr)
|
||||
{
|
||||
unsigned long *bmap;
|
||||
bool first_page_dirty;
|
||||
uint64_t size = region->region.memory_size;
|
||||
u64 size = region->region.memory_size;
|
||||
|
||||
/* getpage_size() is not always equal to vm->page_size */
|
||||
bmap = bitmap_zalloc(size / getpagesize());
|
||||
@@ -468,7 +468,7 @@ static bool handle_cmd(struct kvm_vm *vm, int cmd)
|
||||
{
|
||||
struct userspace_mem_region *data_region, *pt_region;
|
||||
bool continue_test = true;
|
||||
uint64_t pte_gpa, pte_pg;
|
||||
u64 pte_gpa, pte_pg;
|
||||
|
||||
data_region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
|
||||
pt_region = vm_get_mem_region(vm, MEM_REGION_PT);
|
||||
@@ -510,7 +510,7 @@ void fail_vcpu_run_mmio_no_syndrome_handler(int ret)
|
||||
events.fail_vcpu_runs += 1;
|
||||
}
|
||||
|
||||
typedef uint32_t aarch64_insn_t;
|
||||
typedef u32 aarch64_insn_t;
|
||||
extern aarch64_insn_t __exec_test[2];
|
||||
|
||||
noinline void __return_0x77(void)
|
||||
@@ -525,7 +525,7 @@ noinline void __return_0x77(void)
|
||||
*/
|
||||
static void load_exec_code_for_test(struct kvm_vm *vm)
|
||||
{
|
||||
uint64_t *code;
|
||||
u64 *code;
|
||||
struct userspace_mem_region *region;
|
||||
void *hva;
|
||||
|
||||
@@ -552,7 +552,7 @@ static void setup_abort_handlers(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
|
||||
static void setup_gva_maps(struct kvm_vm *vm)
|
||||
{
|
||||
struct userspace_mem_region *region;
|
||||
uint64_t pte_gpa;
|
||||
u64 pte_gpa;
|
||||
|
||||
region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
|
||||
/* Map TEST_GVA first. This will install a new PTE. */
|
||||
@@ -574,12 +574,12 @@ enum pf_test_memslots {
|
||||
*/
|
||||
static void setup_memslots(struct kvm_vm *vm, struct test_params *p)
|
||||
{
|
||||
uint64_t backing_src_pagesz = get_backing_src_pagesz(p->src_type);
|
||||
uint64_t guest_page_size = vm->page_size;
|
||||
uint64_t max_gfn = vm_compute_max_gfn(vm);
|
||||
u64 backing_src_pagesz = get_backing_src_pagesz(p->src_type);
|
||||
u64 guest_page_size = vm->page_size;
|
||||
u64 max_gfn = vm_compute_max_gfn(vm);
|
||||
/* Enough for 2M of code when using 4K guest pages. */
|
||||
uint64_t code_npages = 512;
|
||||
uint64_t pt_size, data_size, data_gpa;
|
||||
u64 code_npages = 512;
|
||||
u64 pt_size, data_size, data_gpa;
|
||||
|
||||
/*
|
||||
* This test requires 1 pgd, 2 pud, 4 pmd, and 6 pte pages when using
|
||||
|
||||
@@ -22,8 +22,7 @@
 #define CPU_ON_ENTRY_ADDR 0xfeedf00dul
 #define CPU_ON_CONTEXT_ID 0xdeadc0deul
 
-static uint64_t psci_cpu_on(uint64_t target_cpu, uint64_t entry_addr,
-			    uint64_t context_id)
+static u64 psci_cpu_on(u64 target_cpu, u64 entry_addr, u64 context_id)
 {
 	struct arm_smccc_res res;
 
@@ -33,8 +32,7 @@ static uint64_t psci_cpu_on(uint64_t target_cpu, uint64_t entry_addr,
 	return res.a0;
 }
 
-static uint64_t psci_affinity_info(uint64_t target_affinity,
-				   uint64_t lowest_affinity_level)
+static u64 psci_affinity_info(u64 target_affinity, u64 lowest_affinity_level)
 {
 	struct arm_smccc_res res;
 
@@ -44,7 +42,7 @@ static uint64_t psci_affinity_info(uint64_t target_affinity,
 	return res.a0;
 }
 
-static uint64_t psci_system_suspend(uint64_t entry_addr, uint64_t context_id)
+static u64 psci_system_suspend(u64 entry_addr, u64 context_id)
 {
 	struct arm_smccc_res res;
 
@@ -54,7 +52,7 @@ static uint64_t psci_system_suspend(uint64_t entry_addr, uint64_t context_id)
 	return res.a0;
 }
 
-static uint64_t psci_system_off2(uint64_t type, uint64_t cookie)
+static u64 psci_system_off2(u64 type, u64 cookie)
 {
 	struct arm_smccc_res res;
 
@@ -63,7 +61,7 @@ static uint64_t psci_system_off2(uint64_t type, uint64_t cookie)
 	return res.a0;
 }
 
-static uint64_t psci_features(uint32_t func_id)
+static u64 psci_features(u32 func_id)
 {
 	struct arm_smccc_res res;
 
@@ -110,7 +108,7 @@ static void enter_guest(struct kvm_vcpu *vcpu)
 
 static void assert_vcpu_reset(struct kvm_vcpu *vcpu)
 {
-	uint64_t obs_pc, obs_x0;
+	u64 obs_pc, obs_x0;
 
 	obs_pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc));
 	obs_x0 = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.regs[0]));
@@ -123,9 +121,9 @@ static void assert_vcpu_reset(struct kvm_vcpu *vcpu)
 		  obs_x0, CPU_ON_CONTEXT_ID);
 }
 
-static void guest_test_cpu_on(uint64_t target_cpu)
+static void guest_test_cpu_on(u64 target_cpu)
 {
-	uint64_t target_state;
+	u64 target_state;
 
 	GUEST_ASSERT(!psci_cpu_on(target_cpu, CPU_ON_ENTRY_ADDR, CPU_ON_CONTEXT_ID));
 
@@ -142,7 +140,7 @@ static void guest_test_cpu_on(uint64_t target_cpu)
 static void host_test_cpu_on(void)
 {
 	struct kvm_vcpu *source, *target;
-	uint64_t target_mpidr;
+	u64 target_mpidr;
 	struct kvm_vm *vm;
 	struct ucall uc;
 
@@ -166,7 +164,7 @@ static void host_test_cpu_on(void)
 
 static void guest_test_system_suspend(void)
 {
-	uint64_t ret;
+	u64 ret;
 
 	/* assert that SYSTEM_SUSPEND is discoverable */
 	GUEST_ASSERT(!psci_features(PSCI_1_0_FN_SYSTEM_SUSPEND));
@@ -200,7 +198,7 @@ static void host_test_system_suspend(void)
 
 static void guest_test_system_off2(void)
 {
-	uint64_t ret;
+	u64 ret;
 
 	/* assert that SYSTEM_OFF2 is discoverable */
 	GUEST_ASSERT(psci_features(PSCI_1_3_FN_SYSTEM_OFF2) &
@@ -238,7 +236,7 @@ static void host_test_system_off2(void)
 {
 	struct kvm_vcpu *source, *target;
 	struct kvm_mp_state mps;
-	uint64_t psci_version = 0;
+	u64 psci_version = 0;
 	int nr_shutdowns = 0;
 	struct kvm_run *run;
 	struct ucall uc;
@@ -51,18 +51,16 @@
 #define EINJ_OFFSET 0x01234badUL
 #define EINJ_GVA ((START_GVA) + (EINJ_OFFSET))
 
-static vm_paddr_t einj_gpa;
+static gpa_t einj_gpa;
 static void *einj_hva;
-static uint64_t einj_hpa;
+static u64 einj_hpa;
 static bool far_invalid;
 
-static uint64_t translate_to_host_paddr(unsigned long vaddr)
+static u64 translate_hva_to_hpa(unsigned long hva)
 {
-	uint64_t pinfo;
-	int64_t offset = vaddr / getpagesize() * sizeof(pinfo);
+	u64 pinfo;
+	s64 offset = hva / getpagesize() * sizeof(pinfo);
 	int fd;
-	uint64_t page_addr;
-	uint64_t paddr;
 
 	fd = open("/proc/self/pagemap", O_RDONLY);
 	if (fd < 0)
@@ -77,12 +75,11 @@ static uint64_t translate_to_host_paddr(unsigned long vaddr)
 	if ((pinfo & PAGE_PRESENT) == 0)
 		ksft_exit_fail_perror("Page not present");
 
-	page_addr = (pinfo & PAGE_PHYSICAL) << MIN_PAGE_SHIFT;
-	paddr = page_addr + (vaddr & (getpagesize() - 1));
-	return paddr;
+	return ((pinfo & PAGE_PHYSICAL) << MIN_PAGE_SHIFT) +
+	       (hva & (getpagesize() - 1));
 }
 
-static void write_einj_entry(const char *einj_path, uint64_t val)
+static void write_einj_entry(const char *einj_path, u64 val)
 {
 	char cmd[256] = {0};
 	FILE *cmdfile = NULL;
@@ -96,7 +93,7 @@ static void write_einj_entry(const char *einj_path, uint64_t val)
 		ksft_exit_fail_perror("Failed to write EINJ entry");
 }
 
-static void inject_uer(uint64_t paddr)
+static void inject_uer(u64 hpa)
 {
 	if (access("/sys/firmware/acpi/tables/EINJ", R_OK) == -1)
 		ksft_test_result_skip("EINJ table no available in firmware");
@@ -106,7 +103,7 @@ static void inject_uer(uint64_t paddr)
 
 	write_einj_entry(EINJ_ETYPE, ERROR_TYPE_MEMORY_UER);
 	write_einj_entry(EINJ_FLAGS, MASK_MEMORY_UER);
-	write_einj_entry(EINJ_ADDR, paddr);
+	write_einj_entry(EINJ_ADDR, hpa);
 	write_einj_entry(EINJ_MASK, ~0x0UL);
 	write_einj_entry(EINJ_NOTRIGGER, 1);
 	write_einj_entry(EINJ_DOIT, 1);
@@ -145,10 +142,10 @@ static void setup_sigbus_handler(void)
 
 static void guest_code(void)
 {
-	uint64_t guest_data;
+	u64 guest_data;
 
 	/* Consumes error will cause a SEA. */
-	guest_data = *(uint64_t *)EINJ_GVA;
+	guest_data = *(u64 *)EINJ_GVA;
 
 	GUEST_FAIL("Poison not protected by SEA: gva=%#lx, guest_data=%#lx\n",
 		   EINJ_GVA, guest_data);
@@ -253,8 +250,8 @@ static struct kvm_vm *vm_create_with_sea_handler(struct kvm_vcpu **vcpu)
 	size_t backing_page_size;
 	size_t guest_page_size;
 	size_t alignment;
-	uint64_t num_guest_pages;
-	vm_paddr_t start_gpa;
+	u64 num_guest_pages;
+	gpa_t start_gpa;
 	enum vm_mem_backing_src_type src_type = VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB;
 	struct kvm_vm *vm;
 
@@ -278,7 +275,7 @@ static struct kvm_vm *vm_create_with_sea_handler(struct kvm_vcpu **vcpu)
 	vm_userspace_mem_region_add(
 		/*vm=*/vm,
 		/*src_type=*/src_type,
-		/*guest_paddr=*/start_gpa,
+		/*gpa=*/start_gpa,
 		/*slot=*/1,
 		/*npages=*/num_guest_pages,
 		/*flags=*/0);
@@ -292,18 +289,18 @@ static struct kvm_vm *vm_create_with_sea_handler(struct kvm_vcpu **vcpu)
 
 static void vm_inject_memory_uer(struct kvm_vm *vm)
 {
-	uint64_t guest_data;
+	u64 guest_data;
 
 	einj_gpa = addr_gva2gpa(vm, EINJ_GVA);
 	einj_hva = addr_gva2hva(vm, EINJ_GVA);
 
 	/* Populate certain data before injecting UER. */
-	*(uint64_t *)einj_hva = 0xBAADCAFE;
-	guest_data = *(uint64_t *)einj_hva;
+	*(u64 *)einj_hva = 0xBAADCAFE;
+	guest_data = *(u64 *)einj_hva;
 	ksft_print_msg("Before EINJect: data=%#lx\n",
 		       guest_data);
 
-	einj_hpa = translate_to_host_paddr((unsigned long)einj_hva);
+	einj_hpa = translate_hva_to_hpa((unsigned long)einj_hva);
 
 	ksft_print_msg("EINJ_GVA=%#lx, einj_gpa=%#lx, einj_hva=%p, einj_hpa=%#lx\n",
 		       EINJ_GVA, einj_gpa, einj_hva, einj_hpa);
@@ -30,20 +30,20 @@ struct reg_ftr_bits {
 	char *name;
 	bool sign;
 	enum ftr_type type;
-	uint8_t shift;
-	uint64_t mask;
+	u8 shift;
+	u64 mask;
 	/*
 	 * For FTR_EXACT, safe_val is used as the exact safe value.
 	 * For FTR_LOWER_SAFE, safe_val is used as the minimal safe value.
 	 */
-	int64_t safe_val;
+	s64 safe_val;
 
 	/* Allowed to be changed by the host after run */
 	bool mutable;
 };
 
 struct test_feature_reg {
-	uint32_t reg;
+	u32 reg;
 	const struct reg_ftr_bits *ftr_bits;
 };
 
@@ -275,9 +275,9 @@ static void guest_code(void)
 }
 
 /* Return a safe value to a given ftr_bits an ftr value */
-uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
+u64 get_safe_value(const struct reg_ftr_bits *ftr_bits, u64 ftr)
 {
-	uint64_t ftr_max = ftr_bits->mask >> ftr_bits->shift;
+	u64 ftr_max = ftr_bits->mask >> ftr_bits->shift;
 
 	TEST_ASSERT(ftr_max > 1, "This test doesn't support single bit features");
 
@@ -329,16 +329,16 @@ uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
 }
 
 /* Return an invalid value to a given ftr_bits an ftr value */
-uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
+u64 get_invalid_value(const struct reg_ftr_bits *ftr_bits, u64 ftr)
 {
-	uint64_t ftr_max = ftr_bits->mask >> ftr_bits->shift;
+	u64 ftr_max = ftr_bits->mask >> ftr_bits->shift;
 
 	TEST_ASSERT(ftr_max > 1, "This test doesn't support single bit features");
 
 	if (ftr_bits->sign == FTR_UNSIGNED) {
 		switch (ftr_bits->type) {
 		case FTR_EXACT:
-			ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);
+			ftr = max((u64)ftr_bits->safe_val + 1, ftr + 1);
 			break;
 		case FTR_LOWER_SAFE:
 			ftr++;
@@ -358,7 +358,7 @@ uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
 	} else if (ftr != ftr_max) {
 		switch (ftr_bits->type) {
 		case FTR_EXACT:
-			ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);
+			ftr = max((u64)ftr_bits->safe_val + 1, ftr + 1);
 			break;
 		case FTR_LOWER_SAFE:
 			ftr++;
@@ -382,12 +382,12 @@ uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
 	return ftr;
 }
 
-static uint64_t test_reg_set_success(struct kvm_vcpu *vcpu, uint64_t reg,
-				     const struct reg_ftr_bits *ftr_bits)
+static u64 test_reg_set_success(struct kvm_vcpu *vcpu, u64 reg,
+				const struct reg_ftr_bits *ftr_bits)
 {
-	uint8_t shift = ftr_bits->shift;
-	uint64_t mask = ftr_bits->mask;
-	uint64_t val, new_val, ftr;
+	u8 shift = ftr_bits->shift;
+	u64 mask = ftr_bits->mask;
+	u64 val, new_val, ftr;
 
 	val = vcpu_get_reg(vcpu, reg);
 	ftr = (val & mask) >> shift;
@@ -405,12 +405,12 @@ static uint64_t test_reg_set_success(struct kvm_vcpu *vcpu, uint64_t reg,
 	return new_val;
 }
 
-static void test_reg_set_fail(struct kvm_vcpu *vcpu, uint64_t reg,
+static void test_reg_set_fail(struct kvm_vcpu *vcpu, u64 reg,
 			      const struct reg_ftr_bits *ftr_bits)
 {
-	uint8_t shift = ftr_bits->shift;
-	uint64_t mask = ftr_bits->mask;
-	uint64_t val, old_val, ftr;
+	u8 shift = ftr_bits->shift;
+	u64 mask = ftr_bits->mask;
+	u64 val, old_val, ftr;
 	int r;
 
 	val = vcpu_get_reg(vcpu, reg);
@@ -431,7 +431,7 @@ static void test_reg_set_fail(struct kvm_vcpu *vcpu, uint64_t reg,
 	TEST_ASSERT_EQ(val, old_val);
 }
 
-static uint64_t test_reg_vals[KVM_ARM_FEATURE_ID_RANGE_SIZE];
+static u64 test_reg_vals[KVM_ARM_FEATURE_ID_RANGE_SIZE];
 
 #define encoding_to_range_idx(encoding) \
 	KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(encoding), sys_reg_Op1(encoding), \
@@ -441,7 +441,7 @@ static uint64_t test_reg_vals[KVM_ARM_FEATURE_ID_RANGE_SIZE];
 
 static void test_vm_ftr_id_regs(struct kvm_vcpu *vcpu, bool aarch64_only)
 {
-	uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
+	u64 masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
 	struct reg_mask_range range = {
 		.addr = (__u64)masks,
 	};
@@ -458,8 +458,8 @@ static void test_vm_ftr_id_regs(struct kvm_vcpu *vcpu, bool aarch64_only)
 
 	for (int i = 0; i < ARRAY_SIZE(test_regs); i++) {
 		const struct reg_ftr_bits *ftr_bits = test_regs[i].ftr_bits;
-		uint32_t reg_id = test_regs[i].reg;
-		uint64_t reg = KVM_ARM64_SYS_REG(reg_id);
+		u32 reg_id = test_regs[i].reg;
+		u64 reg = KVM_ARM64_SYS_REG(reg_id);
 		int idx;
 
 		/* Get the index to masks array for the idreg */
@@ -489,11 +489,11 @@ static void test_vm_ftr_id_regs(struct kvm_vcpu *vcpu, bool aarch64_only)
 #define MPAM_IDREG_TEST 6
 static void test_user_set_mpam_reg(struct kvm_vcpu *vcpu)
 {
-	uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
+	u64 masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
 	struct reg_mask_range range = {
 		.addr = (__u64)masks,
 	};
-	uint64_t val;
+	u64 val;
 	int idx, err;
 
 	/*
@@ -584,13 +584,13 @@ static void test_user_set_mpam_reg(struct kvm_vcpu *vcpu)
 #define MTE_IDREG_TEST 1
 static void test_user_set_mte_reg(struct kvm_vcpu *vcpu)
 {
-	uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
+	u64 masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
 	struct reg_mask_range range = {
 		.addr = (__u64)masks,
 	};
-	uint64_t val;
-	uint64_t mte;
-	uint64_t mte_frac;
+	u64 val;
+	u64 mte;
+	u64 mte_frac;
 	int idx, err;
 
 	val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
@@ -644,7 +644,7 @@ static void test_user_set_mte_reg(struct kvm_vcpu *vcpu)
 	ksft_test_result_pass("ID_AA64PFR1_EL1.MTE_frac no longer 0xF\n");
 }
 
-static uint64_t reset_mutable_bits(uint32_t id, uint64_t val)
+static u64 reset_mutable_bits(u32 id, u64 val)
 {
 	struct test_feature_reg *reg = NULL;
 
@@ -674,7 +674,7 @@ static void test_guest_reg_read(struct kvm_vcpu *vcpu)
 	struct ucall uc;
 
 	while (!done) {
-		uint64_t val;
+		u64 val;
 
 		vcpu_run(vcpu);
 
@@ -707,7 +707,7 @@ static void test_guest_reg_read(struct kvm_vcpu *vcpu)
 
 static void test_clidr(struct kvm_vcpu *vcpu)
 {
-	uint64_t clidr;
+	u64 clidr;
 	int level;
 
 	clidr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CLIDR_EL1));
@@ -772,10 +772,10 @@ static void test_vcpu_non_ftr_id_regs(struct kvm_vcpu *vcpu)
 	ksft_test_result_pass("%s\n", __func__);
 }
 
-static void test_assert_id_reg_unchanged(struct kvm_vcpu *vcpu, uint32_t encoding)
+static void test_assert_id_reg_unchanged(struct kvm_vcpu *vcpu, u32 encoding)
 {
 	size_t idx = encoding_to_range_idx(encoding);
-	uint64_t observed;
+	u64 observed;
 
 	observed = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(encoding));
 	TEST_ASSERT_EQ(reset_mutable_bits(encoding, test_reg_vals[idx]),
@@ -808,7 +808,7 @@ int main(void)
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
 	bool aarch64_only;
-	uint64_t val, el0;
+	u64 val, el0;
 	int test_cnt, i, j;
 
 	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES));
@@ -37,7 +37,7 @@ static bool test_runs_at_el2(void)
 	for (conduit = test_runs_at_el2() ? SMC_INSN : HVC_INSN; \
 	     conduit <= SMC_INSN; conduit++)
 
-static void guest_main(uint32_t func_id, enum smccc_conduit conduit)
+static void guest_main(u32 func_id, enum smccc_conduit conduit)
 {
 	struct arm_smccc_res res;
 
@@ -49,7 +49,7 @@ static void guest_main(uint32_t func_id, enum smccc_conduit conduit)
 	GUEST_SYNC(res.a0);
 }
 
-static int __set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_functions,
+static int __set_smccc_filter(struct kvm_vm *vm, u32 start, u32 nr_functions,
 			      enum kvm_smccc_filter_action action)
 {
 	struct kvm_smccc_filter filter = {
@@ -62,7 +62,7 @@ static int __set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_fun
 			       KVM_ARM_VM_SMCCC_FILTER, &filter);
 }
 
-static void set_smccc_filter(struct kvm_vm *vm, uint32_t start, uint32_t nr_functions,
+static void set_smccc_filter(struct kvm_vm *vm, u32 start, u32 nr_functions,
 			     enum kvm_smccc_filter_action action)
 {
 	int ret = __set_smccc_filter(vm, start, nr_functions, action);
@@ -112,7 +112,7 @@ static void test_filter_reserved_range(void)
 {
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm = setup_vm(&vcpu);
-	uint32_t smc64_fn;
+	u32 smc64_fn;
 	int r;
 
 	r = __set_smccc_filter(vm, ARM_SMCCC_ARCH_WORKAROUND_1,
@@ -217,7 +217,7 @@ static void test_filter_denied(void)
 	}
 }
 
-static void expect_call_fwd_to_user(struct kvm_vcpu *vcpu, uint32_t func_id,
+static void expect_call_fwd_to_user(struct kvm_vcpu *vcpu, u32 func_id,
 				    enum smccc_conduit conduit)
 {
 	struct kvm_run *run = vcpu->run;
@@ -19,7 +19,7 @@
 
 #define NR_VCPUS 4
 
-#define REG_OFFSET(vcpu, offset) (((uint64_t)vcpu << 32) | offset)
+#define REG_OFFSET(vcpu, offset) (((u64)vcpu << 32) | offset)
 
 #define VGIC_DEV_IS_V2(_d) ((_d) == KVM_DEV_TYPE_ARM_VGIC_V2)
 #define VGIC_DEV_IS_V3(_d) ((_d) == KVM_DEV_TYPE_ARM_VGIC_V3)
@@ -27,10 +27,10 @@
 struct vm_gic {
 	struct kvm_vm *vm;
 	int gic_fd;
-	uint32_t gic_dev_type;
+	u32 gic_dev_type;
 };
 
-static uint64_t max_phys_size;
+static u64 max_phys_size;
 
 /*
  * Helpers to access a redistributor register and verify the ioctl() failed or
@@ -39,17 +39,17 @@ static uint64_t max_phys_size;
 static void v3_redist_reg_get_errno(int gicv3_fd, int vcpu, int offset,
 				    int want, const char *msg)
 {
-	uint32_t ignored_val;
+	u32 ignored_val;
 	int ret = __kvm_device_attr_get(gicv3_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS,
 					REG_OFFSET(vcpu, offset), &ignored_val);
 
 	TEST_ASSERT(ret && errno == want, "%s; want errno = %d", msg, want);
 }
 
-static void v3_redist_reg_get(int gicv3_fd, int vcpu, int offset, uint32_t want,
+static void v3_redist_reg_get(int gicv3_fd, int vcpu, int offset, u32 want,
 			      const char *msg)
 {
-	uint32_t val;
+	u32 val;
 
 	kvm_device_attr_get(gicv3_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS,
 			    REG_OFFSET(vcpu, offset), &val);
@@ -71,8 +71,8 @@ static int run_vcpu(struct kvm_vcpu *vcpu)
 	return __vcpu_run(vcpu) ? -errno : 0;
 }
 
-static struct vm_gic vm_gic_create_with_vcpus(uint32_t gic_dev_type,
-					      uint32_t nr_vcpus,
+static struct vm_gic vm_gic_create_with_vcpus(u32 gic_dev_type,
+					      u32 nr_vcpus,
 					      struct kvm_vcpu *vcpus[])
 {
 	struct vm_gic v;
@@ -84,7 +84,7 @@ static struct vm_gic vm_gic_create_with_vcpus(uint32_t gic_dev_type,
 	return v;
 }
 
-static struct vm_gic vm_gic_create_barebones(uint32_t gic_dev_type)
+static struct vm_gic vm_gic_create_barebones(u32 gic_dev_type)
 {
 	struct vm_gic v;
 
@@ -103,9 +103,9 @@ static void vm_gic_destroy(struct vm_gic *v)
 }
 
 struct vgic_region_attr {
-	uint64_t attr;
-	uint64_t size;
-	uint64_t alignment;
+	u64 attr;
+	u64 size;
+	u64 alignment;
 };
 
 struct vgic_region_attr gic_v3_dist_region = {
@@ -143,7 +143,7 @@ struct vgic_region_attr gic_v2_cpu_region = {
 static void subtest_dist_rdist(struct vm_gic *v)
 {
 	int ret;
-	uint64_t addr;
+	u64 addr;
 	struct vgic_region_attr rdist; /* CPU interface in GICv2*/
 	struct vgic_region_attr dist;
 
@@ -223,7 +223,7 @@ static void subtest_dist_rdist(struct vm_gic *v)
 /* Test the new REDIST region API */
 static void subtest_v3_redist_regions(struct vm_gic *v)
 {
-	uint64_t addr, expected_addr;
+	u64 addr, expected_addr;
 	int ret;
 
 	ret = __kvm_has_device_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
@@ -332,7 +332,7 @@ static void subtest_v3_redist_regions(struct vm_gic *v)
 * VGIC KVM device is created and initialized before the secondary CPUs
 * get created
 */
-static void test_vgic_then_vcpus(uint32_t gic_dev_type)
+static void test_vgic_then_vcpus(u32 gic_dev_type)
 {
 	struct kvm_vcpu *vcpus[NR_VCPUS];
 	struct vm_gic v;
@@ -353,7 +353,7 @@ static void test_vgic_then_vcpus(uint32_t gic_dev_type)
 }
 
 /* All the VCPUs are created before the VGIC KVM device gets initialized */
-static void test_vcpus_then_vgic(uint32_t gic_dev_type)
+static void test_vcpus_then_vgic(u32 gic_dev_type)
 {
 	struct kvm_vcpu *vcpus[NR_VCPUS];
 	struct vm_gic v;
@@ -408,7 +408,7 @@ static void test_v3_new_redist_regions(void)
 	struct kvm_vcpu *vcpus[NR_VCPUS];
 	void *dummy = NULL;
 	struct vm_gic v;
-	uint64_t addr;
+	u64 addr;
 	int ret;
 
 	v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS, vcpus);
@@ -460,7 +460,7 @@ static void test_v3_new_redist_regions(void)
 static void test_v3_typer_accesses(void)
 {
 	struct vm_gic v;
-	uint64_t addr;
+	u64 addr;
 	int ret, i;
 
 	v.vm = vm_create(NR_VCPUS);
@@ -518,7 +518,7 @@ static void test_v3_typer_accesses(void)
 }
 
 static struct vm_gic vm_gic_v3_create_with_vcpuids(int nr_vcpus,
-						   uint32_t vcpuids[])
+						   u32 vcpuids[])
 {
 	struct vm_gic v;
 	int i;
@@ -544,9 +544,9 @@ static struct vm_gic vm_gic_v3_create_with_vcpuids(int nr_vcpus,
 */
 static void test_v3_last_bit_redist_regions(void)
 {
-	uint32_t vcpuids[] = { 0, 3, 5, 4, 1, 2 };
+	u32 vcpuids[] = { 0, 3, 5, 4, 1, 2 };
 	struct vm_gic v;
-	uint64_t addr;
+	u64 addr;
 
 	v = vm_gic_v3_create_with_vcpuids(ARRAY_SIZE(vcpuids), vcpuids);
 
@@ -578,9 +578,9 @@ static void test_v3_last_bit_redist_regions(void)
 /* Test last bit with legacy region */
 static void test_v3_last_bit_single_rdist(void)
 {
-	uint32_t vcpuids[] = { 0, 3, 5, 4, 1, 2 };
+	u32 vcpuids[] = { 0, 3, 5, 4, 1, 2 };
 	struct vm_gic v;
-	uint64_t addr;
+	u64 addr;
 
 	v = vm_gic_v3_create_with_vcpuids(ARRAY_SIZE(vcpuids), vcpuids);
 
@@ -606,7 +606,7 @@ static void test_v3_redist_ipa_range_check_at_vcpu_run(void)
 	struct kvm_vcpu *vcpus[NR_VCPUS];
 	struct vm_gic v;
 	int ret, i;
-	uint64_t addr;
+	u64 addr;
 
 	v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, 1, vcpus);
 
@@ -638,7 +638,7 @@ static void test_v3_its_region(void)
 {
 	struct kvm_vcpu *vcpus[NR_VCPUS];
 	struct vm_gic v;
-	uint64_t addr;
+	u64 addr;
 	int its_fd, ret;
 
 	v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS, vcpus);
@@ -717,11 +717,11 @@ static void test_v3_nassgicap(void)
 /*
 * Returns 0 if it's possible to create GIC device of a given type (V2 or V3).
 */
-int test_kvm_device(uint32_t gic_dev_type)
+int test_kvm_device(u32 gic_dev_type)
 {
 	struct kvm_vcpu *vcpus[NR_VCPUS];
 	struct vm_gic v;
-	uint32_t other;
+	u32 other;
 	int ret;
 
 	v.vm = vm_create_with_vcpus(NR_VCPUS, guest_code, vcpus);
@@ -968,7 +968,7 @@ static void test_v3_sysregs(void)
 	kvm_vm_free(vm);
 }
 
-void run_tests(uint32_t gic_dev_type)
+void run_tests(u32 gic_dev_type)
 {
 	test_vcpus_then_vgic(gic_dev_type);
 	test_vgic_then_vcpus(gic_dev_type);
@@ -24,12 +24,12 @@
 * function.
 */
 struct test_args {
-	uint32_t nr_irqs; /* number of KVM supported IRQs. */
+	u32 nr_irqs; /* number of KVM supported IRQs. */
 	bool eoi_split; /* 1 is eoir+dir, 0 is eoir only */
 	bool level_sensitive; /* 1 is level, 0 is edge */
 	int kvm_max_routes; /* output of KVM_CAP_IRQ_ROUTING */
 	bool kvm_supports_irqfd; /* output of KVM_CAP_IRQFD */
-	uint32_t shared_data;
+	u32 shared_data;
 };
 
 /*
@@ -64,15 +64,15 @@ typedef enum {
 
 struct kvm_inject_args {
 	kvm_inject_cmd cmd;
-	uint32_t first_intid;
-	uint32_t num;
+	u32 first_intid;
+	u32 num;
 	int level;
 	bool expect_failure;
 };
 
 /* Used on the guest side to perform the hypercall. */
-static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
-			    uint32_t num, int level, bool expect_failure);
+static void kvm_inject_call(kvm_inject_cmd cmd, u32 first_intid,
+			    u32 num, int level, bool expect_failure);
 
 /* Used on the host side to get the hypercall info. */
 static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
@@ -133,8 +133,8 @@ static struct kvm_inject_desc set_active_fns[] = {
 	for_each_supported_inject_fn((args), (t), (f))
 
 /* Shared between the guest main thread and the IRQ handlers. */
-volatile uint64_t irq_handled;
-volatile uint32_t irqnr_received[MAX_SPI + 1];
+volatile u64 irq_handled;
+volatile u32 irqnr_received[MAX_SPI + 1];
 
 static void reset_stats(void)
 {
@@ -145,25 +145,25 @@ static void reset_stats(void)
 		irqnr_received[i] = 0;
 }
 
-static uint64_t gic_read_ap1r0(void)
+static u64 gic_read_ap1r0(void)
 {
-	uint64_t reg = read_sysreg_s(SYS_ICC_AP1R0_EL1);
+	u64 reg = read_sysreg_s(SYS_ICC_AP1R0_EL1);
 
 	dsb(sy);
 	return reg;
 }
 
-static void gic_write_ap1r0(uint64_t val)
+static void gic_write_ap1r0(u64 val)
 {
 	write_sysreg_s(val, SYS_ICC_AP1R0_EL1);
 	isb();
 }
 
-static void guest_set_irq_line(uint32_t intid, uint32_t level);
+static void guest_set_irq_line(u32 intid, u32 level);
 
 static void guest_irq_generic_handler(bool eoi_split, bool level_sensitive)
 {
-	uint32_t intid = gic_get_and_ack_irq();
+	u32 intid = gic_get_and_ack_irq();
 
 	if (intid == IAR_SPURIOUS)
 		return;
@@ -189,8 +189,8 @@ static void guest_irq_generic_handler(bool eoi_split, bool level_sensitive)
 	GUEST_ASSERT(!gic_irq_get_pending(intid));
 }
 
-static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
-			    uint32_t num, int level, bool expect_failure)
+static void kvm_inject_call(kvm_inject_cmd cmd, u32 first_intid,
+			    u32 num, int level, bool expect_failure)
 {
 	struct kvm_inject_args args = {
 		.cmd = cmd,
@@ -204,7 +204,7 @@ static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
 
 #define GUEST_ASSERT_IAR_EMPTY() \
 do { \
-	uint32_t _intid; \
+	u32 _intid; \
 	_intid = gic_get_and_ack_irq(); \
 	GUEST_ASSERT(_intid == IAR_SPURIOUS); \
 } while (0)
@@ -237,13 +237,13 @@ static void reset_priorities(struct test_args *args)
 		gic_set_priority(i, IRQ_DEFAULT_PRIO_REG);
 }
 
-static void guest_set_irq_line(uint32_t intid, uint32_t level)
+static void guest_set_irq_line(u32 intid, u32 level)
 {
 	kvm_inject_call(KVM_SET_IRQ_LINE, intid, 1, level, false);
 }
 
 static void test_inject_fail(struct test_args *args,
-			     uint32_t intid, kvm_inject_cmd cmd)
+			     u32 intid, kvm_inject_cmd cmd)
 {
 	reset_stats();
 
@@ -255,10 +255,10 @@ static void test_inject_fail(struct test_args *args,
 }
 
 static void guest_inject(struct test_args *args,
-			 uint32_t first_intid, uint32_t num,
-			 kvm_inject_cmd cmd)
+			 u32 first_intid, u32 num,
+			 kvm_inject_cmd cmd)
 {
-	uint32_t i;
+	u32 i;
 
 	reset_stats();
 
@@ -292,10 +292,10 @@ static void guest_inject(struct test_args *args,
 * deactivated yet.
 */
 static void guest_restore_active(struct test_args *args,
-				 uint32_t first_intid, uint32_t num,
-				 kvm_inject_cmd cmd)
+				 u32 first_intid, u32 num,
+				 kvm_inject_cmd cmd)
 {
-	uint32_t prio, intid, ap1r;
+	u32 prio, intid, ap1r;
 	int i;
 
 	/*
@@ -342,9 +342,9 @@ static void guest_restore_active(struct test_args *args,
 * This function should only be used in test_inject_preemption (with IRQs
 * masked).
 */
-static uint32_t wait_for_and_activate_irq(void)
+static u32 wait_for_and_activate_irq(void)
 {
-	uint32_t intid;
+	u32 intid;
 
 	do {
 		asm volatile("wfi" : : : "memory");
@@ -360,11 +360,11 @@ static uint32_t wait_for_and_activate_irq(void)
 * interrupts for the whole test.
 */
 static void test_inject_preemption(struct test_args *args,
-				   uint32_t first_intid, int num,
+				   u32 first_intid, int num,
 				   const unsigned long *exclude,
 				   kvm_inject_cmd cmd)
 {
-	uint32_t intid, prio, step = KVM_PRIO_STEPS;
+	u32 intid, prio, step = KVM_PRIO_STEPS;
 	int i;
 
 	/* Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs
@@ -379,7 +379,7 @@ static void test_inject_preemption(struct test_args *args,
 	local_irq_disable();
 
 	for (i = 0; i < num; i++) {
-		uint32_t tmp;
+		u32 tmp;
 		intid = i + first_intid;
 
 		if (exclude && test_bit(i, exclude))
@@ -431,7 +431,7 @@ static void test_inject_preemption(struct test_args *args,
 
 static void test_injection(struct test_args *args, struct kvm_inject_desc *f)
 {
-	uint32_t nr_irqs = args->nr_irqs;
+	u32 nr_irqs = args->nr_irqs;
 
 	if (f->sgi) {
 		guest_inject(args, MIN_SGI, 1, f->cmd);
@@ -451,7 +451,7 @@ static void test_injection(struct test_args *args, struct kvm_inject_desc *f)
 static void test_injection_failure(struct test_args *args,
 				   struct kvm_inject_desc *f)
 {
-	uint32_t bad_intid[] = { args->nr_irqs, 1020, 1024, 1120, 5120, ~0U, };
+	u32 bad_intid[] = { args->nr_irqs, 1020, 1024, 1120, 5120, ~0U, };
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(bad_intid); i++)
@@ -490,7 +490,7 @@ static void test_restore_active(struct test_args *args, struct kvm_inject_desc *
 
 static void guest_code(struct test_args *args)
 {
-	uint32_t i, nr_irqs = args->nr_irqs;
+	u32 i, nr_irqs = args->nr_irqs;
 	bool level_sensitive = args->level_sensitive;
 	struct kvm_inject_desc *f, *inject_fns;
 
@@ -529,8 +529,8 @@ static void guest_code(struct test_args *args)
 	GUEST_DONE();
 }
 
-static void kvm_irq_line_check(struct kvm_vm *vm, uint32_t intid, int level,
-			       struct test_args *test_args, bool expect_failure)
+static void kvm_irq_line_check(struct kvm_vm *vm, u32 intid, int level,
+			       struct test_args *test_args, bool expect_failure)
 {
 	int ret;
 
@@ -548,8 +548,8 @@ static void kvm_irq_line_check(struct kvm_vm *vm, uint32_t intid, int level,
 	}
 }
 
-void kvm_irq_set_level_info_check(int gic_fd, uint32_t intid, int level,
-				  bool expect_failure)
+void kvm_irq_set_level_info_check(int gic_fd, u32 intid, int level,
+				  bool expect_failure)
 {
 	if (!expect_failure) {
 		kvm_irq_set_level_info(gic_fd, intid, level);
@@ -573,17 +573,18 @@ void kvm_irq_set_level_info_check(int gic_fd, uint32_t intid, int level,
 }
 
 static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm,
-		uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
-		bool expect_failure)
+					      u32 intid, u32 num,
+					      u32 kvm_max_routes,
+					      bool expect_failure)
 {
 	struct kvm_irq_routing *routing;
 	int ret;
-	uint64_t i;
+	u64 i;
 
 	assert(num <= kvm_max_routes && kvm_max_routes <= KVM_MAX_IRQ_ROUTES);
 
 	routing = kvm_gsi_routing_create();
-	for (i = intid; i < (uint64_t)intid + num; i++)
+	for (i = intid; i < (u64)intid + num; i++)
 		kvm_gsi_routing_irqchip_add(routing, i - MIN_SPI, i - MIN_SPI);
 
 	if (!expect_failure) {
@@ -591,7 +592,7 @@ static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm,
 	} else {
 		ret = _kvm_gsi_routing_write(vm, routing);
 		/* The kernel only checks e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS */
-		if (((uint64_t)intid + num - 1 - MIN_SPI) >= KVM_IRQCHIP_NUM_PINS)
+		if (((u64)intid + num - 1 - MIN_SPI) >= KVM_IRQCHIP_NUM_PINS)
 			TEST_ASSERT(ret != 0 && errno == EINVAL,
 				    "Bad intid %u did not cause KVM_SET_GSI_ROUTING "
 				    "error: rc: %i errno: %i", intid, ret, errno);
@@ -602,7 +603,7 @@ static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm,
 	}
 }
 
-static void kvm_irq_write_ispendr_check(int gic_fd, uint32_t intid,
+static void kvm_irq_write_ispendr_check(int gic_fd, u32 intid,
 					struct kvm_vcpu *vcpu,
 					bool expect_failure)
 {
@@ -618,13 +619,13 @@ static void kvm_irq_write_ispendr_check(int gic_fd, uint32_t intid,
 }
 
 static void kvm_routing_and_irqfd_check(struct kvm_vm *vm,
-		uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
-		bool expect_failure)
+					u32 intid, u32 num, u32 kvm_max_routes,
+					bool expect_failure)
 {
 	int fd[MAX_SPI];
-	uint64_t val;
+	u64 val;
 	int ret, f;
-	uint64_t i;
+	u64 i;
 
 	/*
 	 * There is no way to try injecting an SGI or PPI as the interface
@@ -643,29 +644,29 @@ static void kvm_routing_and_irqfd_check(struct kvm_vm *vm,
 	 * that no actual interrupt was injected for those cases.
 	 */
 
-	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++)
+	for (f = 0, i = intid; i < (u64)intid + num; i++, f++)
 		fd[f] = kvm_new_eventfd();
 
-	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
-		assert(i <= (uint64_t)UINT_MAX);
+	for (f = 0, i = intid; i < (u64)intid + num; i++, f++) {
+		assert(i <= (u64)UINT_MAX);
 		kvm_assign_irqfd(vm, i - MIN_SPI, fd[f]);
 	}
 
-	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
+	for (f = 0, i = intid; i < (u64)intid + num; i++, f++) {
 		val = 1;
-		ret = write(fd[f], &val, sizeof(uint64_t));
-		TEST_ASSERT(ret == sizeof(uint64_t),
+		ret = write(fd[f], &val, sizeof(u64));
+		TEST_ASSERT(ret == sizeof(u64),
 			    __KVM_SYSCALL_ERROR("write()", ret));
 	}
 
-	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++)
+	for (f = 0, i = intid; i < (u64)intid + num; i++, f++)
 		kvm_close(fd[f]);
 }
 
 /* handles the valid case: intid=0xffffffff num=1 */
 #define for_each_intid(first, num, tmp, i) \
 	for ((tmp) = (i) = (first); \
-		(tmp) < (uint64_t)(first) + (uint64_t)(num); \
+		(tmp) < (u64)(first) + (u64)(num); \
 		(tmp)++, (i)++)
 
 static void run_guest_cmd(struct kvm_vcpu *vcpu, int gic_fd,
@@ -673,13 +674,13 @@ static void run_guest_cmd(struct kvm_vcpu *vcpu, int gic_fd,
 			  struct test_args *test_args)
 {
 	kvm_inject_cmd cmd = inject_args->cmd;
-	uint32_t intid = inject_args->first_intid;
-	uint32_t num = inject_args->num;
+	u32 intid = inject_args->first_intid;
+	u32 num = inject_args->num;
 	int level = inject_args->level;
 	bool expect_failure = inject_args->expect_failure;
 	struct kvm_vm *vm = vcpu->vm;
-	uint64_t tmp;
-	uint32_t i;
+	u64 tmp;
+	u32 i;
 
 	/* handles the valid case: intid=0xffffffff num=1 */
 	assert(intid < UINT_MAX - num || num == 1);
@@ -731,7 +732,7 @@ static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
 				struct kvm_inject_args *args)
 {
 	struct kvm_inject_args *kvm_args_hva;
-	vm_vaddr_t kvm_args_gva;
+	gva_t kvm_args_gva;
 
 	kvm_args_gva = uc->args[1];
 	kvm_args_hva = (struct kvm_inject_args *)addr_gva2hva(vm, kvm_args_gva);
@@ -745,14 +746,14 @@ static void print_args(struct test_args *args)
 		 args->eoi_split);
 }
 
-static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
+static void test_vgic(u32 nr_irqs, bool level_sensitive, bool eoi_split)
 {
 	struct ucall uc;
 	int gic_fd;
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
 	struct kvm_inject_args inject_args;
-	vm_vaddr_t args_gva;
+	gva_t args_gva;
 
 	struct test_args args = {
 		.nr_irqs = nr_irqs,
@@ -770,7 +771,7 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
 	vcpu_init_descriptor_tables(vcpu);
 
 	/* Setup the guest args page (so it gets the args). */
-	args_gva = vm_vaddr_alloc_page(vm);
+	args_gva = vm_alloc_page(vm);
 	memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args));
 	vcpu_args_set(vcpu, 1, args_gva);
 
@@ -810,7 +811,7 @@ static void guest_code_asym_dir(struct test_args *args, int cpuid)
 	gic_set_priority_mask(CPU_PRIO_MASK);
 
 	if (cpuid == 0) {
-		uint32_t intid;
+		u32 intid;
 
 		local_irq_disable();
 
@@ -848,7 +849,7 @@ static void guest_code_asym_dir(struct test_args *args, int cpuid)
 
 static void guest_code_group_en(struct test_args *args, int cpuid)
 {
-	uint32_t intid;
+	u32 intid;
 
 	gic_init(GIC_V3, 2);
 
@@ -896,7 +897,7 @@ static void guest_code_group_en(struct test_args *args, int cpuid)
 
 static void guest_code_timer_spi(struct test_args *args, int cpuid)
 {
-	uint32_t intid;
+	u32 intid;
 	u64 val;
 
 	gic_init(GIC_V3, 2);
@@ -986,7 +987,7 @@ static void test_vgic_two_cpus(void *gcode)
 	struct kvm_vcpu *vcpus[2];
 	struct test_args args = {};
 	struct kvm_vm *vm;
-	vm_vaddr_t args_gva;
+	gva_t args_gva;
 	int gic_fd, ret;
 
 	vm = vm_create_with_vcpus(2, gcode, vcpus);
@@ -996,7 +997,7 @@ static void test_vgic_two_cpus(void *gcode)
 	vcpu_init_descriptor_tables(vcpus[1]);
 
 	/* Setup the guest args page (so it gets the args). */
-	args_gva = vm_vaddr_alloc_page(vm);
+	args_gva = vm_alloc_page(vm);
 	memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args));
 	vcpu_args_set(vcpus[0], 2, args_gva, 0);
 	vcpu_args_set(vcpus[1], 2, args_gva, 1);
@@ -1033,7 +1034,7 @@ static void help(const char *name)
 
 int main(int argc, char **argv)
 {
-	uint32_t nr_irqs = 64;
+	u32 nr_irqs = 64;
 	bool default_args = true;
 	bool level_sensitive = false;
 	int opt;
@@ -23,7 +23,7 @@
 #define GIC_LPI_OFFSET 8192
 
 static size_t nr_iterations = 1000;
-static vm_paddr_t gpa_base;
+static gpa_t gpa_base;
 
 static struct kvm_vm *vm;
 static struct kvm_vcpu **vcpus;
@@ -35,14 +35,14 @@ static struct test_data {
 	u32 nr_devices;
 	u32 nr_event_ids;
 
-	vm_paddr_t device_table;
-	vm_paddr_t collection_table;
-	vm_paddr_t cmdq_base;
+	gpa_t device_table;
+	gpa_t collection_table;
+	gpa_t cmdq_base;
 	void *cmdq_base_va;
-	vm_paddr_t itt_tables;
+	gpa_t itt_tables;
 
-	vm_paddr_t lpi_prop_table;
-	vm_paddr_t lpi_pend_tables;
+	gpa_t lpi_prop_table;
+	gpa_t lpi_pend_tables;
 } test_data = {
 	.nr_cpus = 1,
 	.nr_devices = 1,
@@ -73,7 +73,7 @@ static void guest_setup_its_mappings(void)
 	/* Round-robin the LPIs to all of the vCPUs in the VM */
 	coll_id = 0;
 	for (device_id = 0; device_id < nr_devices; device_id++) {
-		vm_paddr_t itt_base = test_data.itt_tables + (device_id * SZ_64K);
+		gpa_t itt_base = test_data.itt_tables + (device_id * SZ_64K);
 
 		its_send_mapd_cmd(test_data.cmdq_base_va, device_id,
 				  itt_base, SZ_64K, true);
@@ -188,7 +188,7 @@ static void setup_test_data(void)
 	size_t pages_per_64k = vm_calc_num_guest_pages(vm->mode, SZ_64K);
 	u32 nr_devices = test_data.nr_devices;
 	u32 nr_cpus = test_data.nr_cpus;
-	vm_paddr_t cmdq_base;
+	gpa_t cmdq_base;
 
 	test_data.device_table = vm_phy_pages_alloc(vm, pages_per_64k,
 						    gpa_base,
@@ -224,7 +224,7 @@ static void setup_gic(void)
 
 static void signal_lpi(u32 device_id, u32 event_id)
 {
-	vm_paddr_t db_addr = GITS_BASE_GPA + GITS_TRANSLATER;
+	gpa_t db_addr = GITS_BASE_GPA + GITS_TRANSLATER;
 
 	struct kvm_msi msi = {
 		.address_lo = db_addr,
@@ -17,10 +17,10 @@
 struct vm_gic {
 	struct kvm_vm *vm;
 	int gic_fd;
-	uint32_t gic_dev_type;
+	u32 gic_dev_type;
 };
 
-static uint64_t max_phys_size;
+static u64 max_phys_size;
 
 #define GUEST_CMD_IRQ_CDIA 10
 #define GUEST_CMD_IRQ_DIEOI 11
@@ -96,7 +96,7 @@ static void vm_gic_destroy(struct vm_gic *v)
 	kvm_vm_free(v->vm);
 }
 
-static void test_vgic_v5_ppis(uint32_t gic_dev_type)
+static void test_vgic_v5_ppis(u32 gic_dev_type)
 {
 	struct kvm_vcpu *vcpus[NR_VCPUS];
 	struct ucall uc;
@@ -173,7 +173,7 @@ static void test_vgic_v5_ppis(uint32_t gic_dev_type)
 /*
 * Returns 0 if it's possible to create GIC device of a given type (V5).
 */
-int test_kvm_device(uint32_t gic_dev_type)
+int test_kvm_device(u32 gic_dev_type)
 {
 	struct kvm_vcpu *vcpus[NR_VCPUS];
 	struct vm_gic v;
@@ -199,7 +199,7 @@ int test_kvm_device(uint32_t gic_dev_type)
 	return 0;
 }
 
-void run_tests(uint32_t gic_dev_type)
+void run_tests(u32 gic_dev_type)
 {
 	pr_info("Test VGICv5 PPIs\n");
 	test_vgic_v5_ppis(gic_dev_type);
@@ -33,20 +33,20 @@ struct vpmu_vm {
 static struct vpmu_vm vpmu_vm;
 
 struct pmreg_sets {
-	uint64_t set_reg_id;
-	uint64_t clr_reg_id;
+	u64 set_reg_id;
+	u64 clr_reg_id;
 };
 
 #define PMREG_SET(set, clr) {.set_reg_id = set, .clr_reg_id = clr}
 
-static uint64_t get_pmcr_n(uint64_t pmcr)
+static u64 get_pmcr_n(u64 pmcr)
 {
 	return FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
 }
 
-static uint64_t get_counters_mask(uint64_t n)
+static u64 get_counters_mask(u64 n)
 {
-	uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
+	u64 mask = BIT(ARMV8_PMU_CYCLE_IDX);
 
 	if (n)
 		mask |= GENMASK(n - 1, 0);
@@ -89,7 +89,7 @@ static inline void write_sel_evtyper(int sel, unsigned long val)
 
 static void pmu_disable_reset(void)
 {
-	uint64_t pmcr = read_sysreg(pmcr_el0);
+	u64 pmcr = read_sysreg(pmcr_el0);
 
 	/* Reset all counters, disabling them */
 	pmcr &= ~ARMV8_PMU_PMCR_E;
@@ -169,7 +169,7 @@ struct pmc_accessor pmc_accessors[] = {
 
 #define GUEST_ASSERT_BITMAP_REG(regname, mask, set_expected) \
 { \
-	uint64_t _tval = read_sysreg(regname); \
+	u64 _tval = read_sysreg(regname); \
 	\
 	if (set_expected) \
 		__GUEST_ASSERT((_tval & mask), \
@@ -185,7 +185,7 @@ struct pmc_accessor pmc_accessors[] = {
 * Check if @mask bits in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers
 * are set or cleared as specified in @set_expected.
 */
-static void check_bitmap_pmu_regs(uint64_t mask, bool set_expected)
+static void check_bitmap_pmu_regs(u64 mask, bool set_expected)
 {
 	GUEST_ASSERT_BITMAP_REG(pmcntenset_el0, mask, set_expected);
 	GUEST_ASSERT_BITMAP_REG(pmcntenclr_el0, mask, set_expected);
@@ -207,7 +207,7 @@ static void check_bitmap_pmu_regs(uint64_t mask, bool set_expected)
 */
 static void test_bitmap_pmu_regs(int pmc_idx, bool set_op)
 {
-	uint64_t pmcr_n, test_bit = BIT(pmc_idx);
+	u64 pmcr_n, test_bit = BIT(pmc_idx);
 	bool set_expected = false;
 
 	if (set_op) {
@@ -232,7 +232,7 @@ static void test_bitmap_pmu_regs(int pmc_idx, bool set_op)
 */
 static void test_access_pmc_regs(struct pmc_accessor *acc, int pmc_idx)
 {
-	uint64_t write_data, read_data;
+	u64 write_data, read_data;
 
 	/* Disable all PMCs and reset all PMCs to zero. */
 	pmu_disable_reset();
@@ -287,11 +287,11 @@ static void test_access_pmc_regs(struct pmc_accessor *acc, int pmc_idx)
 }
 
 #define INVALID_EC (-1ul)
-uint64_t expected_ec = INVALID_EC;
+u64 expected_ec = INVALID_EC;
 
 static void guest_sync_handler(struct ex_regs *regs)
 {
-	uint64_t esr, ec;
+	u64 esr, ec;
 
 	esr = read_sysreg(esr_el1);
 	ec = ESR_ELx_EC(esr);
@@ -351,9 +351,9 @@ static void test_access_invalid_pmc_regs(struct pmc_accessor *acc, int pmc_idx)
 * if reading/writing PMU registers for implemented or unimplemented
 * counters works as expected.
 */
-static void guest_code(uint64_t expected_pmcr_n)
+static void guest_code(u64 expected_pmcr_n)
 {
-	uint64_t pmcr, pmcr_n, unimp_mask;
+	u64 pmcr, pmcr_n, unimp_mask;
 	int i, pmc;
 
 	__GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS,
@@ -402,12 +402,12 @@ static void guest_code(uint64_t expected_pmcr_n)
 static void create_vpmu_vm(void *guest_code)
 {
 	struct kvm_vcpu_init init;
-	uint8_t pmuver, ec;
-	uint64_t dfr0, irq = 23;
+	u8 pmuver, ec;
+	u64 dfr0, irq = 23;
 	struct kvm_device_attr irq_attr = {
 		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
 		.attr = KVM_ARM_VCPU_PMU_V3_IRQ,
-		.addr = (uint64_t)&irq,
+		.addr = (u64)&irq,
 	};
 
 	/* The test creates the vpmu_vm multiple times. Ensure a clean state */
@@ -443,7 +443,7 @@ static void destroy_vpmu_vm(void)
 	kvm_vm_free(vpmu_vm.vm);
 }
 
-static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n)
+static void run_vcpu(struct kvm_vcpu *vcpu, u64 pmcr_n)
 {
 	struct ucall uc;
 
@@ -489,9 +489,9 @@ static void test_create_vpmu_vm_with_nr_counters(unsigned int nr_counters, bool
 * Create a guest with one vCPU, set the PMCR_EL0.N for the vCPU to @pmcr_n,
 * and run the test.
 */
-static void run_access_test(uint64_t pmcr_n)
+static void run_access_test(u64 pmcr_n)
 {
-	uint64_t sp;
+	u64 sp;
 	struct kvm_vcpu *vcpu;
 	struct kvm_vcpu_init init;
 
@@ -514,7 +514,7 @@ static void run_access_test(uint64_t pmcr_n)
 	aarch64_vcpu_setup(vcpu, &init);
 	vcpu_init_descriptor_tables(vcpu);
 	vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1), sp);
-	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
+	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (u64)guest_code);
 
 	run_vcpu(vcpu, pmcr_n);
 
@@ -531,12 +531,12 @@ static struct pmreg_sets validity_check_reg_sets[] = {
 * Create a VM, and check if KVM handles the userspace accesses of
 * the PMU register sets in @validity_check_reg_sets[] correctly.
 */
-static void run_pmregs_validity_test(uint64_t pmcr_n)
+static void run_pmregs_validity_test(u64 pmcr_n)
 {
 	int i;
 	struct kvm_vcpu *vcpu;
-	uint64_t set_reg_id, clr_reg_id, reg_val;
-	uint64_t valid_counters_mask, max_counters_mask;
+	u64 set_reg_id, clr_reg_id, reg_val;
+	u64 valid_counters_mask, max_counters_mask;
 
 	test_create_vpmu_vm_with_nr_counters(pmcr_n, false);
 	vcpu = vpmu_vm.vcpu;
@@ -588,7 +588,7 @@ static void run_pmregs_validity_test(uint64_t pmcr_n)
 * the vCPU to @pmcr_n, which is larger than the host value.
 * The attempt should fail as @pmcr_n is too big to set for the vCPU.
 */
-static void run_error_test(uint64_t pmcr_n)
+static void run_error_test(u64 pmcr_n)
 {
 	pr_debug("Error test with pmcr_n %lu (larger than the host)\n", pmcr_n);
 
@@ -600,9 +600,9 @@ static void run_error_test(uint64_t pmcr_n)
 * Return the default number of implemented PMU event counters excluding
 * the cycle counter (i.e. PMCR_EL0.N value) for the guest.
 */
-static uint64_t get_pmcr_n_limit(void)
+static u64 get_pmcr_n_limit(void)
 {
-	uint64_t pmcr;
+	u64 pmcr;
 
 	create_vpmu_vm(guest_code);
 	pmcr = vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0));
@@ -624,7 +624,7 @@ static bool kvm_supports_nr_counters_attr(void)
 
 int main(void)
 {
-	uint64_t i, pmcr_n;
+	u64 i, pmcr_n;
 
 	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3));
 	TEST_REQUIRE(kvm_supports_vgic_v3());
@@ -14,16 +14,16 @@

 struct kvm_coalesced_io {
 	struct kvm_coalesced_mmio_ring *ring;
-	uint32_t ring_size;
-	uint64_t mmio_gpa;
-	uint64_t *mmio;
+	u32 ring_size;
+	u64 mmio_gpa;
+	u64 *mmio;

 	/*
 	 * x86-only, but define pio_port for all architectures to minimize the
 	 * amount of #ifdeffery and complexity, without having to sacrifice
 	 * verbose error messages.
 	 */
-	uint8_t pio_port;
+	u8 pio_port;
 };

 static struct kvm_coalesced_io kvm_builtin_io_ring;
@@ -70,13 +70,13 @@ static void guest_code(struct kvm_coalesced_io *io)

 static void vcpu_run_and_verify_io_exit(struct kvm_vcpu *vcpu,
 					struct kvm_coalesced_io *io,
-					uint32_t ring_start,
-					uint32_t expected_exit)
+					u32 ring_start,
+					u32 expected_exit)
 {
 	const bool want_pio = expected_exit == KVM_EXIT_IO;
 	struct kvm_coalesced_mmio_ring *ring = io->ring;
 	struct kvm_run *run = vcpu->run;
-	uint32_t pio_value;
+	u32 pio_value;

 	WRITE_ONCE(ring->first, ring_start);
 	WRITE_ONCE(ring->last, ring_start);
@@ -88,13 +88,13 @@ static void vcpu_run_and_verify_io_exit(struct kvm_vcpu *vcpu,
 	 * data_offset is garbage, e.g. an MMIO gpa.
 	 */
 	if (run->exit_reason == KVM_EXIT_IO)
-		pio_value = *(uint32_t *)((void *)run + run->io.data_offset);
+		pio_value = *(u32 *)((void *)run + run->io.data_offset);
 	else
 		pio_value = 0;

 	TEST_ASSERT((!want_pio && (run->exit_reason == KVM_EXIT_MMIO && run->mmio.is_write &&
 		     run->mmio.phys_addr == io->mmio_gpa && run->mmio.len == 8 &&
-		     *(uint64_t *)run->mmio.data == io->mmio_gpa + io->ring_size - 1)) ||
+		     *(u64 *)run->mmio.data == io->mmio_gpa + io->ring_size - 1)) ||
 		    (want_pio && (run->exit_reason == KVM_EXIT_IO && run->io.port == io->pio_port &&
 				  run->io.direction == KVM_EXIT_IO_OUT && run->io.count == 1 &&
 				  pio_value == io->pio_port + io->ring_size - 1)),
@@ -105,14 +105,14 @@ static void vcpu_run_and_verify_io_exit(struct kvm_vcpu *vcpu,
 		    want_pio ? (unsigned long long)io->pio_port : io->mmio_gpa,
 		    (want_pio ? io->pio_port : io->mmio_gpa) + io->ring_size - 1, run->exit_reason,
 		    run->exit_reason == KVM_EXIT_MMIO ? "MMIO" : run->exit_reason == KVM_EXIT_IO ? "PIO" : "other",
-		    run->mmio.phys_addr, run->mmio.is_write, run->mmio.len, *(uint64_t *)run->mmio.data,
+		    run->mmio.phys_addr, run->mmio.is_write, run->mmio.len, *(u64 *)run->mmio.data,
 		    run->io.port, run->io.direction, run->io.size, run->io.count, pio_value);
 }

 static void vcpu_run_and_verify_coalesced_io(struct kvm_vcpu *vcpu,
 					     struct kvm_coalesced_io *io,
-					     uint32_t ring_start,
-					     uint32_t expected_exit)
+					     u32 ring_start,
+					     u32 expected_exit)
 {
 	struct kvm_coalesced_mmio_ring *ring = io->ring;
 	int i;
@@ -124,18 +124,18 @@ static void vcpu_run_and_verify_coalesced_io(struct kvm_vcpu *vcpu,
 		    ring->first, ring->last, io->ring_size, ring_start);

 	for (i = 0; i < io->ring_size - 1; i++) {
-		uint32_t idx = (ring->first + i) % io->ring_size;
+		u32 idx = (ring->first + i) % io->ring_size;
 		struct kvm_coalesced_mmio *entry = &ring->coalesced_mmio[idx];

 #ifdef __x86_64__
 		if (i & 1)
 			TEST_ASSERT(entry->phys_addr == io->pio_port &&
 				    entry->len == 4 && entry->pio &&
-				    *(uint32_t *)entry->data == io->pio_port + i,
+				    *(u32 *)entry->data == io->pio_port + i,
 				    "Wanted 4-byte port I/O 0x%x = 0x%x in entry %u, got %u-byte %s 0x%llx = 0x%x",
 				    io->pio_port, io->pio_port + i, i,
 				    entry->len, entry->pio ? "PIO" : "MMIO",
-				    entry->phys_addr, *(uint32_t *)entry->data);
+				    entry->phys_addr, *(u32 *)entry->data);
 		else
 #endif
 			TEST_ASSERT(entry->phys_addr == io->mmio_gpa &&
@@ -143,12 +143,12 @@ static void vcpu_run_and_verify_coalesced_io(struct kvm_vcpu *vcpu,
 				    "Wanted 8-byte MMIO to 0x%lx = %lx in entry %u, got %u-byte %s 0x%llx = 0x%lx",
 				    io->mmio_gpa, io->mmio_gpa + i, i,
 				    entry->len, entry->pio ? "PIO" : "MMIO",
-				    entry->phys_addr, *(uint64_t *)entry->data);
+				    entry->phys_addr, *(u64 *)entry->data);
 	}
 }

 static void test_coalesced_io(struct kvm_vcpu *vcpu,
-			      struct kvm_coalesced_io *io, uint32_t ring_start)
+			      struct kvm_coalesced_io *io, u32 ring_start)
 {
 	struct kvm_coalesced_mmio_ring *ring = io->ring;

@@ -219,11 +219,11 @@ int main(int argc, char *argv[])
 		 * the MMIO GPA identity mapped in the guest.
 		 */
 		.mmio_gpa = 4ull * SZ_1G,
-		.mmio = (uint64_t *)(4ull * SZ_1G),
+		.mmio = (u64 *)(4ull * SZ_1G),
 		.pio_port = 0x80,
 	};

-	virt_map(vm, (uint64_t)kvm_builtin_io_ring.mmio, kvm_builtin_io_ring.mmio_gpa, 1);
+	virt_map(vm, (u64)kvm_builtin_io_ring.mmio, kvm_builtin_io_ring.mmio_gpa, 1);

 	sync_global_to_guest(vm, kvm_builtin_io_ring);
 	vcpu_args_set(vcpu, 1, &kvm_builtin_io_ring);
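For context on the ring verified above: in KVM's coalesced-I/O protocol, userspace owns 'first' and KVM owns 'last', so a consumer reads the entry at 'first' and then advances it. A minimal sketch of draining the ring with the renamed types, assuming the selftests' READ_ONCE/WRITE_ONCE helpers and a ring registered as in this test:

#include <linux/kvm.h>

static void drain_coalesced_ring(struct kvm_coalesced_mmio_ring *ring,
				 u32 ring_size)
{
	while (READ_ONCE(ring->first) != READ_ONCE(ring->last)) {
		struct kvm_coalesced_mmio *e =
			&ring->coalesced_mmio[ring->first];

		/* ... consume e->phys_addr, e->len, e->data here ... */

		WRITE_ONCE(ring->first, (ring->first + 1) % ring_size);
	}
}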
@@ -24,7 +24,7 @@
 #ifdef __NR_userfaultfd

 static int nr_vcpus = 1;
-static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
+static u64 guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;

 static size_t demand_paging_size;
 static char *guest_data_prototype;
@@ -58,7 +58,7 @@ static int handle_uffd_page_request(int uffd_mode, int uffd,
 				    struct uffd_msg *msg)
 {
 	pid_t tid = syscall(__NR_gettid);
-	uint64_t addr = msg->arg.pagefault.address;
+	u64 addr = msg->arg.pagefault.address;
 	struct timespec start;
 	struct timespec ts_diff;
 	int r;
@@ -68,7 +68,7 @@ static int handle_uffd_page_request(int uffd_mode, int uffd,
 	if (uffd_mode == UFFDIO_REGISTER_MODE_MISSING) {
 		struct uffdio_copy copy;

-		copy.src = (uint64_t)guest_data_prototype;
+		copy.src = (u64)guest_data_prototype;
 		copy.dst = addr;
 		copy.len = demand_paging_size;
 		copy.mode = 0;
@@ -138,7 +138,7 @@ struct test_params {
 	bool partition_vcpu_memory_access;
 };

-static void prefault_mem(void *alias, uint64_t len)
+static void prefault_mem(void *alias, u64 len)
 {
 	size_t p;

@@ -154,7 +154,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	struct memstress_vcpu_args *vcpu_args;
 	struct test_params *p = arg;
 	struct uffd_desc **uffd_descs = NULL;
-	uint64_t uffd_region_size;
+	u64 uffd_region_size;
 	struct timespec start;
 	struct timespec ts_diff;
 	double vcpu_paging_rate;
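The UFFDIO_COPY call above is the heart of demand paging: the fault handler copies a prototype page into the faulting address. A standalone sketch of the same resolution step, relying only on the uffd UAPI; the function name is hypothetical and error handling is minimal:

#include <linux/userfaultfd.h>
#include <linux/types.h>
#include <sys/ioctl.h>

static int resolve_missing_fault(int uffd, __u64 fault_addr,
				 void *prototype, __u64 page_size)
{
	struct uffdio_copy copy = {
		.src  = (__u64)prototype,
		.dst  = fault_addr & ~(page_size - 1),	/* page-align dst */
		.len  = page_size,
		.mode = 0,
	};

	return ioctl(uffd, UFFDIO_COPY, &copy);	/* < 0 on failure */
}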
@@ -24,7 +24,7 @@
 #define TEST_HOST_LOOP_N	2UL

 static int nr_vcpus = 1;
-static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
+static u64 guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
 static bool run_vcpus_while_disabling_dirty_logging;

 /* Host variables */
@@ -37,7 +37,7 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
 {
 	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
 	int vcpu_idx = vcpu_args->vcpu_idx;
-	uint64_t pages_count = 0;
+	u64 pages_count = 0;
 	struct kvm_run *run;
 	struct timespec start;
 	struct timespec ts_diff;
@@ -93,11 +93,11 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)

 struct test_params {
 	unsigned long iterations;
-	uint64_t phys_offset;
+	u64 phys_offset;
 	bool partition_vcpu_memory_access;
 	enum vm_mem_backing_src_type backing_src;
 	int slots;
-	uint32_t write_percent;
+	u32 write_percent;
 	bool random_access;
 };

@@ -106,9 +106,9 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	struct test_params *p = arg;
 	struct kvm_vm *vm;
 	unsigned long **bitmaps;
-	uint64_t guest_num_pages;
-	uint64_t host_num_pages;
-	uint64_t pages_per_slot;
+	u64 guest_num_pages;
+	u64 host_num_pages;
+	u64 pages_per_slot;
 	struct timespec start;
 	struct timespec ts_diff;
 	struct timespec get_dirty_log_total = (struct timespec){0};
@@ -74,11 +74,11 @@
  * the host. READ/WRITE_ONCE() should also be used with anything
  * that may change.
  */
-static uint64_t host_page_size;
-static uint64_t guest_page_size;
-static uint64_t guest_num_pages;
-static uint64_t iteration;
-static uint64_t nr_writes;
+static u64 host_page_size;
+static u64 guest_page_size;
+static u64 guest_num_pages;
+static u64 iteration;
+static u64 nr_writes;
 static bool vcpu_stop;

 /*
@@ -86,13 +86,13 @@ static bool vcpu_stop;
  * This will be set to the topmost valid physical address minus
  * the test memory size.
  */
-static uint64_t guest_test_phys_mem;
+static u64 guest_test_phys_mem;

 /*
  * Guest virtual memory offset of the testing memory slot.
  * Must not conflict with identity mapped test code.
  */
-static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
+static u64 guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;

 /*
  * Continuously write to the first 8 bytes of a random pages within
@@ -100,10 +100,10 @@ static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
  */
 static void guest_code(void)
 {
-	uint64_t addr;
+	u64 addr;

 #ifdef __s390x__
-	uint64_t i;
+	u64 i;

 	/*
 	 * On s390x, all pages of a 1M segment are initially marked as dirty
@@ -113,7 +113,7 @@ static void guest_code(void)
 	 */
 	for (i = 0; i < guest_num_pages; i++) {
 		addr = guest_test_virt_mem + i * guest_page_size;
-		vcpu_arch_put_guest(*(uint64_t *)addr, READ_ONCE(iteration));
+		vcpu_arch_put_guest(*(u64 *)addr, READ_ONCE(iteration));
 		nr_writes++;
 	}
 #endif
@@ -125,7 +125,7 @@ static void guest_code(void)
 			* guest_page_size;
 		addr = align_down(addr, host_page_size);

-		vcpu_arch_put_guest(*(uint64_t *)addr, READ_ONCE(iteration));
+		vcpu_arch_put_guest(*(u64 *)addr, READ_ONCE(iteration));
 		nr_writes++;
 	}

@@ -138,11 +138,11 @@ static bool host_quit;

 /* Points to the test VM memory region on which we track dirty logs */
 static void *host_test_mem;
-static uint64_t host_num_pages;
+static u64 host_num_pages;

 /* For statistics only */
-static uint64_t host_dirty_count;
-static uint64_t host_clear_count;
+static u64 host_dirty_count;
+static u64 host_clear_count;

 /* Whether dirty ring reset is requested, or finished */
 static sem_t sem_vcpu_stop;
@@ -169,7 +169,7 @@ static bool dirty_ring_vcpu_ring_full;
  * dirty gfn we've collected, so that if a mismatch of data found later in the
  * verifying process, we let it pass.
  */
-static uint64_t dirty_ring_last_page = -1ULL;
+static u64 dirty_ring_last_page = -1ULL;

 /*
  * In addition to the above, it is possible (especially if this
@@ -213,7 +213,7 @@ static uint64_t dirty_ring_last_page = -1ULL;
  * and also don't fail when it is reported in the next iteration, together with
  * an outdated iteration count.
  */
-static uint64_t dirty_ring_prev_iteration_last_page;
+static u64 dirty_ring_prev_iteration_last_page;

 enum log_mode_t {
 	/* Only use KVM_GET_DIRTY_LOG for logging */
@@ -236,7 +236,7 @@ static enum log_mode_t host_log_mode_option = LOG_MODE_ALL;
 /* Logging mode for current run */
 static enum log_mode_t host_log_mode;
 static pthread_t vcpu_thread;
-static uint32_t test_dirty_ring_count = TEST_DIRTY_RING_COUNT;
+static u32 test_dirty_ring_count = TEST_DIRTY_RING_COUNT;

 static bool clear_log_supported(void)
 {
@@ -255,15 +255,15 @@ static void clear_log_create_vm_done(struct kvm_vm *vm)
 }

 static void dirty_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
-					  void *bitmap, uint32_t num_pages,
-					  uint32_t *unused)
+					  void *bitmap, u32 num_pages,
+					  u32 *unused)
 {
 	kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
 }

 static void clear_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
-					  void *bitmap, uint32_t num_pages,
-					  uint32_t *unused)
+					  void *bitmap, u32 num_pages,
+					  u32 *unused)
 {
 	kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
 	kvm_vm_clear_dirty_log(vcpu->vm, slot, bitmap, 0, num_pages);
@@ -297,8 +297,8 @@ static bool dirty_ring_supported(void)

 static void dirty_ring_create_vm_done(struct kvm_vm *vm)
 {
-	uint64_t pages;
-	uint32_t limit;
+	u64 pages;
+	u32 limit;

 	/*
 	 * We rely on vcpu exit due to full dirty ring state. Adjust
@@ -333,12 +333,12 @@ static inline void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
 	smp_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET);
 }

-static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
-				       int slot, void *bitmap,
-				       uint32_t num_pages, uint32_t *fetch_index)
+static u32 dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
+				  int slot, void *bitmap,
+				  u32 num_pages, u32 *fetch_index)
 {
 	struct kvm_dirty_gfn *cur;
-	uint32_t count = 0;
+	u32 count = 0;

 	while (true) {
 		cur = &dirty_gfns[*fetch_index % test_dirty_ring_count];
@@ -359,10 +359,10 @@ static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
 }

 static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
-					   void *bitmap, uint32_t num_pages,
-					   uint32_t *ring_buf_idx)
+					   void *bitmap, u32 num_pages,
+					   u32 *ring_buf_idx)
 {
-	uint32_t count, cleared;
+	u32 count, cleared;

 	/* Only have one vcpu */
 	count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu),
@@ -404,8 +404,8 @@ struct log_mode {
 	void (*create_vm_done)(struct kvm_vm *vm);
 	/* Hook to collect the dirty pages into the bitmap provided */
 	void (*collect_dirty_pages) (struct kvm_vcpu *vcpu, int slot,
-				     void *bitmap, uint32_t num_pages,
-				     uint32_t *ring_buf_idx);
+				     void *bitmap, u32 num_pages,
+				     u32 *ring_buf_idx);
 	/* Hook to call when after each vcpu run */
 	void (*after_vcpu_run)(struct kvm_vcpu *vcpu);
 } log_modes[LOG_MODE_NUM] = {
@@ -459,8 +459,8 @@ static void log_mode_create_vm_done(struct kvm_vm *vm)
 }

 static void log_mode_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
-					 void *bitmap, uint32_t num_pages,
-					 uint32_t *ring_buf_idx)
+					 void *bitmap, u32 num_pages,
+					 u32 *ring_buf_idx)
 {
 	struct log_mode *mode = &log_modes[host_log_mode];

@@ -494,11 +494,11 @@ static void *vcpu_worker(void *data)

 static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long **bmap)
 {
-	uint64_t page, nr_dirty_pages = 0, nr_clean_pages = 0;
-	uint64_t step = vm_num_host_pages(mode, 1);
+	u64 page, nr_dirty_pages = 0, nr_clean_pages = 0;
+	u64 step = vm_num_host_pages(mode, 1);

 	for (page = 0; page < host_num_pages; page += step) {
-		uint64_t val = *(uint64_t *)(host_test_mem + page * host_page_size);
+		u64 val = *(u64 *)(host_test_mem + page * host_page_size);
 		bool bmap0_dirty = __test_and_clear_bit_le(page, bmap[0]);

 		/*
@@ -575,7 +575,7 @@ static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long **bmap)
 }

 static struct kvm_vm *create_vm(enum vm_guest_mode mode, struct kvm_vcpu **vcpu,
-				uint64_t extra_mem_pages, void *guest_code)
+				u64 extra_mem_pages, void *guest_code)
 {
 	struct kvm_vm *vm;

@@ -592,7 +592,7 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, struct kvm_vcpu **vcpu,
 struct test_params {
 	unsigned long iterations;
 	unsigned long interval;
-	uint64_t phys_offset;
+	u64 phys_offset;
 };

 static void run_test(enum vm_guest_mode mode, void *arg)
@@ -601,7 +601,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
 	unsigned long *bmap[2];
-	uint32_t ring_buf_idx = 0;
+	u32 ring_buf_idx = 0;
 	int sem_val;

 	if (!log_mode_supported()) {
@@ -667,7 +667,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);

 	/* Cache the HVA pointer of the region */
-	host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);
+	host_test_mem = addr_gpa2hva(vm, (gpa_t)guest_test_phys_mem);

 	/* Export the shared variables to the guest */
 	sync_global_to_guest(vm, host_page_size);
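The harvest protocol that dirty_ring_collect_one() implements pairs an acquire load of the DIRTY flag with the release store of RESET done by dirty_gfn_set_collected() above. A condensed sketch of that loop, assuming the kvm_dirty_gfn UAPI and the tools' smp_load_acquire/smp_store_release helpers; the function name and the bitmap update are placeholders:

static u32 harvest_dirty_ring(struct kvm_dirty_gfn *gfns, u32 ring_count,
			      u32 *fetch_index)
{
	u32 collected = 0;

	for (;;) {
		struct kvm_dirty_gfn *cur = &gfns[*fetch_index % ring_count];

		/* Entry is valid only once KVM publishes the DIRTY flag. */
		if (!(smp_load_acquire(&cur->flags) & KVM_DIRTY_GFN_F_DIRTY))
			break;

		/* ... record cur->slot / cur->offset in a bitmap ... */

		/* Hand the entry back to KVM for reuse. */
		smp_store_release(&cur->flags, KVM_DIRTY_GFN_F_RESET);
		(*fetch_index)++;
		collected++;
	}
	return collected;
}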
@@ -216,7 +216,7 @@ static void run_test(struct vcpu_reg_list *c)
 	 * since we don't know the capabilities of any new registers.
 	 */
 	for_each_present_blessed_reg(i) {
-		uint8_t addr[2048 / 8];
+		u8 addr[2048 / 8];
 		struct kvm_one_reg reg = {
 			.id = reg_list->reg[i],
 			.addr = (__u64)&addr,
@@ -171,7 +171,7 @@ static void test_numa_allocation(int fd, size_t total_size)
 	kvm_munmap(mem, total_size);
 }

-static void test_collapse(int fd, uint64_t flags)
+static void test_collapse(int fd, u64 flags)
 {
 	const size_t pmd_size = get_trans_hugepagesz();
 	void *reserved_addr;
@@ -346,7 +346,7 @@ static void test_invalid_punch_hole(int fd, size_t total_size)
 }

 static void test_create_guest_memfd_invalid_sizes(struct kvm_vm *vm,
-						  uint64_t guest_memfd_flags)
+						  u64 guest_memfd_flags)
 {
 	size_t size;
 	int fd;
@@ -389,8 +389,8 @@ static void test_create_guest_memfd_multiple(struct kvm_vm *vm)

 static void test_guest_memfd_flags(struct kvm_vm *vm)
 {
-	uint64_t valid_flags = vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS);
-	uint64_t flag;
+	u64 valid_flags = vm_check_cap(vm, KVM_CAP_GUEST_MEMFD_FLAGS);
+	u64 flag;
 	int fd;

 	for (flag = BIT(0); flag; flag <<= 1) {
@@ -419,7 +419,7 @@ do { \
 #define gmem_test(__test, __vm, __flags) \
 	__gmem_test(__test, __vm, __flags, page_size * 4)

-static void __test_guest_memfd(struct kvm_vm *vm, uint64_t flags)
+static void __test_guest_memfd(struct kvm_vm *vm, u64 flags)
 {
 	test_create_guest_memfd_multiple(vm);
 	test_create_guest_memfd_invalid_sizes(vm, flags);
@@ -452,7 +452,7 @@ static void __test_guest_memfd(struct kvm_vm *vm, uint64_t flags)
 static void test_guest_memfd(unsigned long vm_type)
 {
 	struct kvm_vm *vm = vm_create_barebones_type(vm_type);
-	uint64_t flags;
+	u64 flags;

 	test_guest_memfd_flags(vm);

@@ -470,7 +470,7 @@ static void test_guest_memfd(unsigned long vm_type)
 	kvm_vm_free(vm);
 }

-static void guest_code(uint8_t *mem, uint64_t size)
+static void guest_code(u8 *mem, u64 size)
 {
 	size_t i;

@@ -489,12 +489,12 @@ static void test_guest_memfd_guest(void)
 	 * the guest's code, stack, and page tables, and low memory contains
 	 * the PCI hole and other MMIO regions that need to be avoided.
 	 */
-	const uint64_t gpa = SZ_4G;
+	const gpa_t gpa = SZ_4G;
 	const int slot = 1;

 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
-	uint8_t *mem;
+	u8 *mem;
 	size_t size;
 	int fd, i;
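test_guest_memfd_flags() walks every flag bit and expects creation to succeed exactly for the bits advertised by KVM_CAP_GUEST_MEMFD_FLAGS. A condensed sketch of that probing pattern with the renamed types, assuming the selftests' __vm_create_guest_memfd() and TEST_ASSERT() helpers; the function name is hypothetical:

#include <unistd.h>

static void probe_guest_memfd_flags(struct kvm_vm *vm, u64 valid_flags)
{
	u64 flag;

	for (flag = BIT(0); flag; flag <<= 1) {
		int fd = __vm_create_guest_memfd(vm, getpagesize(), flag);

		if (flag & valid_flags)
			TEST_ASSERT(fd >= 0, "flag 0x%lx should be accepted", flag);
		else
			TEST_ASSERT(fd < 0, "flag 0x%lx should be rejected", flag);
		if (fd >= 0)
			close(fd);
	}
}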
@@ -16,22 +16,22 @@
 #include "ucall_common.h"

 struct guest_vals {
-	uint64_t a;
-	uint64_t b;
-	uint64_t type;
+	u64 a;
+	u64 b;
+	u64 type;
 };

 static struct guest_vals vals;

 /* GUEST_PRINTF()/GUEST_ASSERT_FMT() does not support float or double. */
 #define TYPE_LIST \
-TYPE(test_type_i64,  I64,  "%ld",   int64_t) \
-TYPE(test_type_u64,  U64u, "%lu",   uint64_t) \
-TYPE(test_type_x64,  U64x, "0x%lx", uint64_t) \
-TYPE(test_type_X64,  U64X, "0x%lX", uint64_t) \
-TYPE(test_type_u32,  U32u, "%u",    uint32_t) \
-TYPE(test_type_x32,  U32x, "0x%x",  uint32_t) \
-TYPE(test_type_X32,  U32X, "0x%X",  uint32_t) \
+TYPE(test_type_i64,  I64,  "%ld",   s64) \
+TYPE(test_type_u64,  U64u, "%lu",   u64) \
+TYPE(test_type_x64,  U64x, "0x%lx", u64) \
+TYPE(test_type_X64,  U64X, "0x%lX", u64) \
+TYPE(test_type_u32,  U32u, "%u",    u32) \
+TYPE(test_type_x32,  U32x, "0x%x",  u32) \
+TYPE(test_type_X32,  U32X, "0x%X",  u32) \
 TYPE(test_type_int,  INT,  "%d",    int) \
 TYPE(test_type_char, CHAR, "%c",    char) \
 TYPE(test_type_str,  STR,  "'%s'",  const char *) \
@@ -56,7 +56,7 @@ static void fn(struct kvm_vcpu *vcpu, T a, T b) \
 	\
 	snprintf(expected_printf, UCALL_BUFFER_LEN, PRINTF_FMT_##ext, a, b); \
 	snprintf(expected_assert, UCALL_BUFFER_LEN, ASSERT_FMT_##ext, a, b); \
-	vals = (struct guest_vals){ (uint64_t)a, (uint64_t)b, TYPE_##ext }; \
+	vals = (struct guest_vals){ (u64)a, (u64)b, TYPE_##ext }; \
 	sync_global_to_guest(vcpu->vm, vals); \
 	run_test(vcpu, expected_printf, expected_assert); \
 }
@@ -80,7 +80,7 @@ static inline void check_join(pthread_t thread, void **retval)
 	TEST_ASSERT(r == 0, "%s: failed to join thread", __func__);
 }

-static void run_test(uint32_t run)
+static void run_test(u32 run)
 {
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
@@ -88,7 +88,7 @@ static void run_test(uint32_t run)
 	pthread_t threads[VCPU_NUM];
 	pthread_t throw_away;
 	void *b;
-	uint32_t i, j;
+	u32 i, j;

 	CPU_ZERO(&cpu_set);
 	for (i = 0; i < VCPU_NUM; i++)
@@ -149,7 +149,7 @@ void wait_for_child_setup(pid_t pid)

 int main(int argc, char **argv)
 {
-	uint32_t i;
+	u32 i;
 	int s, r;
 	pid_t pid;
@@ -18,20 +18,20 @@ enum arch_timer {
 #define CTL_ISTATUS	(1 << 2)

 #define msec_to_cycles(msec) \
-	(timer_get_cntfrq() * (uint64_t)(msec) / 1000)
+	(timer_get_cntfrq() * (u64)(msec) / 1000)

 #define usec_to_cycles(usec) \
-	(timer_get_cntfrq() * (uint64_t)(usec) / 1000000)
+	(timer_get_cntfrq() * (u64)(usec) / 1000000)

 #define cycles_to_usec(cycles) \
-	((uint64_t)(cycles) * 1000000 / timer_get_cntfrq())
+	((u64)(cycles) * 1000000 / timer_get_cntfrq())

-static inline uint32_t timer_get_cntfrq(void)
+static inline u32 timer_get_cntfrq(void)
 {
 	return read_sysreg(cntfrq_el0);
 }

-static inline uint64_t timer_get_cntct(enum arch_timer timer)
+static inline u64 timer_get_cntct(enum arch_timer timer)
 {
 	isb();

@@ -48,7 +48,7 @@ static inline uint64_t timer_get_cntct(enum arch_timer timer)
 	return 0;
 }

-static inline void timer_set_cval(enum arch_timer timer, uint64_t cval)
+static inline void timer_set_cval(enum arch_timer timer, u64 cval)
 {
 	switch (timer) {
 	case VIRTUAL:
@@ -64,7 +64,7 @@ static inline void timer_set_cval(enum arch_timer timer, uint64_t cval)
 	isb();
 }

-static inline uint64_t timer_get_cval(enum arch_timer timer)
+static inline u64 timer_get_cval(enum arch_timer timer)
 {
 	switch (timer) {
 	case VIRTUAL:
@@ -79,7 +79,7 @@ static inline uint64_t timer_get_cval(enum arch_timer timer)
 	return 0;
 }

-static inline void timer_set_tval(enum arch_timer timer, int32_t tval)
+static inline void timer_set_tval(enum arch_timer timer, s32 tval)
 {
 	switch (timer) {
 	case VIRTUAL:
@@ -95,7 +95,7 @@ static inline void timer_set_tval(enum arch_timer timer, int32_t tval)
 	isb();
 }

-static inline int32_t timer_get_tval(enum arch_timer timer)
+static inline s32 timer_get_tval(enum arch_timer timer)
 {
 	isb();
 	switch (timer) {
@@ -111,7 +111,7 @@ static inline int32_t timer_get_tval(enum arch_timer timer)
 	return 0;
 }

-static inline void timer_set_ctl(enum arch_timer timer, uint32_t ctl)
+static inline void timer_set_ctl(enum arch_timer timer, u32 ctl)
 {
 	switch (timer) {
 	case VIRTUAL:
@@ -127,7 +127,7 @@ static inline void timer_set_ctl(enum arch_timer timer, uint32_t ctl)
 	isb();
 }

-static inline uint32_t timer_get_ctl(enum arch_timer timer)
+static inline u32 timer_get_ctl(enum arch_timer timer)
 {
 	switch (timer) {
 	case VIRTUAL:
@@ -142,15 +142,15 @@ static inline uint32_t timer_get_ctl(enum arch_timer timer)
 	return 0;
 }

-static inline void timer_set_next_cval_ms(enum arch_timer timer, uint32_t msec)
+static inline void timer_set_next_cval_ms(enum arch_timer timer, u32 msec)
 {
-	uint64_t now_ct = timer_get_cntct(timer);
-	uint64_t next_ct = now_ct + msec_to_cycles(msec);
+	u64 now_ct = timer_get_cntct(timer);
+	u64 next_ct = now_ct + msec_to_cycles(msec);

 	timer_set_cval(timer, next_ct);
 }

-static inline void timer_set_next_tval_ms(enum arch_timer timer, uint32_t msec)
+static inline void timer_set_next_tval_ms(enum arch_timer timer, u32 msec)
 {
 	timer_set_tval(timer, msec_to_cycles(msec));
 }
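A brief usage sketch of the accessors above: arm the virtual timer a few milliseconds out and poll the interrupt-status bit. Illustrative only; it assumes CTL_ENABLE is defined alongside CTL_ISTATUS in this header and that the helper name is hypothetical:

static inline void wait_for_timer_ms(u32 msec)
{
	timer_set_next_cval_ms(VIRTUAL, msec);	/* cval = now + msec */
	timer_set_ctl(VIRTUAL, CTL_ENABLE);	/* unmask and enable */

	while (!(timer_get_ctl(VIRTUAL) & CTL_ISTATUS))
		cpu_relax();
}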
@@ -8,10 +8,10 @@

 #include "arch_timer.h"

-static inline void __delay(uint64_t cycles)
+static inline void __delay(u64 cycles)
 {
 	enum arch_timer timer = VIRTUAL;
-	uint64_t start = timer_get_cntct(timer);
+	u64 start = timer_get_cntct(timer);

 	while ((timer_get_cntct(timer) - start) < cycles)
 		cpu_relax();
@@ -48,8 +48,8 @@ void gic_set_dir(unsigned int intid);
  * split is true, EOI drops the priority and deactivates the interrupt.
  */
 void gic_set_eoi_split(bool split);
-void gic_set_priority_mask(uint64_t mask);
-void gic_set_priority(uint32_t intid, uint32_t prio);
+void gic_set_priority_mask(u64 mask);
+void gic_set_priority(u32 intid, u32 prio);
 void gic_irq_set_active(unsigned int intid);
 void gic_irq_clear_active(unsigned int intid);
 bool gic_irq_get_active(unsigned int intid);
@@ -59,7 +59,7 @@ bool gic_irq_get_pending(unsigned int intid);
 void gic_irq_set_config(unsigned int intid, bool is_edge);
 void gic_irq_set_group(unsigned int intid, bool group);

-void gic_rdist_enable_lpis(vm_paddr_t cfg_table, size_t cfg_table_size,
-			   vm_paddr_t pend_table);
+void gic_rdist_enable_lpis(gpa_t cfg_table, size_t cfg_table_size,
+			   gpa_t pend_table);

 #endif /* SELFTEST_KVM_GIC_H */
@@ -5,11 +5,10 @@

 #include <linux/sizes.h>

-void its_init(vm_paddr_t coll_tbl, size_t coll_tbl_sz,
-	      vm_paddr_t device_tbl, size_t device_tbl_sz,
-	      vm_paddr_t cmdq, size_t cmdq_size);
+void its_init(gpa_t coll_tbl, size_t coll_tbl_sz, gpa_t device_tbl,
+	      size_t device_tbl_sz, gpa_t cmdq, size_t cmdq_size);

-void its_send_mapd_cmd(void *cmdq_base, u32 device_id, vm_paddr_t itt_base,
+void its_send_mapd_cmd(void *cmdq_base, u32 device_id, gpa_t itt_base,
 		       size_t itt_size, bool valid);
 void its_send_mapc_cmd(void *cmdq_base, u32 vcpu_id, u32 collection_id, bool valid);
 void its_send_mapti_cmd(void *cmdq_base, u32 device_id, u32 event_id,
@@ -128,7 +128,7 @@
 #define PTE_ADDR_51_50_LPA2_SHIFT	8

 void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init);
-struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, u32 vcpu_id,
 				  struct kvm_vcpu_init *init, void *guest_code);

 struct ex_regs {
@@ -167,8 +167,8 @@ enum {
	 (v) == VECTOR_SYNC_LOWER_64 || \
	 (v) == VECTOR_SYNC_LOWER_32)

-void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
-				      uint32_t *ipa16k, uint32_t *ipa64k);
+void aarch64_get_supported_page_sizes(u32 ipa, u32 *ipa4k,
+				      u32 *ipa16k, u32 *ipa64k);

 void vm_init_descriptor_tables(struct kvm_vm *vm);
 void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);
@@ -179,8 +179,8 @@ void vm_install_exception_handler(struct kvm_vm *vm,
 void vm_install_sync_handler(struct kvm_vm *vm,
 			     int vector, int ec, handler_fn handler);

-uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, vm_vaddr_t gva, int level);
-uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva);
+u64 *virt_get_pte_hva_at_level(struct kvm_vm *vm, gva_t gva, int level);
+u64 *virt_get_pte_hva(struct kvm_vm *vm, gva_t gva);

 static inline void cpu_relax(void)
 {
@@ -287,9 +287,9 @@ struct arm_smccc_res {
  * @res: pointer to write the return values from registers x0-x3
  *
  */
-void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
-	       uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
-	       uint64_t arg6, struct arm_smccc_res *res);
+void smccc_hvc(u32 function_id, u64 arg0, u64 arg1,
+	       u64 arg2, u64 arg3, u64 arg4, u64 arg5,
+	       u64 arg6, struct arm_smccc_res *res);

 /**
  * smccc_smc - Invoke a SMCCC function using the smc conduit
@@ -298,9 +298,9 @@ void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
  * @res: pointer to write the return values from registers x0-x3
  *
  */
-void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
-	       uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
-	       uint64_t arg6, struct arm_smccc_res *res);
+void smccc_smc(u32 function_id, u64 arg0, u64 arg1,
+	       u64 arg2, u64 arg3, u64 arg4, u64 arg5,
+	       u64 arg6, struct arm_smccc_res *res);

 /* Execute a Wait For Interrupt instruction. */
 void wfi(void);
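As a usage sketch of the renamed smccc_hvc() prototype, a PSCI_VERSION query over the hvc conduit; 0x84000000 is the SMCCC function ID for PSCI_VERSION per the PSCI specification, and the wrapper name is hypothetical:

static inline u32 psci_version(void)
{
	struct arm_smccc_res res;

	/* PSCI_VERSION takes no arguments; result comes back in x0. */
	smccc_hvc(0x84000000, 0, 0, 0, 0, 0, 0, 0, &res);
	return (u32)res.a0;
}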
@@ -10,9 +10,9 @@
  * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each
  * VM), it must not be accessed from host code.
  */
-extern vm_vaddr_t *ucall_exit_mmio_addr;
+extern gva_t *ucall_exit_mmio_addr;

-static inline void ucall_arch_do_ucall(vm_vaddr_t uc)
+static inline void ucall_arch_do_ucall(gva_t uc)
 {
 	WRITE_ONCE(*ucall_exit_mmio_addr, uc);
 }
@@ -11,27 +11,27 @@
 #include "kvm_util.h"

 #define REDIST_REGION_ATTR_ADDR(count, base, flags, index) \
-	(((uint64_t)(count) << 52) | \
-	 ((uint64_t)((base) >> 16) << 16) | \
-	 ((uint64_t)(flags) << 12) | \
+	(((u64)(count) << 52) | \
+	 ((u64)((base) >> 16) << 16) | \
+	 ((u64)(flags) << 12) | \
	 index)

 bool kvm_supports_vgic_v3(void);
-int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs);
+int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs);
 void __vgic_v3_init(int fd);
-int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs);
+int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs);

 #define VGIC_MAX_RESERVED	1023

-void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level);
-int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level);
+void kvm_irq_set_level_info(int gic_fd, u32 intid, int level);
+int _kvm_irq_set_level_info(int gic_fd, u32 intid, int level);

-void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level);
-int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level);
+void kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level);
+int _kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level);

 /* The vcpu arg only applies to private interrupts. */
-void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu);
-void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu);
+void kvm_irq_write_ispendr(int gic_fd, u32 intid, struct kvm_vcpu *vcpu);
+void kvm_irq_write_isactiver(int gic_fd, u32 intid, struct kvm_vcpu *vcpu);

 #define KVM_IRQCHIP_NUM_PINS	(1020 - 32)
@@ -58,7 +58,7 @@ struct kvm_binary_stats {

 struct kvm_vcpu {
 	struct list_head list;
-	uint32_t id;
+	u32 id;
 	int fd;
 	struct kvm_vm *vm;
 	struct kvm_run *run;
@@ -70,8 +70,8 @@ struct kvm_vcpu {
 #endif
 	struct kvm_binary_stats stats;
 	struct kvm_dirty_gfn *dirty_gfns;
-	uint32_t fetch_index;
-	uint32_t dirty_gfns_count;
+	u32 fetch_index;
+	u32 dirty_gfns_count;
 };

 struct userspace_mem_regions {
@@ -90,7 +90,7 @@ enum kvm_mem_region_type {

 struct kvm_mmu {
 	bool pgd_created;
-	uint64_t pgd;
+	u64 pgd;
 	int pgtable_levels;

 	struct kvm_mmu_arch arch;
@@ -105,16 +105,16 @@ struct kvm_vm {
 	unsigned int page_shift;
 	unsigned int pa_bits;
 	unsigned int va_bits;
-	uint64_t max_gfn;
+	u64 max_gfn;
 	struct list_head vcpus;
 	struct userspace_mem_regions regions;
 	struct sparsebit *vpages_valid;
 	struct sparsebit *vpages_mapped;
 	bool has_irqchip;
-	vm_paddr_t ucall_mmio_addr;
-	vm_vaddr_t handlers;
-	uint32_t dirty_ring_size;
-	uint64_t gpa_tag_mask;
+	gpa_t ucall_mmio_addr;
+	gva_t handlers;
+	u32 dirty_ring_size;
+	gpa_t gpa_tag_mask;

 	/*
 	 * "mmu" is the guest's stage-1, with a short name because the vast
@@ -132,7 +132,7 @@ struct kvm_vm {
 	 * allocators, e.g., lib/elf uses the memslots[MEM_REGION_CODE]
 	 * memslot.
 	 */
-	uint32_t memslots[NR_MEM_REGIONS];
+	u32 memslots[NR_MEM_REGIONS];
 };

 struct vcpu_reg_sublist {
@@ -164,7 +164,7 @@ struct vcpu_reg_list {
 		else

 struct userspace_mem_region *
-memslot2region(struct kvm_vm *vm, uint32_t memslot);
+memslot2region(struct kvm_vm *vm, u32 memslot);

 static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm,
 							      enum kvm_mem_region_type type)
@@ -213,13 +213,13 @@ enum vm_guest_mode {
 };

 struct vm_shape {
-	uint32_t type;
-	uint8_t  mode;
-	uint8_t  pad0;
-	uint16_t pad1;
+	u32 type;
+	u8  mode;
+	u8  pad0;
+	u16 pad1;
 };

-kvm_static_assert(sizeof(struct vm_shape) == sizeof(uint64_t));
+kvm_static_assert(sizeof(struct vm_shape) == sizeof(u64));

 #define VM_TYPE_DEFAULT	0

@@ -404,21 +404,22 @@ static inline int vm_check_cap(struct kvm_vm *vm, long cap)
 	return ret;
 }

-static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
+static inline int __vm_enable_cap(struct kvm_vm *vm, u32 cap, u64 arg0)
 {
 	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

 	return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
 }
-static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
+
+static inline void vm_enable_cap(struct kvm_vm *vm, u32 cap, u64 arg0)
 {
 	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

 	vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
 }

-static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa,
-					    uint64_t size, uint64_t attributes)
+static inline void vm_set_memory_attributes(struct kvm_vm *vm, gpa_t gpa,
+					    u64 size, u64 attributes)
 {
 	struct kvm_memory_attributes attr = {
 		.attributes = attributes,
@@ -438,35 +439,35 @@ static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa,
 }


-static inline void vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa,
-				      uint64_t size)
+static inline void vm_mem_set_private(struct kvm_vm *vm, gpa_t gpa,
+				      u64 size)
 {
 	vm_set_memory_attributes(vm, gpa, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
 }

-static inline void vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa,
-				     uint64_t size)
+static inline void vm_mem_set_shared(struct kvm_vm *vm, gpa_t gpa,
+				     u64 size)
 {
 	vm_set_memory_attributes(vm, gpa, size, 0);
 }

-void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
+void vm_guest_mem_fallocate(struct kvm_vm *vm, gpa_t gpa, u64 size,
 			    bool punch_hole);

-static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa,
-					   uint64_t size)
+static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, gpa_t gpa,
+					   u64 size)
 {
 	vm_guest_mem_fallocate(vm, gpa, size, true);
 }

-static inline void vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa,
-					 uint64_t size)
+static inline void vm_guest_mem_allocate(struct kvm_vm *vm, gpa_t gpa,
+					 u64 size)
 {
 	vm_guest_mem_fallocate(vm, gpa, size, false);
 }

-void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
-const char *vm_guest_mode_string(uint32_t i);
+void vm_enable_dirty_ring(struct kvm_vm *vm, u32 ring_size);
+const char *vm_guest_mode_string(u32 i);

 void kvm_vm_free(struct kvm_vm *vmp);
 void kvm_vm_restart(struct kvm_vm *vmp);
@@ -474,7 +475,7 @@ void kvm_vm_release(struct kvm_vm *vmp);
 void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
 int kvm_memfd_alloc(size_t size, bool hugepages);

-void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
+void vm_dump(FILE *stream, struct kvm_vm *vm, u8 indent);

 static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
 {
@@ -484,7 +485,7 @@ static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
 }

 static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
-					  uint64_t first_page, uint32_t num_pages)
+					  u64 first_page, u32 num_pages)
 {
 	struct kvm_clear_dirty_log args = {
 		.dirty_bitmap = log,
@@ -496,14 +497,14 @@ static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log
 	vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args);
 }

-static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
+static inline u32 kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
 {
 	return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
 }

 static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm,
-						uint64_t address,
-						uint64_t size, bool pio)
+						u64 address,
+						u64 size, bool pio)
 {
 	struct kvm_coalesced_mmio_zone zone = {
 		.addr = address,
@@ -515,8 +516,8 @@ static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm,
 }

 static inline void kvm_vm_unregister_coalesced_io(struct kvm_vm *vm,
-						  uint64_t address,
-						  uint64_t size, bool pio)
+						  u64 address,
+						  u64 size, bool pio)
 {
 	struct kvm_coalesced_mmio_zone zone = {
 		.addr = address,
@@ -535,8 +536,8 @@ static inline int vm_get_stats_fd(struct kvm_vm *vm)
 	return fd;
 }

-static inline int __kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd,
-			      uint32_t flags)
+static inline int __kvm_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd,
+			      u32 flags)
 {
 	struct kvm_irqfd irqfd = {
 		.fd = eventfd,
@@ -548,20 +549,19 @@ static inline int __kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd,
 	return __vm_ioctl(vm, KVM_IRQFD, &irqfd);
 }

-static inline void kvm_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd,
-			     uint32_t flags)
+static inline void kvm_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd, u32 flags)
 {
 	int ret = __kvm_irqfd(vm, gsi, eventfd, flags);

 	TEST_ASSERT_VM_VCPU_IOCTL(!ret, KVM_IRQFD, ret, vm);
 }

-static inline void kvm_assign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd)
+static inline void kvm_assign_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd)
 {
 	kvm_irqfd(vm, gsi, eventfd, 0);
 }

-static inline void kvm_deassign_irqfd(struct kvm_vm *vm, uint32_t gsi, int eventfd)
+static inline void kvm_deassign_irqfd(struct kvm_vm *vm, u32 gsi, int eventfd)
 {
 	kvm_irqfd(vm, gsi, eventfd, KVM_IRQFD_FLAG_DEASSIGN);
 }
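A short usage sketch of the irqfd helpers above: bind an eventfd to a GSI, write to it to inject the interrupt, then unbind. The wrapper function is hypothetical and error handling is elided:

#include <sys/eventfd.h>
#include <unistd.h>

static void pulse_gsi(struct kvm_vm *vm, u32 gsi)
{
	int efd = eventfd(0, 0);
	u64 one = 1;

	kvm_assign_irqfd(vm, gsi, efd);
	write(efd, &one, sizeof(one));	/* injects @gsi into the guest */
	kvm_deassign_irqfd(vm, gsi, efd);
	close(efd);
}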
@@ -610,15 +610,15 @@ static inline struct kvm_stats_desc *get_stats_descriptor(struct kvm_stats_desc
 }

 void read_stat_data(int stats_fd, struct kvm_stats_header *header,
-		    struct kvm_stats_desc *desc, uint64_t *data,
+		    struct kvm_stats_desc *desc, u64 *data,
 		    size_t max_elements);

 void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
-		  uint64_t *data, size_t max_elements);
+		  u64 *data, size_t max_elements);

 #define __get_stat(stats, stat) \
 ({ \
-	uint64_t data; \
+	u64 data; \
 \
 	kvm_get_stat(stats, #stat, &data, 1); \
 	data; \
@@ -664,8 +664,8 @@ static inline bool is_smt_on(void)

 void vm_create_irqchip(struct kvm_vm *vm);

-static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
-					  uint64_t flags)
+static inline int __vm_create_guest_memfd(struct kvm_vm *vm, u64 size,
+					  u64 flags)
 {
 	struct kvm_create_guest_memfd guest_memfd = {
 		.size = size,
@@ -675,8 +675,8 @@ static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
 	return __vm_ioctl(vm, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
 }

-static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
-					uint64_t flags)
+static inline int vm_create_guest_memfd(struct kvm_vm *vm, u64 size,
+					u64 flags)
 {
 	int fd = __vm_create_guest_memfd(vm, size, flags);

@@ -684,24 +684,23 @@ static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
 	return fd;
 }

-void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
-			       uint64_t gpa, uint64_t size, void *hva);
-int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
-				uint64_t gpa, uint64_t size, void *hva);
-void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
-				uint64_t gpa, uint64_t size, void *hva,
-				uint32_t guest_memfd, uint64_t guest_memfd_offset);
-int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
-				 uint64_t gpa, uint64_t size, void *hva,
-				 uint32_t guest_memfd, uint64_t guest_memfd_offset);
+void vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags,
+			       gpa_t gpa, u64 size, void *hva);
+int __vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags,
+				gpa_t gpa, u64 size, void *hva);
+void vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags,
+				gpa_t gpa, u64 size, void *hva,
+				u32 guest_memfd, u64 guest_memfd_offset);
+int __vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags,
+				 gpa_t gpa, u64 size, void *hva,
+				 u32 guest_memfd, u64 guest_memfd_offset);

 void vm_userspace_mem_region_add(struct kvm_vm *vm,
 				 enum vm_mem_backing_src_type src_type,
-				 uint64_t gpa, uint32_t slot, uint64_t npages,
-				 uint32_t flags);
+				 gpa_t gpa, u32 slot, u64 npages, u32 flags);
 void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
-		uint64_t gpa, uint32_t slot, uint64_t npages, uint32_t flags,
-		int guest_memfd_fd, uint64_t guest_memfd_offset);
+		gpa_t gpa, u32 slot, u64 npages, u32 flags,
+		int guest_memfd_fd, u64 guest_memfd_offset);

 #ifndef vm_arch_has_protected_memory
 static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
@@ -710,36 +709,34 @@ static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
 }
 #endif

-void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
-void vm_mem_region_reload(struct kvm_vm *vm, uint32_t slot);
-void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
-void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
-struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
-void vm_populate_vaddr_bitmap(struct kvm_vm *vm);
-vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
-vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
-vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
-			    enum kvm_mem_region_type type);
-vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
-				 vm_vaddr_t vaddr_min,
-				 enum kvm_mem_region_type type);
-vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
-vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
-				 enum kvm_mem_region_type type);
-vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
+void vm_mem_region_set_flags(struct kvm_vm *vm, u32 slot, u32 flags);
+void vm_mem_region_reload(struct kvm_vm *vm, u32 slot);
+void vm_mem_region_move(struct kvm_vm *vm, u32 slot, u64 new_gpa);
+void vm_mem_region_delete(struct kvm_vm *vm, u32 slot);
+struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id);
+void vm_populate_gva_bitmap(struct kvm_vm *vm);
+gva_t vm_unused_gva_gap(struct kvm_vm *vm, size_t sz, gva_t min_gva);
+gva_t vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva);
+gva_t __vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva,
+		 enum kvm_mem_region_type type);
+gva_t vm_alloc_shared(struct kvm_vm *vm, size_t sz, gva_t min_gva,
+		      enum kvm_mem_region_type type);
+gva_t vm_alloc_pages(struct kvm_vm *vm, int nr_pages);
+gva_t __vm_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type);
+gva_t vm_alloc_page(struct kvm_vm *vm);

-void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
+void virt_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa,
 	      unsigned int npages);
-void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
-void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
-vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
-void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
+void *addr_gpa2hva(struct kvm_vm *vm, gpa_t gpa);
+void *addr_gva2hva(struct kvm_vm *vm, gva_t gva);
+gpa_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
+void *addr_gpa2alias(struct kvm_vm *vm, gpa_t gpa);

 #ifndef vcpu_arch_put_guest
 #define vcpu_arch_put_guest(mem, val) do { (mem) = (val); } while (0)
 #endif

-static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa)
+static inline gpa_t vm_untag_gpa(struct kvm_vm *vm, gpa_t gpa)
 {
 	return gpa & ~vm->gpa_tag_mask;
 }
@@ -755,8 +752,8 @@ static inline int __vcpu_run(struct kvm_vcpu *vcpu)
 void vcpu_run_complete_io(struct kvm_vcpu *vcpu);
 struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu);

-static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap,
-				   uint64_t arg0)
+static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, u32 cap,
+				   u64 arg0)
 {
 	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

@@ -811,31 +808,34 @@ static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 	vcpu_ioctl(vcpu, KVM_SET_FPU, fpu);
 }

-static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
+static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, u64 id, void *addr)
 {
-	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };
+	struct kvm_one_reg reg = { .id = id, .addr = (u64)addr };

 	return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
 }
-static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
+
+static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, u64 id, u64 val)
 {
-	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };
+	struct kvm_one_reg reg = { .id = id, .addr = (u64)&val };

 	return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
 }
-static inline uint64_t vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id)
+
+static inline u64 vcpu_get_reg(struct kvm_vcpu *vcpu, u64 id)
 {
-	uint64_t val;
-	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };
+	u64 val;
+	struct kvm_one_reg reg = { .id = id, .addr = (u64)&val };

 	TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);

 	vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
 	return val;
 }
-static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
+
+static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u64 id, u64 val)
 {
-	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };
+	struct kvm_one_reg reg = { .id = id, .addr = (u64)&val };

 	TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);
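A usage sketch of the register accessors above, e.g. advancing an arm64 vCPU's PC past a trapped 4-byte instruction; the helper name is hypothetical:

static void skip_insn(struct kvm_vcpu *vcpu)
{
	u64 pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc));

	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), pc + 4);
}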
@@ -880,75 +880,75 @@ static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
|
||||
return fd;
|
||||
}
|
||||
|
||||
int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr);
|
||||
int __kvm_has_device_attr(int dev_fd, u32 group, u64 attr);
|
||||
|
||||
static inline void kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
|
||||
static inline void kvm_has_device_attr(int dev_fd, u32 group, u64 attr)
|
||||
{
|
||||
int ret = __kvm_has_device_attr(dev_fd, group, attr);
|
||||
|
||||
TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
|
||||
}
|
||||
|
||||
int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val);
|
||||
int __kvm_device_attr_get(int dev_fd, u32 group, u64 attr, void *val);
|
||||
|
||||
static inline void kvm_device_attr_get(int dev_fd, uint32_t group,
|
||||
uint64_t attr, void *val)
|
||||
static inline void kvm_device_attr_get(int dev_fd, u32 group,
|
||||
u64 attr, void *val)
|
||||
{
|
||||
int ret = __kvm_device_attr_get(dev_fd, group, attr, val);
|
||||
|
||||
TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret));
|
||||
}
|
||||
|
||||
int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val);
|
||||
int __kvm_device_attr_set(int dev_fd, u32 group, u64 attr, void *val);
|
||||
|
||||
static inline void kvm_device_attr_set(int dev_fd, uint32_t group,
|
||||
uint64_t attr, void *val)
|
||||
static inline void kvm_device_attr_set(int dev_fd, u32 group,
|
||||
u64 attr, void *val)
|
||||
{
|
||||
int ret = __kvm_device_attr_set(dev_fd, group, attr, val);
|
||||
|
||||
TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
|
||||
}
|
||||
|
||||
static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
|
||||
uint64_t attr)
|
||||
static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, u32 group,
|
||||
u64 attr)
|
||||
{
|
||||
return __kvm_has_device_attr(vcpu->fd, group, attr);
|
||||
}
|
||||
|
||||
static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
|
||||
uint64_t attr)
|
||||
static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, u32 group,
|
||||
u64 attr)
|
||||
{
|
||||
kvm_has_device_attr(vcpu->fd, group, attr);
|
||||
}
|
||||
|
||||
static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
|
||||
uint64_t attr, void *val)
|
||||
static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, u32 group,
|
||||
u64 attr, void *val)
|
||||
{
|
||||
return __kvm_device_attr_get(vcpu->fd, group, attr, val);
|
||||
}
|
||||
|
||||
static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
|
||||
uint64_t attr, void *val)
|
||||
static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, u32 group,
|
||||
u64 attr, void *val)
|
||||
{
|
||||
kvm_device_attr_get(vcpu->fd, group, attr, val);
|
||||
}
|
||||
|
||||
static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
|
||||
uint64_t attr, void *val)
|
||||
static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, u32 group,
|
||||
u64 attr, void *val)
|
||||
{
|
||||
return __kvm_device_attr_set(vcpu->fd, group, attr, val);
|
||||
}
|
||||
|
||||
static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
|
||||
uint64_t attr, void *val)
|
||||
static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, u32 group,
|
||||
u64 attr, void *val)
|
||||
{
|
||||
kvm_device_attr_set(vcpu->fd, group, attr, val);
|
||||
}
|
||||
|
||||
int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
|
||||
int __kvm_create_device(struct kvm_vm *vm, uint64_t type);
|
||||
int __kvm_test_create_device(struct kvm_vm *vm, u64 type);
|
||||
int __kvm_create_device(struct kvm_vm *vm, u64 type);
|
||||
|
||||
static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type)
|
||||
static inline int kvm_create_device(struct kvm_vm *vm, u64 type)
|
||||
{
|
||||
int fd = __kvm_create_device(vm, type);
|
||||
|
||||
@@ -964,7 +964,7 @@ void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);
|
||||
* Input Args:
|
||||
* vcpu - vCPU
|
||||
* num - number of arguments
|
||||
* ... - arguments, each of type uint64_t
|
||||
* ... - arguments, each of type u64
|
||||
*
|
||||
* Output Args: None
|
||||
*
|
||||
@@ -972,40 +972,38 @@ void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);
|
||||
*
|
||||
* Sets the first @num input parameters for the function at @vcpu's entry point,
|
||||
* per the C calling convention of the architecture, to the values given as
|
||||
* variable args. Each of the variable args is expected to be of type uint64_t.
|
||||
* variable args. Each of the variable args is expected to be of type u64.
|
||||
* The maximum @num can be is specific to the architecture.
|
||||
*/
|
||||
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...);
|
||||
|
||||
void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
|
||||
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
|
||||
void kvm_irq_line(struct kvm_vm *vm, u32 irq, int level);
|
||||
int _kvm_irq_line(struct kvm_vm *vm, u32 irq, int level);
|
||||
|
||||
#define KVM_MAX_IRQ_ROUTES 4096
|
||||
|
||||
struct kvm_irq_routing *kvm_gsi_routing_create(void);
|
||||
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
|
||||
uint32_t gsi, uint32_t pin);
|
||||
u32 gsi, u32 pin);
|
||||
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
|
||||
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
|
||||
|
||||
const char *exit_reason_str(unsigned int exit_reason);
|
||||
|
||||
-vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
-			     uint32_t memslot);
-vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
-				vm_paddr_t paddr_min, uint32_t memslot,
-				bool protected);
-vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
+gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t min_gpa, u32 memslot);
+gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, gpa_t min_gpa,
+			   u32 memslot, bool protected);
+gpa_t vm_alloc_page_table(struct kvm_vm *vm);
 
-static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
-					    vm_paddr_t paddr_min, uint32_t memslot)
+static inline gpa_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+				       gpa_t min_gpa, u32 memslot)
 {
 	/*
 	 * By default, allocate memory as protected for VMs that support
 	 * protected memory, as the majority of memory for such VMs is
 	 * protected, i.e. using shared memory is effectively opt-in.
 	 */
-	return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
+	return __vm_phy_pages_alloc(vm, num, min_gpa, memslot,
 				    vm_arch_has_protected_memory(vm));
 }
@@ -1016,8 +1014,8 @@ static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
  * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
  */
 struct kvm_vm *____vm_create(struct vm_shape shape);
-struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
-			   uint64_t nr_extra_pages);
+struct kvm_vm *__vm_create(struct vm_shape shape, u32 nr_runnable_vcpus,
+			   u64 nr_extra_pages);
 
 static inline struct kvm_vm *vm_create_barebones(void)
 {
@@ -1034,16 +1032,16 @@ static inline struct kvm_vm *vm_create_barebones_type(unsigned long type)
 	return ____vm_create(shape);
 }
 
-static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
+static inline struct kvm_vm *vm_create(u32 nr_runnable_vcpus)
 {
 	return __vm_create(VM_SHAPE_DEFAULT, nr_runnable_vcpus, 0);
 }
 
-struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
-				      uint64_t extra_mem_pages,
+struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, u32 nr_vcpus,
+				      u64 extra_mem_pages,
 				      void *guest_code, struct kvm_vcpu *vcpus[]);
 
-static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
+static inline struct kvm_vm *vm_create_with_vcpus(u32 nr_vcpus,
 						  void *guest_code,
 						  struct kvm_vcpu *vcpus[])
 {
@@ -1054,7 +1052,7 @@ static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
 
 struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
 					       struct kvm_vcpu **vcpu,
-					       uint64_t extra_mem_pages,
+					       u64 extra_mem_pages,
 					       void *guest_code);
 
 /*
@@ -1062,7 +1060,7 @@ struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
  * additional pages of guest memory. Returns the VM and vCPU (via out param).
  */
 static inline struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
-						       uint64_t extra_mem_pages,
+						       u64 extra_mem_pages,
 						       void *guest_code)
 {
 	return __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, vcpu,
@@ -1084,7 +1082,7 @@ static inline struct kvm_vm *vm_create_shape_with_one_vcpu(struct vm_shape shape
 
 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
 
-void kvm_set_files_rlimit(uint32_t nr_vcpus);
+void kvm_set_files_rlimit(u32 nr_vcpus);
 
 int __pin_task_to_cpu(pthread_t task, int cpu);
 
@@ -1115,7 +1113,7 @@ static inline int pin_self_to_any_cpu(void)
 }
 
 void kvm_print_vcpu_pinning_help(void);
-void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
+void kvm_parse_vcpu_pinning(const char *pcpus_string, u32 vcpu_to_pcpu[],
 			    int nr_vcpus);
 
 unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
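A rough sketch of how the renamed creation helpers are typically combined (vCPU counts and extra page counts are illustrative, not from this commit):

	/* One vCPU, default shape, no extra memory. */
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	/* Four vCPUs with 64 extra pages for per-test data. */
	struct kvm_vcpu *vcpus[4];
	struct kvm_vm *vm2 = __vm_create_with_vcpus(VM_SHAPE_DEFAULT, 4, 64,
						    guest_code, vcpus);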
@@ -1131,12 +1129,12 @@ vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
 }
 
 #define sync_global_to_guest(vm, g) ({				\
-	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
+	typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g));		\
 	memcpy(_p, &(g), sizeof(g));				\
 })
 
 #define sync_global_from_guest(vm, g) ({			\
-	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
+	typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g));		\
 	memcpy(&(g), _p, sizeof(g));				\
 })
 
@@ -1147,7 +1145,7 @@ vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
  * undesirable to change the host's copy of the global.
  */
 #define write_guest_global(vm, g, val) ({			\
-	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
+	typeof(g) *_p = addr_gva2hva(vm, (gva_t)&(g));		\
 	typeof(g) _val = val;					\
 								\
 	memcpy(_p, &(_val), sizeof(g));				\
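A hedged usage sketch of these macros (the global and its value are hypothetical):

	static int iteration;	/* global visible to both host and guest code */

	iteration = 3;
	sync_global_to_guest(vm, iteration);	/* push the host value into the guest */
	/* ... run the vCPU ... */
	sync_global_from_guest(vm, iteration);	/* pull the guest's updates back */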
@@ -1156,10 +1154,10 @@ vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
 void assert_on_unhandled_exception(struct kvm_vcpu *vcpu);
 
 void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu,
-		    uint8_t indent);
+		    u8 indent);
 
 static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
-			     uint8_t indent)
+			     u8 indent)
 {
 	vcpu_arch_dump(stream, vcpu, indent);
 }
@@ -1171,10 +1169,10 @@ static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
  *   vm - Virtual Machine
  *   vcpu_id - The id of the VCPU to add to the VM.
  */
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id);
 void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code);
 
-static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id,
 					   void *guest_code)
 {
 	struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id);
@@ -1185,10 +1183,10 @@ static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 }
 
 /* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
-struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);
+struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, u32 vcpu_id);
 
 static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm,
-						uint32_t vcpu_id)
+						u32 vcpu_id)
 {
 	return vm_arch_vcpu_recreate(vm, vcpu_id);
 }
@@ -1203,27 +1201,15 @@ static inline void virt_pgd_alloc(struct kvm_vm *vm)
 }
 
 /*
  * VM Virtual Page Map
  *
- * Input Args:
- *   vm - Virtual Machine
- *   vaddr - VM Virtual Address
- *   paddr - VM Physical Address
- *   memslot - Memory region slot for new virtual translation tables
- *
- * Output Args: None
- *
- * Return: None
- *
  * Within @vm, creates a virtual translation for the page starting
- * at @vaddr to the page starting at @paddr.
+ * at @gva to the page starting at @gpa.
  */
-void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa);
 
-static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+static inline void virt_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
 {
-	virt_arch_pg_map(vm, vaddr, paddr);
-	sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
+	virt_arch_pg_map(vm, gva, gpa);
+	sparsebit_set(vm->vpages_mapped, gva >> vm->page_shift);
 }
 
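Roughly, the renamed map helper is used like this (the addresses and memslot are made up for illustration):

	gpa_t gpa = vm_phy_page_alloc(vm, 0, 0);	/* any free page, memslot 0 */
	gva_t gva = 0x10000;				/* arbitrary guest VA */

	virt_pg_map(vm, gva, gpa);	/* gva now resolves to gpa inside the VM */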
@@ -1242,9 +1228,9 @@ static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr
  * Returns the VM physical address of the translated VM virtual
  * address given by @gva.
  */
-vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
+gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva);
 
-static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+static inline gpa_t addr_gva2gpa(struct kvm_vm *vm, gva_t gva)
 {
 	return addr_arch_gva2gpa(vm, gva);
 }
@@ -1264,9 +1250,9 @@ static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
  * Dumps to the FILE stream given by @stream, the contents of all the
  * virtual translation tables for the VM given by @vm.
  */
-void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent);
 
-static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+static inline void virt_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
 {
 	virt_arch_dump(stream, vm, indent);
 }
@@ -1277,7 +1263,7 @@ static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
 	return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0);
 }
 
-static inline uint64_t vm_page_align(struct kvm_vm *vm, uint64_t v)
+static inline u64 vm_page_align(struct kvm_vm *vm, u64 v)
 {
 	return (v + vm->page_size - 1) & ~(vm->page_size - 1);
 }
@@ -1293,9 +1279,9 @@ void kvm_arch_vm_post_create(struct kvm_vm *vm, unsigned int nr_vcpus);
 void kvm_arch_vm_finalize_vcpus(struct kvm_vm *vm);
 void kvm_arch_vm_release(struct kvm_vm *vm);
 
-bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);
+bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t gpa);
 
-uint32_t guest_get_vcpuid(void);
+u32 guest_get_vcpuid(void);
 
 bool kvm_arch_has_default_irqchip(void);
@@ -2,6 +2,8 @@
 #ifndef SELFTEST_KVM_UTIL_TYPES_H
 #define SELFTEST_KVM_UTIL_TYPES_H
 
+#include <linux/types.h>
+
 /*
  * Provide a version of static_assert() that is guaranteed to have an optional
  * message param. _GNU_SOURCE is defined for all KVM selftests, _GNU_SOURCE
@@ -14,9 +16,9 @@
 #define __kvm_static_assert(expr, msg, ...) _Static_assert(expr, msg)
 #define kvm_static_assert(expr, ...) __kvm_static_assert(expr, ##__VA_ARGS__, #expr)
 
-typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
-typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
+typedef u64 gpa_t; /* Virtual Machine (Guest) physical address */
+typedef u64 gva_t; /* Virtual Machine (Guest) virtual address */
 
-#define INVALID_GPA (~(uint64_t)0)
+#define INVALID_GPA (~(u64)0)
 
 #endif /* SELFTEST_KVM_UTIL_TYPES_H */
@@ -70,9 +70,9 @@ static inline void timer_set_next_cmp_ms(unsigned int msec, bool period)
 	csr_write(val, LOONGARCH_CSR_TCFG);
 }
 
-static inline void __delay(uint64_t cycles)
+static inline void __delay(u64 cycles)
 {
-	uint64_t start = timer_get_cycles();
+	u64 start = timer_get_cycles();
 
 	while ((timer_get_cycles() - start) < cycles)
 		cpu_relax();

@@ -10,9 +10,9 @@
  * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each
  * VM), it must not be accessed from host code.
  */
-extern vm_vaddr_t *ucall_exit_mmio_addr;
+extern gva_t *ucall_exit_mmio_addr;
 
-static inline void ucall_arch_do_ucall(vm_vaddr_t uc)
+static inline void ucall_arch_do_ucall(gva_t uc)
 {
 	WRITE_ONCE(*ucall_exit_mmio_addr, uc);
 }
@@ -20,9 +20,9 @@
 #define MEMSTRESS_MEM_SLOT_INDEX	1
 
 struct memstress_vcpu_args {
-	uint64_t gpa;
-	uint64_t gva;
-	uint64_t pages;
+	gpa_t gpa;
+	gva_t gva;
+	u64 pages;
 
 	/* Only used by the host userspace part of the vCPU thread */
 	struct kvm_vcpu *vcpu;
@@ -32,11 +32,11 @@ struct memstress_vcpu_args {
 struct memstress_args {
 	struct kvm_vm *vm;
 	/* The starting address and size of the guest test region. */
-	uint64_t gpa;
-	uint64_t size;
-	uint64_t guest_page_size;
-	uint32_t random_seed;
-	uint32_t write_percent;
+	gpa_t gpa;
+	u64 size;
+	u64 guest_page_size;
+	u32 random_seed;
+	u32 write_percent;
 
 	/* Run vCPUs in L2 instead of L1, if the architecture supports it. */
 	bool nested;
@@ -45,7 +45,7 @@ struct memstress_args {
 	/* True if all vCPUs are pinned to pCPUs */
 	bool pin_vcpus;
 	/* The vCPU=>pCPU pinning map. Only valid if pin_vcpus is true. */
-	uint32_t vcpu_to_pcpu[KVM_MAX_VCPUS];
+	u32 vcpu_to_pcpu[KVM_MAX_VCPUS];
 
 	/* Test is done, stop running vCPUs. */
 	bool stop_vcpus;
@@ -56,27 +56,27 @@ struct memstress_args {
 extern struct memstress_args memstress_args;
 
 struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
-				   uint64_t vcpu_memory_bytes, int slots,
+				   u64 vcpu_memory_bytes, int slots,
 				   enum vm_mem_backing_src_type backing_src,
 				   bool partition_vcpu_memory_access);
 void memstress_destroy_vm(struct kvm_vm *vm);
 
-void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent);
+void memstress_set_write_percent(struct kvm_vm *vm, u32 write_percent);
 void memstress_set_random_access(struct kvm_vm *vm, bool random_access);
 
 void memstress_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct memstress_vcpu_args *));
 void memstress_join_vcpu_threads(int vcpus);
-void memstress_guest_code(uint32_t vcpu_id);
+void memstress_guest_code(u32 vcpu_id);
 
-uint64_t memstress_nested_pages(int nr_vcpus);
+u64 memstress_nested_pages(int nr_vcpus);
 void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]);
 
 void memstress_enable_dirty_logging(struct kvm_vm *vm, int slots);
 void memstress_disable_dirty_logging(struct kvm_vm *vm, int slots);
 void memstress_get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots);
 void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
-			       int slots, uint64_t pages_per_slot);
-unsigned long **memstress_alloc_bitmaps(int slots, uint64_t pages_per_slot);
+			       int slots, u64 pages_per_slot);
+unsigned long **memstress_alloc_bitmaps(int slots, u64 pages_per_slot);
 void memstress_free_bitmaps(unsigned long *bitmaps[], int slots);
 
 #endif /* SELFTEST_KVM_MEMSTRESS_H */
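A sketch of a typical memstress driver built from the declarations above (vcpu_worker, nr_vcpus and vcpu_memory_bytes are placeholders, not defined by this commit):

	struct kvm_vm *vm;

	vm = memstress_create_vm(VM_MODE_DEFAULT, nr_vcpus, vcpu_memory_bytes,
				 1, VM_MEM_SRC_ANONYMOUS, true);
	memstress_set_write_percent(vm, 50);	/* 50% writes, 50% reads */
	memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);
	memstress_join_vcpu_threads(nr_vcpus);
	memstress_destroy_vm(vm);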
@@ -14,25 +14,25 @@
 static unsigned long timer_freq;
 
 #define msec_to_cycles(msec)	\
-	((timer_freq) * (uint64_t)(msec) / 1000)
+	((timer_freq) * (u64)(msec) / 1000)
 
 #define usec_to_cycles(usec)	\
-	((timer_freq) * (uint64_t)(usec) / 1000000)
+	((timer_freq) * (u64)(usec) / 1000000)
 
 #define cycles_to_usec(cycles)	\
-	((uint64_t)(cycles) * 1000000 / (timer_freq))
+	((u64)(cycles) * 1000000 / (timer_freq))
 
-static inline uint64_t timer_get_cycles(void)
+static inline u64 timer_get_cycles(void)
 {
 	return csr_read(CSR_TIME);
 }
 
-static inline void timer_set_cmp(uint64_t cval)
+static inline void timer_set_cmp(u64 cval)
 {
 	csr_write(CSR_STIMECMP, cval);
 }
 
-static inline uint64_t timer_get_cmp(void)
+static inline u64 timer_get_cmp(void)
 {
 	return csr_read(CSR_STIMECMP);
 }
@@ -47,17 +47,17 @@ static inline void timer_irq_disable(void)
 	csr_clear(CSR_SIE, IE_TIE);
 }
 
-static inline void timer_set_next_cmp_ms(uint32_t msec)
+static inline void timer_set_next_cmp_ms(u32 msec)
 {
-	uint64_t now_ct = timer_get_cycles();
-	uint64_t next_ct = now_ct + msec_to_cycles(msec);
+	u64 now_ct = timer_get_cycles();
+	u64 next_ct = now_ct + msec_to_cycles(msec);
 
 	timer_set_cmp(next_ct);
 }
 
-static inline void __delay(uint64_t cycles)
+static inline void __delay(u64 cycles)
 {
-	uint64_t start = timer_get_cycles();
+	u64 start = timer_get_cycles();
 
 	while ((timer_get_cycles() - start) < cycles)
 		cpu_relax();
@@ -25,8 +25,7 @@
 #define GET_RM(insn)		(((insn) & INSN_MASK_FUNCT3) >> INSN_SHIFT_FUNCT3)
 #define GET_CSR_NUM(insn)	(((insn) & INSN_CSR_MASK) >> INSN_CSR_SHIFT)
 
-static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype,
-				    uint64_t idx, uint64_t size)
+static inline u64 __kvm_reg_id(u64 type, u64 subtype, u64 idx, u64 size)
 {
 	return KVM_REG_RISCV | type | subtype | idx | size;
 }
@@ -62,14 +61,14 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype,
			 KVM_REG_RISCV_SBI_SINGLE,	\
			 idx, KVM_REG_SIZE_ULONG)
 
-bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext);
+bool __vcpu_has_ext(struct kvm_vcpu *vcpu, u64 ext);
 
-static inline bool __vcpu_has_isa_ext(struct kvm_vcpu *vcpu, uint64_t isa_ext)
+static inline bool __vcpu_has_isa_ext(struct kvm_vcpu *vcpu, u64 isa_ext)
 {
 	return __vcpu_has_ext(vcpu, RISCV_ISA_EXT_REG(isa_ext));
 }
 
-static inline bool __vcpu_has_sbi_ext(struct kvm_vcpu *vcpu, uint64_t sbi_ext)
+static inline bool __vcpu_has_sbi_ext(struct kvm_vcpu *vcpu, u64 sbi_ext)
 {
 	return __vcpu_has_ext(vcpu, RISCV_SBI_EXT_REG(sbi_ext));
 }
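A sketch of how a register ID is composed from these helpers (the STA extension index is only an example, and the exact uapi constant names may differ by kernel version):

	/* Same composition that RISCV_SBI_EXT_REG() performs, spelled out. */
	u64 id = __kvm_reg_id(KVM_REG_RISCV_SBI_EXT, KVM_REG_RISCV_SBI_SINGLE,
			      KVM_RISCV_SBI_EXT_STA, KVM_REG_SIZE_ULONG);

	if (__vcpu_has_sbi_ext(vcpu, KVM_RISCV_SBI_EXT_STA))
		/* steal-time accounting is available to the guest */;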
@@ -7,11 +7,11 @@
 
 #define UCALL_EXIT_REASON	KVM_EXIT_RISCV_SBI
 
-static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+static inline void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa)
 {
 }
 
-static inline void ucall_arch_do_ucall(vm_vaddr_t uc)
+static inline void ucall_arch_do_ucall(gva_t uc)
 {
 	sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT,
 		  KVM_RISCV_SELFTESTS_SBI_UCALL,
@@ -8,6 +8,6 @@
 #ifndef SELFTEST_KVM_DIAG318_TEST_HANDLER
 #define SELFTEST_KVM_DIAG318_TEST_HANDLER
 
-uint64_t get_diag318_info(void);
+u64 get_diag318_info(void);
 
 #endif

@@ -16,7 +16,7 @@
 /* alt_stfle_fac_list[16] + stfle_fac_list[16] */
 #define NB_STFL_DOUBLEWORDS	32
 
-extern uint64_t stfl_doublewords[NB_STFL_DOUBLEWORDS];
+extern u64 stfl_doublewords[NB_STFL_DOUBLEWORDS];
 extern bool stfle_flag;
 
 static inline bool test_bit_inv(unsigned long nr, const unsigned long *ptr)
@@ -24,7 +24,7 @@ static inline bool test_bit_inv(unsigned long nr, const unsigned long *ptr)
 	return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
 }
 
-static inline void stfle(uint64_t *fac, unsigned int nb_doublewords)
+static inline void stfle(u64 *fac, unsigned int nb_doublewords)
 {
 	register unsigned long r0 asm("0") = nb_doublewords - 1;
@@ -6,11 +6,11 @@
 
 #define UCALL_EXIT_REASON	KVM_EXIT_S390_SIEIC
 
-static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+static inline void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa)
 {
 }
 
-static inline void ucall_arch_do_ucall(vm_vaddr_t uc)
+static inline void ucall_arch_do_ucall(gva_t uc)
 {
 	/* Exit via DIAGNOSE 0x501 (normally used for breakpoints) */
 	asm volatile ("diag 0,%0,0x501" : : "a"(uc) : "memory");
@@ -6,7 +6,7 @@
  *
  * Header file that describes API to the sparsebit library.
  * This library provides a memory efficient means of storing
- * the settings of bits indexed via a uint64_t.  Memory usage
+ * the settings of bits indexed via a u64.  Memory usage
  * is reasonable, significantly less than (2^64 / 8) bytes, as
  * long as bits that are mostly set or mostly cleared are close
  * to each other.  This library is efficient in memory usage
@@ -25,8 +25,8 @@ extern "C" {
 #endif
 
 struct sparsebit;
-typedef uint64_t sparsebit_idx_t;
-typedef uint64_t sparsebit_num_t;
+typedef u64 sparsebit_idx_t;
+typedef u64 sparsebit_num_t;
 
 struct sparsebit *sparsebit_alloc(void);
 void sparsebit_free(struct sparsebit **sbitp);
@@ -22,6 +22,8 @@
 #include <sys/mman.h>
 #include "kselftest.h"
 
+#include <linux/types.h>
+
 #define msecs_to_usecs(msec)    ((msec) * 1000ULL)
 
 static inline __printf(1, 2) int _no_printf(const char *format, ...) { return 0; }
@@ -99,25 +101,25 @@ do { \
 
 size_t parse_size(const char *size);
 
-int64_t timespec_to_ns(struct timespec ts);
-struct timespec timespec_add_ns(struct timespec ts, int64_t ns);
+s64 timespec_to_ns(struct timespec ts);
+struct timespec timespec_add_ns(struct timespec ts, s64 ns);
 struct timespec timespec_add(struct timespec ts1, struct timespec ts2);
 struct timespec timespec_sub(struct timespec ts1, struct timespec ts2);
 struct timespec timespec_elapsed(struct timespec start);
 struct timespec timespec_div(struct timespec ts, int divisor);
 
 struct guest_random_state {
-	uint32_t seed;
+	u32 seed;
 };
 
-extern uint32_t guest_random_seed;
+extern u32 guest_random_seed;
 extern struct guest_random_state guest_rng;
 
-struct guest_random_state new_guest_random_state(uint32_t seed);
-uint32_t guest_random_u32(struct guest_random_state *state);
+struct guest_random_state new_guest_random_state(u32 seed);
+u32 guest_random_u32(struct guest_random_state *state);
 
 static inline bool __guest_random_bool(struct guest_random_state *state,
-				       uint8_t percent)
+				       u8 percent)
 {
 	return (guest_random_u32(state) % 100) < percent;
 }
@@ -127,9 +129,9 @@ static inline bool guest_random_bool(struct guest_random_state *state)
 	return __guest_random_bool(state, 50);
 }
 
-static inline uint64_t guest_random_u64(struct guest_random_state *state)
+static inline u64 guest_random_u64(struct guest_random_state *state)
 {
-	return ((uint64_t)guest_random_u32(state) << 32) | guest_random_u32(state);
+	return ((u64)guest_random_u32(state) << 32) | guest_random_u32(state);
 }
 
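A small usage sketch of the PRNG helpers declared above (values and percentages are illustrative):

	struct guest_random_state rng = new_guest_random_state(guest_random_seed);

	u32 r32  = guest_random_u32(&rng);
	u64 r64  = guest_random_u64(&rng);	   /* two u32 draws glued together */
	bool hot = __guest_random_bool(&rng, 75);  /* true roughly 75% of the time */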
 enum vm_mem_backing_src_type {
@@ -158,7 +160,7 @@ enum vm_mem_backing_src_type {
 
 struct vm_mem_backing_src_alias {
 	const char *name;
-	uint32_t flag;
+	u32 flag;
 };
 
 #define MIN_RUN_DELAY_NS	200000UL
@@ -166,9 +168,9 @@ struct vm_mem_backing_src_alias {
 bool thp_configured(void);
 size_t get_trans_hugepagesz(void);
 size_t get_def_hugetlb_pagesz(void);
-const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i);
-size_t get_backing_src_pagesz(uint32_t i);
-bool is_backing_src_hugetlb(uint32_t i);
+const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(u32 i);
+size_t get_backing_src_pagesz(u32 i);
+bool is_backing_src_hugetlb(u32 i);
 void backing_src_help(const char *flag);
 enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name);
 long get_run_delay(void);
@@ -189,18 +191,18 @@ static inline bool backing_src_can_be_huge(enum vm_mem_backing_src_type t)
 }
 
 /* Aligns x up to the next multiple of size. Size must be a power of 2. */
-static inline uint64_t align_up(uint64_t x, uint64_t size)
+static inline u64 align_up(u64 x, u64 size)
 {
-	uint64_t mask = size - 1;
+	u64 mask = size - 1;
 
 	TEST_ASSERT(size != 0 && !(size & (size - 1)),
 		    "size not a power of 2: %lu", size);
 	return ((x + mask) & ~mask);
 }
 
-static inline uint64_t align_down(uint64_t x, uint64_t size)
+static inline u64 align_down(u64 x, u64 size)
 {
-	uint64_t x_aligned_up = align_up(x, size);
+	u64 x_aligned_up = align_up(x, size);
 
 	if (x == x_aligned_up)
 		return x;
@@ -215,7 +217,7 @@ static inline void *align_ptr_up(void *x, size_t size)
 
 int atoi_paranoid(const char *num_str);
 
-static inline uint32_t atoi_positive(const char *name, const char *num_str)
+static inline u32 atoi_positive(const char *name, const char *num_str)
 {
 	int num = atoi_paranoid(num_str);
 
@@ -223,7 +225,7 @@ static inline uint32_t atoi_positive(const char *name, const char *num_str)
 	return num;
 }
 
-static inline uint32_t atoi_non_negative(const char *name, const char *num_str)
+static inline u32 atoi_non_negative(const char *name, const char *num_str)
 {
 	int num = atoi_paranoid(num_str);
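Worked values for the alignment helpers, taking 4KiB as the size (so mask = 0xfff):

	align_up(0x1234, 0x1000);	/* -> 0x2000, next 4KiB boundary */
	align_up(0x2000, 0x1000);	/* -> 0x2000, already aligned */
	align_down(0x1234, 0x1000);	/* -> 0x1000 */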
@@ -18,21 +18,21 @@
 
 /* Timer test cmdline parameters */
 struct test_args {
-	uint32_t nr_vcpus;
-	uint32_t nr_iter;
-	uint32_t timer_period_ms;
-	uint32_t migration_freq_ms;
-	uint32_t timer_err_margin_us;
+	u32 nr_vcpus;
+	u32 nr_iter;
+	u32 timer_period_ms;
+	u32 migration_freq_ms;
+	u32 timer_err_margin_us;
 	/* Members of struct kvm_arm_counter_offset */
-	uint64_t counter_offset;
-	uint64_t reserved;
+	u64 counter_offset;
+	u64 reserved;
 };
 
 /* Shared variables between host and guest */
 struct test_vcpu_shared_data {
-	uint32_t nr_iter;
+	u32 nr_iter;
 	int guest_stage;
-	uint64_t xcnt;
+	u64 xcnt;
 };
 
 extern struct test_args test_args;
@@ -21,26 +21,26 @@ enum {
 #define UCALL_BUFFER_LEN 1024
 
 struct ucall {
-	uint64_t cmd;
-	uint64_t args[UCALL_MAX_ARGS];
+	u64 cmd;
+	u64 args[UCALL_MAX_ARGS];
 	char buffer[UCALL_BUFFER_LEN];
 
 	/* Host virtual address of this struct. */
 	struct ucall *hva;
 };
 
-void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa);
-void ucall_arch_do_ucall(vm_vaddr_t uc);
+void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa);
+void ucall_arch_do_ucall(gva_t uc);
 void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu);
 
-void ucall(uint64_t cmd, int nargs, ...);
-__printf(2, 3) void ucall_fmt(uint64_t cmd, const char *fmt, ...);
-__printf(5, 6) void ucall_assert(uint64_t cmd, const char *exp,
+void ucall(u64 cmd, int nargs, ...);
+__printf(2, 3) void ucall_fmt(u64 cmd, const char *fmt, ...);
+__printf(5, 6) void ucall_assert(u64 cmd, const char *exp,
 				 const char *file, unsigned int line,
 				 const char *fmt, ...);
-uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc);
-void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa);
-int ucall_nr_pages_required(uint64_t page_size);
+u64 get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc);
+void ucall_init(struct kvm_vm *vm, gpa_t mmio_gpa);
+int ucall_nr_pages_required(u64 page_size);
 
 /*
  * Perform userspace call without any associated data. This bare call avoids
@@ -48,7 +48,7 @@ int ucall_nr_pages_required(uint64_t page_size);
  * the full ucall() are problematic and/or unwanted. Note, this will come out
  * as UCALL_NONE on the backend.
  */
-#define GUEST_UCALL_NONE()	ucall_arch_do_ucall((vm_vaddr_t)NULL)
+#define GUEST_UCALL_NONE()	ucall_arch_do_ucall((gva_t)NULL)
 
 #define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4)	\
 				ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4)
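For context, the canonical host-side loop that consumes these ucalls looks roughly like this (vcpu is assumed to exist; REPORT_GUEST_ASSERT is the usual abort handler in these tests):

	struct ucall uc;

	for (;;) {
		vcpu_run(vcpu);
		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			/* uc.args[1] holds the stage passed to GUEST_SYNC() */
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
		case UCALL_DONE:
			return;
		}
	}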
@@ -25,7 +25,7 @@ struct uffd_reader_args {
 
 struct uffd_desc {
 	int uffd;
-	uint64_t num_readers;
+	u64 num_readers;
 	/* Holds the write ends of the pipes for killing the readers. */
 	int *pipefds;
 	pthread_t *readers;
@@ -33,8 +33,8 @@ struct uffd_desc {
 };
 
 struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay,
-					   void *hva, uint64_t len,
-					   uint64_t num_readers,
+					   void *hva, u64 len,
+					   u64 num_readers,
 					   uffd_handler_t handler);
 
 void uffd_stop_demand_paging(struct uffd_desc *uffd);
@@ -79,42 +79,42 @@ void apic_disable(void);
 void xapic_enable(void);
 void x2apic_enable(void);
 
-static inline uint32_t get_bsp_flag(void)
+static inline u32 get_bsp_flag(void)
 {
 	return rdmsr(MSR_IA32_APICBASE) & MSR_IA32_APICBASE_BSP;
 }
 
-static inline uint32_t xapic_read_reg(unsigned int reg)
+static inline u32 xapic_read_reg(unsigned int reg)
 {
-	return ((volatile uint32_t *)APIC_DEFAULT_GPA)[reg >> 2];
+	return ((volatile u32 *)APIC_DEFAULT_GPA)[reg >> 2];
 }
 
-static inline void xapic_write_reg(unsigned int reg, uint32_t val)
+static inline void xapic_write_reg(unsigned int reg, u32 val)
 {
-	((volatile uint32_t *)APIC_DEFAULT_GPA)[reg >> 2] = val;
+	((volatile u32 *)APIC_DEFAULT_GPA)[reg >> 2] = val;
 }
 
-static inline uint64_t x2apic_read_reg(unsigned int reg)
+static inline u64 x2apic_read_reg(unsigned int reg)
 {
 	return rdmsr(APIC_BASE_MSR + (reg >> 4));
 }
 
-static inline uint8_t x2apic_write_reg_safe(unsigned int reg, uint64_t value)
+static inline u8 x2apic_write_reg_safe(unsigned int reg, u64 value)
 {
 	return wrmsr_safe(APIC_BASE_MSR + (reg >> 4), value);
 }
 
-static inline void x2apic_write_reg(unsigned int reg, uint64_t value)
+static inline void x2apic_write_reg(unsigned int reg, u64 value)
 {
-	uint8_t fault = x2apic_write_reg_safe(reg, value);
+	u8 fault = x2apic_write_reg_safe(reg, value);
 
 	__GUEST_ASSERT(!fault, "Unexpected fault 0x%x on WRMSR(%x) = %lx\n",
 		       fault, APIC_BASE_MSR + (reg >> 4), value);
 }
 
-static inline void x2apic_write_reg_fault(unsigned int reg, uint64_t value)
+static inline void x2apic_write_reg_fault(unsigned int reg, u64 value)
 {
-	uint8_t fault = x2apic_write_reg_safe(reg, value);
+	u8 fault = x2apic_write_reg_safe(reg, value);
 
 	__GUEST_ASSERT(fault == GP_VECTOR,
 		       "Wanted #GP on WRMSR(%x) = %lx, got 0x%x\n",
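A guest-side sketch using the accessors above (the register names are the usual selftests APIC defines; treat the SPIV write as illustrative):

	x2apic_enable();

	u32 id = (u32)x2apic_read_reg(APIC_ID);

	/* A faulting write is reported as a vector instead of killing the guest. */
	u8 fault = x2apic_write_reg_safe(APIC_SPIV, APIC_SPIV_APIC_ENABLED);
	GUEST_ASSERT(!fault);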
@@ -10,9 +10,9 @@
 #include "hyperv.h"
 #include "vmx.h"
 
-#define u16 uint16_t
-#define u32 uint32_t
-#define u64 uint64_t
 
 #define EVMCS_VERSION 1
 
@@ -245,7 +245,7 @@ static inline void evmcs_enable(void)
 	enable_evmcs = true;
 }
 
-static inline int evmcs_vmptrld(uint64_t vmcs_pa, void *vmcs)
+static inline int evmcs_vmptrld(u64 vmcs_pa, void *vmcs)
 {
 	current_vp_assist->current_nested_vmcs = vmcs_pa;
 	current_vp_assist->enlighten_vmentry = 1;
@@ -265,7 +265,7 @@ static inline bool load_evmcs(struct hyperv_test_pages *hv)
 	return true;
 }
 
-static inline int evmcs_vmptrst(uint64_t *value)
+static inline int evmcs_vmptrst(u64 *value)
 {
 	*value = current_vp_assist->current_nested_vmcs &
 		 ~HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;
@@ -273,7 +273,7 @@ static inline int evmcs_vmptrst(uint64_t *value)
 	return 0;
 }
 
-static inline int evmcs_vmread(uint64_t encoding, uint64_t *value)
+static inline int evmcs_vmread(u64 encoding, u64 *value)
 {
 	switch (encoding) {
 	case GUEST_RIP:
@@ -672,7 +672,7 @@ static inline int evmcs_vmread(uint64_t encoding, uint64_t *value)
 	return 0;
 }
 
-static inline int evmcs_vmwrite(uint64_t encoding, uint64_t value)
+static inline int evmcs_vmwrite(u64 encoding, u64 value)
 {
 	switch (encoding) {
 	case GUEST_RIP:
@@ -1226,9 +1226,9 @@ static inline int evmcs_vmlaunch(void)
 		      "pop %%rbp;"
 		      : [ret]"=&a"(ret)
 		      : [host_rsp]"r"
-			((uint64_t)&current_evmcs->host_rsp),
+			((u64)&current_evmcs->host_rsp),
 			[host_rip]"r"
-			((uint64_t)&current_evmcs->host_rip)
+			((u64)&current_evmcs->host_rip)
 		      : "memory", "cc", "rbx", "r8", "r9", "r10",
 			"r11", "r12", "r13", "r14", "r15");
 	return ret;
@@ -1265,9 +1265,9 @@ static inline int evmcs_vmresume(void)
 		      "pop %%rbp;"
 		      : [ret]"=&a"(ret)
 		      : [host_rsp]"r"
-			((uint64_t)&current_evmcs->host_rsp),
+			((u64)&current_evmcs->host_rsp),
 			[host_rip]"r"
-			((uint64_t)&current_evmcs->host_rip)
+			((u64)&current_evmcs->host_rip)
 		      : "memory", "cc", "rbx", "r8", "r9", "r10",
 			"r11", "r12", "r13", "r14", "r15");
 	return ret;
@@ -254,12 +254,12 @@
  * Issue a Hyper-V hypercall. Returns exception vector raised or 0, 'hv_status'
  * is set to the hypercall status (if no exception occurred).
  */
-static inline uint8_t __hyperv_hypercall(u64 control, vm_vaddr_t input_address,
-					 vm_vaddr_t output_address,
-					 uint64_t *hv_status)
+static inline u8 __hyperv_hypercall(u64 control, gva_t input_address,
+				    gva_t output_address,
+				    u64 *hv_status)
 {
-	uint64_t error_code;
-	uint8_t vector;
+	u64 error_code;
+	u8 vector;
 
 	/* Note both the hypercall and the "asm safe" clobber r9-r11. */
 	asm volatile("mov %[output_address], %%r8\n\t"
@@ -274,11 +274,11 @@ static inline uint8_t __hyperv_hypercall(u64 control, vm_vaddr_t input_address,
 }
 
 /* Issue a Hyper-V hypercall and assert that it succeeded. */
-static inline void hyperv_hypercall(u64 control, vm_vaddr_t input_address,
-				    vm_vaddr_t output_address)
+static inline void hyperv_hypercall(u64 control, gva_t input_address,
+				    gva_t output_address)
 {
-	uint64_t hv_status;
-	uint8_t vector;
+	u64 hv_status;
+	u8 vector;
 
 	vector = __hyperv_hypercall(control, input_address, output_address, &hv_status);
 
@@ -327,27 +327,27 @@ struct hv_vp_assist_page {
 
 extern struct hv_vp_assist_page *current_vp_assist;
 
-int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist);
+int enable_vp_assist(u64 vp_assist_pa, void *vp_assist);
 
 struct hyperv_test_pages {
 	/* VP assist page */
 	void *vp_assist_hva;
-	uint64_t vp_assist_gpa;
+	u64 vp_assist_gpa;
 	void *vp_assist;
 
 	/* Partition assist page */
 	void *partition_assist_hva;
-	uint64_t partition_assist_gpa;
+	u64 partition_assist_gpa;
 	void *partition_assist;
 
 	/* Enlightened VMCS */
 	void *enlightened_vmcs_hva;
-	uint64_t enlightened_vmcs_gpa;
+	u64 enlightened_vmcs_gpa;
 	void *enlightened_vmcs;
 };
 
 struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm,
-						       vm_vaddr_t *p_hv_pages_gva);
+						       gva_t *p_hv_pages_gva);
 
 /* HV_X64_MSR_TSC_INVARIANT_CONTROL bits */
 #define HV_INVARIANT_TSC_EXPOSED	BIT_ULL(0)
@@ -11,19 +11,19 @@
 extern bool is_forced_emulation_enabled;
 
 struct pte_masks {
-	uint64_t present;
-	uint64_t writable;
-	uint64_t user;
-	uint64_t readable;
-	uint64_t executable;
-	uint64_t accessed;
-	uint64_t dirty;
-	uint64_t huge;
-	uint64_t nx;
-	uint64_t c;
-	uint64_t s;
+	u64 present;
+	u64 writable;
+	u64 user;
+	u64 readable;
+	u64 executable;
+	u64 accessed;
+	u64 dirty;
+	u64 huge;
+	u64 nx;
+	u64 c;
+	u64 s;
 
-	uint64_t always_set;
+	u64 always_set;
 };
 
 struct kvm_mmu_arch {
@@ -33,12 +33,12 @@ struct kvm_mmu_arch {
 struct kvm_mmu;
 
 struct kvm_vm_arch {
-	vm_vaddr_t gdt;
-	vm_vaddr_t tss;
-	vm_vaddr_t idt;
+	gva_t gdt;
+	gva_t tss;
+	gva_t idt;
 
-	uint64_t c_bit;
-	uint64_t s_bit;
+	u64 c_bit;
+	u64 s_bit;
 	int sev_fd;
 	bool is_pt_protected;
 };
@@ -62,7 +62,7 @@ do { \
 				     : "+m" (mem)			\
 				     : "r" (val) : "memory");		\
 	} else {							\
-		uint64_t __old = READ_ONCE(mem);			\
+		u64 __old = READ_ONCE(mem);				\
 									\
 		__asm__ __volatile__(KVM_FEP LOCK_PREFIX "cmpxchg %[new], %[ptr]" \
 				     : [ptr] "+m" (mem), [old] "+a" (__old) \
@@ -6,8 +6,8 @@
 #define SELFTEST_KVM_PMU_H
 
 #include <stdbool.h>
-#include <stdint.h>
 
+#include <linux/types.h>
 #include <linux/bits.h>
 
 #define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
@@ -104,14 +104,15 @@ enum amd_pmu_zen_events {
 	NR_AMD_ZEN_EVENTS,
 };
 
-extern const uint64_t intel_pmu_arch_events[];
-extern const uint64_t amd_pmu_zen_events[];
+extern const u64 intel_pmu_arch_events[];
+extern const u64 amd_pmu_zen_events[];
 
 enum pmu_errata {
 	INSTRUCTIONS_RETIRED_OVERCOUNT,
 	BRANCHES_RETIRED_OVERCOUNT,
 };
-extern uint64_t pmu_errata_mask;
+
+extern u64 pmu_errata_mask;
 
 void kvm_init_pmu_errata(void);
@@ -23,7 +23,7 @@ extern bool host_cpu_is_intel;
 extern bool host_cpu_is_amd;
 extern bool host_cpu_is_hygon;
 extern bool host_cpu_is_amd_compatible;
-extern uint64_t guest_tsc_khz;
+extern u64 guest_tsc_khz;
 
 #ifndef MAX_NR_CPUID_ENTRIES
 #define MAX_NR_CPUID_ENTRIES 100
@@ -399,17 +399,17 @@ struct gpr64_regs {
 };
 
 struct desc64 {
-	uint16_t limit0;
-	uint16_t base0;
+	u16 limit0;
+	u16 base0;
 	unsigned base1:8, type:4, s:1, dpl:2, p:1;
 	unsigned limit1:4, avl:1, l:1, db:1, g:1, base2:8;
-	uint32_t base3;
-	uint32_t zero1;
+	u32 base3;
+	u32 zero1;
 } __attribute__((packed));
 
 struct desc_ptr {
-	uint16_t size;
-	uint64_t address;
+	u16 size;
+	u64 address;
 } __attribute__((packed));
 
 struct kvm_x86_state {
@@ -427,18 +427,18 @@ struct kvm_x86_state {
 	struct kvm_msrs msrs;
 };
 
-static inline uint64_t get_desc64_base(const struct desc64 *desc)
+static inline u64 get_desc64_base(const struct desc64 *desc)
 {
-	return (uint64_t)desc->base3 << 32 |
-	       (uint64_t)desc->base2 << 24 |
-	       (uint64_t)desc->base1 << 16 |
-	       (uint64_t)desc->base0;
+	return (u64)desc->base3 << 32 |
+	       (u64)desc->base2 << 24 |
+	       (u64)desc->base1 << 16 |
+	       (u64)desc->base0;
 }
 
-static inline uint64_t rdtsc(void)
+static inline u64 rdtsc(void)
 {
-	uint32_t eax, edx;
-	uint64_t tsc_val;
+	u32 eax, edx;
+	u64 tsc_val;
 	/*
 	 * The lfence is to wait (on Intel CPUs) until all previous
 	 * instructions have been executed. If software requires RDTSC to be
@@ -446,39 +446,39 @@ static inline uint64_t rdtsc(void)
 	 * execute LFENCE immediately after RDTSC
 	 */
 	__asm__ __volatile__("lfence; rdtsc; lfence" : "=a"(eax), "=d"(edx));
-	tsc_val = ((uint64_t)edx) << 32 | eax;
+	tsc_val = ((u64)edx) << 32 | eax;
 	return tsc_val;
 }
 
-static inline uint64_t rdtscp(uint32_t *aux)
+static inline u64 rdtscp(u32 *aux)
 {
-	uint32_t eax, edx;
+	u32 eax, edx;
 
 	__asm__ __volatile__("rdtscp" : "=a"(eax), "=d"(edx), "=c"(*aux));
-	return ((uint64_t)edx) << 32 | eax;
+	return ((u64)edx) << 32 | eax;
 }
 
-static inline uint64_t rdmsr(uint32_t msr)
+static inline u64 rdmsr(u32 msr)
 {
-	uint32_t a, d;
+	u32 a, d;
 
 	__asm__ __volatile__("rdmsr" : "=a"(a), "=d"(d) : "c"(msr) : "memory");
 
-	return a | ((uint64_t) d << 32);
+	return a | ((u64)d << 32);
 }
 
-static inline void wrmsr(uint32_t msr, uint64_t value)
+static inline void wrmsr(u32 msr, u64 value)
 {
-	uint32_t a = value;
-	uint32_t d = value >> 32;
+	u32 a = value;
+	u32 d = value >> 32;
 
 	__asm__ __volatile__("wrmsr" :: "a"(a), "d"(d), "c"(msr) : "memory");
 }
 
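A minimal guest-side sketch of the MSR and TSC helpers above (the MSR choice is illustrative):

	u64 pat = rdmsr(MSR_IA32_CR_PAT);
	wrmsr(MSR_IA32_CR_PAT, pat);	/* write the value straight back */

	u32 aux;
	u64 tsc = rdtscp(&aux);		/* aux receives IA32_TSC_AUX */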
-static inline uint16_t inw(uint16_t port)
+static inline u16 inw(u16 port)
 {
-	uint16_t tmp;
+	u16 tmp;
 
 	__asm__ __volatile__("in %%dx, %%ax"
 			     : /* output */ "=a" (tmp)
@@ -487,120 +487,120 @@ static inline uint16_t inw(uint16_t port)
 	return tmp;
 }
 
-static inline uint16_t get_es(void)
+static inline u16 get_es(void)
 {
-	uint16_t es;
+	u16 es;
 
 	__asm__ __volatile__("mov %%es, %[es]"
 			     : /* output */ [es]"=rm"(es));
 	return es;
 }
 
-static inline uint16_t get_cs(void)
+static inline u16 get_cs(void)
 {
-	uint16_t cs;
+	u16 cs;
 
 	__asm__ __volatile__("mov %%cs, %[cs]"
 			     : /* output */ [cs]"=rm"(cs));
 	return cs;
 }
 
-static inline uint16_t get_ss(void)
+static inline u16 get_ss(void)
 {
-	uint16_t ss;
+	u16 ss;
 
 	__asm__ __volatile__("mov %%ss, %[ss]"
 			     : /* output */ [ss]"=rm"(ss));
 	return ss;
 }
 
-static inline uint16_t get_ds(void)
+static inline u16 get_ds(void)
 {
-	uint16_t ds;
+	u16 ds;
 
 	__asm__ __volatile__("mov %%ds, %[ds]"
 			     : /* output */ [ds]"=rm"(ds));
 	return ds;
 }
 
-static inline uint16_t get_fs(void)
+static inline u16 get_fs(void)
 {
-	uint16_t fs;
+	u16 fs;
 
 	__asm__ __volatile__("mov %%fs, %[fs]"
 			     : /* output */ [fs]"=rm"(fs));
 	return fs;
 }
 
-static inline uint16_t get_gs(void)
+static inline u16 get_gs(void)
 {
-	uint16_t gs;
+	u16 gs;
 
 	__asm__ __volatile__("mov %%gs, %[gs]"
 			     : /* output */ [gs]"=rm"(gs));
 	return gs;
 }
 
-static inline uint16_t get_tr(void)
+static inline u16 get_tr(void)
 {
-	uint16_t tr;
+	u16 tr;
 
 	__asm__ __volatile__("str %[tr]"
 			     : /* output */ [tr]"=rm"(tr));
 	return tr;
 }
 
-static inline uint64_t get_cr0(void)
+static inline u64 get_cr0(void)
 {
-	uint64_t cr0;
+	u64 cr0;
 
 	__asm__ __volatile__("mov %%cr0, %[cr0]"
 			     : /* output */ [cr0]"=r"(cr0));
 	return cr0;
 }
 
-static inline void set_cr0(uint64_t val)
+static inline void set_cr0(u64 val)
 {
 	__asm__ __volatile__("mov %0, %%cr0" : : "r" (val) : "memory");
 }
 
-static inline uint64_t get_cr3(void)
+static inline u64 get_cr3(void)
 {
-	uint64_t cr3;
+	u64 cr3;
 
 	__asm__ __volatile__("mov %%cr3, %[cr3]"
 			     : /* output */ [cr3]"=r"(cr3));
 	return cr3;
 }
 
-static inline void set_cr3(uint64_t val)
+static inline void set_cr3(u64 val)
 {
 	__asm__ __volatile__("mov %0, %%cr3" : : "r" (val) : "memory");
 }
 
-static inline uint64_t get_cr4(void)
+static inline u64 get_cr4(void)
 {
-	uint64_t cr4;
+	u64 cr4;
 
 	__asm__ __volatile__("mov %%cr4, %[cr4]"
 			     : /* output */ [cr4]"=r"(cr4));
 	return cr4;
 }
 
-static inline void set_cr4(uint64_t val)
+static inline void set_cr4(u64 val)
 {
 	__asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory");
 }
 
-static inline uint64_t get_cr8(void)
+static inline u64 get_cr8(void)
 {
-	uint64_t cr8;
+	u64 cr8;
 
 	__asm__ __volatile__("mov %%cr8, %[cr8]" : [cr8]"=r"(cr8));
 	return cr8;
 }
 
-static inline void set_cr8(uint64_t val)
+static inline void set_cr8(u64 val)
 {
 	__asm__ __volatile__("mov %0, %%cr8" : : "r" (val) : "memory");
 }
@@ -651,14 +651,14 @@ static inline struct desc_ptr get_idt(void)
 	return idt;
 }
 
-static inline void outl(uint16_t port, uint32_t value)
+static inline void outl(u16 port, u32 value)
 {
 	__asm__ __volatile__("outl %%eax, %%dx" : : "d"(port), "a"(value));
 }
 
-static inline void __cpuid(uint32_t function, uint32_t index,
-			   uint32_t *eax, uint32_t *ebx,
-			   uint32_t *ecx, uint32_t *edx)
+static inline void __cpuid(u32 function, u32 index,
+			   u32 *eax, u32 *ebx,
+			   u32 *ecx, u32 *edx)
 {
 	*eax = function;
 	*ecx = index;
@@ -672,35 +672,35 @@ static inline void __cpuid(uint32_t function, uint32_t index,
 		: "memory");
 }
 
-static inline void cpuid(uint32_t function,
-			 uint32_t *eax, uint32_t *ebx,
-			 uint32_t *ecx, uint32_t *edx)
+static inline void cpuid(u32 function,
+			 u32 *eax, u32 *ebx,
+			 u32 *ecx, u32 *edx)
 {
 	return __cpuid(function, 0, eax, ebx, ecx, edx);
 }
 
-static inline uint32_t this_cpu_fms(void)
+static inline u32 this_cpu_fms(void)
 {
-	uint32_t eax, ebx, ecx, edx;
+	u32 eax, ebx, ecx, edx;
 
 	cpuid(1, &eax, &ebx, &ecx, &edx);
 	return eax;
 }
 
-static inline uint32_t this_cpu_family(void)
+static inline u32 this_cpu_family(void)
 {
 	return x86_family(this_cpu_fms());
 }
 
-static inline uint32_t this_cpu_model(void)
+static inline u32 this_cpu_model(void)
 {
 	return x86_model(this_cpu_fms());
 }
 
 static inline bool this_cpu_vendor_string_is(const char *vendor)
 {
-	const uint32_t *chunk = (const uint32_t *)vendor;
-	uint32_t eax, ebx, ecx, edx;
+	const u32 *chunk = (const u32 *)vendor;
+	u32 eax, ebx, ecx, edx;
 
 	cpuid(0, &eax, &ebx, &ecx, &edx);
 	return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
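A sketch tying the raw cpuid() helper to the fms wrappers above:

	u32 eax, ebx, ecx, edx;

	cpuid(0x1, &eax, &ebx, &ecx, &edx);
	/* eax holds family/model/stepping, i.e. exactly what this_cpu_fms() returns */
	u32 family = x86_family(eax);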
@@ -724,10 +724,9 @@ static inline bool this_cpu_is_hygon(void)
 	return this_cpu_vendor_string_is("HygonGenuine");
 }
 
-static inline uint32_t __this_cpu_has(uint32_t function, uint32_t index,
-				      uint8_t reg, uint8_t lo, uint8_t hi)
+static inline u32 __this_cpu_has(u32 function, u32 index, u8 reg, u8 lo, u8 hi)
 {
-	uint32_t gprs[4];
+	u32 gprs[4];
 
 	__cpuid(function, index,
 		&gprs[KVM_CPUID_EAX], &gprs[KVM_CPUID_EBX],
@@ -742,7 +741,7 @@ static inline bool this_cpu_has(struct kvm_x86_cpu_feature feature)
 			      feature.reg, feature.bit, feature.bit);
 }
 
-static inline uint32_t this_cpu_property(struct kvm_x86_cpu_property property)
+static inline u32 this_cpu_property(struct kvm_x86_cpu_property property)
 {
 	return __this_cpu_has(property.function, property.index,
 			      property.reg, property.lo_bit, property.hi_bit);
@@ -750,7 +749,7 @@ static inline uint32_t this_cpu_property(struct kvm_x86_cpu_property property)
 
 static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property)
 {
-	uint32_t max_leaf;
+	u32 max_leaf;
 
 	switch (property.function & 0xc0000000) {
 	case 0:
@@ -770,7 +769,7 @@ static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property)
 
 static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
 {
-	uint32_t nr_bits;
+	u32 nr_bits;
 
 	if (feature.f.reg == KVM_CPUID_EBX) {
 		nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
@@ -782,13 +781,13 @@ static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
 	return nr_bits > feature.f.bit || this_cpu_has(feature.f);
 }
 
-static __always_inline uint64_t this_cpu_supported_xcr0(void)
+static __always_inline u64 this_cpu_supported_xcr0(void)
 {
 	if (!this_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO))
 		return 0;
 
 	return this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) |
-	       ((uint64_t)this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
+	       ((u64)this_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
 }
 
 typedef u32 __attribute__((vector_size(16))) sse128_t;
@@ -867,7 +866,7 @@ static inline void cpu_relax(void)
 
 static inline void udelay(unsigned long usec)
 {
-	uint64_t start, now, cycles;
+	u64 start, now, cycles;
 
 	GUEST_ASSERT(guest_tsc_khz);
 	cycles = guest_tsc_khz / 1000 * usec;
@@ -898,8 +897,8 @@ void kvm_x86_state_cleanup(struct kvm_x86_state *state);
 
 const struct kvm_msr_list *kvm_get_msr_index_list(void);
 const struct kvm_msr_list *kvm_get_feature_msr_index_list(void);
-bool kvm_msr_is_in_save_restore_list(uint32_t msr_index);
-uint64_t kvm_get_feature_msr(uint64_t msr_index);
+bool kvm_msr_is_in_save_restore_list(u32 msr_index);
+u64 kvm_get_feature_msr(u64 msr_index);
 
 static inline void vcpu_msrs_get(struct kvm_vcpu *vcpu,
 				 struct kvm_msrs *msrs)
@@ -954,20 +953,20 @@ static inline void vcpu_xcrs_set(struct kvm_vcpu *vcpu, struct kvm_xcrs *xcrs)
 }
 
 const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
-					       uint32_t function, uint32_t index);
+					       u32 function, u32 index);
 const struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
 
-static inline uint32_t kvm_cpu_fms(void)
+static inline u32 kvm_cpu_fms(void)
 {
 	return get_cpuid_entry(kvm_get_supported_cpuid(), 0x1, 0)->eax;
 }
 
-static inline uint32_t kvm_cpu_family(void)
+static inline u32 kvm_cpu_family(void)
 {
 	return x86_family(kvm_cpu_fms());
 }
 
-static inline uint32_t kvm_cpu_model(void)
+static inline u32 kvm_cpu_model(void)
 {
 	return x86_model(kvm_cpu_fms());
 }
@@ -980,17 +979,17 @@ static inline bool kvm_cpu_has(struct kvm_x86_cpu_feature feature)
 	return kvm_cpuid_has(kvm_get_supported_cpuid(), feature);
 }
 
-uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
-			    struct kvm_x86_cpu_property property);
+u32 kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
+		       struct kvm_x86_cpu_property property);
 
-static inline uint32_t kvm_cpu_property(struct kvm_x86_cpu_property property)
+static inline u32 kvm_cpu_property(struct kvm_x86_cpu_property property)
 {
 	return kvm_cpuid_property(kvm_get_supported_cpuid(), property);
 }
 
 static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property)
 {
-	uint32_t max_leaf;
+	u32 max_leaf;
 
 	switch (property.function & 0xc0000000) {
 	case 0:
@@ -1010,7 +1009,7 @@ static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property)
 
 static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
 {
-	uint32_t nr_bits;
+	u32 nr_bits;
 
 	if (feature.f.reg == KVM_CPUID_EBX) {
 		nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
@@ -1022,13 +1021,13 @@ static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
 	return nr_bits > feature.f.bit || kvm_cpu_has(feature.f);
 }
 
-static __always_inline uint64_t kvm_cpu_supported_xcr0(void)
+static __always_inline u64 kvm_cpu_supported_xcr0(void)
 {
 	if (!kvm_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO))
 		return 0;
 
 	return kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) |
-	       ((uint64_t)kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
+	       ((u64)kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
 }
 
 static inline size_t kvm_cpuid2_size(int nr_entries)
@@ -1062,8 +1061,8 @@ static inline void vcpu_get_cpuid(struct kvm_vcpu *vcpu)
 }
 
 static inline struct kvm_cpuid_entry2 *__vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
-							      uint32_t function,
-							      uint32_t index)
+							      u32 function,
+							      u32 index)
 {
 	TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first (or equivalent)");
 
@@ -1074,7 +1073,7 @@ static inline struct kvm_cpuid_entry2 *__vcpu_get_cpuid_entry(struct kvm_vcpu *v
 }
 
 static inline struct kvm_cpuid_entry2 *vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
-							    uint32_t function)
+							    u32 function)
 {
 	return __vcpu_get_cpuid_entry(vcpu, function, 0);
 }
@@ -1104,10 +1103,10 @@ static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu)
 
 void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu,
 			     struct kvm_x86_cpu_property property,
-			     uint32_t value);
-void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, uint8_t maxphyaddr);
+			     u32 value);
+void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, u8 maxphyaddr);
 
-void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function);
+void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, u32 function);
 
 static inline bool vcpu_cpuid_has(struct kvm_vcpu *vcpu,
 				  struct kvm_x86_cpu_feature feature)
@@ -1135,8 +1134,8 @@ static inline void vcpu_clear_cpuid_feature(struct kvm_vcpu *vcpu,
 	vcpu_set_or_clear_cpuid_feature(vcpu, feature, false);
 }
 
-uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index);
-int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value);
+u64 vcpu_get_msr(struct kvm_vcpu *vcpu, u64 msr_index);
+int _vcpu_set_msr(struct kvm_vcpu *vcpu, u64 msr_index, u64 msr_value);
 
 /*
  * Assert on an MSR access(es) and pretty print the MSR name when possible.
@@ -1161,14 +1160,14 @@ do { \
  * is changing, etc. This is NOT an exhaustive list! The intent is to filter
  * out MSRs that are not durable _and_ that a selftest wants to write.
  */
-static inline bool is_durable_msr(uint32_t msr)
+static inline bool is_durable_msr(u32 msr)
 {
 	return msr != MSR_IA32_TSC;
 }
 
 #define vcpu_set_msr(vcpu, msr, val)					\
 do {									\
-	uint64_t r, v = val;						\
+	u64 r, v = val;							\
 									\
 	TEST_ASSERT_MSR(_vcpu_set_msr(vcpu, msr, v) == 1,		\
 			"KVM_SET_MSRS failed on %s, value = 0x%lx", msr, #msr, v); \
@@ -1182,28 +1181,28 @@ void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
 void kvm_init_vm_address_properties(struct kvm_vm *vm);
 
 struct ex_regs {
-	uint64_t rax, rcx, rdx, rbx;
-	uint64_t rbp, rsi, rdi;
-	uint64_t r8, r9, r10, r11;
-	uint64_t r12, r13, r14, r15;
-	uint64_t vector;
-	uint64_t error_code;
-	uint64_t rip;
-	uint64_t cs;
-	uint64_t rflags;
+	u64 rax, rcx, rdx, rbx;
+	u64 rbp, rsi, rdi;
+	u64 r8, r9, r10, r11;
+	u64 r12, r13, r14, r15;
+	u64 vector;
+	u64 error_code;
+	u64 rip;
+	u64 cs;
+	u64 rflags;
 };
 
 struct idt_entry {
-	uint16_t offset0;
-	uint16_t selector;
-	uint16_t ist : 3;
-	uint16_t : 5;
-	uint16_t type : 4;
-	uint16_t : 1;
-	uint16_t dpl : 2;
-	uint16_t p : 1;
-	uint16_t offset1;
-	uint32_t offset2; uint32_t reserved;
+	u16 offset0;
+	u16 selector;
+	u16 ist : 3;
+	u16 : 5;
+	u16 type : 4;
+	u16 : 1;
+	u16 dpl : 2;
+	u16 p : 1;
+	u16 offset1;
+	u32 offset2; u32 reserved;
 };
 
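A sketch of how ex_regs is consumed by a guest exception handler; the skip-two-bytes #UD idiom is common in these tests (vm and UD_VECTOR are assumed to exist):

	static void ud_handler(struct ex_regs *regs)
	{
		/* Skip the 2-byte UD2 opcode and let the guest continue. */
		regs->rip += 2;
	}

	/* Host side, after creating the VM: */
	vm_install_exception_handler(vm, UD_VECTOR, ud_handler);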
 void vm_install_exception_handler(struct kvm_vm *vm, int vector,
 
@@ -1262,8 +1261,8 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
 
 #define kvm_asm_safe(insn, inputs...)					\
 ({									\
-	uint64_t ign_error_code;					\
-	uint8_t vector;							\
+	u64 ign_error_code;						\
+	u8 vector;							\
 									\
 	asm volatile(KVM_ASM_SAFE(insn)					\
 		     : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code)	\
@@ -1274,7 +1273,7 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
 
 #define kvm_asm_safe_ec(insn, error_code, inputs...)			\
 ({									\
-	uint8_t vector;							\
+	u8 vector;							\
 									\
 	asm volatile(KVM_ASM_SAFE(insn)					\
 		     : KVM_ASM_SAFE_OUTPUTS(vector, error_code)		\
@@ -1285,8 +1284,8 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
 
 #define kvm_asm_safe_fep(insn, inputs...)				\
 ({									\
-	uint64_t ign_error_code;					\
-	uint8_t vector;							\
+	u64 ign_error_code;						\
+	u8 vector;							\
 									\
 	asm volatile(KVM_ASM_SAFE_FEP(insn)				\
 		     : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code)	\
@@ -1297,7 +1296,7 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
 
 #define kvm_asm_safe_ec_fep(insn, error_code, inputs...)		\
 ({									\
-	uint8_t vector;							\
+	u8 vector;							\
 									\
 	asm volatile(KVM_ASM_SAFE_FEP(insn)				\
 		     : KVM_ASM_SAFE_OUTPUTS(vector, error_code)		\
@@ -1307,11 +1306,11 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
 })
 
 #define BUILD_READ_U64_SAFE_HELPER(insn, _fep, _FEP)			\
-static inline uint8_t insn##_safe ##_fep(uint32_t idx, uint64_t *val)	\
+static inline u8 insn##_safe ##_fep(u32 idx, u64 *val)			\
 {									\
-	uint64_t error_code;						\
-	uint8_t vector;							\
-	uint32_t a, d;							\
+	u64 error_code;							\
+	u8 vector;							\
+	u32 a, d;							\
 									\
 	asm volatile(KVM_ASM_SAFE##_FEP(#insn)				\
 		     : "=a"(a), "=d"(d),				\
@@ -1319,7 +1318,7 @@ static inline uint8_t insn##_safe ##_fep(uint32_t idx, uint64_t *val) \
 		     : "c"(idx)						\
 		     : KVM_ASM_SAFE_CLOBBERS);				\
 									\
-	*val = (uint64_t)a | ((uint64_t)d << 32);			\
+	*val = (u64)a | ((u64)d << 32);					\
 	return vector;							\
 }
@@ -1335,12 +1334,12 @@ BUILD_READ_U64_SAFE_HELPERS(rdmsr)
|
||||
BUILD_READ_U64_SAFE_HELPERS(rdpmc)
|
||||
BUILD_READ_U64_SAFE_HELPERS(xgetbv)
|
||||
|
||||
static inline uint8_t wrmsr_safe(uint32_t msr, uint64_t val)
|
||||
static inline u8 wrmsr_safe(u32 msr, u64 val)
|
||||
{
|
||||
return kvm_asm_safe("wrmsr", "a"(val & -1u), "d"(val >> 32), "c"(msr));
|
||||
}
|
||||
|
||||
static inline uint8_t xsetbv_safe(uint32_t index, uint64_t value)
|
||||
static inline u8 xsetbv_safe(u32 index, u64 value)
|
||||
{
|
||||
u32 eax = value;
|
||||
u32 edx = value >> 32;
|
||||
@@ -1395,23 +1394,20 @@ static inline bool kvm_is_lbrv_enabled(void)
|
||||
return !!get_kvm_amd_param_integer("lbrv");
|
||||
}
|
||||
|
||||
uint64_t *vm_get_pte(struct kvm_vm *vm, uint64_t vaddr);
|
||||
u64 *vm_get_pte(struct kvm_vm *vm, gva_t gva);
|
||||
|
||||
uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
|
||||
uint64_t a3);
|
||||
uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1);
|
||||
void xen_hypercall(uint64_t nr, uint64_t a0, void *a1);
|
||||
u64 kvm_hypercall(u64 nr, u64 a0, u64 a1, u64 a2, u64 a3);
|
||||
u64 __xen_hypercall(u64 nr, u64 a0, void *a1);
|
||||
void xen_hypercall(u64 nr, u64 a0, void *a1);
|
||||
|
||||
static inline uint64_t __kvm_hypercall_map_gpa_range(uint64_t gpa,
|
||||
uint64_t size, uint64_t flags)
|
||||
static inline u64 __kvm_hypercall_map_gpa_range(gpa_t gpa, u64 size, u64 flags)
|
||||
{
|
||||
return kvm_hypercall(KVM_HC_MAP_GPA_RANGE, gpa, size >> PAGE_SHIFT, flags, 0);
|
||||
}
|
||||
|
||||
static inline void kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size,
|
||||
uint64_t flags)
|
||||
static inline void kvm_hypercall_map_gpa_range(gpa_t gpa, u64 size, u64 flags)
|
||||
{
|
||||
uint64_t ret = __kvm_hypercall_map_gpa_range(gpa, size, flags);
|
||||
u64 ret = __kvm_hypercall_map_gpa_range(gpa, size, flags);
|
||||
|
||||
GUEST_ASSERT(!ret);
|
||||
}
|
||||
@@ -1456,7 +1452,7 @@ static inline void cli(void)
|
||||
asm volatile ("cli");
|
||||
}
|
||||
|
||||
void __vm_xsave_require_permission(uint64_t xfeature, const char *name);
|
||||
void __vm_xsave_require_permission(u64 xfeature, const char *name);
|
||||
|
||||
#define vm_xsave_require_permission(xfeature) \
|
||||
__vm_xsave_require_permission(xfeature, #xfeature)
|
||||
@@ -1511,17 +1507,17 @@ enum pg_level {
|
||||
void tdp_mmu_init(struct kvm_vm *vm, int pgtable_levels,
|
||||
struct pte_masks *pte_masks);
|
||||
|
||||
void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr,
|
||||
uint64_t paddr, int level);
|
||||
void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
|
||||
uint64_t nr_bytes, int level);
|
||||
void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, gva_t gva,
|
||||
gpa_t gpa, int level);
|
||||
void virt_map_level(struct kvm_vm *vm, gva_t gva, gpa_t gpa,
|
||||
u64 nr_bytes, int level);
|
||||
|
||||
void vm_enable_tdp(struct kvm_vm *vm);
|
||||
bool kvm_cpu_has_tdp(void);
|
||||
void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr, uint64_t size);
|
||||
void tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size);
|
||||
void tdp_identity_map_default_memslots(struct kvm_vm *vm);
|
||||
void tdp_identity_map_1g(struct kvm_vm *vm, uint64_t addr, uint64_t size);
|
||||
uint64_t *tdp_get_pte(struct kvm_vm *vm, uint64_t l2_gpa);
|
||||
void tdp_identity_map_1g(struct kvm_vm *vm, u64 addr, u64 size);
|
||||
u64 *tdp_get_pte(struct kvm_vm *vm, u64 l2_gpa);
|
||||
|
||||
/*
|
||||
* Basic CPU control in CR0
|
||||
|
||||
@@ -46,16 +46,16 @@ static inline bool is_sev_vm(struct kvm_vm *vm)
return is_sev_es_vm(vm) || vm->type == KVM_X86_SEV_VM;
}

-void sev_vm_launch(struct kvm_vm *vm, uint32_t policy);
-void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement);
+void sev_vm_launch(struct kvm_vm *vm, u32 policy);
+void sev_vm_launch_measure(struct kvm_vm *vm, u8 *measurement);
void sev_vm_launch_finish(struct kvm_vm *vm);
-void snp_vm_launch_start(struct kvm_vm *vm, uint64_t policy);
+void snp_vm_launch_start(struct kvm_vm *vm, u64 policy);
void snp_vm_launch_update(struct kvm_vm *vm);
void snp_vm_launch_finish(struct kvm_vm *vm);

-struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
+struct kvm_vm *vm_sev_create_with_one_vcpu(u32 type, void *guest_code,
struct kvm_vcpu **cpu);
-void vm_sev_launch(struct kvm_vm *vm, uint64_t policy, uint8_t *measurement);
+void vm_sev_launch(struct kvm_vm *vm, u64 policy, u8 *measurement);

kvm_static_assert(SEV_RET_SUCCESS == 0);

@@ -85,7 +85,7 @@ static inline u64 snp_default_policy(void)
unsigned long raw; \
} sev_cmd = { .c = { \
.id = (cmd), \
-.data = (uint64_t)(arg), \
+.data = (u64)(arg), \
.sev_fd = (vm)->arch.sev_fd, \
} }; \
\
@@ -120,8 +120,8 @@ static inline void sev_register_encrypted_memory(struct kvm_vm *vm,
vm_ioctl(vm, KVM_MEMORY_ENCRYPT_REG_REGION, &range);
}

-static inline void sev_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa,
-uint64_t size)
+static inline void sev_launch_update_data(struct kvm_vm *vm, gpa_t gpa,
+u64 size)
{
struct kvm_sev_launch_update_data update_data = {
.uaddr = (unsigned long)addr_gpa2hva(vm, gpa),
@@ -131,8 +131,8 @@ static inline void sev_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa,
vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_DATA, &update_data);
}

-static inline void snp_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa,
-uint64_t hva, uint64_t size, uint8_t type)
+static inline void snp_launch_update_data(struct kvm_vm *vm, gpa_t gpa,
+u64 hva, u64 size, u8 type)
{
struct kvm_sev_snp_launch_update update_data = {
.uaddr = hva,

@@ -8,8 +8,7 @@
#define SMRAM_MEMSLOT ((1 << 16) | 1)
#define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)

-void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
-uint64_t smram_gpa,
+void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu, u64 smram_gpa,
const void *smi_handler, size_t handler_size);

void inject_smi(struct kvm_vcpu *vcpu);

@@ -16,20 +16,20 @@ struct svm_test_data {
/* VMCB */
struct vmcb *vmcb; /* gva */
void *vmcb_hva;
-uint64_t vmcb_gpa;
+u64 vmcb_gpa;

/* host state-save area */
struct vmcb_save_area *save_area; /* gva */
void *save_area_hva;
-uint64_t save_area_gpa;
+u64 save_area_gpa;

/* MSR-Bitmap */
void *msr; /* gva */
void *msr_hva;
-uint64_t msr_gpa;
+u64 msr_gpa;

/* NPT */
-uint64_t ncr3_gpa;
+u64 ncr3_gpa;
};

static inline void vmmcall(void)
@@ -56,9 +56,9 @@ static inline void vmmcall(void)
"clgi\n" \
)

-struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva);
+struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, gva_t *p_svm_gva);
void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp);
-void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa);
+void run_guest(struct vmcb *vmcb, u64 vmcb_gpa);

static inline bool kvm_cpu_has_npt(void)
{

@@ -6,7 +6,7 @@

#define UCALL_EXIT_REASON KVM_EXIT_IO

-static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+static inline void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa)
{
}


@@ -285,16 +285,16 @@ enum vmcs_field {
};

struct vmx_msr_entry {
-uint32_t index;
-uint32_t reserved;
-uint64_t value;
+u32 index;
+u32 reserved;
+u64 value;
} __attribute__ ((aligned(16)));

#include "evmcs.h"

-static inline int vmxon(uint64_t phys)
+static inline int vmxon(u64 phys)
{
-uint8_t ret;
+u8 ret;

__asm__ __volatile__ ("vmxon %[pa]; setna %[ret]"
: [ret]"=rm"(ret)
@@ -309,9 +309,9 @@ static inline void vmxoff(void)
__asm__ __volatile__("vmxoff");
}

-static inline int vmclear(uint64_t vmcs_pa)
+static inline int vmclear(u64 vmcs_pa)
{
-uint8_t ret;
+u8 ret;

__asm__ __volatile__ ("vmclear %[pa]; setna %[ret]"
: [ret]"=rm"(ret)
@@ -321,9 +321,9 @@ static inline int vmclear(uint64_t vmcs_pa)
return ret;
}

-static inline int vmptrld(uint64_t vmcs_pa)
+static inline int vmptrld(u64 vmcs_pa)
{
-uint8_t ret;
+u8 ret;

if (enable_evmcs)
return -1;
@@ -336,10 +336,10 @@ static inline int vmptrld(uint64_t vmcs_pa)
return ret;
}

-static inline int vmptrst(uint64_t *value)
+static inline int vmptrst(u64 *value)
{
-uint64_t tmp;
-uint8_t ret;
+u64 tmp;
+u8 ret;

if (enable_evmcs)
return evmcs_vmptrst(value);
@@ -356,9 +356,9 @@ static inline int vmptrst(uint64_t *value)
* A wrapper around vmptrst that ignores errors and returns zero if the
* vmptrst instruction fails.
*/
-static inline uint64_t vmptrstz(void)
+static inline u64 vmptrstz(void)
{
-uint64_t value = 0;
+u64 value = 0;
vmptrst(&value);
return value;
}
@@ -391,8 +391,8 @@ static inline int vmlaunch(void)
"pop %%rcx;"
"pop %%rbp;"
: [ret]"=&a"(ret)
-: [host_rsp]"r"((uint64_t)HOST_RSP),
-[host_rip]"r"((uint64_t)HOST_RIP)
+: [host_rsp]"r"((u64)HOST_RSP),
+[host_rip]"r"((u64)HOST_RIP)
: "memory", "cc", "rbx", "r8", "r9", "r10",
"r11", "r12", "r13", "r14", "r15");
return ret;
@@ -426,8 +426,8 @@ static inline int vmresume(void)
"pop %%rcx;"
"pop %%rbp;"
: [ret]"=&a"(ret)
-: [host_rsp]"r"((uint64_t)HOST_RSP),
-[host_rip]"r"((uint64_t)HOST_RIP)
+: [host_rsp]"r"((u64)HOST_RSP),
+[host_rip]"r"((u64)HOST_RIP)
: "memory", "cc", "rbx", "r8", "r9", "r10",
"r11", "r12", "r13", "r14", "r15");
return ret;
@@ -447,10 +447,10 @@ static inline void vmcall(void)
"r10", "r11", "r12", "r13", "r14", "r15");
}

-static inline int vmread(uint64_t encoding, uint64_t *value)
+static inline int vmread(u64 encoding, u64 *value)
{
-uint64_t tmp;
-uint8_t ret;
+u64 tmp;
+u8 ret;

if (enable_evmcs)
return evmcs_vmread(encoding, value);
@@ -468,16 +468,16 @@ static inline int vmread(uint64_t encoding, uint64_t *value)
* A wrapper around vmread that ignores errors and returns zero if the
* vmread instruction fails.
*/
-static inline uint64_t vmreadz(uint64_t encoding)
+static inline u64 vmreadz(u64 encoding)
{
-uint64_t value = 0;
+u64 value = 0;
vmread(encoding, &value);
return value;
}

-static inline int vmwrite(uint64_t encoding, uint64_t value)
+static inline int vmwrite(u64 encoding, u64 value)
{
-uint8_t ret;
+u8 ret;

if (enable_evmcs)
return evmcs_vmwrite(encoding, value);
@@ -490,41 +490,41 @@ static inline int vmwrite(uint64_t encoding, uint64_t value)
return ret;
}

-static inline uint32_t vmcs_revision(void)
+static inline u32 vmcs_revision(void)
{
return rdmsr(MSR_IA32_VMX_BASIC);
}

struct vmx_pages {
void *vmxon_hva;
-uint64_t vmxon_gpa;
+u64 vmxon_gpa;
void *vmxon;

void *vmcs_hva;
-uint64_t vmcs_gpa;
+u64 vmcs_gpa;
void *vmcs;

void *msr_hva;
-uint64_t msr_gpa;
+u64 msr_gpa;
void *msr;

void *shadow_vmcs_hva;
-uint64_t shadow_vmcs_gpa;
+u64 shadow_vmcs_gpa;
void *shadow_vmcs;

void *vmread_hva;
-uint64_t vmread_gpa;
+u64 vmread_gpa;
void *vmread;

void *vmwrite_hva;
-uint64_t vmwrite_gpa;
+u64 vmwrite_gpa;
void *vmwrite;

void *apic_access_hva;
-uint64_t apic_access_gpa;
+u64 apic_access_gpa;
void *apic_access;

-uint64_t eptp_gpa;
+u64 eptp_gpa;
};

union vmx_basic {
@@ -550,7 +550,7 @@ union vmx_ctrl_msr {
};
};

-struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva);
+struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, gva_t *p_vmx_gva);
bool prepare_for_vmx_operation(struct vmx_pages *vmx);
void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
bool load_vmcs(struct vmx_pages *vmx);

@@ -46,12 +46,12 @@ static const char * const test_stage_string[] = {

struct test_args {
struct kvm_vm *vm;
-uint64_t guest_test_virt_mem;
-uint64_t host_page_size;
-uint64_t host_num_pages;
-uint64_t large_page_size;
-uint64_t large_num_pages;
-uint64_t host_pages_per_lpage;
+u64 guest_test_virt_mem;
+u64 host_page_size;
+u64 host_num_pages;
+u64 large_page_size;
+u64 large_num_pages;
+u64 host_pages_per_lpage;
enum vm_mem_backing_src_type src_type;
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
};
@@ -63,7 +63,7 @@ struct test_args {
static enum test_stage guest_test_stage;

/* Host variables */
-static uint32_t nr_vcpus = 1;
+static u32 nr_vcpus = 1;
static struct test_args test_args;
static enum test_stage *current_stage;
static bool host_quit;
@@ -77,19 +77,19 @@ static sem_t test_stage_completed;
* This will be set to the topmost valid physical address minus
* the test memory size.
*/
-static uint64_t guest_test_phys_mem;
+static u64 guest_test_phys_mem;

/*
* Guest virtual memory offset of the testing memory slot.
* Must not conflict with identity mapped test code.
*/
-static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
+static u64 guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;

static void guest_code(bool do_write)
{
struct test_args *p = &test_args;
enum test_stage *current_stage = &guest_test_stage;
-uint64_t addr;
+u64 addr;
int i, j;

while (true) {
@@ -113,9 +113,9 @@ static void guest_code(bool do_write)
case KVM_CREATE_MAPPINGS:
for (i = 0; i < p->large_num_pages; i++) {
if (do_write)
-*(uint64_t *)addr = 0x0123456789ABCDEF;
+*(u64 *)addr = 0x0123456789ABCDEF;
else
-READ_ONCE(*(uint64_t *)addr);
+READ_ONCE(*(u64 *)addr);

addr += p->large_page_size;
}
@@ -131,7 +131,7 @@ static void guest_code(bool do_write)
case KVM_UPDATE_MAPPINGS:
if (p->src_type == VM_MEM_SRC_ANONYMOUS) {
for (i = 0; i < p->host_num_pages; i++) {
-*(uint64_t *)addr = 0x0123456789ABCDEF;
+*(u64 *)addr = 0x0123456789ABCDEF;
addr += p->host_page_size;
}
break;
@@ -142,7 +142,7 @@ static void guest_code(bool do_write)
* Write to the first host page in each large
* page region, and trigger break of large pages.
*/
-*(uint64_t *)addr = 0x0123456789ABCDEF;
+*(u64 *)addr = 0x0123456789ABCDEF;

/*
* Access the middle host pages in each large
@@ -152,7 +152,7 @@ static void guest_code(bool do_write)
*/
addr += p->large_page_size / 2;
for (j = 0; j < p->host_pages_per_lpage / 2; j++) {
-READ_ONCE(*(uint64_t *)addr);
+READ_ONCE(*(u64 *)addr);
addr += p->host_page_size;
}
}
@@ -167,7 +167,7 @@ static void guest_code(bool do_write)
*/
case KVM_ADJUST_MAPPINGS:
for (i = 0; i < p->host_num_pages; i++) {
-READ_ONCE(*(uint64_t *)addr);
+READ_ONCE(*(u64 *)addr);
addr += p->host_page_size;
}
break;
@@ -227,8 +227,8 @@ static void *vcpu_worker(void *data)
}

struct test_params {
-uint64_t phys_offset;
-uint64_t test_mem_size;
+u64 phys_offset;
+u64 test_mem_size;
enum vm_mem_backing_src_type src_type;
};

@@ -237,12 +237,12 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
int ret;
struct test_params *p = arg;
enum vm_mem_backing_src_type src_type = p->src_type;
-uint64_t large_page_size = get_backing_src_pagesz(src_type);
-uint64_t guest_page_size = vm_guest_mode_params[mode].page_size;
-uint64_t host_page_size = getpagesize();
-uint64_t test_mem_size = p->test_mem_size;
-uint64_t guest_num_pages;
-uint64_t alignment;
+u64 large_page_size = get_backing_src_pagesz(src_type);
+u64 guest_page_size = vm_guest_mode_params[mode].page_size;
+u64 host_page_size = getpagesize();
+u64 test_mem_size = p->test_mem_size;
+u64 guest_num_pages;
+u64 alignment;
void *host_test_mem;
struct kvm_vm *vm;

@@ -281,7 +281,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);

/* Cache the HVA pointer of the region */
-host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);
+host_test_mem = addr_gpa2hva(vm, (gpa_t)guest_test_phys_mem);

/* Export shared structure test_args to guest */
sync_global_to_guest(vm, test_args);
@@ -292,7 +292,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
ret = sem_init(&test_stage_completed, 0, 0);
TEST_ASSERT(ret == 0, "Error in sem_init");

-current_stage = addr_gva2hva(vm, (vm_vaddr_t)(&guest_test_stage));
+current_stage = addr_gva2hva(vm, (gva_t)(&guest_test_stage));
*current_stage = NUM_TEST_STAGES;

pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
@@ -304,7 +304,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
pr_info("Guest physical test memory offset: 0x%lx\n",
guest_test_phys_mem);
pr_info("Host virtual test memory offset: 0x%lx\n",
-(uint64_t)host_test_mem);
+(u64)host_test_mem);
pr_info("Number of testing vCPUs: %d\n", nr_vcpus);

return vm;

@@ -50,7 +50,7 @@ static void gic_dist_init(enum gic_type type, unsigned int nr_cpus)

void gic_init(enum gic_type type, unsigned int nr_cpus)
{
-uint32_t cpu = guest_get_vcpuid();
+u32 cpu = guest_get_vcpuid();

GUEST_ASSERT(type < GIC_TYPE_MAX);
GUEST_ASSERT(nr_cpus);
@@ -73,7 +73,7 @@ void gic_irq_disable(unsigned int intid)

unsigned int gic_get_and_ack_irq(void)
{
-uint64_t irqstat;
+u64 irqstat;
unsigned int intid;

GUEST_ASSERT(gic_common_ops);
@@ -102,7 +102,7 @@ void gic_set_eoi_split(bool split)
gic_common_ops->gic_set_eoi_split(split);
}

-void gic_set_priority_mask(uint64_t pmr)
+void gic_set_priority_mask(u64 pmr)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_set_priority_mask(pmr);

@@ -12,20 +12,20 @@ struct gic_common_ops {
void (*gic_cpu_init)(unsigned int cpu);
void (*gic_irq_enable)(unsigned int intid);
void (*gic_irq_disable)(unsigned int intid);
-uint64_t (*gic_read_iar)(void);
-void (*gic_write_eoir)(uint32_t irq);
-void (*gic_write_dir)(uint32_t irq);
+u64 (*gic_read_iar)(void);
+void (*gic_write_eoir)(u32 irq);
+void (*gic_write_dir)(u32 irq);
void (*gic_set_eoi_split)(bool split);
-void (*gic_set_priority_mask)(uint64_t mask);
-void (*gic_set_priority)(uint32_t intid, uint32_t prio);
-void (*gic_irq_set_active)(uint32_t intid);
-void (*gic_irq_clear_active)(uint32_t intid);
-bool (*gic_irq_get_active)(uint32_t intid);
-void (*gic_irq_set_pending)(uint32_t intid);
-void (*gic_irq_clear_pending)(uint32_t intid);
-bool (*gic_irq_get_pending)(uint32_t intid);
-void (*gic_irq_set_config)(uint32_t intid, bool is_edge);
-void (*gic_irq_set_group)(uint32_t intid, bool group);
+void (*gic_set_priority_mask)(u64 mask);
+void (*gic_set_priority)(u32 intid, u32 prio);
+void (*gic_irq_set_active)(u32 intid);
+void (*gic_irq_clear_active)(u32 intid);
+bool (*gic_irq_get_active)(u32 intid);
+void (*gic_irq_set_pending)(u32 intid);
+void (*gic_irq_clear_pending)(u32 intid);
+bool (*gic_irq_get_pending)(u32 intid);
+void (*gic_irq_set_config)(u32 intid, bool is_edge);
+void (*gic_irq_set_group)(u32 intid, bool group);
};

extern const struct gic_common_ops gicv3_ops;

@@ -50,13 +50,13 @@ static void gicv3_gicd_wait_for_rwp(void)
}
}

-static inline volatile void *gicr_base_cpu(uint32_t cpu)
+static inline volatile void *gicr_base_cpu(u32 cpu)
{
/* Align all the redistributors sequentially */
return GICR_BASE_GVA + cpu * SZ_64K * 2;
}

-static void gicv3_gicr_wait_for_rwp(uint32_t cpu)
+static void gicv3_gicr_wait_for_rwp(u32 cpu)
{
unsigned int count = 100000; /* 1s */

@@ -66,7 +66,7 @@ static void gicv3_gicr_wait_for_rwp(uint32_t cpu)
}
}

-static void gicv3_wait_for_rwp(uint32_t cpu_or_dist)
+static void gicv3_wait_for_rwp(u32 cpu_or_dist)
{
if (cpu_or_dist & DIST_BIT)
gicv3_gicd_wait_for_rwp();
@@ -91,34 +91,34 @@ static enum gicv3_intid_range get_intid_range(unsigned int intid)
return INVALID_RANGE;
}

-static uint64_t gicv3_read_iar(void)
+static u64 gicv3_read_iar(void)
{
-uint64_t irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1);
+u64 irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1);

dsb(sy);
return irqstat;
}

-static void gicv3_write_eoir(uint32_t irq)
+static void gicv3_write_eoir(u32 irq)
{
write_sysreg_s(irq, SYS_ICC_EOIR1_EL1);
isb();
}

-static void gicv3_write_dir(uint32_t irq)
+static void gicv3_write_dir(u32 irq)
{
write_sysreg_s(irq, SYS_ICC_DIR_EL1);
isb();
}

-static void gicv3_set_priority_mask(uint64_t mask)
+static void gicv3_set_priority_mask(u64 mask)
{
write_sysreg_s(mask, SYS_ICC_PMR_EL1);
}

static void gicv3_set_eoi_split(bool split)
{
-uint32_t val;
+u32 val;

/*
* All other fields are read-only, so no need to read CTLR first. In
@@ -129,29 +129,29 @@ static void gicv3_set_eoi_split(bool split)
isb();
}

-uint32_t gicv3_reg_readl(uint32_t cpu_or_dist, uint64_t offset)
+u32 gicv3_reg_readl(u32 cpu_or_dist, u64 offset)
{
volatile void *base = cpu_or_dist & DIST_BIT ? GICD_BASE_GVA
: sgi_base_from_redist(gicr_base_cpu(cpu_or_dist));
return readl(base + offset);
}

-void gicv3_reg_writel(uint32_t cpu_or_dist, uint64_t offset, uint32_t reg_val)
+void gicv3_reg_writel(u32 cpu_or_dist, u64 offset, u32 reg_val)
{
volatile void *base = cpu_or_dist & DIST_BIT ? GICD_BASE_GVA
: sgi_base_from_redist(gicr_base_cpu(cpu_or_dist));
writel(reg_val, base + offset);
}

-uint32_t gicv3_getl_fields(uint32_t cpu_or_dist, uint64_t offset, uint32_t mask)
+u32 gicv3_getl_fields(u32 cpu_or_dist, u64 offset, u32 mask)
{
return gicv3_reg_readl(cpu_or_dist, offset) & mask;
}

-void gicv3_setl_fields(uint32_t cpu_or_dist, uint64_t offset,
-uint32_t mask, uint32_t reg_val)
+void gicv3_setl_fields(u32 cpu_or_dist, u64 offset,
+u32 mask, u32 reg_val)
{
-uint32_t tmp = gicv3_reg_readl(cpu_or_dist, offset) & ~mask;
+u32 tmp = gicv3_reg_readl(cpu_or_dist, offset) & ~mask;

tmp |= (reg_val & mask);
gicv3_reg_writel(cpu_or_dist, offset, tmp);
@@ -165,14 +165,14 @@ void gicv3_setl_fields(uint32_t cpu_or_dist, uint64_t offset,
* map that doesn't implement it; like GICR_WAKER's offset of 0x0014 being
* marked as "Reserved" in the Distributor map.
*/
-static void gicv3_access_reg(uint32_t intid, uint64_t offset,
-uint32_t reg_bits, uint32_t bits_per_field,
-bool write, uint32_t *val)
+static void gicv3_access_reg(u32 intid, u64 offset,
+u32 reg_bits, u32 bits_per_field,
+bool write, u32 *val)
{
-uint32_t cpu = guest_get_vcpuid();
+u32 cpu = guest_get_vcpuid();
enum gicv3_intid_range intid_range = get_intid_range(intid);
-uint32_t fields_per_reg, index, mask, shift;
-uint32_t cpu_or_dist;
+u32 fields_per_reg, index, mask, shift;
+u32 cpu_or_dist;

GUEST_ASSERT(bits_per_field <= reg_bits);
GUEST_ASSERT(!write || *val < (1U << bits_per_field));
@@ -197,32 +197,32 @@ static void gicv3_access_reg(uint32_t intid, uint64_t offset,
*val = gicv3_getl_fields(cpu_or_dist, offset, mask) >> shift;
}

-static void gicv3_write_reg(uint32_t intid, uint64_t offset,
-uint32_t reg_bits, uint32_t bits_per_field, uint32_t val)
+static void gicv3_write_reg(u32 intid, u64 offset,
+u32 reg_bits, u32 bits_per_field, u32 val)
{
gicv3_access_reg(intid, offset, reg_bits,
bits_per_field, true, &val);
}

-static uint32_t gicv3_read_reg(uint32_t intid, uint64_t offset,
-uint32_t reg_bits, uint32_t bits_per_field)
+static u32 gicv3_read_reg(u32 intid, u64 offset,
+u32 reg_bits, u32 bits_per_field)
{
-uint32_t val;
+u32 val;

gicv3_access_reg(intid, offset, reg_bits,
bits_per_field, false, &val);
return val;
}

-static void gicv3_set_priority(uint32_t intid, uint32_t prio)
+static void gicv3_set_priority(u32 intid, u32 prio)
{
gicv3_write_reg(intid, GICD_IPRIORITYR, 32, 8, prio);
}

/* Sets the intid to be level-sensitive or edge-triggered. */
-static void gicv3_irq_set_config(uint32_t intid, bool is_edge)
+static void gicv3_irq_set_config(u32 intid, bool is_edge)
{
-uint32_t val;
+u32 val;

/* N/A for private interrupts. */
GUEST_ASSERT(get_intid_range(intid) == SPI_RANGE);
@@ -230,57 +230,57 @@ static void gicv3_irq_set_config(uint32_t intid, bool is_edge)
gicv3_write_reg(intid, GICD_ICFGR, 32, 2, val);
}

-static void gicv3_irq_enable(uint32_t intid)
+static void gicv3_irq_enable(u32 intid)
{
bool is_spi = get_intid_range(intid) == SPI_RANGE;
-uint32_t cpu = guest_get_vcpuid();
+u32 cpu = guest_get_vcpuid();

gicv3_write_reg(intid, GICD_ISENABLER, 32, 1, 1);
gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
}

-static void gicv3_irq_disable(uint32_t intid)
+static void gicv3_irq_disable(u32 intid)
{
bool is_spi = get_intid_range(intid) == SPI_RANGE;
-uint32_t cpu = guest_get_vcpuid();
+u32 cpu = guest_get_vcpuid();

gicv3_write_reg(intid, GICD_ICENABLER, 32, 1, 1);
gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
}

-static void gicv3_irq_set_active(uint32_t intid)
+static void gicv3_irq_set_active(u32 intid)
{
gicv3_write_reg(intid, GICD_ISACTIVER, 32, 1, 1);
}

-static void gicv3_irq_clear_active(uint32_t intid)
+static void gicv3_irq_clear_active(u32 intid)
{
gicv3_write_reg(intid, GICD_ICACTIVER, 32, 1, 1);
}

-static bool gicv3_irq_get_active(uint32_t intid)
+static bool gicv3_irq_get_active(u32 intid)
{
return gicv3_read_reg(intid, GICD_ISACTIVER, 32, 1);
}

-static void gicv3_irq_set_pending(uint32_t intid)
+static void gicv3_irq_set_pending(u32 intid)
{
gicv3_write_reg(intid, GICD_ISPENDR, 32, 1, 1);
}

-static void gicv3_irq_clear_pending(uint32_t intid)
+static void gicv3_irq_clear_pending(u32 intid)
{
gicv3_write_reg(intid, GICD_ICPENDR, 32, 1, 1);
}

-static bool gicv3_irq_get_pending(uint32_t intid)
+static bool gicv3_irq_get_pending(u32 intid)
{
return gicv3_read_reg(intid, GICD_ISPENDR, 32, 1);
}

static void gicv3_enable_redist(volatile void *redist_base)
{
-uint32_t val = readl(redist_base + GICR_WAKER);
+u32 val = readl(redist_base + GICR_WAKER);
unsigned int count = 100000; /* 1s */

val &= ~GICR_WAKER_ProcessorSleep;
@@ -293,10 +293,10 @@ static void gicv3_enable_redist(volatile void *redist_base)
}
}

-static void gicv3_set_group(uint32_t intid, bool grp)
+static void gicv3_set_group(u32 intid, bool grp)
{
-uint32_t cpu_or_dist;
-uint32_t val;
+u32 cpu_or_dist;
+u32 val;

cpu_or_dist = (get_intid_range(intid) == SPI_RANGE) ? DIST_BIT : guest_get_vcpuid();
val = gicv3_reg_readl(cpu_or_dist, GICD_IGROUPR + (intid / 32) * 4);
@@ -424,8 +424,8 @@ const struct gic_common_ops gicv3_ops = {
.gic_irq_set_group = gicv3_set_group,
};

-void gic_rdist_enable_lpis(vm_paddr_t cfg_table, size_t cfg_table_size,
-vm_paddr_t pend_table)
+void gic_rdist_enable_lpis(gpa_t cfg_table, size_t cfg_table_size,
+gpa_t pend_table)
{
volatile void *rdist_base = gicr_base_cpu(guest_get_vcpuid());


@@ -54,7 +54,7 @@ static unsigned long its_find_baser(unsigned int type)
return -1;
}

-static void its_install_table(unsigned int type, vm_paddr_t base, size_t size)
+static void its_install_table(unsigned int type, gpa_t base, size_t size)
{
unsigned long offset = its_find_baser(type);
u64 baser;
@@ -69,7 +69,7 @@ static void its_install_table(unsigned int type, vm_paddr_t base, size_t size)
its_write_u64(offset, baser);
}

-static void its_install_cmdq(vm_paddr_t base, size_t size)
+static void its_install_cmdq(gpa_t base, size_t size)
{
u64 cbaser;

@@ -82,9 +82,8 @@ static void its_install_cmdq(vm_paddr_t base, size_t size)
its_write_u64(GITS_CBASER, cbaser);
}

-void its_init(vm_paddr_t coll_tbl, size_t coll_tbl_sz,
-vm_paddr_t device_tbl, size_t device_tbl_sz,
-vm_paddr_t cmdq, size_t cmdq_size)
+void its_init(gpa_t coll_tbl, size_t coll_tbl_sz, gpa_t device_tbl,
+size_t device_tbl_sz, gpa_t cmdq, size_t cmdq_size)
{
u32 ctlr;

@@ -204,7 +203,7 @@ static void its_send_cmd(void *cmdq_base, struct its_cmd_block *cmd)
}
}

-void its_send_mapd_cmd(void *cmdq_base, u32 device_id, vm_paddr_t itt_base,
+void its_send_mapd_cmd(void *cmdq_base, u32 device_id, gpa_t itt_base,
size_t itt_size, bool valid)
{
struct its_cmd_block cmd = {};

@@ -19,20 +19,20 @@

#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN 0xac0000

-static vm_vaddr_t exception_handlers;
+static gva_t exception_handlers;

-static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
+static u64 pgd_index(struct kvm_vm *vm, gva_t gva)
{
unsigned int shift = (vm->mmu.pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
-uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;
+u64 mask = (1UL << (vm->va_bits - shift)) - 1;

return (gva >> shift) & mask;
}

-static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
+static u64 pud_index(struct kvm_vm *vm, gva_t gva)
{
unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
-uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
+u64 mask = (1UL << (vm->page_shift - 3)) - 1;

TEST_ASSERT(vm->mmu.pgtable_levels == 4,
"Mode %d does not have 4 page table levels", vm->mode);
@@ -40,10 +40,10 @@ static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
return (gva >> shift) & mask;
}

-static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
+static u64 pmd_index(struct kvm_vm *vm, gva_t gva)
{
unsigned int shift = (vm->page_shift - 3) + vm->page_shift;
-uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
+u64 mask = (1UL << (vm->page_shift - 3)) - 1;

TEST_ASSERT(vm->mmu.pgtable_levels >= 3,
"Mode %d does not have >= 3 page table levels", vm->mode);
@@ -51,9 +51,9 @@ static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
return (gva >> shift) & mask;
}

-static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
+static u64 pte_index(struct kvm_vm *vm, gva_t gva)
{
-uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
+u64 mask = (1UL << (vm->page_shift - 3)) - 1;
return (gva >> vm->page_shift) & mask;
}

@@ -63,9 +63,9 @@ static inline bool use_lpa2_pte_format(struct kvm_vm *vm)
(vm->pa_bits > 48 || vm->va_bits > 48);
}

-static uint64_t addr_pte(struct kvm_vm *vm, uint64_t pa, uint64_t attrs)
+static u64 addr_pte(struct kvm_vm *vm, u64 pa, u64 attrs)
{
-uint64_t pte;
+u64 pte;

if (use_lpa2_pte_format(vm)) {
pte = pa & PTE_ADDR_MASK_LPA2(vm->page_shift);
@@ -81,9 +81,9 @@ static uint64_t addr_pte(struct kvm_vm *vm, uint64_t pa, uint64_t attrs)
return pte;
}

-static uint64_t pte_addr(struct kvm_vm *vm, uint64_t pte)
+static u64 pte_addr(struct kvm_vm *vm, u64 pte)
{
-uint64_t pa;
+u64 pa;

if (use_lpa2_pte_format(vm)) {
pa = pte & PTE_ADDR_MASK_LPA2(vm->page_shift);
@@ -97,13 +97,13 @@ static uint64_t pte_addr(struct kvm_vm *vm, uint64_t pte)
return pa;
}

-static uint64_t ptrs_per_pgd(struct kvm_vm *vm)
+static u64 ptrs_per_pgd(struct kvm_vm *vm)
{
unsigned int shift = (vm->mmu.pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
return 1 << (vm->va_bits - shift);
}

-static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
+static u64 __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
{
return 1 << (vm->page_shift - 3);
}
@@ -121,47 +121,46 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
vm->mmu.pgd_created = true;
}

-static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
-uint64_t flags)
+static void _virt_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa,
+u64 flags)
{
-uint8_t attr_idx = flags & (PTE_ATTRINDX_MASK >> PTE_ATTRINDX_SHIFT);
-uint64_t pg_attr;
-uint64_t *ptep;
+u8 attr_idx = flags & (PTE_ATTRINDX_MASK >> PTE_ATTRINDX_SHIFT);
+u64 pg_attr;
+u64 *ptep;

-TEST_ASSERT((vaddr % vm->page_size) == 0,
+TEST_ASSERT((gva % vm->page_size) == 0,
"Virtual address not on page boundary,\n"
-" vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
-TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
-(vaddr >> vm->page_shift)),
-"Invalid virtual address, vaddr: 0x%lx", vaddr);
-TEST_ASSERT((paddr % vm->page_size) == 0,
-"Physical address not on page boundary,\n"
-" paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
-TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
-"Physical address beyond maximum supported,\n"
-" paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
-paddr, vm->max_gfn, vm->page_size);
+" gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size);
+TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
+"Invalid virtual address, gva: 0x%lx", gva);
+TEST_ASSERT((gpa % vm->page_size) == 0,
+"Physical address not on page boundary,\n"
+" gpa: 0x%lx vm->page_size: 0x%x", gpa, vm->page_size);
+TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
+"Physical address beyond maximum supported,\n"
+" gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+gpa, vm->max_gfn, vm->page_size);

-ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pgd_index(vm, vaddr) * 8;
+ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pgd_index(vm, gva) * 8;
if (!*ptep)
*ptep = addr_pte(vm, vm_alloc_page_table(vm),
PGD_TYPE_TABLE | PTE_VALID);

switch (vm->mmu.pgtable_levels) {
case 4:
-ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
+ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
if (!*ptep)
*ptep = addr_pte(vm, vm_alloc_page_table(vm),
PUD_TYPE_TABLE | PTE_VALID);
/* fall through */
case 3:
-ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
+ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
if (!*ptep)
*ptep = addr_pte(vm, vm_alloc_page_table(vm),
PMD_TYPE_TABLE | PTE_VALID);
/* fall through */
case 2:
-ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
+ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
break;
default:
TEST_FAIL("Page table levels must be 2, 3, or 4");
@@ -171,19 +170,19 @@ static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
if (!use_lpa2_pte_format(vm))
pg_attr |= PTE_SHARED;

-*ptep = addr_pte(vm, paddr, pg_attr);
+*ptep = addr_pte(vm, gpa, pg_attr);
}

-void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
{
-uint64_t attr_idx = MT_NORMAL;
+u64 attr_idx = MT_NORMAL;

-_virt_pg_map(vm, vaddr, paddr, attr_idx);
+_virt_pg_map(vm, gva, gpa, attr_idx);
}

-uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, vm_vaddr_t gva, int level)
+u64 *virt_get_pte_hva_at_level(struct kvm_vm *vm, gva_t gva, int level)
{
-uint64_t *ptep;
+u64 *ptep;

if (!vm->mmu.pgd_created)
goto unmapped_gva;
@@ -225,23 +224,23 @@ uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, vm_vaddr_t gva, int level
exit(EXIT_FAILURE);
}

-uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva)
+u64 *virt_get_pte_hva(struct kvm_vm *vm, gva_t gva)
{
return virt_get_pte_hva_at_level(vm, gva, 3);
}

-vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva)
{
-uint64_t *ptep = virt_get_pte_hva(vm, gva);
+u64 *ptep = virt_get_pte_hva(vm, gva);

return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
}

-static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
+static void pte_dump(FILE *stream, struct kvm_vm *vm, u8 indent, u64 page, int level)
{
#ifdef DEBUG
static const char * const type[] = { "", "pud", "pmd", "pte" };
-uint64_t pte, *ptep;
+u64 pte, *ptep;

if (level == 4)
return;
@@ -256,10 +255,10 @@ static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t p
#endif
}

-void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
{
int level = 4 - (vm->mmu.pgtable_levels - 1);
-uint64_t pgd, *ptep;
+u64 pgd, *ptep;

if (!vm->mmu.pgd_created)
return;
@@ -298,7 +297,7 @@ void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
{
struct kvm_vcpu_init default_init = { .target = -1, };
struct kvm_vm *vm = vcpu->vm;
-uint64_t sctlr_el1, tcr_el1, ttbr0_el1;
+u64 sctlr_el1, tcr_el1, ttbr0_el1;

if (!init) {
kvm_get_default_vcpu_target(vm, &default_init);
@@ -397,9 +396,9 @@ void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
HCR_EL2_RW | HCR_EL2_TGE | HCR_EL2_E2H);
}

-void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent)
{
-uint64_t pstate, pc;
+u64 pstate, pc;

pstate = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pstate));
pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc));
@@ -410,29 +409,29 @@ void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)

void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
{
-vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
+vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (u64)guest_code);
}

-static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, u32 vcpu_id,
struct kvm_vcpu_init *init)
{
size_t stack_size;
-uint64_t stack_vaddr;
+gva_t stack_gva;
struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);

stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
vm->page_size;
-stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
-DEFAULT_ARM64_GUEST_STACK_VADDR_MIN,
-MEM_REGION_DATA);
+stack_gva = __vm_alloc(vm, stack_size,
+DEFAULT_ARM64_GUEST_STACK_VADDR_MIN,
+MEM_REGION_DATA);

aarch64_vcpu_setup(vcpu, init);

-vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1), stack_vaddr + stack_size);
+vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_SP_EL1), stack_gva + stack_size);
return vcpu;
}

-struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, u32 vcpu_id,
struct kvm_vcpu_init *init, void *guest_code)
{
struct kvm_vcpu *vcpu = __aarch64_vcpu_add(vm, vcpu_id, init);
@@ -442,7 +441,7 @@ struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
return vcpu;
}

-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id)
{
return __aarch64_vcpu_add(vm, vcpu_id, NULL);
}
@@ -459,13 +458,13 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)

for (i = 0; i < num; i++) {
vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.regs[i]),
-va_arg(ap, uint64_t));
+va_arg(ap, u64));
}

va_end(ap);
}

-void kvm_exit_unexpected_exception(int vector, uint64_t ec, bool valid_ec)
+void kvm_exit_unexpected_exception(int vector, u64 ec, bool valid_ec)
{
ucall(UCALL_UNHANDLED, 3, vector, ec, valid_ec);
while (1)
@@ -498,7 +497,7 @@ void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu)
{
extern char vectors;

-vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_VBAR_EL1), (uint64_t)&vectors);
+vcpu_set_reg(vcpu, ctxt_reg_alias(vcpu, SYS_VBAR_EL1), (u64)&vectors);
}

void route_exception(struct ex_regs *regs, int vector)
@@ -536,10 +535,10 @@ void route_exception(struct ex_regs *regs, int vector)

void vm_init_descriptor_tables(struct kvm_vm *vm)
{
-vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
-vm->page_size, MEM_REGION_DATA);
+vm->handlers = __vm_alloc(vm, sizeof(struct handlers), vm->page_size,
+MEM_REGION_DATA);

-*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
+*(gva_t *)addr_gva2hva(vm, (gva_t)(&exception_handlers)) = vm->handlers;
}

void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec,
@@ -563,13 +562,13 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
handlers->exception_handlers[vector][0] = handler;
}

-uint32_t guest_get_vcpuid(void)
+u32 guest_get_vcpuid(void)
{
return read_sysreg(tpidr_el1);
}

-static uint32_t max_ipa_for_page_size(uint32_t vm_ipa, uint32_t gran,
-uint32_t not_sup_val, uint32_t ipa52_min_val)
+static u32 max_ipa_for_page_size(u32 vm_ipa, u32 gran,
+u32 not_sup_val, u32 ipa52_min_val)
{
if (gran == not_sup_val)
return 0;
@@ -579,16 +578,16 @@ static uint32_t max_ipa_for_page_size(uint32_t vm_ipa, uint32_t gran,
return min(vm_ipa, 48U);
}

-void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
-uint32_t *ipa16k, uint32_t *ipa64k)
+void aarch64_get_supported_page_sizes(u32 ipa, u32 *ipa4k,
+u32 *ipa16k, u32 *ipa64k)
{
struct kvm_vcpu_init preferred_init;
int kvm_fd, vm_fd, vcpu_fd, err;
-uint64_t val;
-uint32_t gran;
+u64 val;
+u32 gran;
struct kvm_one_reg reg = {
.id = KVM_ARM64_SYS_REG(SYS_ID_AA64MMFR0_EL1),
-.addr = (uint64_t)&val,
+.addr = (u64)&val,
};

kvm_fd = open_kvm_dev_path_or_exit();
@@ -646,17 +645,17 @@ void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
: "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7")


-void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
-uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
-uint64_t arg6, struct arm_smccc_res *res)
+void smccc_hvc(u32 function_id, u64 arg0, u64 arg1,
+u64 arg2, u64 arg3, u64 arg4, u64 arg5,
+u64 arg6, struct arm_smccc_res *res)
{
__smccc_call(hvc, function_id, arg0, arg1, arg2, arg3, arg4, arg5,
arg6, res);
}

-void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
-uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
-uint64_t arg6, struct arm_smccc_res *res)
+void smccc_smc(u32 function_id, u64 arg0, u64 arg1,
+u64 arg2, u64 arg3, u64 arg4, u64 arg5,
+u64 arg6, struct arm_smccc_res *res)
{
__smccc_call(smc, function_id, arg0, arg1, arg2, arg3, arg4, arg5,
arg6, res);
@@ -671,7 +670,7 @@ void kvm_selftest_arch_init(void)
guest_modes_append_default();
}

-void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
+void vm_populate_gva_bitmap(struct kvm_vm *vm)
{
/*
* arm64 selftests use only TTBR0_EL1, meaning that the valid VA space

@@ -6,17 +6,17 @@
*/
#include "kvm_util.h"

-vm_vaddr_t *ucall_exit_mmio_addr;
+gva_t *ucall_exit_mmio_addr;

-void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa)
{
-vm_vaddr_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR);
+gva_t mmio_gva = vm_unused_gva_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR);

virt_map(vm, mmio_gva, mmio_gpa, 1);

vm->ucall_mmio_addr = mmio_gpa;

-write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gva);
+write_guest_global(vm, ucall_exit_mmio_addr, (gva_t *)mmio_gva);
}

void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
@@ -25,9 +25,9 @@ void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)

if (run->exit_reason == KVM_EXIT_MMIO &&
run->mmio.phys_addr == vcpu->vm->ucall_mmio_addr) {
-TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(uint64_t),
+TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(u64),
"Unexpected ucall exit mmio address access");
-return (void *)(*((uint64_t *)run->mmio.data));
+return (void *)(*((u64 *)run->mmio.data));
}

return NULL;

@@ -41,10 +41,10 @@ bool kvm_supports_vgic_v3(void)
* redistributor regions of the guest. Since it depends on the number of
* vCPUs for the VM, it must be called after all the vCPUs have been created.
*/
-int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs)
+int __vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs)
{
int gic_fd;
-uint64_t attr;
+u64 attr;
unsigned int nr_gic_pages;

/* Distributor setup */
@@ -77,7 +77,7 @@ void __vgic_v3_init(int fd)
KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
}

-int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs)
+int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, u32 nr_irqs)
{
unsigned int nr_vcpus_created = 0;
struct list_head *iter;
@@ -104,11 +104,11 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs)
}

/* should only work for level sensitive interrupts */
-int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level)
+int _kvm_irq_set_level_info(int gic_fd, u32 intid, int level)
{
-uint64_t attr = 32 * (intid / 32);
-uint64_t index = intid % 32;
-uint64_t val;
+u64 attr = 32 * (intid / 32);
+u64 index = intid % 32;
+u64 val;
int ret;

ret = __kvm_device_attr_get(gic_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
@@ -122,16 +122,16 @@ int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level)
return ret;
}

-void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level)
+void kvm_irq_set_level_info(int gic_fd, u32 intid, int level)
{
int ret = _kvm_irq_set_level_info(gic_fd, intid, level);

TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO, ret));
}

-int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level)
+int _kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level)
{
-uint32_t irq = intid & KVM_ARM_IRQ_NUM_MASK;
+u32 irq = intid & KVM_ARM_IRQ_NUM_MASK;

TEST_ASSERT(!INTID_IS_SGI(intid), "KVM_IRQ_LINE's interface itself "
"doesn't allow injecting SGIs. There's no mask for it.");
@@ -144,23 +144,23 @@ int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level)
return _kvm_irq_line(vm, irq, level);
}

-void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level)
+void kvm_arm_irq_line(struct kvm_vm *vm, u32 intid, int level)
{
int ret = _kvm_arm_irq_line(vm, intid, level);

TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_IRQ_LINE, ret));
}

-static void vgic_poke_irq(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu,
-uint64_t reg_off)
+static void vgic_poke_irq(int gic_fd, u32 intid, struct kvm_vcpu *vcpu,
+u64 reg_off)
{
-uint64_t reg = intid / 32;
-uint64_t index = intid % 32;
-uint64_t attr = reg_off + reg * 4;
-uint64_t val;
+u64 reg = intid / 32;
+u64 index = intid % 32;
+u64 attr = reg_off + reg * 4;
+u64 val;
bool intid_is_private = INTID_IS_SGI(intid) || INTID_IS_PPI(intid);

-uint32_t group = intid_is_private ? KVM_DEV_ARM_VGIC_GRP_REDIST_REGS
+u32 group = intid_is_private ? KVM_DEV_ARM_VGIC_GRP_REDIST_REGS
: KVM_DEV_ARM_VGIC_GRP_DIST_REGS;

if (intid_is_private) {
@@ -183,12 +183,12 @@ static void vgic_poke_irq(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu,
kvm_device_attr_set(gic_fd, group, attr, &val);
}

-void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu)
+void kvm_irq_write_ispendr(int gic_fd, u32 intid, struct kvm_vcpu *vcpu)
{
vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISPENDR);
}

-void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu)
+void kvm_irq_write_isactiver(int gic_fd, u32 intid, struct kvm_vcpu *vcpu)
{
vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISACTIVER);
}

@@ -156,21 +156,20 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
|
||||
TEST_ASSERT(phdr.p_memsz > 0, "Unexpected loadable segment "
|
||||
"memsize of 0,\n"
|
||||
" phdr index: %u p_memsz: 0x%" PRIx64,
|
||||
n1, (uint64_t) phdr.p_memsz);
|
||||
vm_vaddr_t seg_vstart = align_down(phdr.p_vaddr, vm->page_size);
|
||||
vm_vaddr_t seg_vend = phdr.p_vaddr + phdr.p_memsz - 1;
|
||||
n1, (u64)phdr.p_memsz);
|
||||
gva_t seg_vstart = align_down(phdr.p_vaddr, vm->page_size);
|
||||
gva_t seg_vend = phdr.p_vaddr + phdr.p_memsz - 1;
|
||||
seg_vend |= vm->page_size - 1;
|
||||
size_t seg_size = seg_vend - seg_vstart + 1;
|
||||
|
||||
vm_vaddr_t vaddr = __vm_vaddr_alloc(vm, seg_size, seg_vstart,
|
||||
MEM_REGION_CODE);
|
||||
TEST_ASSERT(vaddr == seg_vstart, "Unable to allocate "
|
||||
gva_t gva = __vm_alloc(vm, seg_size, seg_vstart, MEM_REGION_CODE);
|
||||
TEST_ASSERT(gva == seg_vstart, "Unable to allocate "
|
||||
"virtual memory for segment at requested min addr,\n"
|
||||
" segment idx: %u\n"
|
||||
" seg_vstart: 0x%lx\n"
|
||||
" vaddr: 0x%lx",
|
||||
n1, seg_vstart, vaddr);
|
||||
memset(addr_gva2hva(vm, vaddr), 0, seg_size);
|
||||
" gva: 0x%lx",
|
||||
n1, seg_vstart, gva);
|
||||
memset(addr_gva2hva(vm, gva), 0, seg_size);
|
||||
/* TODO(lhuemill): Set permissions of each memory segment
|
||||
* based on the least-significant 3 bits of phdr.p_flags.
|
||||
*/
 
@@ -20,7 +20,7 @@ void guest_modes_append_default(void)
 #ifdef __aarch64__
 	{
 		unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
-		uint32_t ipa4k, ipa16k, ipa64k;
+		u32 ipa4k, ipa16k, ipa64k;
 		int i;
 
 		aarch64_get_supported_page_sizes(limit, &ipa4k, &ipa16k, &ipa64k);
 
@@ -35,8 +35,8 @@ static int skip_atoi(const char **s)
 	({ \
 		int __res; \
 		\
-		__res = ((uint64_t) n) % (uint32_t) base; \
-		n = ((uint64_t) n) / (uint32_t) base; \
+		__res = ((u64)n) % (u32)base; \
+		n = ((u64)n) / (u32)base; \
 		__res; \
 	})
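
/*
 * Informative sketch of what the macro above computes (stand-alone, using
 * stdint.h types since the selftest typedefs aren't available here): each
 * evaluation yields the least-significant base-`base` digit and divides the
 * value down, so repeated calls produce the digits in reverse order.
 */
#include <stdint.h>

static int next_digit(uint64_t *n, uint32_t base)
{
	int digit = (int)(*n % base);	/* the macro's __res */

	*n /= base;			/* the macro's update of n */
	return digit;
}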
@@ -119,7 +119,7 @@ int guest_vsnprintf(char *buf, int n, const char *fmt, va_list args)
 {
 	char *str, *end;
 	const char *s;
-	uint64_t num;
+	u64 num;
 	int i, base;
 	int len;
 
@@ -216,7 +216,7 @@ int guest_vsnprintf(char *buf, int n, const char *fmt, va_list args)
 			while (--field_width > 0)
 				APPEND_BUFFER_SAFE(str, end, ' ');
 			APPEND_BUFFER_SAFE(str, end,
-					   (uint8_t)va_arg(args, int));
+					   (u8)va_arg(args, int));
 			while (--field_width > 0)
 				APPEND_BUFFER_SAFE(str, end, ' ');
 			continue;
@@ -240,7 +240,7 @@ int guest_vsnprintf(char *buf, int n, const char *fmt, va_list args)
 				flags |= SPECIAL | SMALL | ZEROPAD;
 			}
 			str = number(str, end,
-				     (uint64_t)va_arg(args, void *), 16,
+				     (u64)va_arg(args, void *), 16,
 				     field_width, precision, flags);
 			continue;
 
@@ -284,15 +284,15 @@ int guest_vsnprintf(char *buf, int n, const char *fmt, va_list args)
 			continue;
 		}
 		if (qualifier == 'l')
-			num = va_arg(args, uint64_t);
+			num = va_arg(args, u64);
 		else if (qualifier == 'h') {
-			num = (uint16_t)va_arg(args, int);
+			num = (u16)va_arg(args, int);
 			if (flags & SIGN)
-				num = (int16_t)num;
+				num = (s16)num;
 		} else if (flags & SIGN)
 			num = va_arg(args, int);
 		else
-			num = va_arg(args, uint32_t);
+			num = va_arg(args, u32);
 		str = number(str, end, num, base, field_width, precision, flags);
 	}
 
@@ -20,9 +20,9 @@
 
 #define KVM_UTIL_MIN_PFN 2
 
-uint32_t guest_random_seed;
+u32 guest_random_seed;
 struct guest_random_state guest_rng;
-static uint32_t last_guest_seed;
+static u32 last_guest_seed;
 
 static size_t vcpu_mmap_sz(void);
 
@@ -165,7 +165,7 @@ unsigned int kvm_check_cap(long cap)
 	return (unsigned int)ret;
 }
 
-void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
+void vm_enable_dirty_ring(struct kvm_vm *vm, u32 ring_size)
 {
 	if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL))
 		vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size);
@@ -189,7 +189,7 @@ static void vm_open(struct kvm_vm *vm)
 		vm->stats.fd = -1;
 }
 
-const char *vm_guest_mode_string(uint32_t i)
+const char *vm_guest_mode_string(u32 i)
 {
 	static const char * const strings[] = {
 		[VM_MODE_P52V48_4K] = "PA-bits:52, VA-bits:48, 4K pages",
@@ -267,7 +267,7 @@ _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params)
  * based on the MSB of the VA. On architectures with this behavior
  * the VA region spans [0, 2^(va_bits - 1)), [-(2^(va_bits - 1), -1].
  */
-__weak void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
+__weak void vm_populate_gva_bitmap(struct kvm_vm *vm)
 {
 	sparsebit_set_num(vm->vpages_valid,
 		0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
@@ -385,7 +385,7 @@ struct kvm_vm *____vm_create(struct vm_shape shape)
 
 	/* Limit to VA-bit canonical virtual addresses. */
 	vm->vpages_valid = sparsebit_alloc();
-	vm_vaddr_populate_bitmap(vm);
+	vm_populate_gva_bitmap(vm);
 
 	/* Limit physical addresses to PA-bits. */
 	vm->max_gfn = vm_compute_max_gfn(vm);
@@ -396,12 +396,12 @@ struct kvm_vm *____vm_create(struct vm_shape shape)
 	return vm;
 }
 
-static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
-				     uint32_t nr_runnable_vcpus,
-				     uint64_t extra_mem_pages)
+static u64 vm_nr_pages_required(enum vm_guest_mode mode,
+				u32 nr_runnable_vcpus,
+				u64 extra_mem_pages)
 {
-	uint64_t page_size = vm_guest_mode_params[mode].page_size;
-	uint64_t nr_pages;
+	u64 page_size = vm_guest_mode_params[mode].page_size;
+	u64 nr_pages;
 
 	TEST_ASSERT(nr_runnable_vcpus,
 		    "Use vm_create_barebones() for VMs that _never_ have vCPUs");
@@ -435,7 +435,7 @@ static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
 	return vm_adjust_num_guest_pages(mode, nr_pages);
 }
 
-void kvm_set_files_rlimit(uint32_t nr_vcpus)
+void kvm_set_files_rlimit(u32 nr_vcpus)
 {
 	/*
 	 * Each vCPU will open two file descriptors: the vCPU itself and the
@@ -476,10 +476,10 @@ static bool is_guest_memfd_required(struct vm_shape shape)
 #endif
 }
 
-struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
-			   uint64_t nr_extra_pages)
+struct kvm_vm *__vm_create(struct vm_shape shape, u32 nr_runnable_vcpus,
+			   u64 nr_extra_pages)
 {
-	uint64_t nr_pages = vm_nr_pages_required(shape.mode, nr_runnable_vcpus,
+	u64 nr_pages = vm_nr_pages_required(shape.mode, nr_runnable_vcpus,
						 nr_extra_pages);
 	struct userspace_mem_region *slot0;
 	struct kvm_vm *vm;
@@ -546,8 +546,8 @@ struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
 * extra_mem_pages is only used to calculate the maximum page table size,
 * no real memory allocation for non-slot0 memory in this function.
 */
-struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
-				      uint64_t extra_mem_pages,
+struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, u32 nr_vcpus,
+				      u64 extra_mem_pages,
 				      void *guest_code, struct kvm_vcpu *vcpus[])
 {
 	struct kvm_vm *vm;
@@ -566,7 +566,7 @@ struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
 
 struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
 					       struct kvm_vcpu **vcpu,
-					       uint64_t extra_mem_pages,
+					       u64 extra_mem_pages,
 					       void *guest_code)
 {
 	struct kvm_vcpu *vcpus[1];
@@ -614,7 +614,7 @@ void kvm_vm_restart(struct kvm_vm *vmp)
 }
 
 __weak struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm,
-					      uint32_t vcpu_id)
+					      u32 vcpu_id)
 {
 	return __vm_vcpu_add(vm, vcpu_id);
 }
@@ -636,9 +636,9 @@ int __pin_task_to_cpu(pthread_t task, int cpu)
 	return pthread_setaffinity_np(task, sizeof(cpuset), &cpuset);
 }
 
-static uint32_t parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask)
+static u32 parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask)
 {
-	uint32_t pcpu = atoi_non_negative("CPU number", cpu_str);
+	u32 pcpu = atoi_non_negative("CPU number", cpu_str);
 
 	TEST_ASSERT(CPU_ISSET(pcpu, allowed_mask),
 		    "Not allowed to run on pCPU '%d', check cgroups?", pcpu);
@@ -662,7 +662,7 @@ void kvm_print_vcpu_pinning_help(void)
 	       " (default: no pinning)\n", name, name);
 }
 
-void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
+void kvm_parse_vcpu_pinning(const char *pcpus_string, u32 vcpu_to_pcpu[],
 			    int nr_vcpus)
 {
 	cpu_set_t allowed_mask;
@@ -715,15 +715,15 @@ void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
 * region exists.
 */
 static struct userspace_mem_region *
-userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
+userspace_mem_region_find(struct kvm_vm *vm, u64 start, u64 end)
 {
 	struct rb_node *node;
 
 	for (node = vm->regions.gpa_tree.rb_node; node; ) {
 		struct userspace_mem_region *region =
 			container_of(node, struct userspace_mem_region, gpa_node);
-		uint64_t existing_start = region->region.guest_phys_addr;
-		uint64_t existing_end = region->region.guest_phys_addr
+		u64 existing_start = region->region.guest_phys_addr;
+		u64 existing_end = region->region.guest_phys_addr
 			+ region->region.memory_size - 1;
 		if (start <= existing_end && end >= existing_start)
 			return region;
@@ -918,8 +918,8 @@ static void vm_userspace_mem_region_hva_insert(struct rb_root *hva_tree,
 }
 
 
-int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
-				uint64_t gpa, uint64_t size, void *hva)
+int __vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags,
+				gpa_t gpa, u64 size, void *hva)
 {
 	struct kvm_userspace_memory_region region = {
 		.slot = slot,
@@ -932,8 +932,8 @@ int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags
 	return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region);
 }
 
-void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
-			       uint64_t gpa, uint64_t size, void *hva)
+void vm_set_user_memory_region(struct kvm_vm *vm, u32 slot, u32 flags,
+			       gpa_t gpa, u64 size, void *hva)
 {
 	int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva);
 
@@ -945,9 +945,9 @@ void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
 	__TEST_REQUIRE(kvm_has_cap(KVM_CAP_USER_MEMORY2), \
 		       "KVM selftests now require KVM_SET_USER_MEMORY_REGION2 (introduced in v6.8)")
 
-int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
-				 uint64_t gpa, uint64_t size, void *hva,
-				 uint32_t guest_memfd, uint64_t guest_memfd_offset)
+int __vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags,
+				 gpa_t gpa, u64 size, void *hva,
+				 u32 guest_memfd, u64 guest_memfd_offset)
 {
 	struct kvm_userspace_memory_region2 region = {
 		.slot = slot,
@@ -964,9 +964,9 @@ int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flag
 	return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION2, &region);
 }
 
-void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
-				uint64_t gpa, uint64_t size, void *hva,
-				uint32_t guest_memfd, uint64_t guest_memfd_offset)
+void vm_set_user_memory_region2(struct kvm_vm *vm, u32 slot, u32 flags,
+				gpa_t gpa, u64 size, void *hva,
+				u32 guest_memfd, u64 guest_memfd_offset)
 {
 	int ret = __vm_set_user_memory_region2(vm, slot, flags, gpa, size, hva,
 					       guest_memfd, guest_memfd_offset);
@@ -978,8 +978,8 @@ void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags
 
 /* FIXME: This thing needs to be ripped apart and rewritten. */
 void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
-		uint64_t gpa, uint32_t slot, uint64_t npages, uint32_t flags,
-		int guest_memfd, uint64_t guest_memfd_offset)
+		gpa_t gpa, u32 slot, u64 npages, u32 flags,
+		int guest_memfd, u64 guest_memfd_offset)
 {
 	int ret;
 	struct userspace_mem_region *region;
@@ -1016,8 +1016,8 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
 		" requested gpa: 0x%lx npages: 0x%lx page_size: 0x%x\n"
 		" existing gpa: 0x%lx size: 0x%lx",
 		gpa, npages, vm->page_size,
-		(uint64_t) region->region.guest_phys_addr,
-		(uint64_t) region->region.memory_size);
+		(u64)region->region.guest_phys_addr,
+		(u64)region->region.memory_size);
 
 	/* Confirm no region with the requested slot already exists. */
 	hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
@@ -1027,11 +1027,11 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
 
 		TEST_FAIL("A mem region with the requested slot "
 			"already exists.\n"
-			" requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
-			" existing slot: %u paddr: 0x%lx size: 0x%lx",
+			" requested slot: %u gpa: 0x%lx npages: 0x%lx\n"
+			" existing slot: %u gpa: 0x%lx size: 0x%lx",
			slot, gpa, npages, region->region.slot,
-			(uint64_t) region->region.guest_phys_addr,
-			(uint64_t) region->region.memory_size);
+			(u64)region->region.guest_phys_addr,
+			(u64)region->region.memory_size);
 	}
 
 	/* Allocate and initialize new mem region structure. */
@@ -1085,7 +1085,7 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
 
 	if (flags & KVM_MEM_GUEST_MEMFD) {
 		if (guest_memfd < 0) {
-			uint32_t guest_memfd_flags = 0;
+			u32 guest_memfd_flags = 0;
 			TEST_ASSERT(!guest_memfd_offset,
 				    "Offset must be zero when creating new guest_memfd");
 			guest_memfd = vm_create_guest_memfd(vm, mem_size, guest_memfd_flags);
@@ -1141,8 +1141,7 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
 
 void vm_userspace_mem_region_add(struct kvm_vm *vm,
 				 enum vm_mem_backing_src_type src_type,
-				 uint64_t gpa, uint32_t slot, uint64_t npages,
-				 uint32_t flags)
+				 gpa_t gpa, u32 slot, u64 npages, u32 flags)
 {
 	vm_mem_add(vm, src_type, gpa, slot, npages, flags, -1, 0);
 }
@@ -1163,7 +1162,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
 * memory slot ID).
 */
 struct userspace_mem_region *
-memslot2region(struct kvm_vm *vm, uint32_t memslot)
+memslot2region(struct kvm_vm *vm, u32 memslot)
 {
 	struct userspace_mem_region *region;
 
@@ -1194,7 +1193,7 @@ memslot2region(struct kvm_vm *vm, uint32_t memslot)
 * Sets the flags of the memory region specified by the value of slot,
 * to the values given by flags.
 */
-void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
+void vm_mem_region_set_flags(struct kvm_vm *vm, u32 slot, u32 flags)
 {
 	int ret;
 	struct userspace_mem_region *region;
@@ -1210,7 +1209,7 @@ void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
 		ret, errno, slot, flags);
 }
 
-void vm_mem_region_reload(struct kvm_vm *vm, uint32_t slot)
+void vm_mem_region_reload(struct kvm_vm *vm, u32 slot)
 {
 	struct userspace_mem_region *region = memslot2region(vm, slot);
 	struct kvm_userspace_memory_region2 tmp = region->region;
@@ -1234,7 +1233,7 @@ void vm_mem_region_reload(struct kvm_vm *vm, uint32_t slot)
 *
 * Change the gpa of a memory region.
 */
-void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
+void vm_mem_region_move(struct kvm_vm *vm, u32 slot, u64 new_gpa)
 {
 	struct userspace_mem_region *region;
 	int ret;
@@ -1263,7 +1262,7 @@ void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
 *
 * Delete a memory region.
 */
-void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
+void vm_mem_region_delete(struct kvm_vm *vm, u32 slot)
 {
 	struct userspace_mem_region *region = memslot2region(vm, slot);
 
@@ -1273,18 +1272,18 @@ void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
 	__vm_mem_region_delete(vm, region);
 }
 
-void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size,
+void vm_guest_mem_fallocate(struct kvm_vm *vm, u64 base, u64 size,
 			    bool punch_hole)
 {
 	const int mode = FALLOC_FL_KEEP_SIZE | (punch_hole ? FALLOC_FL_PUNCH_HOLE : 0);
 	struct userspace_mem_region *region;
-	uint64_t end = base + size;
-	uint64_t gpa, len;
+	u64 end = base + size;
+	gpa_t gpa, len;
 	off_t fd_offset;
 	int ret;
 
 	for (gpa = base; gpa < end; gpa += len) {
-		uint64_t offset;
+		u64 offset;
 
 		region = userspace_mem_region_find(vm, gpa, gpa);
 		TEST_ASSERT(region && region->region.flags & KVM_MEM_GUEST_MEMFD,
@@ -1292,7 +1291,7 @@ void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size,
 
 		offset = gpa - region->region.guest_phys_addr;
 		fd_offset = region->region.guest_memfd_offset + offset;
-		len = min_t(uint64_t, end - gpa, region->region.memory_size - offset);
+		len = min_t(u64, end - gpa, region->region.memory_size - offset);
 
 		ret = fallocate(region->region.guest_memfd, mode, fd_offset, len);
 		TEST_ASSERT(!ret, "fallocate() failed to %s at %lx (len = %lu), fd = %d, mode = %x, offset = %lx",
@@ -1317,7 +1316,7 @@ static size_t vcpu_mmap_sz(void)
 	return ret;
 }
 
-static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id)
+static bool vcpu_exists(struct kvm_vm *vm, u32 vcpu_id)
 {
 	struct kvm_vcpu *vcpu;
 
@@ -1333,7 +1332,7 @@ static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id)
 * Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id.
 * No additional vCPU setup is done. Returns the vCPU.
 */
-struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
+struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, u32 vcpu_id)
 {
 	struct kvm_vcpu *vcpu;
 
@@ -1367,33 +1366,18 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 }
 
 /*
 * VM Virtual Address Unused Gap
 *
- * Input Args:
- *   vm - Virtual Machine
- *   sz - Size (bytes)
- *   vaddr_min - Minimum Virtual Address
- *
- * Output Args: None
- *
- * Return:
- *   Lowest virtual address at or above vaddr_min, with at least
- *   sz unused bytes. TEST_ASSERT failure if no area of at least
- *   size sz is available.
- *
- * Within the VM specified by vm, locates the lowest starting virtual
- * address >= vaddr_min, that has at least sz unallocated bytes. A
+ * Within the VM specified by @vm, locates the lowest starting guest virtual
+ * address >= @min_gva, that has at least @sz unallocated bytes. A
 * TEST_ASSERT failure occurs for invalid input or no area of at least
- * sz unallocated bytes >= vaddr_min is available.
+ * @sz unallocated bytes >= @min_gva is available.
 */
-vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
-			       vm_vaddr_t vaddr_min)
+gva_t vm_unused_gva_gap(struct kvm_vm *vm, size_t sz, gva_t min_gva)
 {
-	uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;
+	u64 pages = (sz + vm->page_size - 1) >> vm->page_shift;
 
 	/* Determine lowest permitted virtual page index. */
-	uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
-	if ((pgidx_start * vm->page_size) < vaddr_min)
+	u64 pgidx_start = (min_gva + vm->page_size - 1) >> vm->page_shift;
+	if ((pgidx_start * vm->page_size) < min_gva)
 		goto no_va_found;
 
 	/* Loop over section with enough valid virtual page indexes. */
@@ -1430,7 +1414,7 @@ vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
 	} while (pgidx_start != 0);
 
 no_va_found:
-	TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages);
+	TEST_FAIL("No gva of specified pages available, pages: 0x%lx", pages);
 
 	/* NOT REACHED */
 	return -1;
@@ -1452,145 +1436,91 @@ vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
 	return pgidx_start * vm->page_size;
 }
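
/*
 * Minimal usage sketch (illustrative, not part of this change): reserve a
 * page-sized hole above KVM_UTIL_MIN_VADDR and map it by hand, the same
 * pattern ucall_arch_init() uses further down.
 */
static gva_t example_reserve_and_map(struct kvm_vm *vm, gpa_t scratch_gpa)
{
	gva_t gva = vm_unused_gva_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR);

	/* The gap is only a reservation; create the translation explicitly. */
	virt_map(vm, gva, scratch_gpa, 1);
	return gva;
}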
 
-static vm_vaddr_t ____vm_vaddr_alloc(struct kvm_vm *vm, size_t sz,
-				     vm_vaddr_t vaddr_min,
-				     enum kvm_mem_region_type type,
-				     bool protected)
+static gva_t ____vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva,
+			  enum kvm_mem_region_type type, bool protected)
 {
-	uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
+	u64 pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
 
 	virt_pgd_alloc(vm);
-	vm_paddr_t paddr = __vm_phy_pages_alloc(vm, pages,
-						KVM_UTIL_MIN_PFN * vm->page_size,
-						vm->memslots[type], protected);
+	gpa_t gpa = __vm_phy_pages_alloc(vm, pages,
					 KVM_UTIL_MIN_PFN * vm->page_size,
					 vm->memslots[type], protected);
 
 	/*
 	 * Find an unused range of virtual page addresses of at least
 	 * pages in length.
 	 */
-	vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);
+	gva_t gva_start = vm_unused_gva_gap(vm, sz, min_gva);
 
 	/* Map the virtual pages. */
-	for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
-	     pages--, vaddr += vm->page_size, paddr += vm->page_size) {
+	for (gva_t gva = gva_start; pages > 0;
+	     pages--, gva += vm->page_size, gpa += vm->page_size) {
 
-		virt_pg_map(vm, vaddr, paddr);
+		virt_pg_map(vm, gva, gpa);
 	}
 
-	return vaddr_start;
+	return gva_start;
 }
 
-vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
-			    enum kvm_mem_region_type type)
+gva_t __vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva,
+		 enum kvm_mem_region_type type)
 {
-	return ____vm_vaddr_alloc(vm, sz, vaddr_min, type,
-				  vm_arch_has_protected_memory(vm));
+	return ____vm_alloc(vm, sz, min_gva, type,
			    vm_arch_has_protected_memory(vm));
 }
 
-vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
-				 vm_vaddr_t vaddr_min,
-				 enum kvm_mem_region_type type)
+gva_t vm_alloc_shared(struct kvm_vm *vm, size_t sz, gva_t min_gva,
+		      enum kvm_mem_region_type type)
 {
-	return ____vm_vaddr_alloc(vm, sz, vaddr_min, type, false);
+	return ____vm_alloc(vm, sz, min_gva, type, false);
 }
 
 /*
 * VM Virtual Address Allocate
 *
- * Input Args:
- *   vm - Virtual Machine
- *   sz - Size in bytes
- *   vaddr_min - Minimum starting virtual address
- *
- * Output Args: None
- *
- * Return:
- *   Starting guest virtual address
- *
- * Allocates at least sz bytes within the virtual address space of the vm
- * given by vm. The allocated bytes are mapped to a virtual address >=
- * the address given by vaddr_min. Note that each allocation uses a
- * a unique set of pages, with the minimum real allocation being at least
- * a page. The allocated physical space comes from the TEST_DATA memory region.
+ * Allocates at least sz bytes within the virtual address space of the VM
+ * given by @vm. The allocated bytes are mapped to a virtual address >= the
+ * address given by @min_gva. Note that each allocation uses a a unique set
+ * of pages, with the minimum real allocation being at least a page. The
+ * allocated physical space comes from the TEST_DATA memory region.
 */
-vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
+gva_t vm_alloc(struct kvm_vm *vm, size_t sz, gva_t min_gva)
 {
-	return __vm_vaddr_alloc(vm, sz, vaddr_min, MEM_REGION_TEST_DATA);
+	return __vm_alloc(vm, sz, min_gva, MEM_REGION_TEST_DATA);
 }
 
+gva_t vm_alloc_pages(struct kvm_vm *vm, int nr_pages)
+{
+	return vm_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR);
+}
+
+gva_t __vm_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type)
+{
+	return __vm_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type);
+}
+
+gva_t vm_alloc_page(struct kvm_vm *vm)
+{
+	return vm_alloc_pages(vm, 1);
+}
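
/*
 * Minimal usage sketch (illustrative; the helper name is made up): allocate
 * guest memory for test data and initialize it through the host alias that
 * addr_gva2hva() returns.
 */
static gva_t example_alloc_test_buffer(struct kvm_vm *vm, size_t len)
{
	gva_t gva = vm_alloc(vm, len, KVM_UTIL_MIN_VADDR);

	/* Writes through the HVA are visible to the guest at @gva. */
	memset(addr_gva2hva(vm, gva), 0xaa, len);
	return gva;
}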
 
 /*
- * VM Virtual Address Allocate Pages
+ * Map a range of VM virtual address to the VM's physical address.
 *
- * Input Args:
- *   vm - Virtual Machine
- *
- * Output Args: None
- *
- * Return:
- *   Starting guest virtual address
- *
- * Allocates at least N system pages worth of bytes within the virtual address
- * space of the vm.
+ * Within the VM given by @vm, creates a virtual translation for @npages
+ * starting at @gva to the page range starting at @gpa.
 */
-vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages)
-{
-	return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR);
-}
-
-vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type)
-{
-	return __vm_vaddr_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type);
-}
-
-/*
- * VM Virtual Address Allocate Page
- *
- * Input Args:
- *   vm - Virtual Machine
- *
- * Output Args: None
- *
- * Return:
- *   Starting guest virtual address
- *
- * Allocates at least one system page worth of bytes within the virtual address
- * space of the vm.
- */
-vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm)
-{
-	return vm_vaddr_alloc_pages(vm, 1);
-}
-
-/*
- * Map a range of VM virtual address to the VM's physical address
- *
- * Input Args:
- *   vm - Virtual Machine
- *   vaddr - Virtuall address to map
- *   paddr - VM Physical Address
- *   npages - The number of pages to map
- *
- * Output Args: None
- *
- * Return: None
- *
- * Within the VM given by @vm, creates a virtual translation for
- * @npages starting at @vaddr to the page range starting at @paddr.
- */
-void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
-	      unsigned int npages)
+void virt_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa, unsigned int npages)
 {
 	size_t page_size = vm->page_size;
 	size_t size = npages * page_size;
 
-	TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
-	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
+	TEST_ASSERT(gva + size > gva, "Vaddr overflow");
+	TEST_ASSERT(gpa + size > gpa, "Paddr overflow");
 
 	while (npages--) {
-		virt_pg_map(vm, vaddr, paddr);
+		virt_pg_map(vm, gva, gpa);
 
-		vaddr += page_size;
-		paddr += page_size;
+		gva += page_size;
+		gpa += page_size;
 	}
 }
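
/*
 * Minimal usage sketch (illustrative; memslot 0 and the page count are
 * arbitrary): identity-map a freshly allocated physical range so guest code
 * can address it at its physical address.
 */
static void example_identity_map(struct kvm_vm *vm, size_t npages)
{
	gpa_t gpa = vm_phy_pages_alloc(vm, npages,
				       KVM_UTIL_MIN_PFN * vm->page_size, 0);

	/* gva == gpa; virt_map() walks the range one page at a time. */
	virt_map(vm, (gva_t)gpa, gpa, npages);
}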
@@ -1611,7 +1541,7 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 * address providing the memory to the vm physical address is returned.
 * A TEST_ASSERT failure occurs if no region containing gpa exists.
 */
-void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
+void *addr_gpa2hva(struct kvm_vm *vm, gpa_t gpa)
 {
 	struct userspace_mem_region *region;
 
@@ -1644,7 +1574,7 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
 * VM physical address is returned. A TEST_ASSERT failure occurs if no
 * region containing hva exists.
 */
-vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
+gpa_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
 {
 	struct rb_node *node;
 
@@ -1655,7 +1585,7 @@ vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
 		if (hva >= region->host_mem) {
 			if (hva <= (region->host_mem
 				+ region->region.memory_size - 1))
-				return (vm_paddr_t)((uintptr_t)
+				return (gpa_t)((uintptr_t)
 					region->region.guest_phys_addr
 					+ (hva - (uintptr_t)region->host_mem));
 
@@ -1687,7 +1617,7 @@ vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
 * memory without mapping said memory in the guest's address space. And, for
 * userfaultfd-based demand paging, to do so without triggering userfaults.
 */
-void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa)
+void *addr_gpa2alias(struct kvm_vm *vm, gpa_t gpa)
 {
 	struct userspace_mem_region *region;
 	uintptr_t offset;
@@ -1781,8 +1711,8 @@ struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu)
 
 void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu)
 {
-	uint32_t page_size = getpagesize();
-	uint32_t size = vcpu->vm->dirty_ring_size;
+	u32 page_size = getpagesize();
+	u32 size = vcpu->vm->dirty_ring_size;
 
 	TEST_ASSERT(size > 0, "Should enable dirty ring first");
 
@@ -1811,7 +1741,7 @@ void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu)
 * Device Ioctl
 */
 
-int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
+int __kvm_has_device_attr(int dev_fd, u32 group, u64 attr)
 {
 	struct kvm_device_attr attribute = {
 		.group = group,
@@ -1822,7 +1752,7 @@ int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
 	return ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute);
 }
 
-int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type)
+int __kvm_test_create_device(struct kvm_vm *vm, u64 type)
 {
 	struct kvm_create_device create_dev = {
 		.type = type,
@@ -1832,7 +1762,7 @@ int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type)
 	return __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev);
 }
 
-int __kvm_create_device(struct kvm_vm *vm, uint64_t type)
+int __kvm_create_device(struct kvm_vm *vm, u64 type)
 {
 	struct kvm_create_device create_dev = {
 		.type = type,
@@ -1846,7 +1776,7 @@ int __kvm_create_device(struct kvm_vm *vm, uint64_t type)
 	return err ? : create_dev.fd;
 }
 
-int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val)
+int __kvm_device_attr_get(int dev_fd, u32 group, u64 attr, void *val)
 {
 	struct kvm_device_attr kvmattr = {
 		.group = group,
@@ -1858,7 +1788,7 @@ int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val)
 	return __kvm_ioctl(dev_fd, KVM_GET_DEVICE_ATTR, &kvmattr);
 }
 
-int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val)
+int __kvm_device_attr_set(int dev_fd, u32 group, u64 attr, void *val)
 {
 	struct kvm_device_attr kvmattr = {
 		.group = group,
@@ -1874,7 +1804,7 @@ int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val)
 * IRQ related functions.
 */
 
-int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
+int _kvm_irq_line(struct kvm_vm *vm, u32 irq, int level)
 {
 	struct kvm_irq_level irq_level = {
 		.irq = irq,
@@ -1884,7 +1814,7 @@ int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
 	return __vm_ioctl(vm, KVM_IRQ_LINE, &irq_level);
 }
 
-void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
+void kvm_irq_line(struct kvm_vm *vm, u32 irq, int level)
 {
 	int ret = _kvm_irq_line(vm, irq, level);
 
@@ -1906,7 +1836,7 @@ struct kvm_irq_routing *kvm_gsi_routing_create(void)
 }
 
 void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
-				 uint32_t gsi, uint32_t pin)
+				 u32 gsi, u32 pin)
 {
 	int i;
 
@@ -1956,7 +1886,7 @@ void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
 * Dumps the current state of the VM given by vm, to the FILE stream
 * given by stream.
 */
-void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+void vm_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
 {
 	int ctr;
 	struct userspace_mem_region *region;
@@ -1969,8 +1899,8 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 	hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) {
 		fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
			"host_virt: %p\n", indent + 2, "",
-			(uint64_t) region->region.guest_phys_addr,
-			(uint64_t) region->region.memory_size,
+			(u64)region->region.guest_phys_addr,
+			(u64)region->region.memory_size,
			region->host_mem);
		fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
		sparsebit_dump(stream, region->unused_phy_pages, 0);
@@ -2077,7 +2007,7 @@ const char *exit_reason_str(unsigned int exit_reason)
 * Input Args:
 *   vm - Virtual Machine
 *   num - number of pages
- *   paddr_min - Physical address minimum
+ *   min_gpa - Physical address minimum
 *   memslot - Memory region to allocate page from
 *   protected - True if the pages will be used as protected/private memory
 *
@@ -2087,29 +2017,29 @@ const char *exit_reason_str(unsigned int exit_reason)
 *   Starting physical address
 *
 * Within the VM specified by vm, locates a range of available physical
- * pages at or above paddr_min. If found, the pages are marked as in use
+ * pages at or above min_gpa. If found, the pages are marked as in use
 * and their base address is returned. A TEST_ASSERT failure occurs if
- * not enough pages are available at or above paddr_min.
+ * not enough pages are available at or above min_gpa.
 */
-vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
-				vm_paddr_t paddr_min, uint32_t memslot,
-				bool protected)
+gpa_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+			   gpa_t min_gpa, u32 memslot,
			   bool protected)
 {
 	struct userspace_mem_region *region;
 	sparsebit_idx_t pg, base;
 
 	TEST_ASSERT(num > 0, "Must allocate at least one page");
 
-	TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
+	TEST_ASSERT((min_gpa % vm->page_size) == 0, "Min physical address "
 		"not divisible by page size.\n"
-		" paddr_min: 0x%lx page_size: 0x%x",
-		paddr_min, vm->page_size);
+		" min_gpa: 0x%lx page_size: 0x%x",
+		min_gpa, vm->page_size);
 
 	region = memslot2region(vm, memslot);
 	TEST_ASSERT(!protected || region->protected_phy_pages,
 		    "Region doesn't support protected memory");
 
-	base = pg = paddr_min >> vm->page_shift;
+	base = pg = min_gpa >> vm->page_shift;
 	do {
 		for (; pg < base + num; ++pg) {
 			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
@@ -2121,8 +2051,8 @@ vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
 
 	if (pg == 0) {
 		fprintf(stderr, "No guest physical page available, "
-			"paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
-			paddr_min, vm->page_size, memslot);
+			"min_gpa: 0x%lx page_size: 0x%x memslot: %u\n",
+			min_gpa, vm->page_size, memslot);
		fputs("---- vm dump ----\n", stderr);
		vm_dump(stderr, vm, 2);
		abort();
@@ -2137,13 +2067,12 @@ vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
 	return base * vm->page_size;
 }
 
-vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
-			     uint32_t memslot)
+gpa_t vm_phy_page_alloc(struct kvm_vm *vm, gpa_t min_gpa, u32 memslot)
 {
-	return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
+	return vm_phy_pages_alloc(vm, 1, min_gpa, memslot);
 }
 
-vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
+gpa_t vm_alloc_page_table(struct kvm_vm *vm)
 {
	return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR,
				 vm->memslots[MEM_REGION_PT]);
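
/*
 * Minimal usage sketch (illustrative; memslot 0 is an assumption): the
 * allocator asserts that the minimum address is page-aligned and marks the
 * returned pages as used in the region's sparsebit.
 */
static gpa_t example_alloc_phys(struct kvm_vm *vm, size_t npages)
{
	return __vm_phy_pages_alloc(vm, npages,
				    KVM_UTIL_MIN_PFN * vm->page_size,
				    0, false);
}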
@@ -2161,7 +2090,7 @@ vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
 * Return:
 *   Equivalent host virtual address
 */
-void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
+void *addr_gva2hva(struct kvm_vm *vm, gva_t gva)
 {
 	return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
 }
@@ -2259,7 +2188,7 @@ struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
 * Read the data values of a specified stat from the binary stats interface.
 */
 void read_stat_data(int stats_fd, struct kvm_stats_header *header,
-		    struct kvm_stats_desc *desc, uint64_t *data,
+		    struct kvm_stats_desc *desc, u64 *data,
 		    size_t max_elements)
 {
 	size_t nr_elements = min_t(ssize_t, desc->size, max_elements);
@@ -2280,7 +2209,7 @@ void read_stat_data(int stats_fd, struct kvm_stats_header *header,
 }
 
 void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
-		  uint64_t *data, size_t max_elements)
+		  u64 *data, size_t max_elements)
 {
 	struct kvm_stats_desc *desc;
 	size_t size_desc;
@@ -2357,7 +2286,7 @@ void __attribute((constructor)) kvm_selftest_init(void)
 	kvm_selftest_arch_init();
 }
 
-bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr)
+bool vm_is_gpa_protected(struct kvm_vm *vm, gpa_t gpa)
 {
 	sparsebit_idx_t pg = 0;
 	struct userspace_mem_region *region;
@@ -2365,10 +2294,10 @@ bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr)
 	if (!vm_arch_has_protected_memory(vm))
 		return false;
 
-	region = userspace_mem_region_find(vm, paddr, paddr);
-	TEST_ASSERT(region, "No vm physical memory at 0x%lx", paddr);
+	region = userspace_mem_region_find(vm, gpa, gpa);
+	TEST_ASSERT(region, "No vm physical memory at 0x%lx", gpa);
 
-	pg = paddr >> vm->page_shift;
+	pg = gpa >> vm->page_shift;
 	return sparsebit_is_set(region->protected_phy_pages, pg);
 }
 
@@ -12,32 +12,32 @@
 #define LOONGARCH_PAGE_TABLE_PHYS_MIN 0x200000
 #define LOONGARCH_GUEST_STACK_VADDR_MIN 0x200000
 
-static vm_paddr_t invalid_pgtable[4];
-static vm_vaddr_t exception_handlers;
+static gpa_t invalid_pgtable[4];
+static gva_t exception_handlers;
 
-static uint64_t virt_pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
+static u64 virt_pte_index(struct kvm_vm *vm, gva_t gva, int level)
 {
 	unsigned int shift;
-	uint64_t mask;
+	u64 mask;
 
 	shift = level * (vm->page_shift - 3) + vm->page_shift;
 	mask = (1UL << (vm->page_shift - 3)) - 1;
 	return (gva >> shift) & mask;
 }
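
/*
 * Worked example (informative): with 16K pages, page_shift == 14, so each
 * table level indexes 11 bits (14 - 3, i.e. 2048 eight-byte entries).
 * For level 1 the index field therefore starts at bit 25:
 *
 *	shift = 1 * (14 - 3) + 14 = 25
 *	mask  = (1UL << 11) - 1 = 0x7ff
 *	index = (gva >> 25) & 0x7ff
 */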
 
-static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
+static u64 pte_addr(struct kvm_vm *vm, u64 entry)
 {
 	return entry & ~((0x1UL << vm->page_shift) - 1);
 }
 
-static uint64_t ptrs_per_pte(struct kvm_vm *vm)
+static u64 ptrs_per_pte(struct kvm_vm *vm)
 {
 	return 1 << (vm->page_shift - 3);
 }
 
-static void virt_set_pgtable(struct kvm_vm *vm, vm_paddr_t table, vm_paddr_t child)
+static void virt_set_pgtable(struct kvm_vm *vm, gpa_t table, gpa_t child)
 {
-	uint64_t *ptep;
+	u64 *ptep;
 	int i, ptrs_per_pte;
 
 	ptep = addr_gpa2hva(vm, table);
@@ -49,7 +49,7 @@ static void virt_set_pgtable(struct kvm_vm *vm, vm_paddr_t table, vm_paddr_t chi
 void virt_arch_pgd_alloc(struct kvm_vm *vm)
 {
 	int i;
-	vm_paddr_t child, table;
+	gpa_t child, table;
 
 	if (vm->mmu.pgd_created)
 		return;
@@ -67,16 +67,16 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
 	vm->mmu.pgd_created = true;
 }
 
-static int virt_pte_none(uint64_t *ptep, int level)
+static int virt_pte_none(u64 *ptep, int level)
 {
 	return *ptep == invalid_pgtable[level];
 }
 
-static uint64_t *virt_populate_pte(struct kvm_vm *vm, vm_vaddr_t gva, int alloc)
+static u64 *virt_populate_pte(struct kvm_vm *vm, gva_t gva, int alloc)
 {
 	int level;
-	uint64_t *ptep;
-	vm_paddr_t child;
+	u64 *ptep;
+	gpa_t child;
 
 	if (!vm->mmu.pgd_created)
 		goto unmapped_gva;
@@ -106,43 +106,42 @@ static uint64_t *virt_populate_pte(struct kvm_vm *vm, vm_vaddr_t gva, int alloc)
 	exit(EXIT_FAILURE);
 }
 
-vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva)
 {
-	uint64_t *ptep;
+	u64 *ptep;
 
 	ptep = virt_populate_pte(vm, gva, 0);
-	TEST_ASSERT(*ptep != 0, "Virtual address vaddr: 0x%lx not mapped\n", gva);
+	TEST_ASSERT(*ptep != 0, "Virtual address gva: 0x%lx not mapped\n", gva);
 
 	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
 }
 
-void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
 {
-	uint32_t prot_bits;
-	uint64_t *ptep;
+	u32 prot_bits;
+	u64 *ptep;
 
-	TEST_ASSERT((vaddr % vm->page_size) == 0,
+	TEST_ASSERT((gva % vm->page_size) == 0,
 		"Virtual address not on page boundary,\n"
-		"vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
-	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
-		(vaddr >> vm->page_shift)),
-		"Invalid virtual address, vaddr: 0x%lx", vaddr);
-	TEST_ASSERT((paddr % vm->page_size) == 0,
+		"gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size);
+	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
+		"Invalid virtual address, gva: 0x%lx", gva);
+	TEST_ASSERT((gpa % vm->page_size) == 0,
		"Physical address not on page boundary,\n"
-		"paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
-	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
+		"gpa: 0x%lx vm->page_size: 0x%x", gpa, vm->page_size);
+	TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
		"Physical address beyond maximum supported,\n"
-		"paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
-		paddr, vm->max_gfn, vm->page_size);
+		"gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+		gpa, vm->max_gfn, vm->page_size);
 
-	ptep = virt_populate_pte(vm, vaddr, 1);
+	ptep = virt_populate_pte(vm, gva, 1);
 	prot_bits = _PAGE_PRESENT | __READABLE | __WRITEABLE | _CACHE_CC | _PAGE_USER;
-	WRITE_ONCE(*ptep, paddr | prot_bits);
+	WRITE_ONCE(*ptep, gpa | prot_bits);
 }
 
-static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
+static void pte_dump(FILE *stream, struct kvm_vm *vm, u8 indent, u64 page, int level)
 {
-	uint64_t pte, *ptep;
+	u64 pte, *ptep;
 	static const char * const type[] = { "pte", "pmd", "pud", "pgd"};
 
 	if (level < 0)
@@ -158,7 +157,7 @@ static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t p
 	}
 }
 
-void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
 {
 	int level;
 
@@ -169,7 +168,7 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 	pte_dump(stream, vm, indent, vm->mmu.pgd, level);
 }
 
-void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent)
 {
 }
@@ -206,8 +205,9 @@ void vm_init_descriptor_tables(struct kvm_vm *vm)
 {
 	void *addr;
 
-	vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
-			LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
+	vm->handlers = __vm_alloc(vm, sizeof(struct handlers),
+				  LOONGARCH_GUEST_STACK_VADDR_MIN,
				  MEM_REGION_DATA);
 
 	addr = addr_gva2hva(vm, vm->handlers);
 	memset(addr, 0, vm->page_size);
@@ -223,7 +223,7 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector, handler_fn hand
 	handlers->exception_handlers[vector] = handler;
 }
 
-uint32_t guest_get_vcpuid(void)
+u32 guest_get_vcpuid(void)
 {
 	return csr_read(LOONGARCH_CSR_CPUID);
 }
@@ -241,36 +241,36 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
 
 	va_start(ap, num);
 	for (i = 0; i < num; i++)
-		regs.gpr[i + 4] = va_arg(ap, uint64_t);
+		regs.gpr[i + 4] = va_arg(ap, u64);
 	va_end(ap);
 
 	vcpu_regs_set(vcpu, &regs);
 }
 
-static void loongarch_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
+static void loongarch_set_reg(struct kvm_vcpu *vcpu, u64 id, u64 val)
 {
 	__vcpu_set_reg(vcpu, id, val);
 }
 
-static void loongarch_set_cpucfg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
+static void loongarch_set_cpucfg(struct kvm_vcpu *vcpu, u64 id, u64 val)
 {
-	uint64_t cfgid;
+	u64 cfgid;
 
 	cfgid = KVM_REG_LOONGARCH_CPUCFG | KVM_REG_SIZE_U64 | 8 * id;
 	__vcpu_set_reg(vcpu, cfgid, val);
 }
 
-static void loongarch_get_csr(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
+static void loongarch_get_csr(struct kvm_vcpu *vcpu, u64 id, void *addr)
 {
-	uint64_t csrid;
+	u64 csrid;
 
 	csrid = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | 8 * id;
 	__vcpu_get_reg(vcpu, csrid, addr);
 }
 
-static void loongarch_set_csr(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
+static void loongarch_set_csr(struct kvm_vcpu *vcpu, u64 id, u64 val)
 {
-	uint64_t csrid;
+	u64 csrid;
 
 	csrid = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | 8 * id;
 	__vcpu_set_reg(vcpu, csrid, val);
@@ -354,8 +354,8 @@ void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
 	loongarch_set_csr(vcpu, LOONGARCH_CSR_STLBPGSIZE, PS_DEFAULT_SIZE);
 
 	/* LOONGARCH_CSR_KS1 is used for exception stack */
-	val = __vm_vaddr_alloc(vm, vm->page_size,
-			LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
+	val = __vm_alloc(vm, vm->page_size, LOONGARCH_GUEST_STACK_VADDR_MIN,
			 MEM_REGION_DATA);
 	TEST_ASSERT(val != 0, "No memory for exception stack");
 	val = val + vm->page_size;
 	loongarch_set_csr(vcpu, LOONGARCH_CSR_KS1, val);
@@ -369,23 +369,23 @@ void loongarch_vcpu_setup(struct kvm_vcpu *vcpu)
 	loongarch_set_csr(vcpu, LOONGARCH_CSR_TMID, vcpu->id);
 }
 
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id)
 {
 	size_t stack_size;
-	uint64_t stack_vaddr;
+	u64 stack_gva;
 	struct kvm_regs regs;
 	struct kvm_vcpu *vcpu;
 
 	vcpu = __vm_vcpu_add(vm, vcpu_id);
 	stack_size = vm->page_size;
-	stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
-			LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
-	TEST_ASSERT(stack_vaddr != 0, "No memory for vm stack");
+	stack_gva = __vm_alloc(vm, stack_size,
			       LOONGARCH_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);
+	TEST_ASSERT(stack_gva != 0, "No memory for vm stack");
 
 	loongarch_vcpu_setup(vcpu);
 	/* Setup guest general purpose registers */
 	vcpu_regs_get(vcpu, &regs);
-	regs.gpr[3] = stack_vaddr + stack_size;
+	regs.gpr[3] = stack_gva + stack_size;
 	vcpu_regs_set(vcpu, &regs);
 
 	return vcpu;
@@ -397,6 +397,6 @@ void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
 
 	/* Setup guest PC register */
 	vcpu_regs_get(vcpu, &regs);
-	regs.pc = (uint64_t)guest_code;
+	regs.pc = (u64)guest_code;
 	vcpu_regs_set(vcpu, &regs);
 }
 
@@ -9,17 +9,17 @@
 * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each
 * VM), it must not be accessed from host code.
 */
-vm_vaddr_t *ucall_exit_mmio_addr;
+gva_t *ucall_exit_mmio_addr;
 
-void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+void ucall_arch_init(struct kvm_vm *vm, gpa_t mmio_gpa)
 {
-	vm_vaddr_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR);
+	gva_t mmio_gva = vm_unused_gva_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR);
 
 	virt_map(vm, mmio_gva, mmio_gpa, 1);
 
 	vm->ucall_mmio_addr = mmio_gpa;
 
-	write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gva);
+	write_guest_global(vm, ucall_exit_mmio_addr, (gva_t *)mmio_gva);
 }
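
/*
 * For context, the guest-side counterpart looks roughly like this (a sketch
 * mirroring the MMIO-based ucall backends, not a quote of any one
 * architecture's implementation): storing the ucall struct's GVA to the
 * magic page triggers the KVM_EXIT_MMIO that ucall_arch_get_ucall() below
 * decodes on the host.
 */
void ucall_arch_do_ucall(gva_t uc)
{
	WRITE_ONCE(*ucall_exit_mmio_addr, uc);
}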
 
 void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
@@ -28,10 +28,10 @@ void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
 
 	if (run->exit_reason == KVM_EXIT_MMIO &&
 	    run->mmio.phys_addr == vcpu->vm->ucall_mmio_addr) {
-		TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(uint64_t),
+		TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(u64),
			    "Unexpected ucall exit mmio address access");
 
-		return (void *)(*((uint64_t *)run->mmio.data));
+		return (void *)(*((u64 *)run->mmio.data));
 	}
 
 	return NULL;
 
@@ -16,7 +16,7 @@ struct memstress_args memstress_args;
 * Guest virtual memory offset of the testing memory slot.
 * Must not conflict with identity mapped test code.
 */
-static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
+static u64 guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
 
 struct vcpu_thread {
 	/* The index of the vCPU. */
@@ -44,15 +44,15 @@ static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
 * Continuously write to the first 8 bytes of each page in the
 * specified region.
 */
-void memstress_guest_code(uint32_t vcpu_idx)
+void memstress_guest_code(u32 vcpu_idx)
 {
 	struct memstress_args *args = &memstress_args;
 	struct memstress_vcpu_args *vcpu_args = &args->vcpu_args[vcpu_idx];
 	struct guest_random_state rand_state;
-	uint64_t gva;
-	uint64_t pages;
-	uint64_t addr;
-	uint64_t page;
+	gva_t gva;
+	u64 pages;
+	u64 addr;
+	u64 page;
 	int i;
 
 	rand_state = new_guest_random_state(guest_random_seed + vcpu_idx);
@@ -76,9 +76,9 @@ void memstress_guest_code(uint32_t vcpu_idx)
 			addr = gva + (page * args->guest_page_size);
 
 			if (__guest_random_bool(&rand_state, args->write_percent))
-				*(uint64_t *)addr = 0x0123456789ABCDEF;
+				*(u64 *)addr = 0x0123456789ABCDEF;
 			else
-				READ_ONCE(*(uint64_t *)addr);
+				READ_ONCE(*(u64 *)addr);
 		}
 
 		GUEST_SYNC(1);
@@ -87,7 +87,7 @@ void memstress_guest_code(uint32_t vcpu_idx)
 
 void memstress_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
 			   struct kvm_vcpu *vcpus[],
-			   uint64_t vcpu_memory_bytes,
+			   u64 vcpu_memory_bytes,
			   bool partition_vcpu_memory_access)
 {
 	struct memstress_args *args = &memstress_args;
@@ -122,15 +122,15 @@ void memstress_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
 }
 
 struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
-				   uint64_t vcpu_memory_bytes, int slots,
+				   u64 vcpu_memory_bytes, int slots,
				   enum vm_mem_backing_src_type backing_src,
				   bool partition_vcpu_memory_access)
 {
 	struct memstress_args *args = &memstress_args;
 	struct kvm_vm *vm;
-	uint64_t guest_num_pages, slot0_pages = 0;
-	uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src);
-	uint64_t region_end_gfn;
+	u64 guest_num_pages, slot0_pages = 0;
+	u64 backing_src_pagesz = get_backing_src_pagesz(backing_src);
+	u64 region_end_gfn;
 	int i;
 
 	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
@@ -202,8 +202,8 @@ struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
 
 	/* Add extra memory slots for testing */
 	for (i = 0; i < slots; i++) {
-		uint64_t region_pages = guest_num_pages / slots;
-		vm_paddr_t region_start = args->gpa + region_pages * args->guest_page_size * i;
+		u64 region_pages = guest_num_pages / slots;
+		gpa_t region_start = args->gpa + region_pages * args->guest_page_size * i;
 
		vm_userspace_mem_region_add(vm, backing_src, region_start,
					    MEMSTRESS_MEM_SLOT_INDEX + i,
@@ -232,7 +232,7 @@ void memstress_destroy_vm(struct kvm_vm *vm)
 	kvm_vm_free(vm);
 }
 
-void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent)
+void memstress_set_write_percent(struct kvm_vm *vm, u32 write_percent)
 {
 	memstress_args.write_percent = write_percent;
 	sync_global_to_guest(vm, memstress_args.write_percent);
@@ -244,7 +244,7 @@ void memstress_set_random_access(struct kvm_vm *vm, bool random_access)
 	sync_global_to_guest(vm, memstress_args.random_access);
 }
 
-uint64_t __weak memstress_nested_pages(int nr_vcpus)
+u64 __weak memstress_nested_pages(int nr_vcpus)
 {
 	return 0;
 }
@@ -349,7 +349,7 @@ void memstress_get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int sl
 }
 
 void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
-			       int slots, uint64_t pages_per_slot)
+			       int slots, u64 pages_per_slot)
 {
 	int i;
 
@@ -360,7 +360,7 @@ void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
 	}
 }
 
-unsigned long **memstress_alloc_bitmaps(int slots, uint64_t pages_per_slot)
+unsigned long **memstress_alloc_bitmaps(int slots, u64 pages_per_slot)
 {
 	unsigned long **bitmaps;
 	int i;
 
@@ -15,9 +15,9 @@
 #define DEFAULT_RISCV_GUEST_STACK_VADDR_MIN 0xac0000
 
-static vm_vaddr_t exception_handlers;
+static gva_t exception_handlers;
 
-bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext)
+bool __vcpu_has_ext(struct kvm_vcpu *vcpu, u64 ext)
 {
 	unsigned long value = 0;
 	int ret;
@@ -27,32 +27,32 @@ bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext)
 	return !ret && !!value;
 }
 
-static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
+static u64 pte_addr(struct kvm_vm *vm, u64 entry)
 {
 	return ((entry & PGTBL_PTE_ADDR_MASK) >> PGTBL_PTE_ADDR_SHIFT) <<
	       PGTBL_PAGE_SIZE_SHIFT;
 }
 
-static uint64_t ptrs_per_pte(struct kvm_vm *vm)
+static u64 ptrs_per_pte(struct kvm_vm *vm)
 {
-	return PGTBL_PAGE_SIZE / sizeof(uint64_t);
+	return PGTBL_PAGE_SIZE / sizeof(u64);
 }
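
/*
 * Worked example (informative, assuming the usual RISC-V constants of
 * PGTBL_PTE_ADDR_SHIFT == 10 and PGTBL_PAGE_SIZE_SHIFT == 12): a PTE
 * stores the PPN starting at bit 10, so recovering the byte address means
 * masking the PPN field, shifting it down, then shifting up by the page
 * shift:
 *
 *	pte_addr(pte) = ((pte & PGTBL_PTE_ADDR_MASK) >> 10) << 12
 *
 * e.g. a PTE of 0x20000401 yields PPN 0x80001 and address 0x80001000,
 * with bit 0 left over as the valid bit.
 */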
 
-static uint64_t pte_index_mask[] = {
+static u64 pte_index_mask[] = {
 	PGTBL_L0_INDEX_MASK,
 	PGTBL_L1_INDEX_MASK,
 	PGTBL_L2_INDEX_MASK,
 	PGTBL_L3_INDEX_MASK,
 };
 
-static uint32_t pte_index_shift[] = {
+static u32 pte_index_shift[] = {
 	PGTBL_L0_INDEX_SHIFT,
 	PGTBL_L1_INDEX_SHIFT,
 	PGTBL_L2_INDEX_SHIFT,
 	PGTBL_L3_INDEX_SHIFT,
 };
 
-static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
+static u64 pte_index(struct kvm_vm *vm, gva_t gva, int level)
 {
 	TEST_ASSERT(level > -1,
 		    "Negative page table level (%d) not possible", level);
@@ -75,26 +75,25 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
 	vm->mmu.pgd_created = true;
 }
 
-void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
 {
-	uint64_t *ptep, next_ppn;
+	u64 *ptep, next_ppn;
 	int level = vm->mmu.pgtable_levels - 1;
 
-	TEST_ASSERT((vaddr % vm->page_size) == 0,
+	TEST_ASSERT((gva % vm->page_size) == 0,
		"Virtual address not on page boundary,\n"
-		" vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
-	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
-		(vaddr >> vm->page_shift)),
-		"Invalid virtual address, vaddr: 0x%lx", vaddr);
-	TEST_ASSERT((paddr % vm->page_size) == 0,
+		" gva: 0x%lx vm->page_size: 0x%x", gva, vm->page_size);
+	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
+		"Invalid virtual address, gva: 0x%lx", gva);
+	TEST_ASSERT((gpa % vm->page_size) == 0,
		"Physical address not on page boundary,\n"
-		" paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
-	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
+		" gpa: 0x%lx vm->page_size: 0x%x", gpa, vm->page_size);
+	TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
		"Physical address beyond maximum supported,\n"
-		" paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
-		paddr, vm->max_gfn, vm->page_size);
+		" gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+		gpa, vm->max_gfn, vm->page_size);
 
-	ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pte_index(vm, vaddr, level) * 8;
+	ptep = addr_gpa2hva(vm, vm->mmu.pgd) + pte_index(vm, gva, level) * 8;
 	if (!*ptep) {
 		next_ppn = vm_alloc_page_table(vm) >> PGTBL_PAGE_SIZE_SHIFT;
 		*ptep = (next_ppn << PGTBL_PTE_ADDR_SHIFT) |
@@ -104,7 +103,7 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
 
 	while (level > -1) {
 		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) +
-		       pte_index(vm, vaddr, level) * 8;
+		       pte_index(vm, gva, level) * 8;
 		if (!*ptep && level > 0) {
 			next_ppn = vm_alloc_page_table(vm) >>
				   PGTBL_PAGE_SIZE_SHIFT;
@@ -114,14 +113,14 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
 		level--;
 	}
 
-	paddr = paddr >> PGTBL_PAGE_SIZE_SHIFT;
-	*ptep = (paddr << PGTBL_PTE_ADDR_SHIFT) |
+	gpa = gpa >> PGTBL_PAGE_SIZE_SHIFT;
+	*ptep = (gpa << PGTBL_PTE_ADDR_SHIFT) |
		PGTBL_PTE_PERM_MASK | PGTBL_PTE_VALID_MASK;
 }
 
-vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva)
 {
-	uint64_t *ptep;
+	u64 *ptep;
 	int level = vm->mmu.pgtable_levels - 1;
 
 	if (!vm->mmu.pgd_created)
@@ -148,12 +147,12 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
 	exit(1);
 }
 
-static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent,
-		     uint64_t page, int level)
+static void pte_dump(FILE *stream, struct kvm_vm *vm, u8 indent,
+		     u64 page, int level)
 {
 #ifdef DEBUG
 	static const char *const type[] = { "pte", "pmd", "pud", "p4d"};
-	uint64_t pte, *ptep;
+	u64 pte, *ptep;
 
 	if (level < 0)
 		return;
@@ -170,11 +169,11 @@ static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent,
 #endif
 }
 
-void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
 {
 	struct kvm_mmu *mmu = &vm->mmu;
 	int level = mmu->pgtable_levels - 1;
-	uint64_t pgd, *ptep;
+	u64 pgd, *ptep;
 
 	if (!mmu->pgd_created)
 		return;
|
||||
@@ -233,7 +232,7 @@ void riscv_vcpu_mmu_setup(struct kvm_vcpu *vcpu)
|
||||
vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(satp), satp);
|
||||
}
|
||||
|
||||
void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
|
||||
void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent)
|
||||
{
|
||||
struct kvm_riscv_core core;
|
||||
|
||||
@@ -311,20 +310,20 @@ void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
|
||||
vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code);
|
||||
}
|
||||
|
||||
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
|
||||
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id)
|
||||
{
|
||||
int r;
|
||||
size_t stack_size;
|
||||
unsigned long stack_vaddr;
|
||||
unsigned long stack_gva;
|
||||
unsigned long current_gp = 0;
|
||||
struct kvm_mp_state mps;
|
||||
struct kvm_vcpu *vcpu;
|
||||
|
||||
stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
|
||||
vm->page_size;
|
||||
stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
|
||||
DEFAULT_RISCV_GUEST_STACK_VADDR_MIN,
|
||||
MEM_REGION_DATA);
|
||||
stack_gva = __vm_alloc(vm, stack_size,
|
||||
DEFAULT_RISCV_GUEST_STACK_VADDR_MIN,
|
||||
MEM_REGION_DATA);
|
||||
|
||||
vcpu = __vm_vcpu_add(vm, vcpu_id);
|
||||
riscv_vcpu_mmu_setup(vcpu);
|
||||
@@ -344,7 +343,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
|
||||
vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.gp), current_gp);
|
||||
|
||||
/* Setup stack pointer and program counter of guest */
|
||||
vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_vaddr + stack_size);
|
||||
vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_gva + stack_size);
|
||||
|
||||
/* Setup sscratch for guest_get_vcpuid() */
|
||||
vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(sscratch), vcpu_id);
|
||||
@@ -358,7 +357,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
|
||||
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
|
||||
{
|
||||
va_list ap;
|
||||
uint64_t id = RISCV_CORE_REG(regs.a0);
|
||||
u64 id = RISCV_CORE_REG(regs.a0);
|
||||
int i;
|
||||
|
||||
TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
|
||||
@@ -393,7 +392,7 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
|
||||
id = RISCV_CORE_REG(regs.a7);
|
||||
break;
|
||||
}
|
||||
vcpu_set_reg(vcpu, id, va_arg(ap, uint64_t));
|
||||
vcpu_set_reg(vcpu, id, va_arg(ap, u64));
|
||||
}
|
||||
|
||||
va_end(ap);
|
||||
@@ -449,10 +448,10 @@ void vcpu_init_vector_tables(struct kvm_vcpu *vcpu)
|
||||
|
||||
void vm_init_vector_tables(struct kvm_vm *vm)
|
||||
{
|
||||
vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
|
||||
vm->page_size, MEM_REGION_DATA);
|
||||
vm->handlers = __vm_alloc(vm, sizeof(struct handlers), vm->page_size,
|
||||
MEM_REGION_DATA);
|
||||
|
||||
*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
|
||||
*(gva_t *)addr_gva2hva(vm, (gva_t)(&exception_handlers)) = vm->handlers;
|
||||
}
|
||||
|
||||
void vm_install_exception_handler(struct kvm_vm *vm, int vector, exception_handler_fn handler)
|
||||
@@ -470,7 +469,7 @@ void vm_install_interrupt_handler(struct kvm_vm *vm, exception_handler_fn handle
|
||||
handlers->exception_handlers[1][0] = handler;
|
||||
}
|
||||
|
||||
uint32_t guest_get_vcpuid(void)
|
||||
u32 guest_get_vcpuid(void)
|
||||
{
|
||||
return csr_read(CSR_SSCRATCH);
|
||||
}
|
||||
@@ -544,10 +543,10 @@ void kvm_selftest_arch_init(void)
|
||||
unsigned long riscv64_get_satp_mode(void)
|
||||
{
|
||||
int kvm_fd, vm_fd, vcpu_fd, err;
|
||||
uint64_t val;
|
||||
u64 val;
|
||||
struct kvm_one_reg reg = {
|
||||
.id = RISCV_CONFIG_REG(satp_mode),
|
||||
.addr = (uint64_t)&val,
|
||||
.addr = (u64)&val,
|
||||
};
|
||||
|
||||
kvm_fd = open_kvm_dev_path_or_exit();
|
||||
|
||||
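
The pte_addr() conversion above is worth unpacking: a RISC-V PTE stores a physical page number starting at bit 10, so recovering the byte address means masking out the PPN field, shifting it down, then shifting it back up by the page-size shift. A self-contained sketch under assumed Sv48-style constants (the example PTE value is invented; the real PGTBL_* macros live in the selftests' riscv headers):

#include <stdint.h>
#include <stdio.h>

#define PGTBL_PTE_ADDR_SHIFT	10	/* PPN field starts at bit 10 */
#define PGTBL_PTE_ADDR_MASK	0x003ffffffffffc00ull
#define PGTBL_PAGE_SIZE_SHIFT	12	/* 4 KiB pages */

int main(void)
{
	/* Invented PTE: PPN 0x80000, valid bit set. */
	uint64_t entry = (0x80000ull << PGTBL_PTE_ADDR_SHIFT) | 0x1;
	uint64_t pa = ((entry & PGTBL_PTE_ADDR_MASK) >> PGTBL_PTE_ADDR_SHIFT)
		      << PGTBL_PAGE_SIZE_SHIFT;

	/* PPN 0x80000 << 12 == physical address 0x80000000 */
	printf("pa = 0x%llx\n", (unsigned long long)pa);
	return 0;
}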
@@ -13,7 +13,7 @@

 static void guest_code(void)
 {
-	uint64_t diag318_info = 0x12345678;
+	u64 diag318_info = 0x12345678;

 	asm volatile ("diag %0,0,0x318\n" : : "d" (diag318_info));
 }
@@ -23,13 +23,13 @@ static void guest_code(void)
  * we create an ad-hoc VM here to handle the instruction then extract the
  * necessary data. It is up to the caller to decide what to do with that data.
  */
-static uint64_t diag318_handler(void)
+static u64 diag318_handler(void)
 {
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
 	struct kvm_run *run;
-	uint64_t reg;
-	uint64_t diag318_info;
+	u64 reg;
+	u64 diag318_info;

 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 	vcpu_run(vcpu);
@@ -51,9 +51,9 @@ static uint64_t diag318_handler(void)
 	return diag318_info;
 }

-uint64_t get_diag318_info(void)
+u64 get_diag318_info(void)
 {
-	static uint64_t diag318_info;
+	static u64 diag318_info;
 	static bool printed_skip;

 	/*

@@ -10,5 +10,5 @@

 #include "facility.h"

-uint64_t stfl_doublewords[NB_STFL_DOUBLEWORDS];
+u64 stfl_doublewords[NB_STFL_DOUBLEWORDS];
 bool stfle_flag;
@@ -12,7 +12,7 @@

 void virt_arch_pgd_alloc(struct kvm_vm *vm)
 {
-	vm_paddr_t paddr;
+	gpa_t gpa;

 	TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
 		    vm->page_size);
@@ -20,12 +20,12 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
 	if (vm->mmu.pgd_created)
 		return;

-	paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
+	gpa = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
 				   KVM_GUEST_PAGE_TABLE_MIN_PADDR,
 				   vm->memslots[MEM_REGION_PT]);
-	memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);
+	memset(addr_gpa2hva(vm, gpa), 0xff, PAGES_PER_REGION * vm->page_size);

-	vm->mmu.pgd = paddr;
+	vm->mmu.pgd = gpa;
 	vm->mmu.pgd_created = true;
 }

@@ -34,9 +34,9 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
  * a page table (ri == 4). Returns a suitable region/segment table entry
  * which points to the freshly allocated pages.
  */
-static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri)
+static u64 virt_alloc_region(struct kvm_vm *vm, int ri)
 {
-	uint64_t taddr;
+	u64 taddr;

 	taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1,
 				   KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
@@ -47,26 +47,24 @@ static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri)
 		| ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH);
 }

-void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
 {
 	int ri, idx;
-	uint64_t *entry;
+	u64 *entry;

 	TEST_ASSERT((gva % vm->page_size) == 0,
-		"Virtual address not on page boundary,\n"
-		"  vaddr: 0x%lx vm->page_size: 0x%x",
-		gva, vm->page_size);
-	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
-		(gva >> vm->page_shift)),
-		"Invalid virtual address, vaddr: 0x%lx",
-		gva);
+		    "Virtual address not on page boundary,\n"
+		    "  gva: 0x%lx vm->page_size: 0x%x",
+		    gva, vm->page_size);
+	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
+		    "Invalid virtual address, gva: 0x%lx", gva);
 	TEST_ASSERT((gpa % vm->page_size) == 0,
 		"Physical address not on page boundary,\n"
-		"  paddr: 0x%lx vm->page_size: 0x%x",
+		"  gpa: 0x%lx vm->page_size: 0x%x",
 		gva, vm->page_size);
 	TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
 		"Physical address beyond beyond maximum supported,\n"
-		"  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+		"  gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		gva, vm->max_gfn, vm->page_size);

 	/* Walk through region and segment tables */
@@ -86,10 +84,10 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
 	entry[idx] = gpa;
 }

-vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva)
 {
 	int ri, idx;
-	uint64_t *entry;
+	u64 *entry;

 	TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
 		    vm->page_size);
@@ -111,10 +109,10 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
 	return (entry[idx] & ~0xffful) + (gva & 0xffful);
 }

-static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, uint8_t indent,
-			   uint64_t ptea_start)
+static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, u8 indent,
+			   u64 ptea_start)
 {
-	uint64_t *pte, ptea;
+	u64 *pte, ptea;

 	for (ptea = ptea_start; ptea < ptea_start + 0x100 * 8; ptea += 8) {
 		pte = addr_gpa2hva(vm, ptea);
@@ -125,10 +123,10 @@ static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, uint8_t indent,
 	}
 }

-static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent,
-			     uint64_t reg_tab_addr)
+static void virt_dump_region(FILE *stream, struct kvm_vm *vm, u8 indent,
+			     u64 reg_tab_addr)
 {
-	uint64_t addr, *entry;
+	u64 addr, *entry;

 	for (addr = reg_tab_addr; addr < reg_tab_addr + 0x400 * 8; addr += 8) {
 		entry = addr_gpa2hva(vm, addr);
@@ -147,7 +145,7 @@ static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent,
 	}
 }

-void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
 {
 	if (!vm->mmu.pgd_created)
 		return;
@@ -160,10 +158,10 @@ void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
 	vcpu->run->psw_addr = (uintptr_t)guest_code;
 }

-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id)
 {
 	size_t stack_size = DEFAULT_STACK_PGS * getpagesize();
-	uint64_t stack_vaddr;
+	u64 stack_gva;
 	struct kvm_regs regs;
 	struct kvm_sregs sregs;
 	struct kvm_vcpu *vcpu;
@@ -171,15 +169,14 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 	TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
 		    vm->page_size);

-	stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
-				       DEFAULT_GUEST_STACK_VADDR_MIN,
-				       MEM_REGION_DATA);
+	stack_gva = __vm_alloc(vm, stack_size, DEFAULT_GUEST_STACK_VADDR_MIN,
+			       MEM_REGION_DATA);

 	vcpu = __vm_vcpu_add(vm, vcpu_id);

 	/* Setup guest registers */
 	vcpu_regs_get(vcpu, &regs);
-	regs.gprs[15] = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize()) - 160;
+	regs.gprs[15] = stack_gva + (DEFAULT_STACK_PGS * getpagesize()) - 160;
 	vcpu_regs_set(vcpu, &regs);

 	vcpu_sregs_get(vcpu, &sregs);
@@ -206,13 +203,13 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
 	vcpu_regs_get(vcpu, &regs);

 	for (i = 0; i < num; i++)
-		regs.gprs[i + 2] = va_arg(ap, uint64_t);
+		regs.gprs[i + 2] = va_arg(ap, u64);

 	vcpu_regs_set(vcpu, &regs);
 	va_end(ap);
 }

-void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent)
 {
 	fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n",
 		indent, "", vcpu->run->psw_mask, vcpu->run->psw_addr);
@@ -76,11 +76,11 @@
  * the use of a binary-search tree, where each node contains at least
  * the following members:
  *
- *   typedef uint64_t sparsebit_idx_t;
- *   typedef uint64_t sparsebit_num_t;
+ *   typedef u64 sparsebit_idx_t;
+ *   typedef u64 sparsebit_num_t;
  *
  *   sparsebit_idx_t idx;
- *   uint32_t mask;
+ *   u32 mask;
  *   sparsebit_num_t num_after;
 *
 * The idx member contains the bit index of the first bit described by this
@@ -162,7 +162,7 @@

 #define DUMP_LINE_MAX 100 /* Does not include indent amount */

-typedef uint32_t mask_t;
+typedef u32 mask_t;
 #define MASK_BITS (sizeof(mask_t) * CHAR_BIT)

 struct node {
@@ -2056,9 +2056,9 @@ unsigned char get8(void)
 	return ch;
 }

-uint64_t get64(void)
+u64 get64(void)
 {
-	uint64_t x;
+	u64 x;

 	x = get8();
 	x = (x << 8) | get8();
@@ -2074,9 +2074,9 @@ int main(void)
 {
 	s = sparsebit_alloc();
 	for (;;) {
-		uint8_t op = get8() & 0xf;
-		uint64_t first = get64();
-		uint64_t last = get64();
+		u8 op = get8() & 0xf;
+		u64 first = get64();
+		u64 last = get64();

 		operate(op, first, last);
 	}
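
Pulling the members quoted in the comment above together, the tree node looks roughly like this (a sketch assembled from the comment, not the full definition in sparsebit.c; the parent/left/right pointers are assumed from it being a binary-search tree):

typedef u64 sparsebit_idx_t;
typedef u64 sparsebit_num_t;

struct node {
	struct node *parent;		/* assumed: BST linkage */
	struct node *left;
	struct node *right;
	sparsebit_idx_t idx;		/* index of first bit described here */
	u32 mask;			/* which of the 32 bits at idx are set */
	sparsebit_num_t num_after;	/* contiguous set bits after the mask */
};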
@@ -30,15 +30,15 @@ void __attribute__((used)) expect_sigbus_handler(int signum)
 * Park-Miller LCG using standard constants.
 */

-struct guest_random_state new_guest_random_state(uint32_t seed)
+struct guest_random_state new_guest_random_state(u32 seed)
 {
 	struct guest_random_state s = {.seed = seed};
 	return s;
 }

-uint32_t guest_random_u32(struct guest_random_state *state)
+u32 guest_random_u32(struct guest_random_state *state)
 {
-	state->seed = (uint64_t)state->seed * 48271 % ((uint32_t)(1 << 31) - 1);
+	state->seed = (u64)state->seed * 48271 % ((u32)(1 << 31) - 1);
 	return state->seed;
 }
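
guest_random_u32() is the minimal-standard Lehmer generator: multiply the state by 48271 modulo the Mersenne prime 2^31 - 1, with the product widened to 64 bits so it cannot overflow before the modulo. The same recurrence, standalone:

#include <stdint.h>
#include <stdio.h>

/* Park-Miller: x' = x * 48271 mod (2^31 - 1). */
static uint32_t lcg_next(uint32_t x)
{
	return (uint64_t)x * 48271 % 0x7fffffffu;
}

int main(void)
{
	uint32_t x = 1;	/* seed */

	for (int i = 0; i < 3; i++) {
		x = lcg_next(x);
		printf("%u\n", x);	/* 48271, 182605794, ... */
	}
	return 0;
}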
@@ -83,12 +83,12 @@ size_t parse_size(const char *size)
 	return base << shift;
 }

-int64_t timespec_to_ns(struct timespec ts)
+s64 timespec_to_ns(struct timespec ts)
 {
-	return (int64_t)ts.tv_nsec + 1000000000LL * (int64_t)ts.tv_sec;
+	return (s64)ts.tv_nsec + 1000000000LL * (s64)ts.tv_sec;
 }

-struct timespec timespec_add_ns(struct timespec ts, int64_t ns)
+struct timespec timespec_add_ns(struct timespec ts, s64 ns)
 {
 	struct timespec res;

@@ -101,15 +101,15 @@ struct timespec timespec_add_ns(struct timespec ts, int64_t ns)

 struct timespec timespec_add(struct timespec ts1, struct timespec ts2)
 {
-	int64_t ns1 = timespec_to_ns(ts1);
-	int64_t ns2 = timespec_to_ns(ts2);
+	s64 ns1 = timespec_to_ns(ts1);
+	s64 ns2 = timespec_to_ns(ts2);
 	return timespec_add_ns((struct timespec){0}, ns1 + ns2);
 }

 struct timespec timespec_sub(struct timespec ts1, struct timespec ts2)
 {
-	int64_t ns1 = timespec_to_ns(ts1);
-	int64_t ns2 = timespec_to_ns(ts2);
+	s64 ns1 = timespec_to_ns(ts1);
+	s64 ns2 = timespec_to_ns(ts2);
 	return timespec_add_ns((struct timespec){0}, ns1 - ns2);
 }

@@ -123,7 +123,7 @@ struct timespec timespec_elapsed(struct timespec start)

 struct timespec timespec_div(struct timespec ts, int divisor)
 {
-	int64_t ns = timespec_to_ns(ts) / divisor;
+	s64 ns = timespec_to_ns(ts) / divisor;

 	return timespec_add_ns((struct timespec){0}, ns);
 }
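
All of these helpers funnel through the same round-trip: collapse a timespec to a signed 64-bit nanosecond count, do ordinary integer arithmetic, then rebuild a normalized timespec. Measurement code typically composes them like this (a sketch; timespec_elapsed() and timespec_div() are the helpers named in the hunk headers above):

struct timespec start, avg;

clock_gettime(CLOCK_MONOTONIC, &start);
/* ... run the measured operation 10 times ... */
avg = timespec_div(timespec_elapsed(start), 10);
printf("avg: %ld.%09lds\n", avg.tv_sec, avg.tv_nsec);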
@@ -225,7 +225,7 @@ size_t get_def_hugetlb_pagesz(void)
 #define ANON_FLAGS	(MAP_PRIVATE | MAP_ANONYMOUS)
 #define ANON_HUGE_FLAGS	(ANON_FLAGS | MAP_HUGETLB)

-const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i)
+const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(u32 i)
 {
 	static const struct vm_mem_backing_src_alias aliases[] = {
 		[VM_MEM_SRC_ANONYMOUS] = {
@@ -317,9 +317,9 @@ const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i)

 #define MAP_HUGE_PAGE_SIZE(x) (1ULL << ((x >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK))

-size_t get_backing_src_pagesz(uint32_t i)
+size_t get_backing_src_pagesz(u32 i)
 {
-	uint32_t flag = vm_mem_backing_src_alias(i)->flag;
+	u32 flag = vm_mem_backing_src_alias(i)->flag;

 	switch (i) {
 	case VM_MEM_SRC_ANONYMOUS:
@@ -335,7 +335,7 @@ size_t get_backing_src_pagesz(uint32_t i)
 	}
 }

-bool is_backing_src_hugetlb(uint32_t i)
+bool is_backing_src_hugetlb(u32 i)
 {
 	return !!(vm_mem_backing_src_alias(i)->flag & MAP_HUGETLB);
 }
@@ -14,7 +14,7 @@ struct ucall_header {
 	struct ucall ucalls[KVM_MAX_VCPUS];
 };

-int ucall_nr_pages_required(uint64_t page_size)
+int ucall_nr_pages_required(u64 page_size)
 {
 	return align_up(sizeof(struct ucall_header), page_size) / page_size;
 }
@@ -25,16 +25,16 @@ int ucall_nr_pages_required(uint64_t page_size)
 */
 static struct ucall_header *ucall_pool;

-void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+void ucall_init(struct kvm_vm *vm, gpa_t mmio_gpa)
 {
 	struct ucall_header *hdr;
 	struct ucall *uc;
-	vm_vaddr_t vaddr;
+	gva_t gva;
 	int i;

-	vaddr = vm_vaddr_alloc_shared(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR,
-				      MEM_REGION_DATA);
-	hdr = (struct ucall_header *)addr_gva2hva(vm, vaddr);
+	gva = vm_alloc_shared(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR,
+			      MEM_REGION_DATA);
+	hdr = (struct ucall_header *)addr_gva2hva(vm, gva);
 	memset(hdr, 0, sizeof(*hdr));

 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
@@ -42,7 +42,7 @@ void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
 		uc->hva = uc;
 	}

-	write_guest_global(vm, ucall_pool, (struct ucall_header *)vaddr);
+	write_guest_global(vm, ucall_pool, (struct ucall_header *)gva);

 	ucall_arch_init(vm, mmio_gpa);
 }
@@ -79,7 +79,7 @@ static void ucall_free(struct ucall *uc)
 	clear_bit(uc - ucall_pool->ucalls, ucall_pool->in_use);
 }

-void ucall_assert(uint64_t cmd, const char *exp, const char *file,
+void ucall_assert(u64 cmd, const char *exp, const char *file,
 		  unsigned int line, const char *fmt, ...)
 {
 	struct ucall *uc;
@@ -88,20 +88,20 @@ void ucall_assert(uint64_t cmd, const char *exp, const char *file,
 	uc = ucall_alloc();
 	uc->cmd = cmd;

-	WRITE_ONCE(uc->args[GUEST_ERROR_STRING], (uint64_t)(exp));
-	WRITE_ONCE(uc->args[GUEST_FILE], (uint64_t)(file));
+	WRITE_ONCE(uc->args[GUEST_ERROR_STRING], (u64)(exp));
+	WRITE_ONCE(uc->args[GUEST_FILE], (u64)(file));
 	WRITE_ONCE(uc->args[GUEST_LINE], line);

 	va_start(va, fmt);
 	guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va);
 	va_end(va);

-	ucall_arch_do_ucall((vm_vaddr_t)uc->hva);
+	ucall_arch_do_ucall((gva_t)uc->hva);

 	ucall_free(uc);
 }

-void ucall_fmt(uint64_t cmd, const char *fmt, ...)
+void ucall_fmt(u64 cmd, const char *fmt, ...)
 {
 	struct ucall *uc;
 	va_list va;
@@ -113,12 +113,12 @@ void ucall_fmt(uint64_t cmd, const char *fmt, ...)
 	guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va);
 	va_end(va);

-	ucall_arch_do_ucall((vm_vaddr_t)uc->hva);
+	ucall_arch_do_ucall((gva_t)uc->hva);

 	ucall_free(uc);
 }

-void ucall(uint64_t cmd, int nargs, ...)
+void ucall(u64 cmd, int nargs, ...)
 {
 	struct ucall *uc;
 	va_list va;
@@ -132,15 +132,15 @@ void ucall(uint64_t cmd, int nargs, ...)

 	va_start(va, nargs);
 	for (i = 0; i < nargs; ++i)
-		WRITE_ONCE(uc->args[i], va_arg(va, uint64_t));
+		WRITE_ONCE(uc->args[i], va_arg(va, u64));
 	va_end(va);

-	ucall_arch_do_ucall((vm_vaddr_t)uc->hva);
+	ucall_arch_do_ucall((gva_t)uc->hva);

 	ucall_free(uc);
 }

-uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
+u64 get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
 {
 	struct ucall ucall;
 	void *addr;
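
For orientation, the ucall functions above form the guest-to-host reporting channel used by every selftest: the guest fills a struct ucall and traps to the host (via the arch-specific ucall_arch_do_ucall()), and the host decodes the command with get_ucall(). A typical host-side run loop looks roughly like this (a sketch using the existing UCALL_* commands; exact macros vary by test):

struct ucall uc;

for (;;) {
	vcpu_run(vcpu);

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
		/* guest called GUEST_SYNC(stage); stage is in uc.args[1] */
		break;
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);	/* guest-side assert failed */
	case UCALL_DONE:
		return;
	}
}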
@@ -27,7 +27,7 @@ static void *uffd_handler_thread_fn(void *arg)
 {
 	struct uffd_reader_args *reader_args = (struct uffd_reader_args *)arg;
 	int uffd = reader_args->uffd;
-	int64_t pages = 0;
+	s64 pages = 0;
 	struct timespec start;
 	struct timespec ts_diff;
 	struct epoll_event evt;
@@ -100,8 +100,8 @@ static void *uffd_handler_thread_fn(void *arg)
 }

 struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay,
-					   void *hva, uint64_t len,
-					   uint64_t num_readers,
+					   void *hva, u64 len,
+					   u64 num_readers,
 					   uffd_handler_t handler)
 {
 	struct uffd_desc *uffd_desc;
@@ -109,7 +109,7 @@ struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay,
 	int uffd;
 	struct uffdio_api uffdio_api;
 	struct uffdio_register uffdio_register;
-	uint64_t expected_ioctls = ((uint64_t) 1) << _UFFDIO_COPY;
+	u64 expected_ioctls = ((u64)1) << _UFFDIO_COPY;
 	int ret, i;

 	PER_PAGE_DEBUG("Userfaultfd %s mode, faults resolved with %s\n",
@@ -132,7 +132,7 @@ struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay,

 	/* In order to get minor faults, prefault via the alias. */
 	if (is_minor)
-		expected_ioctls = ((uint64_t) 1) << _UFFDIO_CONTINUE;
+		expected_ioctls = ((u64)1) << _UFFDIO_CONTINUE;

 	uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 	TEST_ASSERT(uffd >= 0, "uffd creation failed, errno: %d", errno);
@@ -141,9 +141,9 @@ struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay,
 	uffdio_api.features = 0;
 	TEST_ASSERT(ioctl(uffd, UFFDIO_API, &uffdio_api) != -1,
 		    "ioctl UFFDIO_API failed: %" PRIu64,
-		    (uint64_t)uffdio_api.api);
+		    (u64)uffdio_api.api);

-	uffdio_register.range.start = (uint64_t)hva;
+	uffdio_register.range.start = (u64)hva;
 	uffdio_register.range.len = len;
 	uffdio_register.mode = uffd_mode;
 	TEST_ASSERT(ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) != -1,
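
expected_ioctls above names the one userfaultfd ioctl a reader thread is expected to resolve faults with: UFFDIO_COPY for missing faults, UFFDIO_CONTINUE for minor faults. A minimal sketch of the copy path (standard uapi; the real handler and its read()/epoll loop live elsewhere in this file):

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

/* Resolve one missing-page fault by copying prepared data into the
 * registered range. Error handling omitted for brevity.
 */
static void resolve_fault(int uffd, void *src_page,
			  unsigned long fault_addr, unsigned long page_size)
{
	struct uffdio_copy copy = {
		.dst = fault_addr & ~(page_size - 1),	/* page-align */
		.src = (unsigned long)src_page,
		.len = page_size,
		.mode = 0,
	};

	ioctl(uffd, UFFDIO_COPY, &copy);
}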
@@ -14,7 +14,7 @@ void apic_disable(void)

 void xapic_enable(void)
 {
-	uint64_t val = rdmsr(MSR_IA32_APICBASE);
+	u64 val = rdmsr(MSR_IA32_APICBASE);

 	/* Per SDM: to enable xAPIC when in x2APIC must first disable APIC */
 	if (val & MSR_IA32_APICBASE_EXTD) {

@@ -76,23 +76,23 @@ bool kvm_hv_cpu_has(struct kvm_x86_cpu_feature feature)
 }

 struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm,
-						       vm_vaddr_t *p_hv_pages_gva)
+						       gva_t *p_hv_pages_gva)
 {
-	vm_vaddr_t hv_pages_gva = vm_vaddr_alloc_page(vm);
+	gva_t hv_pages_gva = vm_alloc_page(vm);
 	struct hyperv_test_pages *hv = addr_gva2hva(vm, hv_pages_gva);

 	/* Setup of a region of guest memory for the VP Assist page. */
-	hv->vp_assist = (void *)vm_vaddr_alloc_page(vm);
+	hv->vp_assist = (void *)vm_alloc_page(vm);
 	hv->vp_assist_hva = addr_gva2hva(vm, (uintptr_t)hv->vp_assist);
 	hv->vp_assist_gpa = addr_gva2gpa(vm, (uintptr_t)hv->vp_assist);

 	/* Setup of a region of guest memory for the partition assist page. */
-	hv->partition_assist = (void *)vm_vaddr_alloc_page(vm);
+	hv->partition_assist = (void *)vm_alloc_page(vm);
 	hv->partition_assist_hva = addr_gva2hva(vm, (uintptr_t)hv->partition_assist);
 	hv->partition_assist_gpa = addr_gva2gpa(vm, (uintptr_t)hv->partition_assist);

 	/* Setup of a region of guest memory for the enlightened VMCS. */
-	hv->enlightened_vmcs = (void *)vm_vaddr_alloc_page(vm);
+	hv->enlightened_vmcs = (void *)vm_alloc_page(vm);
 	hv->enlightened_vmcs_hva = addr_gva2hva(vm, (uintptr_t)hv->enlightened_vmcs);
 	hv->enlightened_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)hv->enlightened_vmcs);

@@ -100,9 +100,9 @@ struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm,
 	return hv;
 }

-int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist)
+int enable_vp_assist(u64 vp_assist_pa, void *vp_assist)
 {
-	uint64_t val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) |
+	u64 val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) |
 		HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;

 	wrmsr(HV_X64_MSR_VP_ASSIST_PAGE, val);
@@ -16,7 +16,7 @@
 #include "svm_util.h"
 #include "vmx.h"

-void memstress_l2_guest_code(uint64_t vcpu_id)
+void memstress_l2_guest_code(u64 vcpu_id)
 {
 	memstress_guest_code(vcpu_id);
 	vmcall();
@@ -32,7 +32,7 @@ __asm__(

 #define L2_GUEST_STACK_SIZE 64

-static void l1_vmx_code(struct vmx_pages *vmx, uint64_t vcpu_id)
+static void l1_vmx_code(struct vmx_pages *vmx, u64 vcpu_id)
 {
 	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
 	unsigned long *rsp;
@@ -51,7 +51,7 @@ static void l1_vmx_code(struct vmx_pages *vmx, uint64_t vcpu_id)
 	GUEST_DONE();
 }

-static void l1_svm_code(struct svm_test_data *svm, uint64_t vcpu_id)
+static void l1_svm_code(struct svm_test_data *svm, u64 vcpu_id)
 {
 	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
 	unsigned long *rsp;
@@ -67,7 +67,7 @@ static void l1_svm_code(struct svm_test_data *svm, uint64_t vcpu_id)
 }


-static void memstress_l1_guest_code(void *data, uint64_t vcpu_id)
+static void memstress_l1_guest_code(void *data, u64 vcpu_id)
 {
 	if (this_cpu_has(X86_FEATURE_VMX))
 		l1_vmx_code(data, vcpu_id);
@@ -75,7 +75,7 @@ static void memstress_l1_guest_code(void *data, uint64_t vcpu_id)
 		l1_svm_code(data, vcpu_id);
 }

-uint64_t memstress_nested_pages(int nr_vcpus)
+u64 memstress_nested_pages(int nr_vcpus)
 {
 	/*
 	 * 513 page tables is enough to identity-map 256 TiB of L2 with 1G
@@ -87,7 +87,7 @@ uint64_t memstress_nested_pages(int nr_vcpus)

 static void memstress_setup_ept_mappings(struct kvm_vm *vm)
 {
-	uint64_t start, end;
+	u64 start, end;

 	/*
 	 * Identity map the first 4G and the test region with 1G pages so that
@@ -104,7 +104,7 @@ static void memstress_setup_ept_mappings(struct kvm_vm *vm)
 void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[])
 {
 	struct kvm_regs regs;
-	vm_vaddr_t nested_gva;
+	gva_t nested_gva;
 	int vcpu_id;

 	TEST_REQUIRE(kvm_cpu_has_tdp());
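
The 513-page-table figure in memstress_nested_pages() checks out: one L4 (PML4) table plus 512 L3 (PDPT) tables makes 513; with 1 GiB leaf entries each PDPT maps 512 x 1 GiB = 512 GiB, so 512 PDPTs cover 512 x 512 GiB = 256 TiB of L2 address space.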
@@ -11,7 +11,7 @@
 #include "processor.h"
 #include "pmu.h"

-const uint64_t intel_pmu_arch_events[] = {
+const u64 intel_pmu_arch_events[] = {
 	INTEL_ARCH_CPU_CYCLES,
 	INTEL_ARCH_INSTRUCTIONS_RETIRED,
 	INTEL_ARCH_REFERENCE_CYCLES,
@@ -28,7 +28,7 @@ const uint64_t intel_pmu_arch_events[] = {
 };
 kvm_static_assert(ARRAY_SIZE(intel_pmu_arch_events) == NR_INTEL_ARCH_EVENTS);

-const uint64_t amd_pmu_zen_events[] = {
+const u64 amd_pmu_zen_events[] = {
 	AMD_ZEN_CORE_CYCLES,
 	AMD_ZEN_INSTRUCTIONS_RETIRED,
 	AMD_ZEN_BRANCHES_RETIRED,
@@ -50,7 +50,7 @@ kvm_static_assert(ARRAY_SIZE(amd_pmu_zen_events) == NR_AMD_ZEN_EVENTS);
 * be overcounted on these certain instructions, but for Clearwater Forest
 * only "Instruction Retired" event is overcounted on these instructions.
 */
-static uint64_t get_pmu_errata(void)
+static u64 get_pmu_errata(void)
 {
 	if (!this_cpu_is_intel())
 		return 0;
@@ -72,7 +72,7 @@ static uint64_t get_pmu_errata(void)
 	}
 }

-uint64_t pmu_errata_mask;
+u64 pmu_errata_mask;

 void kvm_init_pmu_errata(void)
 {
@@ -21,13 +21,13 @@
 #define KERNEL_DS	0x10
 #define KERNEL_TSS	0x18

-vm_vaddr_t exception_handlers;
+gva_t exception_handlers;
 bool host_cpu_is_amd;
 bool host_cpu_is_intel;
 bool host_cpu_is_hygon;
 bool host_cpu_is_amd_compatible;
 bool is_forced_emulation_enabled;
-uint64_t guest_tsc_khz;
+u64 guest_tsc_khz;

 const char *ex_str(int vector)
 {
@@ -62,7 +62,7 @@ const char *ex_str(int vector)
 	}
 }

-static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent)
+static void regs_dump(FILE *stream, struct kvm_regs *regs, u8 indent)
 {
 	fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx "
 		"rcx: 0x%.16llx rdx: 0x%.16llx\n",
@@ -86,7 +86,7 @@ static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent)
 }

 static void segment_dump(FILE *stream, struct kvm_segment *segment,
-			 uint8_t indent)
+			 u8 indent)
 {
 	fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.8x "
 		"selector: 0x%.4x type: 0x%.2x\n",
@@ -103,7 +103,7 @@ static void segment_dump(FILE *stream, struct kvm_segment *segment,
 }

 static void dtable_dump(FILE *stream, struct kvm_dtable *dtable,
-			uint8_t indent)
+			u8 indent)
 {
 	fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.4x "
 		"padding: 0x%.4x 0x%.4x 0x%.4x\n",
@@ -111,7 +111,7 @@ static void dtable_dump(FILE *stream, struct kvm_dtable *dtable,
 		dtable->padding[0], dtable->padding[1], dtable->padding[2]);
 }

-static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent)
+static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, u8 indent)
 {
 	unsigned int i;

@@ -207,37 +207,37 @@ void tdp_mmu_init(struct kvm_vm *vm, int pgtable_levels,
 }

 static void *virt_get_pte(struct kvm_vm *vm, struct kvm_mmu *mmu,
-			  uint64_t *parent_pte, uint64_t vaddr, int level)
+			  u64 *parent_pte, gva_t gva, int level)
 {
-	uint64_t pt_gpa = PTE_GET_PA(*parent_pte);
-	uint64_t *page_table = addr_gpa2hva(vm, pt_gpa);
-	int index = (vaddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
+	u64 pt_gpa = PTE_GET_PA(*parent_pte);
+	u64 *page_table = addr_gpa2hva(vm, pt_gpa);
+	int index = (gva >> PG_LEVEL_SHIFT(level)) & 0x1ffu;

 	TEST_ASSERT((*parent_pte == mmu->pgd) || is_present_pte(mmu, parent_pte),
 		    "Parent PTE (level %d) not PRESENT for gva: 0x%08lx",
-		    level + 1, vaddr);
+		    level + 1, gva);

 	return &page_table[index];
 }

-static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
-				       struct kvm_mmu *mmu,
-				       uint64_t *parent_pte,
-				       uint64_t vaddr,
-				       uint64_t paddr,
-				       int current_level,
-				       int target_level)
+static u64 *virt_create_upper_pte(struct kvm_vm *vm,
+				  struct kvm_mmu *mmu,
+				  u64 *parent_pte,
+				  gva_t gva,
+				  gpa_t gpa,
+				  int current_level,
+				  int target_level)
 {
-	uint64_t *pte = virt_get_pte(vm, mmu, parent_pte, vaddr, current_level);
+	u64 *pte = virt_get_pte(vm, mmu, parent_pte, gva, current_level);

-	paddr = vm_untag_gpa(vm, paddr);
+	gpa = vm_untag_gpa(vm, gpa);

 	if (!is_present_pte(mmu, pte)) {
 		*pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |
 		       PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |
 		       PTE_ALWAYS_SET_MASK(mmu);
 		if (current_level == target_level)
-			*pte |= PTE_HUGE_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);
+			*pte |= PTE_HUGE_MASK(mmu) | (gpa & PHYSICAL_PAGE_MASK);
 		else
 			*pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK;
 	} else {
@@ -247,39 +247,39 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
 		 * this level.
 		 */
 		TEST_ASSERT(current_level != target_level,
-			    "Cannot create hugepage at level: %u, vaddr: 0x%lx",
-			    current_level, vaddr);
+			    "Cannot create hugepage at level: %u, gva: 0x%lx",
+			    current_level, gva);
 		TEST_ASSERT(!is_huge_pte(mmu, pte),
-			    "Cannot create page table at level: %u, vaddr: 0x%lx",
-			    current_level, vaddr);
+			    "Cannot create page table at level: %u, gva: 0x%lx",
+			    current_level, gva);
 	}
 	return pte;
 }

-void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr,
-		   uint64_t paddr, int level)
+void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, gva_t gva,
+		   gpa_t gpa, int level)
 {
-	const uint64_t pg_size = PG_LEVEL_SIZE(level);
-	uint64_t *pte = &mmu->pgd;
+	const u64 pg_size = PG_LEVEL_SIZE(level);
+	u64 *pte = &mmu->pgd;
 	int current_level;

 	TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
 		    "Unknown or unsupported guest mode: 0x%x", vm->mode);

-	TEST_ASSERT((vaddr % pg_size) == 0,
+	TEST_ASSERT((gva % pg_size) == 0,
 		    "Virtual address not aligned,\n"
-		    "vaddr: 0x%lx page size: 0x%lx", vaddr, pg_size);
-	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (vaddr >> vm->page_shift)),
-		    "Invalid virtual address, vaddr: 0x%lx", vaddr);
-	TEST_ASSERT((paddr % pg_size) == 0,
+		    "gva: 0x%lx page size: 0x%lx", gva, pg_size);
+	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
+		    "Invalid virtual address, gva: 0x%lx", gva);
+	TEST_ASSERT((gpa % pg_size) == 0,
 		    "Physical address not aligned,\n"
-		    "  paddr: 0x%lx page size: 0x%lx", paddr, pg_size);
-	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
+		    "  gpa: 0x%lx page size: 0x%lx", gpa, pg_size);
+	TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
 		    "Physical address beyond maximum supported,\n"
-		    "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
-		    paddr, vm->max_gfn, vm->page_size);
-	TEST_ASSERT(vm_untag_gpa(vm, paddr) == paddr,
-		    "Unexpected bits in paddr: %lx", paddr);
+		    "  gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+		    gpa, vm->max_gfn, vm->page_size);
+	TEST_ASSERT(vm_untag_gpa(vm, gpa) == gpa,
+		    "Unexpected bits in gpa: %lx", gpa);

 	TEST_ASSERT(!PTE_EXECUTABLE_MASK(mmu) || !PTE_NX_MASK(mmu),
 		    "X and NX bit masks cannot be used simultaneously");
@@ -291,40 +291,40 @@ void __virt_pg_map(struct kvm_vm *vm, struct kvm_mmu *mmu, uint64_t vaddr,
 	for (current_level = mmu->pgtable_levels;
 	     current_level > PG_LEVEL_4K;
 	     current_level--) {
-		pte = virt_create_upper_pte(vm, mmu, pte, vaddr, paddr,
+		pte = virt_create_upper_pte(vm, mmu, pte, gva, gpa,
 					    current_level, level);
 		if (is_huge_pte(mmu, pte))
 			return;
 	}

 	/* Fill in page table entry. */
-	pte = virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K);
+	pte = virt_get_pte(vm, mmu, pte, gva, PG_LEVEL_4K);
 	TEST_ASSERT(!is_present_pte(mmu, pte),
-		    "PTE already present for 4k page at vaddr: 0x%lx", vaddr);
+		    "PTE already present for 4k page at gva: 0x%lx", gva);
 	*pte = PTE_PRESENT_MASK(mmu) | PTE_READABLE_MASK(mmu) |
 	       PTE_WRITABLE_MASK(mmu) | PTE_EXECUTABLE_MASK(mmu) |
-	       PTE_ALWAYS_SET_MASK(mmu) | (paddr & PHYSICAL_PAGE_MASK);
+	       PTE_ALWAYS_SET_MASK(mmu) | (gpa & PHYSICAL_PAGE_MASK);

 	/*
 	 * Neither SEV nor TDX supports shared page tables, so only the final
 	 * leaf PTE needs manually set the C/S-bit.
 	 */
-	if (vm_is_gpa_protected(vm, paddr))
+	if (vm_is_gpa_protected(vm, gpa))
 		*pte |= PTE_C_BIT_MASK(mmu);
 	else
 		*pte |= PTE_S_BIT_MASK(mmu);
 }
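
The (gva >> PG_LEVEL_SHIFT(level)) & 0x1ffu expression in virt_get_pte() is the whole of the x86-64 walk arithmetic: each paging level consumes 9 bits of the virtual address, because a 4 KiB table holds 512 eight-byte entries. A worked example with the architectural shifts (the sample address is simply the last user page of a 48-bit layout):

#include <stdint.h>

int main(void)
{
	uint64_t gva = 0x00007ffffffff000ull;

	/* bits 39-47 index the PML4, 30-38 the PDPT, 21-29 the PD, 12-20 the PT */
	unsigned pml4_idx = (gva >> 39) & 0x1ffu;	/* 0x0ff */
	unsigned pdpt_idx = (gva >> 30) & 0x1ffu;	/* 0x1ff */
	unsigned pd_idx   = (gva >> 21) & 0x1ffu;	/* 0x1ff */
	unsigned pt_idx   = (gva >> 12) & 0x1ffu;	/* 0x1ff */

	return !(pml4_idx && pdpt_idx && pd_idx && pt_idx);
}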
-void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
 {
-	__virt_pg_map(vm, &vm->mmu, vaddr, paddr, PG_LEVEL_4K);
+	__virt_pg_map(vm, &vm->mmu, gva, gpa, PG_LEVEL_4K);
 }

-void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
-		    uint64_t nr_bytes, int level)
+void virt_map_level(struct kvm_vm *vm, gva_t gva, gpa_t gpa,
+		    u64 nr_bytes, int level)
 {
-	uint64_t pg_size = PG_LEVEL_SIZE(level);
-	uint64_t nr_pages = nr_bytes / pg_size;
+	u64 pg_size = PG_LEVEL_SIZE(level);
+	u64 nr_pages = nr_bytes / pg_size;
 	int i;

 	TEST_ASSERT(nr_bytes % pg_size == 0,
@@ -332,16 +332,16 @@ void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 		    nr_bytes, pg_size);

 	for (i = 0; i < nr_pages; i++) {
-		__virt_pg_map(vm, &vm->mmu, vaddr, paddr, level);
-		sparsebit_set_num(vm->vpages_mapped, vaddr >> vm->page_shift,
+		__virt_pg_map(vm, &vm->mmu, gva, gpa, level);
+		sparsebit_set_num(vm->vpages_mapped, gva >> vm->page_shift,
 				  nr_bytes / PAGE_SIZE);

-		vaddr += pg_size;
-		paddr += pg_size;
+		gva += pg_size;
+		gpa += pg_size;
 	}
 }

-static bool vm_is_target_pte(struct kvm_mmu *mmu, uint64_t *pte,
+static bool vm_is_target_pte(struct kvm_mmu *mmu, u64 *pte,
 			     int *level, int current_level)
 {
 	if (is_huge_pte(mmu, pte)) {
@@ -354,13 +354,13 @@ static bool vm_is_target_pte(struct kvm_mmu *mmu, uint64_t *pte,
 	return *level == current_level;
 }

-static uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm,
-					   struct kvm_mmu *mmu,
-					   uint64_t vaddr,
-					   int *level)
+static u64 *__vm_get_page_table_entry(struct kvm_vm *vm,
+				      struct kvm_mmu *mmu,
+				      gva_t gva,
+				      int *level)
 {
 	int va_width = 12 + (mmu->pgtable_levels) * 9;
-	uint64_t *pte = &mmu->pgd;
+	u64 *pte = &mmu->pgd;
 	int current_level;

 	TEST_ASSERT(!vm->arch.is_pt_protected,
@@ -371,49 +371,46 @@ static uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm,

 	TEST_ASSERT(vm->mode == VM_MODE_PXXVYY_4K,
 		    "Unknown or unsupported guest mode: 0x%x", vm->mode);
-	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
-		    (vaddr >> vm->page_shift)),
-		    "Invalid virtual address, vaddr: 0x%lx",
-		    vaddr);
+	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
+		    "Invalid virtual address, gva: 0x%lx", gva);
 	/*
-	 * Check that the vaddr is a sign-extended va_width value.
+	 * Check that the gva is a sign-extended va_width value.
 	 */
-	TEST_ASSERT(vaddr ==
-		    (((int64_t)vaddr << (64 - va_width) >> (64 - va_width))),
+	TEST_ASSERT(gva == (((s64)gva << (64 - va_width) >> (64 - va_width))),
 		    "Canonical check failed. The virtual address is invalid.");

 	for (current_level = mmu->pgtable_levels;
 	     current_level > PG_LEVEL_4K;
 	     current_level--) {
-		pte = virt_get_pte(vm, mmu, pte, vaddr, current_level);
+		pte = virt_get_pte(vm, mmu, pte, gva, current_level);
 		if (vm_is_target_pte(mmu, pte, level, current_level))
 			return pte;
 	}

-	return virt_get_pte(vm, mmu, pte, vaddr, PG_LEVEL_4K);
+	return virt_get_pte(vm, mmu, pte, gva, PG_LEVEL_4K);
 }
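
The canonical check above relies on sign extension: shift the address left so that bit va_width-1 becomes the sign bit, then arithmetic-shift back; a canonical address (all upper bits equal to bit va_width-1) survives the round trip unchanged. Illustrated standalone (va_width = 48 corresponds to 4-level paging; this uses the same technically implementation-defined signed shifts the selftest itself relies on):

#include <stdbool.h>
#include <stdint.h>

static bool is_canonical(uint64_t gva, int va_width)
{
	int64_t s = (int64_t)gva << (64 - va_width);

	return gva == (uint64_t)(s >> (64 - va_width));
}

/* is_canonical(0x00007fffffffffffull, 48) -> true  (upper bits clear)        */
/* is_canonical(0xffff800000000000ull, 48) -> true  (upper bits all set)      */
/* is_canonical(0x0000800000000000ull, 48) -> false (bit 47 set, 48-63 clear) */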
-uint64_t *tdp_get_pte(struct kvm_vm *vm, uint64_t l2_gpa)
+u64 *tdp_get_pte(struct kvm_vm *vm, u64 l2_gpa)
 {
 	int level = PG_LEVEL_4K;

 	return __vm_get_page_table_entry(vm, &vm->stage2_mmu, l2_gpa, &level);
 }

-uint64_t *vm_get_pte(struct kvm_vm *vm, uint64_t vaddr)
+u64 *vm_get_pte(struct kvm_vm *vm, gva_t gva)
 {
 	int level = PG_LEVEL_4K;

-	return __vm_get_page_table_entry(vm, &vm->mmu, vaddr, &level);
+	return __vm_get_page_table_entry(vm, &vm->mmu, gva, &level);
 }

-void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
 {
 	struct kvm_mmu *mmu = &vm->mmu;
-	uint64_t *pml4e, *pml4e_start;
-	uint64_t *pdpe, *pdpe_start;
-	uint64_t *pde, *pde_start;
-	uint64_t *pte, *pte_start;
+	u64 *pml4e, *pml4e_start;
+	u64 *pdpe, *pdpe_start;
+	u64 *pde, *pde_start;
+	u64 *pte, *pte_start;

 	if (!mmu->pgd_created)
 		return;
@@ -423,8 +420,8 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 	fprintf(stream, "%*s index hvaddr gpaddr "
 		"addr w exec dirty\n",
 		indent, "");
-	pml4e_start = (uint64_t *) addr_gpa2hva(vm, mmu->pgd);
-	for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) {
+	pml4e_start = (u64 *)addr_gpa2hva(vm, mmu->pgd);
+	for (u16 n1 = 0; n1 <= 0x1ffu; n1++) {
 		pml4e = &pml4e_start[n1];
 		if (!is_present_pte(mmu, pml4e))
 			continue;
@@ -436,7 +433,7 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 			is_writable_pte(mmu, pml4e), is_nx_pte(mmu, pml4e));

 		pdpe_start = addr_gpa2hva(vm, *pml4e & PHYSICAL_PAGE_MASK);
-		for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) {
+		for (u16 n2 = 0; n2 <= 0x1ffu; n2++) {
 			pdpe = &pdpe_start[n2];
 			if (!is_present_pte(mmu, pdpe))
 				continue;
@@ -449,7 +446,7 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 				is_nx_pte(mmu, pdpe));

 			pde_start = addr_gpa2hva(vm, *pdpe & PHYSICAL_PAGE_MASK);
-			for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) {
+			for (u16 n3 = 0; n3 <= 0x1ffu; n3++) {
 				pde = &pde_start[n3];
 				if (!is_present_pte(mmu, pde))
 					continue;
@@ -461,7 +458,7 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 					is_nx_pte(mmu, pde));

 				pte_start = addr_gpa2hva(vm, *pde & PHYSICAL_PAGE_MASK);
-				for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) {
+				for (u16 n4 = 0; n4 <= 0x1ffu; n4++) {
 					pte = &pte_start[n4];
 					if (!is_present_pte(mmu, pte))
 						continue;
@@ -475,10 +472,10 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 						is_writable_pte(mmu, pte),
 						is_nx_pte(mmu, pte),
 						is_dirty_pte(mmu, pte),
-						((uint64_t) n1 << 27)
-						| ((uint64_t) n2 << 18)
-						| ((uint64_t) n3 << 9)
-						| ((uint64_t) n4));
+						((u64)n1 << 27)
+						| ((u64)n2 << 18)
+						| ((u64)n3 << 9)
+						| ((u64)n4));
 				}
 			}
 		}
@@ -498,26 +495,24 @@ bool kvm_cpu_has_tdp(void)
 	return kvm_cpu_has_ept() || kvm_cpu_has_npt();
 }

-void __tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr,
-	       uint64_t size, int level)
+void __tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size, int level)
 {
 	size_t page_size = PG_LEVEL_SIZE(level);
 	size_t npages = size / page_size;

-	TEST_ASSERT(nested_paddr + size > nested_paddr, "Vaddr overflow");
-	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
+	TEST_ASSERT(l2_gpa + size > l2_gpa, "L2 GPA overflow");
+	TEST_ASSERT(gpa + size > gpa, "GPA overflow");

 	while (npages--) {
-		__virt_pg_map(vm, &vm->stage2_mmu, nested_paddr, paddr, level);
-		nested_paddr += page_size;
-		paddr += page_size;
+		__virt_pg_map(vm, &vm->stage2_mmu, l2_gpa, gpa, level);
+		l2_gpa += page_size;
+		gpa += page_size;
 	}
 }

-void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr,
-	     uint64_t size)
+void tdp_map(struct kvm_vm *vm, gpa_t l2_gpa, gpa_t gpa, u64 size)
 {
-	__tdp_map(vm, nested_paddr, paddr, size, PG_LEVEL_4K);
+	__tdp_map(vm, l2_gpa, gpa, size, PG_LEVEL_4K);
 }

 /* Prepare an identity extended page table that maps all the
@@ -525,7 +520,7 @@ void tdp_map(struct kvm_vm *vm, uint64_t nested_paddr, uint64_t paddr,
 */
 void tdp_identity_map_default_memslots(struct kvm_vm *vm)
 {
-	uint32_t s, memslot = 0;
+	u32 s, memslot = 0;
 	sparsebit_idx_t i, last;
 	struct userspace_mem_region *region = memslot2region(vm, memslot);

@@ -540,13 +535,13 @@ void tdp_identity_map_default_memslots(struct kvm_vm *vm)
 		if (i > last)
 			break;

-		tdp_map(vm, (uint64_t)i << vm->page_shift,
-			(uint64_t)i << vm->page_shift, 1 << vm->page_shift);
+		tdp_map(vm, (u64)i << vm->page_shift,
+			(u64)i << vm->page_shift, 1 << vm->page_shift);
 	}
 }

 /* Identity map a region with 1GiB Pages. */
-void tdp_identity_map_1g(struct kvm_vm *vm, uint64_t addr, uint64_t size)
+void tdp_identity_map_1g(struct kvm_vm *vm, u64 addr, u64 size)
 {
 	__tdp_map(vm, addr, addr, size, PG_LEVEL_1G);
 }
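
Usage mirrors the EPT-setup comment quoted earlier in this log ("Identity map the first 4G ... with 1G pages"); for example (a sketch built on the functions in this hunk):

/* Identity-map the first 4 GiB of L2 GPA space with 1 GiB leaves. */
tdp_identity_map_1g(vm, 0, 4ull << 30);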
@@ -618,10 +613,10 @@ static void kvm_seg_set_kernel_data_64bit(struct kvm_segment *segp)
 	segp->present = true;
 }

-vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva)
 {
 	int level = PG_LEVEL_NONE;
-	uint64_t *pte = __vm_get_page_table_entry(vm, &vm->mmu, gva, &level);
+	u64 *pte = __vm_get_page_table_entry(vm, &vm->mmu, gva, &level);

 	TEST_ASSERT(is_present_pte(&vm->mmu, pte),
 		    "Leaf PTE not PRESENT for gva: 0x%08lx", gva);
@@ -633,7 +628,7 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
 	return vm_untag_gpa(vm, PTE_GET_PA(*pte)) | (gva & ~HUGEPAGE_MASK(level));
 }

-static void kvm_seg_set_tss_64bit(vm_vaddr_t base, struct kvm_segment *segp)
+static void kvm_seg_set_tss_64bit(gva_t base, struct kvm_segment *segp)
 {
 	memset(segp, 0, sizeof(*segp));
 	segp->base = base;
@@ -746,16 +741,16 @@ static void vm_init_descriptor_tables(struct kvm_vm *vm)
 	struct kvm_segment seg;
 	int i;

-	vm->arch.gdt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
-	vm->arch.idt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
-	vm->handlers = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
-	vm->arch.tss = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
+	vm->arch.gdt = __vm_alloc_page(vm, MEM_REGION_DATA);
+	vm->arch.idt = __vm_alloc_page(vm, MEM_REGION_DATA);
+	vm->handlers = __vm_alloc_page(vm, MEM_REGION_DATA);
+	vm->arch.tss = __vm_alloc_page(vm, MEM_REGION_DATA);

 	/* Handlers have the same address in both address spaces.*/
 	for (i = 0; i < NUM_INTERRUPTS; i++)
 		set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0, KERNEL_CS);

-	*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
+	*(gva_t *)addr_gva2hva(vm, (gva_t)(&exception_handlers)) = vm->handlers;

 	kvm_seg_set_kernel_code_64bit(&seg);
 	kvm_seg_fill_gdt_64bit(vm, &seg);
@@ -770,9 +765,9 @@ static void vm_init_descriptor_tables(struct kvm_vm *vm)
 void vm_install_exception_handler(struct kvm_vm *vm, int vector,
 				  void (*handler)(struct ex_regs *))
 {
-	vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers);
+	gva_t *handlers = (gva_t *)addr_gva2hva(vm, vm->handlers);

-	handlers[vector] = (vm_vaddr_t)handler;
+	handlers[vector] = (gva_t)handler;
 }

 void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
@@ -821,18 +816,17 @@ void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
 	vcpu_regs_set(vcpu, &regs);
 }

-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id)
 {
 	struct kvm_mp_state mp_state;
 	struct kvm_regs regs;
-	vm_vaddr_t stack_vaddr;
+	gva_t stack_gva;
 	struct kvm_vcpu *vcpu;

-	stack_vaddr = __vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
-				       DEFAULT_GUEST_STACK_VADDR_MIN,
-				       MEM_REGION_DATA);
+	stack_gva = __vm_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
+			       DEFAULT_GUEST_STACK_VADDR_MIN, MEM_REGION_DATA);

-	stack_vaddr += DEFAULT_STACK_PGS * getpagesize();
+	stack_gva += DEFAULT_STACK_PGS * getpagesize();

 	/*
 	 * Align stack to match calling sequence requirements in section "The
@@ -843,9 +837,9 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 	 * If this code is ever used to launch a vCPU with 32-bit entry point it
 	 * may need to subtract 4 bytes instead of 8 bytes.
 	 */
-	TEST_ASSERT(IS_ALIGNED(stack_vaddr, PAGE_SIZE),
-		    "__vm_vaddr_alloc() did not provide a page-aligned address");
-	stack_vaddr -= 8;
+	TEST_ASSERT(IS_ALIGNED(stack_gva, PAGE_SIZE),
+		    "__vm_alloc() did not provide a page-aligned address");
+	stack_gva -= 8;

 	vcpu = __vm_vcpu_add(vm, vcpu_id);
 	vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
@@ -855,7 +849,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 	/* Setup guest general purpose registers */
 	vcpu_regs_get(vcpu, &regs);
 	regs.rflags = regs.rflags | 0x2;
-	regs.rsp = stack_vaddr;
+	regs.rsp = stack_gva;
 	vcpu_regs_set(vcpu, &regs);

 	/* Setup the MP state */
@@ -872,7 +866,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 	return vcpu;
 }
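
The -8 here is the SysV x86-64 calling convention at work: RSP must be 16-byte aligned immediately before a CALL, so at a function's entry point, after the implicit push of a return address, RSP mod 16 == 8. Starting the vCPU at a page-aligned stack top minus 8 reproduces exactly that state, which is also what the quoted comment's 32-bit caveat (subtract 4 instead of 8) is about.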
-struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id)
+struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, u32 vcpu_id)
 {
 	struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);

@@ -907,9 +901,9 @@ const struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
 	return kvm_supported_cpuid;
 }

-static uint32_t __kvm_cpu_has(const struct kvm_cpuid2 *cpuid,
-			      uint32_t function, uint32_t index,
-			      uint8_t reg, uint8_t lo, uint8_t hi)
+static u32 __kvm_cpu_has(const struct kvm_cpuid2 *cpuid,
+			 u32 function, u32 index,
+			 u8 reg, u8 lo, u8 hi)
 {
 	const struct kvm_cpuid_entry2 *entry;
 	int i;
@@ -936,14 +930,14 @@ bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid,
 			      feature.reg, feature.bit, feature.bit);
 }

-uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
-			    struct kvm_x86_cpu_property property)
+u32 kvm_cpuid_property(const struct kvm_cpuid2 *cpuid,
+		       struct kvm_x86_cpu_property property)
 {
 	return __kvm_cpu_has(cpuid, property.function, property.index,
 			     property.reg, property.lo_bit, property.hi_bit);
 }
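
Both the feature checks and kvm_cpuid_property() bottom out in __kvm_cpu_has(), whose job is to pull bits [hi:lo] out of one of the four CPUID output registers; for a single feature bit lo == hi and the result is 0 or 1. The extraction reduces to something like this (a sketch of the idea, not the function body; assumes hi - lo < 31):

#include <stdint.h>

static uint32_t cpuid_bits(uint32_t reg, uint8_t lo, uint8_t hi)
{
	return (reg >> lo) & ((1u << (hi - lo + 1)) - 1);
}
/* e.g. cpuid_bits(eax, 0, 7) on CPUID.0x80000008 yields MAXPHYADDR */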
-uint64_t kvm_get_feature_msr(uint64_t msr_index)
+u64 kvm_get_feature_msr(u64 msr_index)
 {
 	struct {
 		struct kvm_msrs header;
@@ -962,7 +956,7 @@ uint64_t kvm_get_feature_msr(uint64_t msr_index)
 	return buffer.entry.data;
 }

-void __vm_xsave_require_permission(uint64_t xfeature, const char *name)
+void __vm_xsave_require_permission(u64 xfeature, const char *name)
 {
 	int kvm_fd;
 	u64 bitmask;
@@ -1019,7 +1013,7 @@ void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid)

 void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu,
 			     struct kvm_x86_cpu_property property,
-			     uint32_t value)
+			     u32 value)
 {
 	struct kvm_cpuid_entry2 *entry;

@@ -1034,7 +1028,7 @@ void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu,
 	TEST_ASSERT_EQ(kvm_cpuid_property(vcpu->cpuid, property), value);
 }

-void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function)
+void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, u32 function)
 {
 	struct kvm_cpuid_entry2 *entry = vcpu_get_cpuid_entry(vcpu, function);

@@ -1063,7 +1057,7 @@ void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu,
 	vcpu_set_cpuid(vcpu);
 }

-uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index)
+u64 vcpu_get_msr(struct kvm_vcpu *vcpu, u64 msr_index)
 {
 	struct {
 		struct kvm_msrs header;
@@ -1078,7 +1072,7 @@ uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index)
 	return buffer.entry.data;
 }

-int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value)
+int _vcpu_set_msr(struct kvm_vcpu *vcpu, u64 msr_index, u64 msr_value)
 {
 	struct {
 		struct kvm_msrs header;
@@ -1106,28 +1100,28 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
 	vcpu_regs_get(vcpu, &regs);

 	if (num >= 1)
-		regs.rdi = va_arg(ap, uint64_t);
+		regs.rdi = va_arg(ap, u64);

 	if (num >= 2)
-		regs.rsi = va_arg(ap, uint64_t);
+		regs.rsi = va_arg(ap, u64);

 	if (num >= 3)
-		regs.rdx = va_arg(ap, uint64_t);
+		regs.rdx = va_arg(ap, u64);

 	if (num >= 4)
-		regs.rcx = va_arg(ap, uint64_t);
+		regs.rcx = va_arg(ap, u64);

 	if (num >= 5)
-		regs.r8 = va_arg(ap, uint64_t);
+		regs.r8 = va_arg(ap, u64);

 	if (num >= 6)
-		regs.r9 = va_arg(ap, uint64_t);
+		regs.r9 = va_arg(ap, u64);

 	vcpu_regs_set(vcpu, &regs);
 	va_end(ap);
 }
void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
|
||||
void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent)
|
||||
{
|
||||
struct kvm_regs regs;
|
||||
struct kvm_sregs sregs;
|
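vcpu_args_set() copies up to six u64 varargs into RDI, RSI, RDX, RCX, R8 and R9, matching the SysV calling convention, so the guest entry point simply declares u64 parameters in the same order. A sketch under that assumption (guest_code and its argument values are hypothetical):

	/* Guest side: arguments arrive as ordinary function parameters. */
	static void guest_code(u64 mem_gva, u64 nr_pages)
	{
		GUEST_ASSERT(nr_pages);
		GUEST_DONE();
	}

	/* Host side: load RDI/RSI before the first KVM_RUN. */
	vcpu_args_set(vcpu, 2, mem_gva, nr_pages);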
@@ -1196,7 +1190,7 @@ const struct kvm_msr_list *kvm_get_feature_msr_index_list(void)
 	return list;
 }
 
-bool kvm_msr_is_in_save_restore_list(uint32_t msr_index)
+bool kvm_msr_is_in_save_restore_list(u32 msr_index)
 {
 	const struct kvm_msr_list *list = kvm_get_msr_index_list();
 	int i;
@@ -1327,7 +1321,7 @@ void kvm_init_vm_address_properties(struct kvm_vm *vm)
 }
 
 const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
-					       uint32_t function, uint32_t index)
+					       u32 function, u32 index)
 {
 	int i;
 
@@ -1344,7 +1338,7 @@ const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
 
 #define X86_HYPERCALL(inputs...)				\
 ({								\
-	uint64_t r;						\
+	u64 r;							\
 								\
 	asm volatile("test %[use_vmmcall], %[use_vmmcall]\n\t"	\
 		     "jnz 1f\n\t"				\
@@ -1359,18 +1353,17 @@ const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
 	r;							\
 })
 
-uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
-		       uint64_t a3)
+u64 kvm_hypercall(u64 nr, u64 a0, u64 a1, u64 a2, u64 a3)
 {
 	return X86_HYPERCALL("a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3));
 }
 
-uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1)
+u64 __xen_hypercall(u64 nr, u64 a0, void *a1)
 {
 	return X86_HYPERCALL("a"(nr), "D"(a0), "S"(a1));
 }
 
-void xen_hypercall(uint64_t nr, uint64_t a0, void *a1)
+void xen_hypercall(u64 nr, u64 a0, void *a1)
 {
 	GUEST_ASSERT(!__xen_hypercall(nr, a0, a1));
 }
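Guest code issues hypercalls through these helpers with plain u64 arguments. A hedged guest-side sketch; KVM_HC_MAP_GPA_RANGE and its flags are existing uapi definitions, but the values passed here are purely illustrative:

	/* Sketch: ask KVM to treat a GPA range as decrypted/shared. */
	u64 gpa = 0x100000, npages = 16;	/* illustrative values */
	u64 ret = kvm_hypercall(KVM_HC_MAP_GPA_RANGE, gpa, npages,
				KVM_MAP_GPA_RANGE_DECRYPTED, 0);
	GUEST_ASSERT(!ret);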
@@ -1379,7 +1372,7 @@ unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
 {
 	const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
 	unsigned long ht_gfn, max_gfn, max_pfn;
-	uint8_t maxphyaddr, guest_maxphyaddr;
+	u8 maxphyaddr, guest_maxphyaddr;
 
 	/*
 	 * Use "guest MAXPHYADDR" from KVM if it's available. Guest MAXPHYADDR
@@ -1453,8 +1446,7 @@ bool kvm_arch_has_default_irqchip(void)
 	return true;
 }
 
-void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
-		 uint64_t smram_gpa,
+void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu, u64 smram_gpa,
 		 const void *smi_handler, size_t handler_size)
 {
 	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, smram_gpa,
tools/testing/selftests/kvm/lib/x86/sev.c
@@ -15,10 +15,10 @@
  * expression would cause us to quit the loop.
  */
 static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *region,
-			   uint8_t page_type, bool private)
+			   u8 page_type, bool private)
 {
 	const struct sparsebit *protected_phy_pages = region->protected_phy_pages;
-	const vm_paddr_t gpa_base = region->region.guest_phys_addr;
+	const gpa_t gpa_base = region->region.guest_phys_addr;
 	const sparsebit_idx_t lowest_page_in_region = gpa_base >> vm->page_shift;
 	sparsebit_idx_t i, j;
 
@@ -29,15 +29,15 @@ static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *region,
 	sev_register_encrypted_memory(vm, region);
 
 	sparsebit_for_each_set_range(protected_phy_pages, i, j) {
-		const uint64_t size = (j - i + 1) * vm->page_size;
-		const uint64_t offset = (i - lowest_page_in_region) * vm->page_size;
+		const u64 size = (j - i + 1) * vm->page_size;
+		const u64 offset = (i - lowest_page_in_region) * vm->page_size;
 
 		if (private)
 			vm_mem_set_private(vm, gpa_base + offset, size);
 
 		if (is_sev_snp_vm(vm))
 			snp_launch_update_data(vm, gpa_base + offset,
-					       (uint64_t)addr_gpa2hva(vm, gpa_base + offset),
+					       (u64)addr_gpa2hva(vm, gpa_base + offset),
 					       size, page_type);
 		else
 			sev_launch_update_data(vm, gpa_base + offset, size);
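To sanity-check the arithmetic above with 4 KiB pages: a set range of protected pages i = 8 through j = 11 yields size = (11 - 8 + 1) * 4096 = 16 KiB, and if the region starts at page 8, offset = (8 - 8) * 4096 = 0, so the launch update begins at gpa_base itself.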
@@ -79,7 +79,7 @@ void snp_vm_init(struct kvm_vm *vm)
 	vm_sev_ioctl(vm, KVM_SEV_INIT2, &init);
 }
 
-void sev_vm_launch(struct kvm_vm *vm, uint32_t policy)
+void sev_vm_launch(struct kvm_vm *vm, u32 policy)
 {
 	struct kvm_sev_launch_start launch_start = {
 		.policy = policy,
@@ -103,7 +103,7 @@ void sev_vm_launch(struct kvm_vm *vm, uint32_t policy)
 	vm->arch.is_pt_protected = true;
 }
 
-void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement)
+void sev_vm_launch_measure(struct kvm_vm *vm, u8 *measurement)
 {
 	struct kvm_sev_launch_measure launch_measure;
 	struct kvm_sev_guest_status guest_status;
@@ -131,7 +131,7 @@ void sev_vm_launch_finish(struct kvm_vm *vm)
 	TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_RUNNING);
 }
 
-void snp_vm_launch_start(struct kvm_vm *vm, uint64_t policy)
+void snp_vm_launch_start(struct kvm_vm *vm, u64 policy)
 {
 	struct kvm_sev_snp_launch_start launch_start = {
 		.policy = policy,
@@ -158,7 +158,7 @@ void snp_vm_launch_finish(struct kvm_vm *vm)
 	vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_FINISH, &launch_finish);
 }
 
-struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
+struct kvm_vm *vm_sev_create_with_one_vcpu(u32 type, void *guest_code,
 					   struct kvm_vcpu **cpu)
 {
 	struct vm_shape shape = {
@@ -174,7 +174,7 @@ struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
 	return vm;
 }
 
-void vm_sev_launch(struct kvm_vm *vm, uint64_t policy, uint8_t *measurement)
+void vm_sev_launch(struct kvm_vm *vm, u64 policy, u8 *measurement)
 {
 	if (is_sev_snp_vm(vm)) {
 		vm_enable_cap(vm, KVM_CAP_EXIT_HYPERCALL, BIT(KVM_HC_MAP_GPA_RANGE));
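Putting the launch helpers together, a SEV test now reads roughly like this sketch (the measurement buffer size is an assumption; guest_code is a hypothetical entry point; KVM_X86_SEV_VM and SEV_POLICY_NO_DBG are existing definitions):

	struct kvm_vcpu *vcpu;
	u8 measurement[512];	/* size is an assumption for the sketch */
	struct kvm_vm *vm = vm_sev_create_with_one_vcpu(KVM_X86_SEV_VM,
							guest_code, &vcpu);

	vm_sev_launch(vm, SEV_POLICY_NO_DBG, measurement);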
tools/testing/selftests/kvm/lib/x86/svm.c
@@ -28,20 +28,20 @@ u64 rflags;
  * Pointer to structure with the addresses of the SVM areas.
  */
 struct svm_test_data *
-vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva)
+vcpu_alloc_svm(struct kvm_vm *vm, gva_t *p_svm_gva)
 {
-	vm_vaddr_t svm_gva = vm_vaddr_alloc_page(vm);
+	gva_t svm_gva = vm_alloc_page(vm);
 	struct svm_test_data *svm = addr_gva2hva(vm, svm_gva);
 
-	svm->vmcb = (void *)vm_vaddr_alloc_page(vm);
+	svm->vmcb = (void *)vm_alloc_page(vm);
 	svm->vmcb_hva = addr_gva2hva(vm, (uintptr_t)svm->vmcb);
 	svm->vmcb_gpa = addr_gva2gpa(vm, (uintptr_t)svm->vmcb);
 
-	svm->save_area = (void *)vm_vaddr_alloc_page(vm);
+	svm->save_area = (void *)vm_alloc_page(vm);
 	svm->save_area_hva = addr_gva2hva(vm, (uintptr_t)svm->save_area);
 	svm->save_area_gpa = addr_gva2gpa(vm, (uintptr_t)svm->save_area);
 
-	svm->msr = (void *)vm_vaddr_alloc_page(vm);
+	svm->msr = (void *)vm_alloc_page(vm);
 	svm->msr_hva = addr_gva2hva(vm, (uintptr_t)svm->msr);
 	svm->msr_gpa = addr_gva2gpa(vm, (uintptr_t)svm->msr);
 	memset(svm->msr_hva, 0, getpagesize());
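Consumers only see the gva_t spelling and the shorter allocator name; the typical test setup is unchanged. A brief sketch:

	gva_t svm_gva;

	vcpu_alloc_svm(vm, &svm_gva);
	/* Hand the guest the GVA of its svm_test_data. */
	vcpu_args_set(vcpu, 1, svm_gva);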
@@ -84,14 +84,14 @@ void vm_enable_npt(struct kvm_vm *vm)
 void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp)
 {
 	struct vmcb *vmcb = svm->vmcb;
-	uint64_t vmcb_gpa = svm->vmcb_gpa;
+	u64 vmcb_gpa = svm->vmcb_gpa;
 	struct vmcb_save_area *save = &vmcb->save;
 	struct vmcb_control_area *ctrl = &vmcb->control;
 	u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
 		| SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
 	u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
 		| SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
-	uint64_t efer;
+	u64 efer;
 
 	efer = rdmsr(MSR_EFER);
 	wrmsr(MSR_EFER, efer | EFER_SVME);
@@ -158,7 +158,7 @@ void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp)
  * for now. registers involved in LOAD/SAVE_GPR_C are eventually
  * unmodified so they do not need to be in the clobber list.
  */
-void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa)
+void run_guest(struct vmcb *vmcb, u64 vmcb_gpa)
 {
 	asm volatile (
 		"vmload %[vmcb_gpa]\n\t"
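For orientation, the guest-side flow these two helpers support looks roughly like the following sketch (l2_guest_code and the stack array are hypothetical names; SVM_EXIT_VMMCALL is the architectural exit code for a VMMCALL from L2):

	generic_svm_setup(svm, l2_guest_code,
			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	run_guest(svm->vmcb, svm->vmcb_gpa);
	GUEST_ASSERT(svm->vmcb->control.exit_code == SVM_EXIT_VMMCALL);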