KVM: selftests: Remove useless shifts when creating guest page tables
Remove the pointless shift from GPA=>GFN and immediately back to GFN=>GPA
when creating guest page tables. Ignore the other walkers that have a
similar pattern for the moment; they will be converted to use
virt_get_pte() in the near future.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/20221006004512.666529-4-seanjc@google.com
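As a minimal standalone C sketch (not part of the selftests; PAGE_SHIFT and the PTE_GET_* macros are restated below for illustration, with PHYSICAL_PAGE_MASK spelled out in place of GENMASK_ULL()), the following shows why the round trip is pointless: extracting the PFN from a PTE and immediately shifting it back up reproduces the same page-aligned GPA, so callers can simply pass the GPA through.

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

#define PAGE_SHIFT		12
/* Bits 51:12, i.e. the selftests' GENMASK_ULL(51, 12). */
#define PHYSICAL_PAGE_MASK	(((1ULL << 52) - 1) & ~((1ULL << PAGE_SHIFT) - 1))
#define PTE_GET_PA(pte)		((pte) & PHYSICAL_PAGE_MASK)
#define PTE_GET_PFN(pte)	(PTE_GET_PA(pte) >> PAGE_SHIFT)

int main(void)
{
	/* Hypothetical PTE value: a page-aligned PA plus present/writable bits. */
	uint64_t pte = 0x123456000ULL | 0x3;

	/* Old pattern: extract the PFN, then shift it right back into a GPA. */
	uint64_t gpa_via_pfn = PTE_GET_PFN(pte) << PAGE_SHIFT;
	/* New pattern: extract the (already page-aligned) PA directly. */
	uint64_t gpa_direct = PTE_GET_PA(pte);

	assert(gpa_via_pfn == gpa_direct);
	printf("gpa = 0x%" PRIx64 "\n", gpa_direct);
	return 0;
}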
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -177,7 +177,8 @@ struct kvm_x86_cpu_feature {
 #define PAGE_MASK		(~(PAGE_SIZE-1))
 
 #define PHYSICAL_PAGE_MASK	GENMASK_ULL(51, 12)
-#define PTE_GET_PFN(pte)	(((pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
+#define PTE_GET_PA(pte)		((pte) & PHYSICAL_PAGE_MASK)
+#define PTE_GET_PFN(pte)	(PTE_GET_PA(pte) >> PAGE_SHIFT)
 
 /* General Registers in 64-Bit Mode */
 struct gpr64_regs {
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -131,23 +131,23 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
 	}
 }
 
-static void *virt_get_pte(struct kvm_vm *vm, uint64_t pt_pfn, uint64_t vaddr,
+static void *virt_get_pte(struct kvm_vm *vm, uint64_t pt_gpa, uint64_t vaddr,
 			  int level)
 {
-	uint64_t *page_table = addr_gpa2hva(vm, pt_pfn << vm->page_shift);
+	uint64_t *page_table = addr_gpa2hva(vm, pt_gpa);
 	int index = (vaddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
 
 	return &page_table[index];
 }
 
 static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
-				       uint64_t pt_pfn,
+				       uint64_t pt_gpa,
 				       uint64_t vaddr,
 				       uint64_t paddr,
 				       int current_level,
 				       int target_level)
 {
-	uint64_t *pte = virt_get_pte(vm, pt_pfn, vaddr, current_level);
+	uint64_t *pte = virt_get_pte(vm, pt_gpa, vaddr, current_level);
 
 	if (!(*pte & PTE_PRESENT_MASK)) {
 		*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK;
@@ -197,21 +197,20 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
 	 * Allocate upper level page tables, if not already present.  Return
 	 * early if a hugepage was created.
 	 */
-	pml4e = virt_create_upper_pte(vm, vm->pgd >> vm->page_shift,
-				      vaddr, paddr, PG_LEVEL_512G, level);
+	pml4e = virt_create_upper_pte(vm, vm->pgd, vaddr, paddr, PG_LEVEL_512G, level);
 	if (*pml4e & PTE_LARGE_MASK)
 		return;
 
-	pdpe = virt_create_upper_pte(vm, PTE_GET_PFN(*pml4e), vaddr, paddr, PG_LEVEL_1G, level);
+	pdpe = virt_create_upper_pte(vm, PTE_GET_PA(*pml4e), vaddr, paddr, PG_LEVEL_1G, level);
 	if (*pdpe & PTE_LARGE_MASK)
 		return;
 
-	pde = virt_create_upper_pte(vm, PTE_GET_PFN(*pdpe), vaddr, paddr, PG_LEVEL_2M, level);
+	pde = virt_create_upper_pte(vm, PTE_GET_PA(*pdpe), vaddr, paddr, PG_LEVEL_2M, level);
 	if (*pde & PTE_LARGE_MASK)
 		return;
 
 	/* Fill in page table entry. */
-	pte = virt_get_pte(vm, PTE_GET_PFN(*pde), vaddr, PG_LEVEL_4K);
+	pte = virt_get_pte(vm, PTE_GET_PA(*pde), vaddr, PG_LEVEL_4K);
 	TEST_ASSERT(!(*pte & PTE_PRESENT_MASK),
 		    "PTE already present for 4k page at vaddr: 0x%lx\n", vaddr);
 	*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);