KVM: arm64: Defer EL2 stage-1 mapping on share
We currently blindly map into EL2 stage-1 *any* page passed to the
__pkvm_host_share_hyp() HVC. This is less than ideal from a security
perspective as it makes exploitation of potential hypervisor gadgets
easier than it should be. But interestingly, pKVM should never need to
access SHARED_BORROWED pages that it hasn't previously pinned, so there
is no need to map the page before that.

Reviewed-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Quentin Perret <qperret@google.com>
Link: https://lore.kernel.org/r/20250416152648.2982950-7-qperret@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
commit 48d8488823
parent 3390b3cbb6
committed by Marc Zyngier
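The contract this change relies on is that EL2 code must pin host-shared
memory before dereferencing it, since the stage-1 mapping now only exists
while a pin is held. Below is a minimal sketch of that access pattern using
the hyp_pin_shared_mem()/hyp_unpin_shared_mem() functions touched by this
patch; the wrapper hyp_read_shared_word() is a hypothetical illustration,
not code from the series:

	/*
	 * Illustrative only: hyp_read_shared_word() is a hypothetical helper.
	 * After this patch, a SHARED_BORROWED page is mapped at EL2 stage-1
	 * only while pinned, so any access must be bracketed by pin/unpin.
	 */
	static int hyp_read_shared_word(u64 *hyp_va, u64 *out)
	{
		int ret;

		ret = hyp_pin_shared_mem(hyp_va, hyp_va + 1);
		if (ret)
			return ret;	/* page not in a pinnable shared state */

		*out = *hyp_va;		/* safe: mapping exists while pinned */

		hyp_unpin_shared_mem(hyp_va, hyp_va + 1);
		return 0;
	}

Because the mapping is created on the 0 -> 1 refcount transition (see the
hyp_pin_shared_mem() hunks below), nested pins of an already-pinned page
stay cheap: only the first pin reaches the page-table code.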
@@ -693,7 +693,6 @@ static int __guest_check_page_state_range(struct pkvm_hyp_vcpu *vcpu, u64 addr,
 int __pkvm_host_share_hyp(u64 pfn)
 {
 	u64 phys = hyp_pfn_to_phys(pfn);
-	void *virt = __hyp_va(phys);
 	u64 size = PAGE_SIZE;
 	int ret;
 
@@ -710,7 +709,6 @@ int __pkvm_host_share_hyp(u64 pfn)
 	}
 
 	__hyp_set_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
-	WARN_ON(pkvm_create_mappings_locked(virt, virt + size, PAGE_HYP));
 	WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED));
 
 unlock:
@@ -742,7 +740,6 @@ int __pkvm_host_unshare_hyp(u64 pfn)
 	}
 
 	__hyp_set_page_state_range(phys, size, PKVM_NOPAGE);
-	WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size);
 	WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_OWNED));
 
 unlock:
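Dropping the unmap from the unshare path is safe because
__pkvm_host_unshare_hyp() already refuses to proceed while the page is
pinned at EL2, and an unpinned page is, after this series, not mapped at
EL2 stage-1 in the first place. A sketch of that pre-existing guard, which
sits earlier in the same function (context not visible in this hunk, so
treat the exact form as an assumption):

	/* Earlier in __pkvm_host_unshare_hyp(), before this hunk: */
	if (hyp_page_count((void *)virt)) {
		ret = -EBUSY;	/* still pinned by EL2, cannot unshare */
		goto unlock;
	}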
@@ -818,6 +815,7 @@ int hyp_pin_shared_mem(void *from, void *to)
 	u64 end = PAGE_ALIGN((u64)to);
 	u64 phys = __hyp_pa(start);
 	u64 size = end - start;
+	struct hyp_page *p;
 	int ret;
 
 	host_lock_component();
@@ -831,8 +829,14 @@ int hyp_pin_shared_mem(void *from, void *to)
 	if (ret)
 		goto unlock;
 
-	for (cur = start; cur < end; cur += PAGE_SIZE)
-		hyp_page_ref_inc(hyp_virt_to_page(cur));
+	for (cur = start; cur < end; cur += PAGE_SIZE) {
+		p = hyp_virt_to_page(cur);
+		hyp_page_ref_inc(p);
+		if (p->refcount == 1)
+			WARN_ON(pkvm_create_mappings_locked((void *)cur,
+							    (void *)cur + PAGE_SIZE,
+							    PAGE_HYP));
+	}
 
 unlock:
 	hyp_unlock_component();
@@ -845,12 +849,17 @@ void hyp_unpin_shared_mem(void *from, void *to)
 {
 	u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
 	u64 end = PAGE_ALIGN((u64)to);
+	struct hyp_page *p;
 
 	host_lock_component();
 	hyp_lock_component();
 
-	for (cur = start; cur < end; cur += PAGE_SIZE)
-		hyp_page_ref_dec(hyp_virt_to_page(cur));
+	for (cur = start; cur < end; cur += PAGE_SIZE) {
+		p = hyp_virt_to_page(cur);
+		if (p->refcount == 1)
+			WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, cur, PAGE_SIZE) != PAGE_SIZE);
+		hyp_page_ref_dec(p);
+	}
 
 	hyp_unlock_component();
 	host_unlock_component();
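Note where the refcount == 1 test sits in each loop: the pin side
increments first and maps on the post-increment value (the 0 -> 1
transition), while the unpin side tests before decrementing and unmaps on
the 1 -> 0 transition, so the stage-1 mapping is torn down before the last
reference is dropped. A condensed sketch of the two transitions, with
pin_page()/unpin_page() as hypothetical names for the per-page bodies of
the loops above:

	/* pin_page()/unpin_page() are hypothetical names for the loop bodies. */
	static void pin_page(struct hyp_page *p, void *va)
	{
		hyp_page_ref_inc(p);
		if (p->refcount == 1)	/* 0 -> 1: first pin, create the mapping */
			WARN_ON(pkvm_create_mappings_locked(va, va + PAGE_SIZE,
							    PAGE_HYP));
	}

	static void unpin_page(struct hyp_page *p, u64 va)
	{
		if (p->refcount == 1)	/* 1 -> 0: last pin going away, unmap first */
			WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, va,
						      PAGE_SIZE) != PAGE_SIZE);
		hyp_page_ref_dec(p);
	}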