mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-16 02:01:18 -04:00
x86/xen: Tolerate nested XEN_LAZY_MMU entering/leaving
With the support of nested lazy mmu sections it can happen that
arch_enter_lazy_mmu_mode() is called twice without a call to
arch_leave_lazy_mmu_mode() in between, as the lazy_mmu_*() helpers
are not disabling preemption when checking for nested lazy mmu
sections.
This is a problem when running as a Xen PV guest, as
xen_enter_lazy_mmu() and xen_leave_lazy_mmu() don't tolerate this
case.
Fix that in xen_enter_lazy_mmu() and xen_leave_lazy_mmu() in order
not to hurt all other lazy mmu mode users.
Fixes: 291b3abed6 ("x86/xen: use lazy_mmu_state when context-switching")
Tested-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Message-ID: <20260508143933.493013-1-jgross@suse.com>
This commit is contained in:
@@ -2145,7 +2145,10 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 
 static void xen_enter_lazy_mmu(void)
 {
-	enter_lazy(XEN_LAZY_MMU);
+	preempt_disable();
+	if (xen_get_lazy_mode() != XEN_LAZY_MMU)
+		enter_lazy(XEN_LAZY_MMU);
+	preempt_enable();
 }
 
 static void xen_flush_lazy_mmu(void)
@@ -2182,7 +2185,8 @@ static void xen_leave_lazy_mmu(void)
 {
 	preempt_disable();
 	xen_mc_flush();
-	leave_lazy(XEN_LAZY_MMU);
+	if (xen_get_lazy_mode() != XEN_LAZY_NONE)
+		leave_lazy(XEN_LAZY_MMU);
 	preempt_enable();
 }
 
Reference in New Issue
Block a user