mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-16 14:51:51 -04:00
Update the vma_modify_flags() and vma_modify_flags_uffd() functions to accept a vma_flags_t parameter rather than a vm_flags_t one, and propagate the changes as needed to implement this change. Also add vma_flags_reset_once() as a replacement for vm_flags_reset_once(). We still need to be careful here because we need to avoid tearing, so maintain the assumption that the first system word's worth of flags are the only ones that require protection from tearing, and retain this functionality. We can copy the remainder of VMA flags above 64 bits normally. But hopefully by the time that happens, we will have replaced the logic that requires these WRITE_ONCE()'s with something else. We also replace instances of vm_flags_reset() with a simple write of VMA flags. We no longer perform a number of checks, most notably the VMA flags asserts, because: 1. We might be operating on a VMA that is not yet added to the tree. 2. We might be operating on a VMA that is now detached. 3. Really in all but core code, you should be using vma_desc_xxx(). 4. Other VMA fields are manipulated with no such checks. 5. It'd be egregious to have to add variants of flag functions just to account for cases such as the above, especially when we don't do so for other VMA fields. Drivers are the problematic cases, and the reason these checks were especially important (and also for debugging as VMA locks were introduced); the mmap_prepare work is solving this generally. Additionally, we can fairly safely assume by this point that the soft dirty flags are being set correctly, so it's reasonable to drop this also. Finally, update the VMA tests to reflect this.
Link: https://lkml.kernel.org/r/51afbb2b8c3681003cc7926647e37335d793836e.1774034900.git.ljs@kernel.org Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org> Acked-by: Vlastimil Babka (SUSE) <vbabka@kernel.org> Cc: Albert Ou <aou@eecs.berkeley.edu> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Alexandre Ghiti <alex@ghiti.fr> Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com> Cc: "Borislav Petkov (AMD)" <bp@alien8.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Chengming Zhou <chengming.zhou@linux.dev> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Christian Brauner <brauner@kernel.org> Cc: David Hildenbrand <david@kernel.org> Cc: Dinh Nguyen <dinguyen@kernel.org> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Huacai Chen <chenhuacai@kernel.org> Cc: Ingo Molnar <mingo@redhat.com> Cc: Jan Kara <jack@suse.cz> Cc: Jann Horn <jannh@google.com> Cc: Johannes Berg <johannes@sipsolutions.net> Cc: Kees Cook <kees@kernel.org> Cc: Liam Howlett <liam.howlett@oracle.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Michal Hocko <mhocko@suse.com> Cc: Mike Rapoport <rppt@kernel.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Ondrej Mosnacek <omosnace@redhat.com> Cc: Palmer Dabbelt <palmer@dabbelt.com> Cc: Paul Moore <paul@paul-moore.com> Cc: Pedro Falcato <pfalcato@suse.de> Cc: Richard Weinberger <richard@nod.at> Cc: Russell King <linux@armlinux.org.uk> Cc: Stephen Smalley <stephen.smalley.work@gmail.com> Cc: Suren Baghdasaryan <surenb@google.com> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Vineet Gupta <vgupta@kernel.org> Cc: WANG Xuerui <kernel@xen0n.name> Cc: Will Deacon <will@kernel.org> Cc: xu xin <xu.xin16@zte.com.cn> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1487 lines
40 KiB
C
1487 lines
40 KiB
C
// SPDX-License-Identifier: GPL-2.0-or-later
|
|
|
|
/* Helper function which provides a wrapper around a merge new VMA operation. */
|
|
static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
|
|
{
|
|
struct vm_area_struct *vma;
|
|
/*
|
|
* For convenience, get prev and next VMAs. Which the new VMA operation
|
|
* requires.
|
|
*/
|
|
vmg->next = vma_next(vmg->vmi);
|
|
vmg->prev = vma_prev(vmg->vmi);
|
|
vma_iter_next_range(vmg->vmi);
|
|
|
|
vma = vma_merge_new_range(vmg);
|
|
if (vma)
|
|
vma_assert_attached(vma);
|
|
|
|
return vma;
|
|
}
|
|
|
|
/*
 * Helper function which provides a wrapper around the expansion of an existing
 * VMA.
 *
 * Expands vmg->target to cover [vmg->start, vmg->end). Returns whatever
 * vma_expand() returns - presumably 0 on success and a negative error code on
 * failure, per kernel convention (TODO: confirm against vma_expand()).
 */
static int expand_existing(struct vma_merge_struct *vmg)
{
	return vma_expand(vmg);
}
|
|
|
|
/*
|
|
* Helper function to reset merge state the associated VMA iterator to a
|
|
* specified new range.
|
|
*/
|
|
void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
|
|
unsigned long end, pgoff_t pgoff, vma_flags_t vma_flags)
|
|
{
|
|
vma_iter_set(vmg->vmi, start);
|
|
|
|
vmg->prev = NULL;
|
|
vmg->middle = NULL;
|
|
vmg->next = NULL;
|
|
vmg->target = NULL;
|
|
|
|
vmg->start = start;
|
|
vmg->end = end;
|
|
vmg->pgoff = pgoff;
|
|
vmg->vma_flags = vma_flags;
|
|
|
|
vmg->just_expand = false;
|
|
vmg->__remove_middle = false;
|
|
vmg->__remove_next = false;
|
|
vmg->__adjust_middle_start = false;
|
|
vmg->__adjust_next_start = false;
|
|
}
|
|
|
|
/*
 * Helper function to set both the VMG range and its anon_vma.
 *
 * Identical to vmg_set_range() but additionally assigns the anon_vma the
 * merge attempt should be performed against (may be NULL).
 */
static void vmg_set_range_anon_vma(struct vma_merge_struct *vmg, unsigned long start,
		unsigned long end, pgoff_t pgoff, vma_flags_t vma_flags,
		struct anon_vma *anon_vma)
{
	vmg_set_range(vmg, start, end, pgoff, vma_flags);
	vmg->anon_vma = anon_vma;
}
|
|
|
|
/*
|
|
* Helper function to try to merge a new VMA.
|
|
*
|
|
* Update vmg and the iterator for it and try to merge, otherwise allocate a new
|
|
* VMA, link it to the maple tree and return it.
|
|
*/
|
|
static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
|
|
struct vma_merge_struct *vmg, unsigned long start,
|
|
unsigned long end, pgoff_t pgoff, vma_flags_t vma_flags,
|
|
bool *was_merged)
|
|
{
|
|
struct vm_area_struct *merged;
|
|
|
|
vmg_set_range(vmg, start, end, pgoff, vma_flags);
|
|
|
|
merged = merge_new(vmg);
|
|
if (merged) {
|
|
*was_merged = true;
|
|
ASSERT_EQ(vmg->state, VMA_MERGE_SUCCESS);
|
|
return merged;
|
|
}
|
|
|
|
*was_merged = false;
|
|
|
|
ASSERT_EQ(vmg->state, VMA_MERGE_NOMERGE);
|
|
|
|
return alloc_and_link_vma(mm, start, end, pgoff, vma_flags);
|
|
}
|
|
|
|
/*
 * Test that a new VMA exactly filling the gap between two compatible VMAs
 * merges all three into a single VMA spanning the whole range.
 */
static bool test_simple_merge(void)
{
	struct vm_area_struct *vma;
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_MAYREAD_BIT,
			VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	/* Two VMAs with a one-page hole at [0x1000, 0x2000). */
	struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, vma_flags);
	struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, vma_flags);
	VMA_ITERATOR(vmi, &mm, 0x1000);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
		.start = 0x1000,
		.end = 0x2000,
		.vma_flags = vma_flags,
		.pgoff = 1,
	};

	ASSERT_FALSE(attach_vma(&mm, vma_left));
	ASSERT_FALSE(attach_vma(&mm, vma_right));

	vma = merge_new(&vmg);
	ASSERT_NE(vma, NULL);

	/* The merged VMA must cover all three ranges with unchanged flags. */
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_FLAGS_SAME_MASK(&vma->flags, vma_flags);

	detach_free_vma(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}
|
|
|
|
/*
 * Test that modifying the middle of a VMA via vma_modify_flags() splits it
 * into three VMAs, with start/end/pgoff correctly propagated to each piece.
 */
static bool test_simple_modify(void)
{
	struct vm_area_struct *vma;
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_MAYREAD_BIT,
			VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	/* A single VMA spanning three pages. */
	struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, vma_flags);
	VMA_ITERATOR(vmi, &mm, 0x1000);

	ASSERT_FALSE(attach_vma(&mm, init_vma));

	/*
	 * The flags will not be changed, the vma_modify_flags() function
	 * performs the merge/split only.
	 */
	vma = vma_modify_flags(&vmi, init_vma, init_vma,
			       0x1000, 0x2000, &vma_flags);
	ASSERT_NE(vma, NULL);
	/* We modify the provided VMA, and on split allocate new VMAs. */
	ASSERT_EQ(vma, init_vma);

	/* The returned (middle) VMA covers exactly the modified range. */
	ASSERT_EQ(vma->vm_start, 0x1000);
	ASSERT_EQ(vma->vm_end, 0x2000);
	ASSERT_EQ(vma->vm_pgoff, 1);

	/*
	 * Now walk through the three split VMAs and make sure they are as
	 * expected.
	 */

	vma_iter_set(&vmi, 0);
	vma = vma_iter_load(&vmi);

	/* First piece: [0, 0x1000) at pgoff 0. */
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x1000);
	ASSERT_EQ(vma->vm_pgoff, 0);

	detach_free_vma(vma);
	vma_iter_clear(&vmi);

	vma = vma_next(&vmi);

	/* Second piece: the modified range [0x1000, 0x2000) at pgoff 1. */
	ASSERT_EQ(vma->vm_start, 0x1000);
	ASSERT_EQ(vma->vm_end, 0x2000);
	ASSERT_EQ(vma->vm_pgoff, 1);

	detach_free_vma(vma);
	vma_iter_clear(&vmi);

	vma = vma_next(&vmi);

	/* Third piece: [0x2000, 0x3000) at pgoff 2. */
	ASSERT_EQ(vma->vm_start, 0x2000);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 2);

	detach_free_vma(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}
|
|
|
|
/*
 * Test that expand_existing() grows a one-page VMA to cover the requested
 * three-page range in place.
 */
static bool test_simple_expand(void)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_MAYREAD_BIT,
			VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x1000, 0, vma_flags);
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.vmi = &vmi,
		.target = vma,
		.start = 0,
		.end = 0x3000,
		.pgoff = 0,
	};

	ASSERT_FALSE(attach_vma(&mm, vma));

	ASSERT_FALSE(expand_existing(&vmg));

	/* The same VMA object now spans the expanded range. */
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 0);

	detach_free_vma(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}
|
|
|
|
/*
 * Test that vma_shrink() reduces a three-page VMA to a single page, leaving
 * start and pgoff untouched.
 */
static bool test_simple_shrink(void)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT, VMA_MAYREAD_BIT,
			VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, vma_flags);
	VMA_ITERATOR(vmi, &mm, 0);

	ASSERT_FALSE(attach_vma(&mm, vma));

	ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0));

	/* Only vm_end should have changed. */
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x1000);
	ASSERT_EQ(vma->vm_pgoff, 0);

	detach_free_vma(vma);
	mtree_destroy(&mm.mm_mt);

	return true;
}
|
|
|
|
/*
 * Exercise new-VMA merging against pre-existing neighbours in a variety of
 * layouts, under a given combination of "sticky" flag assignments:
 * is_sticky applies sticky flags to every probed/new range, while
 * a/b/c_is_sticky apply them to the pre-existing VMAs A, B and C
 * respectively. Whenever any participant of a merge was sticky, the merged
 * result must retain at least one sticky flag.
 */
static bool __test_merge_new(bool is_sticky, bool a_is_sticky, bool b_is_sticky, bool c_is_sticky)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
			VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	/* One anon_vma_chain per VMA so anon_vma duplication is exercised. */
	struct anon_vma_chain dummy_anon_vma_chain_a = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain_b = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain_c = {
		.anon_vma = &dummy_anon_vma,
	};
	struct anon_vma_chain dummy_anon_vma_chain_d = {
		.anon_vma = &dummy_anon_vma,
	};
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	int count;
	struct vm_area_struct *vma, *vma_a, *vma_b, *vma_c, *vma_d;
	bool merged;

	if (is_sticky)
		vma_flags_set_mask(&vma_flags, VMA_STICKY_FLAGS);

	/*
	 * 0123456789abc
	 * AA B       CC
	 */
	vma_a = alloc_and_link_vma(&mm, 0, 0x2000, 0, vma_flags);
	ASSERT_NE(vma_a, NULL);
	if (a_is_sticky)
		vma_flags_set_mask(&vma_a->flags, VMA_STICKY_FLAGS);
	/* We give each VMA a single avc so we can test anon_vma duplication. */
	INIT_LIST_HEAD(&vma_a->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_a.same_vma, &vma_a->anon_vma_chain);

	vma_b = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vma_flags);
	ASSERT_NE(vma_b, NULL);
	if (b_is_sticky)
		vma_flags_set_mask(&vma_b->flags, VMA_STICKY_FLAGS);
	INIT_LIST_HEAD(&vma_b->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_b.same_vma, &vma_b->anon_vma_chain);

	vma_c = alloc_and_link_vma(&mm, 0xb000, 0xc000, 0xb, vma_flags);
	ASSERT_NE(vma_c, NULL);
	if (c_is_sticky)
		vma_flags_set_mask(&vma_c->flags, VMA_STICKY_FLAGS);
	INIT_LIST_HEAD(&vma_c->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_c.same_vma, &vma_c->anon_vma_chain);

	/*
	 * NO merge.
	 *
	 * 0123456789abc
	 * AA B   **  CC
	 */
	vma_d = try_merge_new_vma(&mm, &vmg, 0x7000, 0x9000, 7, vma_flags, &merged);
	ASSERT_NE(vma_d, NULL);
	INIT_LIST_HEAD(&vma_d->anon_vma_chain);
	list_add(&dummy_anon_vma_chain_d.same_vma, &vma_d->anon_vma_chain);
	ASSERT_FALSE(merged);
	ASSERT_EQ(mm.map_count, 4);

	/*
	 * Merge BOTH sides.
	 *
	 * 0123456789abc
	 * AA*B   DD  CC
	 */
	vma_a->vm_ops = &vm_ops; /* This should have no impact. */
	vma_b->anon_vma = &dummy_anon_vma;
	vma = try_merge_new_vma(&mm, &vmg, 0x2000, 0x3000, 2, vma_flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Merge with A, delete B. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x4000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 3);
	if (is_sticky || a_is_sticky || b_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));

	/*
	 * Merge to PREVIOUS VMA.
	 *
	 * 0123456789abc
	 * AAAA*  DD  CC
	 */
	vma = try_merge_new_vma(&mm, &vmg, 0x4000, 0x5000, 4, vma_flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Extend A. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x5000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 3);
	if (is_sticky || a_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));

	/*
	 * Merge to NEXT VMA.
	 *
	 * 0123456789abc
	 * AAAAA *DD  CC
	 */
	vma_d->anon_vma = &dummy_anon_vma;
	vma_d->vm_ops = &vm_ops; /* This should have no impact. */
	vma = try_merge_new_vma(&mm, &vmg, 0x6000, 0x7000, 6, vma_flags, &merged);
	ASSERT_EQ(vma, vma_d);
	/* Prepend. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0x6000);
	ASSERT_EQ(vma->vm_end, 0x9000);
	ASSERT_EQ(vma->vm_pgoff, 6);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 3);
	if (is_sticky) /* D uses is_sticky. */
		ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));

	/*
	 * Merge BOTH sides.
	 *
	 * 0123456789abc
	 * AAAAA*DDD  CC
	 */
	vma_d->vm_ops = NULL; /* This would otherwise degrade the merge. */
	vma = try_merge_new_vma(&mm, &vmg, 0x5000, 0x6000, 5, vma_flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Merge with A, delete D. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x9000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);
	if (is_sticky || a_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));

	/*
	 * Merge to NEXT VMA.
	 *
	 * 0123456789abc
	 * AAAAAAAAA *CC
	 */
	vma_c->anon_vma = &dummy_anon_vma;
	vma = try_merge_new_vma(&mm, &vmg, 0xa000, 0xb000, 0xa, vma_flags, &merged);
	ASSERT_EQ(vma, vma_c);
	/* Prepend C. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0xa000);
	ASSERT_EQ(vma->vm_end, 0xc000);
	ASSERT_EQ(vma->vm_pgoff, 0xa);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);
	if (is_sticky || c_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));

	/*
	 * Merge BOTH sides.
	 *
	 * 0123456789abc
	 * AAAAAAAAA*CCC
	 */
	vma = try_merge_new_vma(&mm, &vmg, 0x9000, 0xa000, 0x9, vma_flags, &merged);
	ASSERT_EQ(vma, vma_a);
	/* Extend A and delete C. */
	ASSERT_TRUE(merged);
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0xc000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 1);
	if (is_sticky || a_is_sticky || c_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma->flags, VMA_STICKY_FLAGS));

	/*
	 * Final state.
	 *
	 * 0123456789abc
	 * AAAAAAAAAAAAA
	 */

	count = 0;
	vma_iter_set(&vmi, 0);
	for_each_vma(vmi, vma) {
		ASSERT_NE(vma, NULL);
		ASSERT_EQ(vma->vm_start, 0);
		ASSERT_EQ(vma->vm_end, 0xc000);
		ASSERT_EQ(vma->vm_pgoff, 0);
		ASSERT_EQ(vma->anon_vma, &dummy_anon_vma);

		detach_free_vma(vma);
		count++;
	}

	/* Should only have one VMA left (though freed) after all is done.*/
	ASSERT_EQ(count, 1);

	mtree_destroy(&mm.mm_mt);
	return true;
}
|
|
|
|
static bool test_merge_new(void)
|
|
{
|
|
int i, j, k, l;
|
|
|
|
/* Generate every possible permutation of sticky flags. */
|
|
for (i = 0; i < 2; i++)
|
|
for (j = 0; j < 2; j++)
|
|
for (k = 0; k < 2; k++)
|
|
for (l = 0; l < 2; l++)
|
|
ASSERT_TRUE(__test_merge_new(i, j, k, l));
|
|
|
|
return true;
|
|
}
|
|
|
|
/*
 * Verify that any of the VM_SPECIAL flags (IO, DONTEXPAND, PFNMAP, MIXEDMAP)
 * prevents both a new-VMA merge and an existing-VMA merge that would
 * otherwise succeed.
 */
static bool test_vma_merge_special_flags(void)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
			VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	vma_flag_t special_flags[] = { VMA_IO_BIT, VMA_DONTEXPAND_BIT,
			VMA_PFNMAP_BIT, VMA_MIXEDMAP_BIT };
	vma_flags_t all_special_flags = EMPTY_VMA_FLAGS;
	int i;
	struct vm_area_struct *vma_left, *vma;

	/* Make sure there aren't new VM_SPECIAL flags. */
	for (i = 0; i < ARRAY_SIZE(special_flags); i++)
		vma_flags_set(&all_special_flags, special_flags[i]);
	/* special_flags[] must stay in sync with VMA_SPECIAL_FLAGS. */
	ASSERT_FLAGS_SAME_MASK(&all_special_flags, VMA_SPECIAL_FLAGS);

	/*
	 * 01234
	 * AAA
	 */
	vma_left = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	ASSERT_NE(vma_left, NULL);

	/* 1. Set up new VMA with special flag that would otherwise merge. */

	/*
	 * 01234
	 * AAA*
	 *
	 * This should merge if not for the VM_SPECIAL flag.
	 */
	vmg_set_range(&vmg, 0x3000, 0x4000, 3, vma_flags);
	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
		vma_flag_t special_flag = special_flags[i];
		vma_flags_t flags = vma_flags;

		/* Apply the special flag to both sides of the would-be merge. */
		vma_flags_set(&flags, special_flag);
		vma_left->flags = flags;
		vmg.vma_flags = flags;
		vma = merge_new(&vmg);
		ASSERT_EQ(vma, NULL);
		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
	}

	/* 2. Modify VMA with special flag that would otherwise merge. */

	/*
	 * 01234
	 * AAAB
	 *
	 * Create a VMA to modify.
	 */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vma_flags);
	ASSERT_NE(vma, NULL);
	vmg.middle = vma;

	for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
		vma_flag_t special_flag = special_flags[i];
		vma_flags_t flags = vma_flags;

		vma_flags_set(&flags, special_flag);
		vma_left->flags = flags;
		vmg.vma_flags = flags;
		vma = merge_existing(&vmg);
		ASSERT_EQ(vma, NULL);
		ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
	}

	cleanup_mm(&mm, &vmi);
	return true;
}
|
|
|
|
/*
 * Verify the rule that a VMA with a vm_ops->close() hook must never be
 * DELETED by a merge: merges that would delete such a VMA either fail
 * outright or are reduced to a smaller merge that leaves it intact.
 */
static bool test_vma_merge_with_close(void)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
			VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	struct vm_area_struct *vma_prev, *vma_next, *vma;

	/*
	 * When merging VMAs we are not permitted to remove any VMA that has a
	 * vm_ops->close() hook.
	 *
	 * Considering the two possible adjacent VMAs to which a VMA can be
	 * merged:
	 *
	 * [ prev ][ vma ][ next ]
	 *
	 * In no case will we need to delete prev. If the operation is
	 * mergeable, then prev will be extended with one or both of vma and
	 * next deleted.
	 *
	 * As a result, during initial mergeability checks, only
	 * can_vma_merge_before() (which implies the VMA being merged with is
	 * 'next' as shown above) bothers to check to see whether the next VMA
	 * has a vm_ops->close() callback that will need to be called when
	 * removed.
	 *
	 * If it does, then we cannot merge as the resources that the close()
	 * operation potentially clears down are tied only to the existing VMA
	 * range and we have no way of extending those to the nearly merged one.
	 *
	 * We must consider two scenarios:
	 *
	 * A.
	 *
	 * vm_ops->close:  -       -    !NULL
	 *                [ prev ][ vma ][ next ]
	 *
	 * Where prev may or may not be present/mergeable.
	 *
	 * This is picked up by a specific check in can_vma_merge_before().
	 *
	 * B.
	 *
	 * vm_ops->close:     -     !NULL
	 *                [ prev ][ vma ]
	 *
	 * Where prev and vma are present and mergeable.
	 *
	 * This is picked up by a specific check in the modified VMA merge.
	 *
	 * IMPORTANT NOTE: We make the assumption that the following case:
	 *
	 *    -     !NULL   NULL
	 * [ prev ][ vma ][ next ]
	 *
	 * Cannot occur, because vma->vm_ops being the same implies the same
	 * vma->vm_file, and therefore this would mean that next->vm_ops->close
	 * would be set too, and thus scenario A would pick this up.
	 */

	/*
	 * The only case of a new VMA merge that results in a VMA being deleted
	 * is one where both the previous and next VMAs are merged - in this
	 * instance the next VMA is deleted, and the previous VMA is extended.
	 *
	 * If we are unable to do so, we reduce the operation to simply
	 * extending the prev VMA and not merging next.
	 *
	 * 0123456789
	 * PPP**NNNN
	 * ->
	 * 0123456789
	 * PPPPPNNNN
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vma_flags);
	vma_next->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	ASSERT_EQ(merge_new(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x5000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * When modifying an existing VMA there are further cases where we
	 * delete VMAs.
	 *
	 *    <>
	 * 0123456789
	 * PPPVV
	 *
	 * In this instance, if vma has a close hook, the merge simply cannot
	 * proceed.
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	/*
	 * The VMA being modified in a way that would otherwise merge should
	 * also fail.
	 */
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * This case is mirrored if merging with next.
	 *
	 *    <>
	 * 0123456789
	 *    VVNNNN
	 *
	 * In this instance, if vma has a close hook, the merge simply cannot
	 * proceed.
	 */

	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vma_flags);
	vma->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	/*
	 * Initially this is misapprehended as an out of memory report, as the
	 * close() check is handled in the same way as anon_vma duplication
	 * failures, however a subsequent patch resolves this.
	 */
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Finally, we consider two variants of the case where we modify a VMA
	 * to merge with both the previous and next VMAs.
	 *
	 * The first variant is where vma has a close hook. In this instance, no
	 * merge can proceed.
	 *
	 *    <>
	 * 0123456789
	 * PPPVVNNNN
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vma_flags);
	vma->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);

	/*
	 * The second variant is where next has a close hook. In this instance,
	 * we reduce the operation to a merge between prev and vma.
	 *
	 *    <>
	 * 0123456789
	 * PPPVVNNNN
	 * ->
	 * 0123456789
	 * PPPPPNNNN
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x9000, 5, vma_flags);
	vma_next->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x5000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	return true;
}
|
|
|
|
/*
 * Verify that a new VMA placed between two close()-hooked VMAs still merges
 * partially: prev is expanded over the new range while next (which would
 * otherwise be deleted) is left intact.
 */
static bool test_vma_merge_new_with_close(void)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
			VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct vm_area_struct *vma_prev = alloc_and_link_vma(&mm, 0, 0x2000, 0, vma_flags);
	struct vm_area_struct *vma_next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, vma_flags);
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	struct vm_area_struct *vma;

	/*
	 * We should allow the partial merge of a proposed new VMA if the
	 * surrounding VMAs have vm_ops->close() hooks (but are otherwise
	 * compatible), e.g.:
	 *
	 *        New VMA
	 *    A  v-------v  B
	 * |-----|       |-----|
	 *  close         close
	 *
	 * Since the rule is to not DELETE a VMA with a close operation, this
	 * should be permitted, only rather than expanding A and deleting B, we
	 * should simply expand A and leave B intact, e.g.:
	 *
	 *        New VMA
	 *    A          B
	 * |------------||-----|
	 *  close         close
	 */

	/* Have prev and next have a vm_ops->close() hook. */
	vma_prev->vm_ops = &vm_ops;
	vma_next->vm_ops = &vm_ops;

	vmg_set_range(&vmg, 0x2000, 0x5000, 2, vma_flags);
	vma = merge_new(&vmg);
	ASSERT_NE(vma, NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	/* prev was expanded over the new range; next survives untouched. */
	ASSERT_EQ(vma->vm_start, 0);
	ASSERT_EQ(vma->vm_end, 0x5000);
	ASSERT_EQ(vma->vm_pgoff, 0);
	ASSERT_EQ(vma->vm_ops, &vm_ops);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);

	cleanup_mm(&mm, &vmi);
	return true;
}
|
|
|
|
/*
 * Exercise merging of an existing (modified) VMA with its prev/next
 * neighbours - partial and full spans in both directions, the both-sides
 * case, and a set of ranges that must NOT merge - under a given combination
 * of sticky-flag assignments for the prev, middle and next VMAs. Whenever a
 * sticky participant is merged, the result must retain a sticky flag.
 */
static bool __test_merge_existing(bool prev_is_sticky, bool middle_is_sticky, bool next_is_sticky)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
			VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	vma_flags_t prev_flags = vma_flags;
	vma_flags_t next_flags = vma_flags;
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma, *vma_prev, *vma_next;
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	const struct vm_operations_struct vm_ops = {
		.close = dummy_close,
	};
	struct anon_vma_chain avc = {};

	if (prev_is_sticky)
		vma_flags_set_mask(&prev_flags, VMA_STICKY_FLAGS);
	if (middle_is_sticky)
		vma_flags_set_mask(&vma_flags, VMA_STICKY_FLAGS);
	if (next_is_sticky)
		vma_flags_set_mask(&next_flags, VMA_STICKY_FLAGS);

	/*
	 * Merge right case - partial span.
	 *
	 *    <->
	 * 0123456789
	 *   VVVVNNN
	 * ->
	 * 0123456789
	 *   VNNNNNN
	 */
	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vma_flags);
	vma->vm_ops = &vm_ops; /* This should have no impact. */
	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, next_flags);
	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
	vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vma_flags, &dummy_anon_vma);
	vmg.middle = vma;
	vmg.prev = vma;
	vma_set_dummy_anon_vma(vma, &avc);
	ASSERT_EQ(merge_existing(&vmg), vma_next);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	/* next absorbed the modified range; vma shrank to one page. */
	ASSERT_EQ(vma_next->vm_start, 0x3000);
	ASSERT_EQ(vma_next->vm_end, 0x9000);
	ASSERT_EQ(vma_next->vm_pgoff, 3);
	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
	ASSERT_EQ(vma->vm_start, 0x2000);
	ASSERT_EQ(vma->vm_end, 0x3000);
	ASSERT_EQ(vma->vm_pgoff, 2);
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_TRUE(vma_write_started(vma_next));
	ASSERT_EQ(mm.map_count, 2);
	if (middle_is_sticky || next_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma_next->flags, VMA_STICKY_FLAGS));

	/* Clear down and reset. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Merge right case - full span.
	 *
	 *   <-->
	 * 0123456789
	 *   VVVVNNN
	 * ->
	 * 0123456789
	 *   NNNNNNN
	 */
	vma = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, next_flags);
	vma_next->vm_ops = &vm_ops; /* This should have no impact. */
	vmg_set_range_anon_vma(&vmg, 0x2000, 0x6000, 2, vma_flags, &dummy_anon_vma);
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma, &avc);
	ASSERT_EQ(merge_existing(&vmg), vma_next);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_next->vm_start, 0x2000);
	ASSERT_EQ(vma_next->vm_end, 0x9000);
	ASSERT_EQ(vma_next->vm_pgoff, 2);
	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma_next));
	ASSERT_EQ(mm.map_count, 1);
	if (middle_is_sticky || next_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma_next->flags, VMA_STICKY_FLAGS));

	/* Clear down and reset. We should have deleted vma. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/*
	 * Merge left case - partial span.
	 *
	 *    <->
	 * 0123456789
	 * PPPVVVV
	 * ->
	 * 0123456789
	 * PPPPPPV
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vma_flags);
	vma->vm_ops = &vm_ops; /* This should have no impact. */
	vmg_set_range_anon_vma(&vmg, 0x3000, 0x6000, 3, vma_flags, &dummy_anon_vma);
	vmg.prev = vma_prev;
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma, &avc);
	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	/* prev absorbed the modified range; vma shrank to one page. */
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x6000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_EQ(vma->vm_start, 0x6000);
	ASSERT_EQ(vma->vm_end, 0x7000);
	ASSERT_EQ(vma->vm_pgoff, 6);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_TRUE(vma_write_started(vma));
	ASSERT_EQ(mm.map_count, 2);
	if (prev_is_sticky || middle_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma_prev->flags, VMA_STICKY_FLAGS));

	/* Clear down and reset. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Merge left case - full span.
	 *
	 *    <-->
	 * 0123456789
	 * PPPVVVV
	 * ->
	 * 0123456789
	 * PPPPPPP
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vma_flags);
	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vma_flags, &dummy_anon_vma);
	vmg.prev = vma_prev;
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma, &avc);
	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x7000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_EQ(mm.map_count, 1);
	if (prev_is_sticky || middle_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma_prev->flags, VMA_STICKY_FLAGS));

	/* Clear down and reset. We should have deleted vma. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/*
	 * Merge both case.
	 *
	 *    <-->
	 * 0123456789
	 * PPPVVVVNNN
	 * ->
	 * 0123456789
	 * PPPPPPPPPP
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
	vma_prev->vm_ops = &vm_ops; /* This should have no impact. */
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, next_flags);
	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vma_flags, &dummy_anon_vma);
	vmg.prev = vma_prev;
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma, &avc);
	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x9000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_EQ(mm.map_count, 1);
	if (prev_is_sticky || middle_is_sticky || next_is_sticky)
		ASSERT_TRUE(vma_flags_test_any_mask(&vma_prev->flags, VMA_STICKY_FLAGS));

	/* Clear down and reset. We should have deleted prev and next. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 1);

	/*
	 * Non-merge ranges. the modified VMA merge operation assumes that the
	 * caller always specifies ranges within the input VMA so we need only
	 * examine these cases.
	 *
	 *     -
	 *      -
	 *       -
	 *     <->
	 *     <>
	 *      <>
	 * 0123456789a
	 * PPPVVVVVNNN
	 */

	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, prev_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x8000, 0xa000, 8, next_flags);

	vmg_set_range(&vmg, 0x4000, 0x5000, 4, vma_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x5000, 0x6000, 5, vma_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x6000, 0x7000, 6, vma_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x4000, 0x7000, 4, vma_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x4000, 0x6000, 4, vma_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	vmg_set_range(&vmg, 0x5000, 0x6000, 5, vma_flags);
	vmg.prev = vma;
	vmg.middle = vma;
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);

	ASSERT_EQ(cleanup_mm(&mm, &vmi), 3);

	return true;
}
|
|
|
|
static bool test_merge_existing(void)
|
|
{
|
|
int i, j, k;
|
|
|
|
/* Generate every possible permutation of sticky flags. */
|
|
for (i = 0; i < 2; i++)
|
|
for (j = 0; j < 2; j++)
|
|
for (k = 0; k < 2; k++)
|
|
ASSERT_TRUE(__test_merge_existing(i, j, k));
|
|
|
|
return true;
|
|
}
|
|
|
|
/*
 * Assert that when prev and next possess incompatible anon_vma objects, a
 * spanning merge is refused and we fall back to merging prev with the middle
 * VMA only. Exercises both the modified-VMA (merge_existing) and new-VMA
 * (merge_new) paths.
 */
static bool test_anon_vma_non_mergeable(void)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
			VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma, *vma_prev, *vma_next;
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain dummy_anon_vma_chain_1 = {};
	struct anon_vma_chain dummy_anon_vma_chain_2 = {};
	/* Distinct anon_vma so next deliberately conflicts with prev's. */
	struct anon_vma dummy_anon_vma_2;

	/*
	 * In the case of modified VMA merge, merging both left and right VMAs
	 * but where prev and next have incompatible anon_vma objects, we revert
	 * to a merge of prev and VMA:
	 *
	 *    <-->
	 * 0123456789
	 * PPPVVVVNNN
	 *  ->
	 * 0123456789
	 * PPPPPPPNNN
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vma_flags);

	/*
	 * Give both prev and next single anon_vma_chain fields, so they will
	 * merge with the NULL vmg->anon_vma.
	 *
	 * However, when prev is compared to next, the merge should fail.
	 */
	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vma_flags, NULL);
	vmg.prev = vma_prev;
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
	__vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2);

	/* Merge succeeds, but only prev + middle: next is left untouched. */
	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x7000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_FALSE(vma_write_started(vma_next));

	/* Clear down and reset. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	/*
	 * Now consider the new VMA case. This is equivalent, only adding a new
	 * VMA in a gap between prev and next.
	 *
	 *    <-->
	 * 0123456789
	 * PPP****NNN
	 *  ->
	 * 0123456789
	 * PPPPPPPNNN
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, vma_flags);

	vmg_set_range_anon_vma(&vmg, 0x3000, 0x7000, 3, vma_flags, NULL);
	vmg.prev = vma_prev;
	vma_set_dummy_anon_vma(vma_prev, &dummy_anon_vma_chain_1);
	__vma_set_dummy_anon_vma(vma_next, &dummy_anon_vma_chain_2, &dummy_anon_vma_2);

	vmg.anon_vma = NULL;
	/* As above - only prev is extended, next must remain separate. */
	ASSERT_EQ(merge_new(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x7000);
	ASSERT_EQ(vma_prev->vm_pgoff, 0);
	ASSERT_TRUE(vma_write_started(vma_prev));
	ASSERT_FALSE(vma_write_started(vma_next));

	/* Final cleanup. */
	ASSERT_EQ(cleanup_mm(&mm, &vmi), 2);

	return true;
}
|
|
|
|
/*
 * Assert that when a merge causes a VMA carrying an anon_vma to be deleted or
 * shrunk, the anon_vma is correctly cloned and assigned to the surviving
 * (expanded) VMA, across expand, three-way merge, shrink-left and shrink-right
 * scenarios.
 */
static bool test_dup_anon_vma(void)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
			VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain dummy_anon_vma_chain = {
		.anon_vma = &dummy_anon_vma,
	};
	struct vm_area_struct *vma_prev, *vma_next, *vma;

	/* dummy_anon_vma is global state shared across tests - clear it. */
	reset_dummy_anon_vma();

	/*
	 * Expanding a VMA delete the next one duplicates next's anon_vma and
	 * assigns it to the expanded VMA.
	 *
	 * This covers new VMA merging, as these operations amount to a VMA
	 * expand.
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma_next->anon_vma = &dummy_anon_vma;

	vmg_set_range(&vmg, 0, 0x5000, 0, vma_flags);
	vmg.target = vma_prev;
	vmg.next = vma_next;

	ASSERT_EQ(expand_existing(&vmg), 0);

	/* Will have been cloned. */
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	/* Cleanup ready for next run. */
	cleanup_mm(&mm, &vmi);

	/*
	 * next has anon_vma, we assign to prev.
	 *
	 *         |<----->|
	 * |-------*********-------|
	 *   prev     vma     next
	 *  extend   delete  delete
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vma_flags);

	/* Initialise avc so mergeability check passes. */
	INIT_LIST_HEAD(&vma_next->anon_vma_chain);
	list_add(&dummy_anon_vma_chain.same_vma, &vma_next->anon_vma_chain);

	vma_next->anon_vma = &dummy_anon_vma;
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x8000);

	/* next's anon_vma must have been cloned onto the surviving prev. */
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma has anon_vma, we assign to prev.
	 *
	 *         |<----->|
	 * |-------*********-------|
	 *   prev     vma     next
	 *  extend   delete  delete
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vma_flags);
	vmg.anon_vma = &dummy_anon_vma;
	vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x8000);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma has anon_vma, we assign to prev.
	 *
	 *         |<----->|
	 * |-------*************
	 *   prev       vma
	 *  extend shrink/delete
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x8000, 3, vma_flags);

	vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	vmg.prev = vma_prev;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_prev->vm_start, 0);
	ASSERT_EQ(vma_prev->vm_end, 0x5000);

	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_prev->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);

	/*
	 * vma has anon_vma, we assign to next.
	 *
	 *     |<----->|
	 * *************-------|
	 *      vma       next
	 * shrink/delete extend
	 */
	vma = alloc_and_link_vma(&mm, 0, 0x5000, 0, vma_flags);
	vma_next = alloc_and_link_vma(&mm, 0x5000, 0x8000, 5, vma_flags);

	vma_set_dummy_anon_vma(vma, &dummy_anon_vma_chain);
	vmg_set_range(&vmg, 0x3000, 0x5000, 3, vma_flags);
	/* No VMA precedes the merged range, so prev is vma itself. */
	vmg.prev = vma;
	vmg.middle = vma;

	ASSERT_EQ(merge_existing(&vmg), vma_next);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);

	ASSERT_EQ(vma_next->vm_start, 0x3000);
	ASSERT_EQ(vma_next->vm_end, 0x8000);

	ASSERT_EQ(vma_next->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(vma_next->anon_vma->was_cloned);

	cleanup_mm(&mm, &vmi);
	return true;
}
|
|
|
|
/*
 * Assert that when maple tree preallocation fails mid-merge (simulated via the
 * fail_prealloc flag), a cloned anon_vma is correctly unlinked again, for both
 * the merge_existing() and expand_existing() paths.
 */
static bool test_vmi_prealloc_fail(void)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
			VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vma_merge_struct vmg = {
		.mm = &mm,
		.vmi = &vmi,
	};
	struct anon_vma_chain avc = {};
	struct vm_area_struct *vma_prev, *vma;

	/*
	 * We are merging vma into prev, with vma possessing an anon_vma, which
	 * will be duplicated. We cause the vmi preallocation to fail and assert
	 * the duplicated anon_vma is unlinked.
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma->anon_vma = &dummy_anon_vma;

	vmg_set_range_anon_vma(&vmg, 0x3000, 0x5000, 3, vma_flags, &dummy_anon_vma);
	vmg.prev = vma_prev;
	vmg.middle = vma;
	vma_set_dummy_anon_vma(vma, &avc);

	/* Arm the fault injection - next preallocation attempt fails. */
	fail_prealloc = true;

	/* This will cause the merge to fail. */
	ASSERT_EQ(merge_existing(&vmg), NULL);
	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);
	/* We will already have assigned the anon_vma. */
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	/* And it was both cloned and unlinked. */
	ASSERT_TRUE(dummy_anon_vma.was_cloned);
	ASSERT_TRUE(dummy_anon_vma.was_unlinked);

	cleanup_mm(&mm, &vmi); /* Resets fail_prealloc too. */

	/*
	 * We repeat the same operation for expanding a VMA, which is what new
	 * VMA merging ultimately uses too. This asserts that unlinking is
	 * performed in this case too.
	 */
	vma_prev = alloc_and_link_vma(&mm, 0, 0x3000, 0, vma_flags);
	vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vma->anon_vma = &dummy_anon_vma;

	vmg_set_range(&vmg, 0, 0x5000, 3, vma_flags);
	vmg.target = vma_prev;
	vmg.next = vma;

	fail_prealloc = true;
	ASSERT_EQ(expand_existing(&vmg), -ENOMEM);
	ASSERT_EQ(vmg.state, VMA_MERGE_ERROR_NOMEM);

	/* Same expectation: clone happened, then was rolled back/unlinked. */
	ASSERT_EQ(vma_prev->anon_vma, &dummy_anon_vma);
	ASSERT_TRUE(dummy_anon_vma.was_cloned);
	ASSERT_TRUE(dummy_anon_vma.was_unlinked);

	cleanup_mm(&mm, &vmi);
	return true;
}
|
|
|
|
static bool test_merge_extend(void)
|
|
{
|
|
vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
|
|
VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
|
|
struct mm_struct mm = {};
|
|
VMA_ITERATOR(vmi, &mm, 0x1000);
|
|
struct vm_area_struct *vma;
|
|
|
|
vma = alloc_and_link_vma(&mm, 0, 0x1000, 0, vma_flags);
|
|
alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, vma_flags);
|
|
|
|
/*
|
|
* Extend a VMA into the gap between itself and the following VMA.
|
|
* This should result in a merge.
|
|
*
|
|
* <->
|
|
* * *
|
|
*
|
|
*/
|
|
|
|
ASSERT_EQ(vma_merge_extend(&vmi, vma, 0x2000), vma);
|
|
ASSERT_EQ(vma->vm_start, 0);
|
|
ASSERT_EQ(vma->vm_end, 0x4000);
|
|
ASSERT_EQ(vma->vm_pgoff, 0);
|
|
ASSERT_TRUE(vma_write_started(vma));
|
|
ASSERT_EQ(mm.map_count, 1);
|
|
|
|
cleanup_mm(&mm, &vmi);
|
|
return true;
|
|
}
|
|
|
|
/*
 * Assert that with vmg.just_expand set, vma_merge_new_range() expands prev in
 * place without walking back to any earlier VMA, and leaves the iterator
 * positioned at prev.
 */
static bool test_expand_only_mode(void)
{
	vma_flags_t vma_flags = mk_vma_flags(VMA_READ_BIT, VMA_WRITE_BIT,
			VMA_MAYREAD_BIT, VMA_MAYWRITE_BIT);
	struct mm_struct mm = {};
	VMA_ITERATOR(vmi, &mm, 0);
	struct vm_area_struct *vma_prev, *vma;
	VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, vma_flags, 5);

	/*
	 * Place a VMA prior to the one we're expanding so we assert that we do
	 * not erroneously try to traverse to the previous VMA even though we
	 * have, through the use of the just_expand flag, indicated we do not
	 * need to do so.
	 */
	alloc_and_link_vma(&mm, 0, 0x2000, 0, vma_flags);

	/*
	 * We will be positioned at the prev VMA, but looking to expand to
	 * 0x9000.
	 */
	vma_iter_set(&vmi, 0x3000);
	vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, vma_flags);
	vmg.prev = vma_prev;
	vmg.just_expand = true;

	vma = vma_merge_new_range(&vmg);
	ASSERT_NE(vma, NULL);
	/* Expansion must reuse prev rather than allocate a new VMA. */
	ASSERT_EQ(vma, vma_prev);
	ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
	ASSERT_EQ(vma->vm_start, 0x3000);
	ASSERT_EQ(vma->vm_end, 0x9000);
	ASSERT_EQ(vma->vm_pgoff, 3);
	ASSERT_TRUE(vma_write_started(vma));
	/* The iterator must not have traversed backwards. */
	ASSERT_EQ(vma_iter_addr(&vmi), 0x3000);
	vma_assert_attached(vma);

	cleanup_mm(&mm, &vmi);
	return true;
}
|
|
|
|
/*
 * Run every VMA merge test. The TEST() macro increments *num_tests per test
 * and *num_fail on failure; declaration order determines reporting order.
 */
static void run_merge_tests(int *num_tests, int *num_fail)
{
	/* Very simple tests to kick the tyres. */
	TEST(simple_merge);
	TEST(simple_modify);
	TEST(simple_expand);
	TEST(simple_shrink);

	TEST(merge_new);
	TEST(vma_merge_special_flags);
	TEST(vma_merge_with_close);
	TEST(vma_merge_new_with_close);
	TEST(merge_existing);
	TEST(anon_vma_non_mergeable);
	TEST(dup_anon_vma);
	TEST(vmi_prealloc_fail);
	TEST(merge_extend);
	TEST(expand_only_mode);
}
|