mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-16 04:21:09 -04:00
mm: prevent droppable mappings from being locked
Droppable mappings must not be lockable. There is a check for VMAs with
VM_DROPPABLE set in mlock_fixup() along with checks for other types of
unlockable VMAs which ensures this when calling mlock()/mlock2().
For mlockall(MCL_FUTURE), the check for unlockable VMAs is different. In
apply_mlockall_flags(), if the flags parameter has MCL_FUTURE set, the
current task's mm's default VMA flag field mm->def_flags has VM_LOCKED
applied to it. VM_LOCKONFAULT is also applied if MCL_ONFAULT is also set.
When these flags are set as default in this manner they are cleared in
__mmap_complete() for new mappings that do not support mlock. A check for
VM_DROPPABLE in __mmap_complete() is missing, resulting in droppable
mappings being created with VM_LOCKED set. To fix this and reduce the
chance of similar bugs in the future, introduce and use vma_supports_mlock().
Link: https://lkml.kernel.org/r/20260310155821.17869-1-anthony.yznaga@oracle.com
Fixes: 9651fcedf7 ("mm: add MAP_DROPPABLE for designating always lazily freeable mappings")
Signed-off-by: Anthony Yznaga <anthony.yznaga@oracle.com>
Suggested-by: David Hildenbrand <david@kernel.org>
Acked-by: David Hildenbrand (Arm) <david@kernel.org>
Reviewed-by: Pedro Falcato <pfalcato@suse.de>
Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Tested-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Cc: Jann Horn <jannh@google.com>
Cc: Jason A. Donenfeld <jason@zx2c4.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@kernel.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
committed by
Andrew Morton
parent
301f392200
commit
d239462787
@@ -30,7 +30,7 @@ static inline bool is_vma_hugetlb_flags(const vma_flags_t *flags)
|
||||
|
||||
#endif
|
||||
|
||||
static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
|
||||
static inline bool is_vm_hugetlb_page(const struct vm_area_struct *vma)
|
||||
{
|
||||
return is_vm_hugetlb_flags(vma->vm_flags);
|
||||
}
|
||||
|
||||
@@ -1243,6 +1243,16 @@ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
|
||||
}
|
||||
return fpin;
|
||||
}
|
||||
|
||||
static inline bool vma_supports_mlock(const struct vm_area_struct *vma)
|
||||
{
|
||||
if (vma->vm_flags & (VM_SPECIAL | VM_DROPPABLE))
|
||||
return false;
|
||||
if (vma_is_dax(vma) || is_vm_hugetlb_page(vma))
|
||||
return false;
|
||||
return vma != get_gate_vma(current->mm);
|
||||
}
|
||||
|
||||
#else /* !CONFIG_MMU */
|
||||
static inline void unmap_mapping_folio(struct folio *folio) { }
|
||||
static inline void mlock_new_folio(struct folio *folio) { }
|
||||
|
||||
10
mm/mlock.c
10
mm/mlock.c
@@ -472,10 +472,12 @@ static int mlock_fixup(struct vma_iterator *vmi, struct vm_area_struct *vma,
|
||||
int ret = 0;
|
||||
vm_flags_t oldflags = vma->vm_flags;
|
||||
|
||||
if (newflags == oldflags || (oldflags & VM_SPECIAL) ||
|
||||
is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
|
||||
vma_is_dax(vma) || vma_is_secretmem(vma) || (oldflags & VM_DROPPABLE))
|
||||
/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
|
||||
if (newflags == oldflags || vma_is_secretmem(vma) ||
|
||||
!vma_supports_mlock(vma))
|
||||
/*
|
||||
* Don't set VM_LOCKED or VM_LOCKONFAULT and don't count.
|
||||
* For secretmem, don't allow the memory to be unlocked.
|
||||
*/
|
||||
goto out;
|
||||
|
||||
vma = vma_modify_flags(vmi, *prev, vma, start, end, &newflags);
|
||||
|
||||
4
mm/vma.c
4
mm/vma.c
@@ -2589,9 +2589,7 @@ static void __mmap_complete(struct mmap_state *map, struct vm_area_struct *vma)
|
||||
|
||||
vm_stat_account(mm, vma->vm_flags, map->pglen);
|
||||
if (vm_flags & VM_LOCKED) {
|
||||
if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
|
||||
is_vm_hugetlb_page(vma) ||
|
||||
vma == get_gate_vma(mm))
|
||||
if (!vma_supports_mlock(vma))
|
||||
vm_flags_clear(vma, VM_LOCKED_MASK);
|
||||
else
|
||||
mm->locked_vm += map->pglen;
|
||||
|
||||
@@ -426,3 +426,8 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
|
||||
}
|
||||
|
||||
static inline void hugetlb_split(struct vm_area_struct *, unsigned long) {}
|
||||
|
||||
static inline bool vma_supports_mlock(const struct vm_area_struct *vma)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user