Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2026-05-16 06:41:39 -04:00)
mm/vma: convert as much as we can in mm/vma.c to vma_flags_t
Now that we have established a good foundation for the vm_flags_t to vma_flags_t conversion, update mm/vma.c to utilise vma_flags_t wherever possible. We are able to convert VM_STARTGAP_FLAGS entirely, as it is only used in mm/vma.c. To account for the fact that we can't use VM_NONE, and to make life easier, place its definition within the existing #ifdef blocks, which is cleaner. The remaining changes are generally mechanical. Also update the VMA tests to reflect the changes.

Link: https://lkml.kernel.org/r/5fdeaf8af9a12c2a5d68497495f52fa627d05a5b.1774034900.git.ljs@kernel.org
Signed-off-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
Acked-by: Vlastimil Babka (SUSE) <vbabka@kernel.org>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexandre Ghiti <alex@ghiti.fr>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Cc: "Borislav Petkov (AMD)" <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: David Hildenbrand <david@kernel.org>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: Kees Cook <kees@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Ondrej Mosnacek <omosnace@redhat.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Moore <paul@paul-moore.com>
Cc: Pedro Falcato <pfalcato@suse.de>
Cc: Richard Weinberger <richard@nod.at>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stephen Smalley <stephen.smalley.work@gmail.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vineet Gupta <vgupta@kernel.org>
Cc: WANG Xuerui <kernel@xen0n.name>
Cc: Will Deacon <will@kernel.org>
Cc: xu xin <xu.xin16@zte.com.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Committed by: Andrew Morton
Parent: a6f14fb593
Commit: 769669bd9c
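The hunks below replace open-coded vm_flags bit arithmetic (e.g. vma->vm_flags & VM_LOCKED) with the vma_flags_t helpers visible in the diff (vma_test(), vma_flags_test_any_mask(), mk_vma_flags(), and friends), so callers name bit indices such as VMA_LOCKED_BIT or prebuilt masks such as VMA_STARTGAP_FLAGS instead of hand-building masks. As a rough, self-contained user-space sketch of that pattern only: the helper names mirror the diff, but the storage layout and bit numbers here are placeholder assumptions, not the kernel's definitions.

/*
 * Minimal user-space sketch of the conversion pattern, NOT kernel code.
 * Helper names follow the diff below; the flag layout and bit values are
 * placeholder assumptions chosen only to make the example self-contained.
 */
#include <stdarg.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum { VMA_GROWSDOWN_BIT = 0, VMA_LOCKED_BIT = 1, VMA_SHADOW_STACK_BIT = 2 };

typedef struct { unsigned long long bits; } vma_flags_t;	/* opaque flag word */

static vma_flags_t mk_vma_flags_n(size_t n, ...)
{
	vma_flags_t flags = { 0 };
	va_list ap;

	va_start(ap, n);
	while (n--)
		flags.bits |= 1ULL << va_arg(ap, int);
	va_end(ap);
	return flags;
}

/* Build a mask from a list of bit indices, e.g. mk_vma_flags(VMA_GROWSDOWN_BIT). */
#define mk_vma_flags(...) \
	mk_vma_flags_n(sizeof((int[]){ __VA_ARGS__ }) / sizeof(int), __VA_ARGS__)

/* Old style: vma->vm_flags & VM_LOCKED.  New style: test a named bit. */
static bool vma_flags_test(const vma_flags_t *flags, int bit)
{
	return flags->bits & (1ULL << bit);
}

/* Old style: vm_flags & VM_STARTGAP_FLAGS.  New style: test against a prebuilt mask. */
static bool vma_flags_test_any_mask(const vma_flags_t *flags, vma_flags_t mask)
{
	return flags->bits & mask.bits;
}

int main(void)
{
	vma_flags_t flags = mk_vma_flags(VMA_GROWSDOWN_BIT, VMA_LOCKED_BIT);
	vma_flags_t startgap = mk_vma_flags(VMA_GROWSDOWN_BIT, VMA_SHADOW_STACK_BIT);

	printf("locked:   %d\n", vma_flags_test(&flags, VMA_LOCKED_BIT));
	printf("startgap: %d\n", vma_flags_test_any_mask(&flags, startgap));
	return 0;
}

The design point visible in the diff is simply that callers stop open-coding bitwise expressions and instead go through named-bit helpers, which is what makes most of the mm/vma.c changes mechanical.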
@@ -463,8 +463,10 @@ enum {
#if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_ARM64_GCS) || \
defined(CONFIG_RISCV_USER_CFI)
#define VM_SHADOW_STACK INIT_VM_FLAG(SHADOW_STACK)
#define VMA_STARTGAP_FLAGS mk_vma_flags(VMA_GROWSDOWN_BIT, VMA_SHADOW_STACK_BIT)
#else
#define VM_SHADOW_STACK VM_NONE
#define VMA_STARTGAP_FLAGS mk_vma_flags(VMA_GROWSDOWN_BIT)
#endif
#if defined(CONFIG_PPC64)
#define VM_SAO INIT_VM_FLAG(SAO)
@@ -539,8 +541,6 @@ enum {
/* Temporary until VMA flags conversion complete. */
#define VM_STACK_FLAGS vma_flags_to_legacy(VMA_STACK_FLAGS)

#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)

#ifdef CONFIG_MSEAL_SYSTEM_MAPPINGS
#define VM_SEALED_SYSMAP VM_SEALED
#else
@@ -584,6 +584,8 @@ enum {
/* This mask represents all the VMA flag bits used by mlock */
#define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT)

#define VMA_LOCKED_MASK mk_vma_flags(VMA_LOCKED_BIT, VMA_LOCKONFAULT_BIT)

/* These flags can be updated atomically via VMA/mmap read lock. */
#define VM_ATOMIC_SET_ALLOWED VM_MAYBE_GUARD
mm/vma.c | 89
@@ -185,7 +185,7 @@ static void init_multi_vma_prep(struct vma_prepare *vp,
}

/*
* Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
* Return true if we can merge this (vma_flags,anon_vma,file,vm_pgoff)
* in front of (at a lower virtual address and file offset than) the vma.
*
* We cannot merge two vmas if they have differently assigned (non-NULL)
@@ -211,7 +211,7 @@ static bool can_vma_merge_before(struct vma_merge_struct *vmg)
}

/*
* Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
* Return true if we can merge this (vma_flags,anon_vma,file,vm_pgoff)
* beyond (at a higher virtual address and file offset than) the vma.
*
* We cannot merge two vmas if they have differently assigned (non-NULL)
@@ -850,7 +850,8 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
* furthermost left or right side of the VMA, then we have no chance of
* merging and should abort.
*/
if (vmg->vm_flags & VM_SPECIAL || (!left_side && !right_side))
if (vma_flags_test_any_mask(&vmg->vma_flags, VMA_SPECIAL_FLAGS) ||
(!left_side && !right_side))
return NULL;

if (left_side)
@@ -1072,7 +1073,8 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
vmg->state = VMA_MERGE_NOMERGE;

/* Special VMAs are unmergeable, also if no prev/next. */
if ((vmg->vm_flags & VM_SPECIAL) || (!prev && !next))
if (vma_flags_test_any_mask(&vmg->vma_flags, VMA_SPECIAL_FLAGS) ||
(!prev && !next))
return NULL;

can_merge_left = can_vma_merge_left(vmg);
@@ -1459,17 +1461,17 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
nrpages = vma_pages(next);

vms->nr_pages += nrpages;
if (next->vm_flags & VM_LOCKED)
if (vma_test(next, VMA_LOCKED_BIT))
vms->locked_vm += nrpages;

if (next->vm_flags & VM_ACCOUNT)
if (vma_test(next, VMA_ACCOUNT_BIT))
vms->nr_accounted += nrpages;

if (is_exec_mapping(next->vm_flags))
vms->exec_vm += nrpages;
else if (is_stack_mapping(next->vm_flags))
vms->stack_vm += nrpages;
else if (is_data_mapping(next->vm_flags))
else if (is_data_mapping_vma_flags(&next->flags))
vms->data_vm += nrpages;

if (vms->uf) {
@@ -2065,14 +2067,13 @@ static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)

static bool vma_is_shared_writable(struct vm_area_struct *vma)
{
return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
(VM_WRITE | VM_SHARED);
return vma_test_all(vma, VMA_WRITE_BIT, VMA_SHARED_BIT);
}

static bool vma_fs_can_writeback(struct vm_area_struct *vma)
{
/* No managed pages to writeback. */
if (vma->vm_flags & VM_PFNMAP)
if (vma_test(vma, VMA_PFNMAP_BIT))
return false;

return vma->vm_file && vma->vm_file->f_mapping &&
@@ -2338,8 +2339,11 @@ void mm_drop_all_locks(struct mm_struct *mm)
* We account for memory if it's a private writeable mapping,
* not hugepages and VM_NORESERVE wasn't set.
*/
static bool accountable_mapping(struct file *file, vm_flags_t vm_flags)
static bool accountable_mapping(struct mmap_state *map)
{
const struct file *file = map->file;
vma_flags_t mask;

/*
* hugetlb has its own accounting separate from the core VM
* VM_HUGETLB may not be set yet so we cannot check for that flag.
@@ -2347,7 +2351,9 @@ static bool accountable_mapping(struct file *file, vm_flags_t vm_flags)
if (file && is_file_hugepages(file))
return false;

return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
mask = vma_flags_and(&map->vma_flags, VMA_NORESERVE_BIT, VMA_SHARED_BIT,
VMA_WRITE_BIT);
return vma_flags_same(&mask, VMA_WRITE_BIT);
}

/*
@@ -2450,7 +2456,7 @@ static int __mmap_setup(struct mmap_state *map, struct vm_area_desc *desc,
return -ENOMEM;

/* Private writable mapping: check memory availability. */
if (accountable_mapping(map->file, map->vm_flags)) {
if (accountable_mapping(map)) {
map->charged = map->pglen;
map->charged -= vms->nr_accounted;
if (map->charged) {
@@ -2460,7 +2466,7 @@ static int __mmap_setup(struct mmap_state *map, struct vm_area_desc *desc,
}

vms->nr_accounted = 0;
map->vm_flags |= VM_ACCOUNT;
vma_flags_set(&map->vma_flags, VMA_ACCOUNT_BIT);
}

/*
@@ -2508,12 +2514,12 @@ static int __mmap_new_file_vma(struct mmap_state *map,
* Drivers should not permit writability when previously it was
* disallowed.
*/
VM_WARN_ON_ONCE(map->vm_flags != vma->vm_flags &&
!(map->vm_flags & VM_MAYWRITE) &&
(vma->vm_flags & VM_MAYWRITE));
VM_WARN_ON_ONCE(!vma_flags_same_pair(&map->vma_flags, &vma->flags) &&
!vma_flags_test(&map->vma_flags, VMA_MAYWRITE_BIT) &&
vma_test(vma, VMA_MAYWRITE_BIT));

map->file = vma->vm_file;
map->vm_flags = vma->vm_flags;
map->vma_flags = vma->flags;

return 0;
}
@@ -2544,7 +2550,7 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)

vma_iter_config(vmi, map->addr, map->end);
vma_set_range(vma, map->addr, map->end, map->pgoff);
vm_flags_init(vma, map->vm_flags);
vma->flags = map->vma_flags;
vma->vm_page_prot = map->page_prot;

if (vma_iter_prealloc(vmi, vma)) {
@@ -2554,7 +2560,7 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)

if (map->file)
error = __mmap_new_file_vma(map, vma);
else if (map->vm_flags & VM_SHARED)
else if (vma_flags_test(&map->vma_flags, VMA_SHARED_BIT))
error = shmem_zero_setup(vma);
else
vma_set_anonymous(vma);
@@ -2564,7 +2570,7 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)

if (!map->check_ksm_early) {
update_ksm_flags(map);
vm_flags_init(vma, map->vm_flags);
vma->flags = map->vma_flags;
}

#ifdef CONFIG_SPARC64
@@ -2604,7 +2610,6 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
static void __mmap_complete(struct mmap_state *map, struct vm_area_struct *vma)
{
struct mm_struct *mm = map->mm;
vm_flags_t vm_flags = vma->vm_flags;

perf_event_mmap(vma);

@@ -2612,9 +2617,9 @@ static void __mmap_complete(struct mmap_state *map, struct vm_area_struct *vma)
vms_complete_munmap_vmas(&map->vms, &map->mas_detach);

vm_stat_account(mm, vma->vm_flags, map->pglen);
if (vm_flags & VM_LOCKED) {
if (vma_test(vma, VMA_LOCKED_BIT)) {
if (!vma_supports_mlock(vma))
vm_flags_clear(vma, VM_LOCKED_MASK);
vma_clear_flags_mask(vma, VMA_LOCKED_MASK);
else
mm->locked_vm += map->pglen;
}
@@ -2630,7 +2635,7 @@ static void __mmap_complete(struct mmap_state *map, struct vm_area_struct *vma)
* a completely new data area).
*/
if (pgtable_supports_soft_dirty())
vm_flags_set(vma, VM_SOFTDIRTY);
vma_set_flags(vma, VMA_SOFTDIRTY_BIT);

vma_set_page_prot(vma);
}
@@ -2993,7 +2998,8 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
gap = vma_iter_addr(&vmi) + info->start_gap;
gap += (info->align_offset - gap) & info->align_mask;
tmp = vma_next(&vmi);
if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
/* Avoid prev check if possible */
if (tmp && vma_test_any_mask(tmp, VMA_STARTGAP_FLAGS)) {
if (vm_start_gap(tmp) < gap + length - 1) {
low_limit = tmp->vm_end;
vma_iter_reset(&vmi);
@@ -3045,7 +3051,8 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
gap -= (gap - info->align_offset) & info->align_mask;
gap_end = vma_iter_end(&vmi);
tmp = vma_next(&vmi);
if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
/* Avoid prev check if possible */
if (tmp && vma_test_any_mask(tmp, VMA_STARTGAP_FLAGS)) {
if (vm_start_gap(tmp) < gap_end) {
high_limit = vm_start_gap(tmp);
vma_iter_reset(&vmi);
@@ -3083,12 +3090,16 @@ static int acct_stack_growth(struct vm_area_struct *vma,
return -ENOMEM;

/* mlock limit tests */
if (!mlock_future_ok(mm, vma->vm_flags & VM_LOCKED, grow << PAGE_SHIFT))
if (!mlock_future_ok(mm, vma_test(vma, VMA_LOCKED_BIT),
grow << PAGE_SHIFT))
return -ENOMEM;

/* Check to ensure the stack will not grow into a hugetlb-only region */
new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
vma->vm_end - size;
new_start = vma->vm_end - size;
#ifdef CONFIG_STACK_GROWSUP
if (vma_test(vma, VMA_GROWSUP_BIT))
new_start = vma->vm_start;
#endif
if (is_hugepage_only_range(vma->vm_mm, new_start, size))
return -EFAULT;

@@ -3102,7 +3113,7 @@ static int acct_stack_growth(struct vm_area_struct *vma,
return 0;
}

#if defined(CONFIG_STACK_GROWSUP)
#ifdef CONFIG_STACK_GROWSUP
/*
* PA-RISC uses this for its stack.
* vma is the last one with address > vma->vm_end. Have to extend vma.
@@ -3115,7 +3126,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
int error = 0;
VMA_ITERATOR(vmi, mm, vma->vm_start);

if (!(vma->vm_flags & VM_GROWSUP))
if (!vma_test(vma, VMA_GROWSUP_BIT))
return -EFAULT;

mmap_assert_write_locked(mm);
@@ -3135,7 +3146,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)

next = find_vma_intersection(mm, vma->vm_end, gap_addr);
if (next && vma_is_accessible(next)) {
if (!(next->vm_flags & VM_GROWSUP))
if (!vma_test(next, VMA_GROWSUP_BIT))
return -ENOMEM;
/* Check that both stack segments have the same anon_vma? */
}
@@ -3169,7 +3180,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
error = acct_stack_growth(vma, size, grow);
if (!error) {
if (vma->vm_flags & VM_LOCKED)
if (vma_test(vma, VMA_LOCKED_BIT))
mm->locked_vm += grow;
vm_stat_account(mm, vma->vm_flags, grow);
anon_vma_interval_tree_pre_update_vma(vma);
@@ -3200,7 +3211,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
int error = 0;
VMA_ITERATOR(vmi, mm, vma->vm_start);

if (!(vma->vm_flags & VM_GROWSDOWN))
if (!vma_test(vma, VMA_GROWSDOWN_BIT))
return -EFAULT;

mmap_assert_write_locked(mm);
@@ -3213,7 +3224,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
prev = vma_prev(&vmi);
/* Check that both stack segments have the same anon_vma? */
if (prev) {
if (!(prev->vm_flags & VM_GROWSDOWN) &&
if (!vma_test(prev, VMA_GROWSDOWN_BIT) &&
vma_is_accessible(prev) &&
(address - prev->vm_end < stack_guard_gap))
return -ENOMEM;
@@ -3248,7 +3259,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
if (grow <= vma->vm_pgoff) {
error = acct_stack_growth(vma, size, grow);
if (!error) {
if (vma->vm_flags & VM_LOCKED)
if (vma_test(vma, VMA_LOCKED_BIT))
mm->locked_vm += grow;
vm_stat_account(mm, vma->vm_flags, grow);
anon_vma_interval_tree_pre_update_vma(vma);
@@ -3297,7 +3308,7 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
if (find_vma_intersection(mm, vma->vm_start, vma->vm_end))
return -ENOMEM;

if ((vma->vm_flags & VM_ACCOUNT) &&
if (vma_test(vma, VMA_ACCOUNT_BIT) &&
security_vm_enough_memory_mm(mm, charged))
return -ENOMEM;

@@ -3319,7 +3330,7 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
}

if (vma_link(mm, vma)) {
if (vma->vm_flags & VM_ACCOUNT)
if (vma_test(vma, VMA_ACCOUNT_BIT))
vm_unacct_memory(charged);
return -ENOMEM;
}
@@ -267,8 +267,10 @@ enum {
#endif /* CONFIG_ARCH_HAS_PKEYS */
#if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_ARM64_GCS)
#define VM_SHADOW_STACK INIT_VM_FLAG(SHADOW_STACK)
#define VMA_STARTGAP_FLAGS mk_vma_flags(VMA_GROWSDOWN_BIT, VMA_SHADOW_STACK_BIT)
#else
#define VM_SHADOW_STACK VM_NONE
#define VMA_STARTGAP_FLAGS mk_vma_flags(VMA_GROWSDOWN_BIT)
#endif
#if defined(CONFIG_PPC64)
#define VM_SAO INIT_VM_FLAG(SAO)
@@ -366,6 +368,8 @@ enum {
/* This mask represents all the VMA flag bits used by mlock */
#define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT)

#define VMA_LOCKED_MASK mk_vma_flags(VMA_LOCKED_BIT, VMA_LOCKONFAULT_BIT)

#define RLIMIT_STACK 3 /* max stack size */
#define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */

@@ -229,7 +229,7 @@ static inline bool signal_pending(void *p)
return false;
}

static inline bool is_file_hugepages(struct file *file)
static inline bool is_file_hugepages(const struct file *file)
{
return false;
}