Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
mm, s390: constify mapping related test/getter functions
For improved const-correctness, we select test functions which call only
each other, functions that are already const-ified, or no further
functions at all.  They are therefore relatively trivial to const-ify,
and doing so provides a basis for const-ifying more functions further up
the call stack.

(Though seemingly unrelated, this also constifies the pointer parameter
of mmap_is_legacy() in arch/s390/mm/mmap.c, because a copy of that
function exists in mm/util.c.)

Link: https://lkml.kernel.org/r/20250901205021.3573313-7-max.kellermann@ionos.com
Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christian Zankel <chris@zankel.net>
Cc: David Rientjes <rientjes@google.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Bottomley <james.bottomley@HansenPartnership.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jocelyn Falempe <jfalempe@redhat.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: "Nysal Jan K.A" <nysal@linux.ibm.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Thomas Huth <thuth@redhat.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Wei Xu <weixugc@google.com>
Cc: Yuanchu Xie <yuanchu@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Committed by: Andrew Morton
Parent: 4680092f8c
Commit: 0bf25cfc9e
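Before the per-file diff, here is a minimal, self-contained sketch of the pattern the commit message describes. The struct and function names (region, region_is_shared, region_is_private) are invented for illustration and are not kernel code: once a leaf test function takes a const pointer, read-only callers above it can be constified as well, and the compiler then enforces that nothing in that chain writes through the pointer.

#include <stdbool.h>
#include <stdio.h>

struct region {                         /* hypothetical stand-in for a kernel struct */
        unsigned long flags;
};

#define REGION_SHARED   0x1UL

/* leaf test function: calls nothing further, so it is trivial to constify */
static bool region_is_shared(const struct region *r)
{
        return r->flags & REGION_SHARED;
}

/* a read-only caller one level up can now be constified too */
static bool region_is_private(const struct region *r)
{
        return !region_is_shared(r);
}

int main(void)
{
        struct region r = { .flags = REGION_SHARED };

        printf("private=%d\n", region_is_private(&r));
        return 0;
}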
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -27,7 +27,7 @@ static unsigned long stack_maxrandom_size(void)
         return STACK_RND_MASK << PAGE_SHIFT;
 }
 
-static inline int mmap_is_legacy(struct rlimit *rlim_stack)
+static inline int mmap_is_legacy(const struct rlimit *rlim_stack)
 {
         if (current->personality & ADDR_COMPAT_LAYOUT)
                 return 1;
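A hedged aside on the s390 hunk above: constifying the rlim_stack parameter is transparent to callers, because a non-const pointer converts implicitly to a const-qualified one. The simplified sketch below uses hypothetical names (rlim_like, layout_is_legacy), not the s390 code, to show that a caller holding a plain struct needs no change.

#include <stdio.h>

struct rlim_like {                      /* hypothetical stand-in for struct rlimit */
        unsigned long rlim_cur;
};

static int layout_is_legacy(const struct rlim_like *rlim_stack)
{
        return rlim_stack->rlim_cur == ~0UL;    /* read-only access */
}

int main(void)
{
        struct rlim_like stack = { .rlim_cur = ~0UL };

        /* the caller still passes a plain (non-const) pointer, unchanged */
        printf("legacy=%d\n", layout_is_legacy(&stack));
        return 0;
}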
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1002,7 +1002,7 @@ static inline bool vma_is_shmem(const struct vm_area_struct *vma) { return false
 static inline bool vma_is_anon_shmem(const struct vm_area_struct *vma) { return false; }
 #endif
 
-int vma_is_stack_for_current(struct vm_area_struct *vma);
+int vma_is_stack_for_current(const struct vm_area_struct *vma);
 
 /* flush_tlb_range() takes a vma, not a mm, and can care about flags */
 #define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
@@ -2617,7 +2617,7 @@ void folio_add_pin(struct folio *folio);
 
 int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
 int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
-                        struct task_struct *task, bool bypass_rlim);
+                        const struct task_struct *task, bool bypass_rlim);
 
 struct kvec;
 struct page *get_dump_page(unsigned long addr, int *locked);
@@ -3380,7 +3380,7 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
              avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
 
 /* mmap.c */
-extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
+extern int __vm_enough_memory(const struct mm_struct *mm, long pages, int cap_sys_admin);
 extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
 extern void exit_mmap(struct mm_struct *);
 bool mmap_read_lock_maybe_expand(struct mm_struct *mm, struct vm_area_struct *vma,
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -545,7 +545,7 @@ static inline void filemap_nr_thps_dec(struct address_space *mapping)
 #endif
 }
 
-struct address_space *folio_mapping(struct folio *);
+struct address_space *folio_mapping(const struct folio *folio);
 
 /**
  * folio_flush_mapping - Find the file mapping this folio belongs to.
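One reason the header hunks above accompany the mm/util.c hunks below: in C, a prototype and its definition must agree on the qualified parameter type, so the const has to be added to both in the same patch. A minimal sketch, using a hypothetical folio_like type rather than the kernel's struct folio:

#include <stdio.h>

struct folio_like {                     /* hypothetical stand-in, not the kernel's struct folio */
        unsigned long flags;
};

/* header-style prototype, now taking a const pointer */
int folio_like_mapped(const struct folio_like *folio);

/*
 * The definition must use the same qualified parameter type; if only one
 * side were changed, the compiler would reject the mismatch as
 * "conflicting types for 'folio_like_mapped'".
 */
int folio_like_mapped(const struct folio_like *folio)
{
        return (folio->flags & 0x1UL) != 0;
}

int main(void)
{
        struct folio_like folio = { .flags = 0x1UL };

        printf("mapped=%d\n", folio_like_mapped(&folio));
        return 0;
}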
--- a/mm/util.c
+++ b/mm/util.c
@@ -315,7 +315,7 @@ void *memdup_user_nul(const void __user *src, size_t len)
 EXPORT_SYMBOL(memdup_user_nul);
 
 /* Check if the vma is being used as a stack by this task */
-int vma_is_stack_for_current(struct vm_area_struct *vma)
+int vma_is_stack_for_current(const struct vm_area_struct *vma)
 {
         struct task_struct * __maybe_unused t = current;
 
@@ -410,7 +410,7 @@ unsigned long arch_mmap_rnd(void)
         return rnd << PAGE_SHIFT;
 }
 
-static int mmap_is_legacy(struct rlimit *rlim_stack)
+static int mmap_is_legacy(const struct rlimit *rlim_stack)
 {
         if (current->personality & ADDR_COMPAT_LAYOUT)
                 return 1;
@@ -504,7 +504,7 @@ EXPORT_SYMBOL_IF_KUNIT(arch_pick_mmap_layout);
  * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
  */
 int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
-                        struct task_struct *task, bool bypass_rlim)
+                        const struct task_struct *task, bool bypass_rlim)
 {
         unsigned long locked_vm, limit;
         int ret = 0;
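For the __account_locked_vm() change above, the point is that the task pointer is only read (for the RLIMIT_MEMLOCK check), so const is sufficient and documents that intent. Below is a simplified, self-contained sketch under that assumption; task_like, memlock_limit and account_locked are invented names, not the kernel's API.

#include <stdbool.h>
#include <stdio.h>

struct task_like {                      /* hypothetical stand-in for struct task_struct */
        unsigned long memlock_limit;    /* stand-in for RLIMIT_MEMLOCK */
};

static int account_locked(unsigned long *locked_vm, unsigned long pages,
                          bool inc, const struct task_like *task,
                          bool bypass_rlim)
{
        if (inc) {
                unsigned long new_total = *locked_vm + pages;

                /* the limit check only reads through the const task pointer */
                if (!bypass_rlim && new_total > task->memlock_limit)
                        return -1;      /* the kernel returns -ENOMEM here */
                *locked_vm = new_total;
        } else {
                *locked_vm -= pages;
        }
        return 0;
}

int main(void)
{
        struct task_like task = { .memlock_limit = 8 };
        unsigned long locked_vm = 0;

        printf("ret=%d locked=%lu\n",
               account_locked(&locked_vm, 4, true, &task, false), locked_vm);
        return 0;
}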
@@ -688,7 +688,7 @@ struct anon_vma *folio_anon_vma(const struct folio *folio)
  * You can call this for folios which aren't in the swap cache or page
  * cache and it will return NULL.
  */
-struct address_space *folio_mapping(struct folio *folio)
+struct address_space *folio_mapping(const struct folio *folio)
 {
         struct address_space *mapping;
 
@@ -926,7 +926,7 @@ EXPORT_SYMBOL_GPL(vm_memory_committed);
  * Note this is a helper function intended to be used by LSMs which
  * wish to use this logic.
  */
-int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
+int __vm_enough_memory(const struct mm_struct *mm, long pages, int cap_sys_admin)
 {
         long allowed;
         unsigned long bytes_failed;
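Finally, what the const qualifier on __vm_enough_memory()'s mm parameter buys: reads stay legal while an accidental write through the pointer becomes a compile error. A minimal sketch with a hypothetical mm_like type (not struct mm_struct):

#include <stdio.h>

struct mm_like {                        /* hypothetical stand-in for struct mm_struct */
        long total_vm;
};

static int enough_memory(const struct mm_like *mm, long pages, long limit)
{
        /* reading the counter through the const pointer is still allowed */
        if (mm->total_vm + pages > limit)
                return -1;

        /*
         * An accidental write would no longer build; gcc reports roughly:
         *      mm->total_vm += pages;
         *      error: assignment of member 'total_vm' in read-only object
         */
        return 0;
}

int main(void)
{
        struct mm_like mm = { .total_vm = 10 };

        printf("enough=%d\n", enough_memory(&mm, 5, 100) == 0);
        return 0;
}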