mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-04-02 09:21:19 -04:00
selftests/mm: fix FORCE_READ to read input value correctly
FORCE_READ() converts input value x to its pointer type then reads from
address x. This is wrong. If x is a non-pointer, it would be caught
easily. But all FORCE_READ() callers are trying to read from a pointer
and FORCE_READ() basically reads a pointer to a pointer instead of the
original typed pointer. Almost no access violation was found, except the
one from split_huge_page_test.
Fix it by implementing a simplified READ_ONCE() instead.
Link: https://lkml.kernel.org/r/20250805175140.241656-1-ziy@nvidia.com
Fixes: 3f6bfd4789 ("selftests/mm: reuse FORCE_READ to replace "asm volatile("" : "+r" (XXX));"")
Signed-off-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: wang lian <lianux.mm@gmail.com>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Jann Horn <jannh@google.com>
Cc: Kairui Song <ryncsn@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: SeongJae Park <sj@kernel.org>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
@@ -1554,8 +1554,8 @@ static void run_with_zeropage(non_anon_test_fn fn, const char *desc)
 	}
 
 	/* Read from the page to populate the shared zeropage. */
-	FORCE_READ(mem);
-	FORCE_READ(smem);
+	FORCE_READ(*mem);
+	FORCE_READ(*smem);
 
 	fn(mem, smem, pagesize);
 munmap:
@@ -145,7 +145,7 @@ static bool try_access_buf(char *ptr, bool write)
 		if (write)
 			*ptr = 'x';
 		else
-			FORCE_READ(ptr);
+			FORCE_READ(*ptr);
 	}
 
 	signal_jump_set = false;
@@ -50,8 +50,10 @@ void read_fault_pages(void *addr, unsigned long nr_pages)
 	unsigned long i;
 
 	for (i = 0; i < nr_pages; i++) {
+		unsigned long *addr2 =
+			((unsigned long *)(addr + (i * huge_page_size)));
 		/* Prevent the compiler from optimizing out the entire loop: */
-		FORCE_READ(((unsigned long *)(addr + (i * huge_page_size))));
+		FORCE_READ(*addr2);
 	}
 }
@@ -110,7 +110,7 @@ void *access_mem(void *ptr)
 		 * the memory access actually happens and prevents the compiler
 		 * from optimizing away this entire loop.
 		 */
-		FORCE_READ((uint64_t *)ptr);
+		FORCE_READ(*(uint64_t *)ptr);
 	}
 
 	return NULL;
@@ -1525,7 +1525,7 @@ void zeropfn_tests(void)
 
 	ret = madvise(mem, hpage_size, MADV_HUGEPAGE);
 	if (!ret) {
-		FORCE_READ(mem);
+		FORCE_READ(*mem);
 
 		ret = pagemap_ioctl(mem, hpage_size, &vec, 1, 0,
 				    0, PAGE_IS_PFNZERO, 0, 0, PAGE_IS_PFNZERO);
@@ -439,8 +439,11 @@ int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd,
 	}
 	madvise(*addr, fd_size, MADV_HUGEPAGE);
 
-	for (size_t i = 0; i < fd_size; i++)
-		FORCE_READ((*addr + i));
+	for (size_t i = 0; i < fd_size; i++) {
+		char *addr2 = *addr + i;
+
+		FORCE_READ(*addr2);
+	}
 
 	if (!check_huge_file(*addr, fd_size / pmd_pagesize, pmd_pagesize)) {
 		ksft_print_msg("No large pagecache folio generated, please provide a filesystem supporting large folio\n");
@@ -23,7 +23,7 @@
  * anything with it in order to trigger a read page fault. We therefore must use
  * volatile to stop the compiler from optimising this away.
  */
-#define FORCE_READ(x) (*(volatile typeof(x) *)x)
+#define FORCE_READ(x) (*(const volatile typeof(x) *)&(x))
 
 extern unsigned int __page_size;
 extern unsigned int __page_shift;
||||
Reference in New Issue
Block a user