Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-12-27 14:41:22 -05:00)
This is a type-correctness cleanup to MMU/boot code that replaces several instances of void * and u64 with phys_addr_t (for addresses) and resource_size_t (for sizes) to emphasize that the code in question deals specifically with physical memory. The rationale is to improve clarity and readability in a few modules that handle both physical and virtual addresses, where differentiating the two is essential.

I have left u64 in place where an address may be either physical or virtual, where an address is exclusively virtual but subject to heavy pointer arithmetic, and in cases I may have overlooked. u64 is not necessarily the ideal type in those situations, but keeping it avoids changing existing semantics in this cleanup.

This patch provably has no effect at runtime: I have verified that the .text section of vmlinux is identical after this change.

Signed-off-by: Sam Edwards <CFSworks@gmail.com>
Signed-off-by: Will Deacon <will@kernel.org>
110 lines · 3.2 KiB · C
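For illustration, here is a hypothetical before/after sketch of the kind of substitution the commit message describes; the function and variable names are invented for this example and are not taken from the actual diff. The point is that the signature itself now documents which addresses are physical:

/* Before: "page" could be physical or virtual; the reader must guess. */
u64 alloc_pgtable_page(u64 *pool_next)
{
	u64 page = *pool_next;

	*pool_next += PAGE_SIZE;
	return page;
}

/* After: the types record that only physical addresses flow here. */
phys_addr_t alloc_pgtable_page(phys_addr_t *pool_next)
{
	phys_addr_t page = *pool_next;

	*pool_next += PAGE_SIZE;
	return page;
}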
// SPDX-License-Identifier: GPL-2.0-only
// Copyright 2023 Google LLC
// Author: Ard Biesheuvel <ardb@google.com>

#include <linux/types.h>
#include <linux/sizes.h>

#include <asm/memory.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

#include "pi.h"

/**
 * map_range - Map a contiguous range of physical pages into virtual memory
 *
 * @pte:		Address of physical pointer to array of pages to
 *			allocate page tables from
 * @start:		Virtual address of the start of the range
 * @end:		Virtual address of the end of the range (exclusive)
 * @pa:			Physical address of the start of the range
 * @prot:		Access permissions of the range
 * @level:		Translation level for the mapping
 * @tbl:		The level @level page table to create the mappings in
 * @may_use_cont:	Whether the use of the contiguous attribute is allowed
 * @va_offset:		Offset between a physical page and its current mapping
 *			in the VA space
 */
void __init map_range(phys_addr_t *pte, u64 start, u64 end, phys_addr_t pa,
		      pgprot_t prot, int level, pte_t *tbl, bool may_use_cont,
		      u64 va_offset)
{
	u64 cmask = (level == 3) ? CONT_PTE_SIZE - 1 : U64_MAX;
	ptdesc_t protval = pgprot_val(prot) & ~PTE_TYPE_MASK;
	int lshift = (3 - level) * PTDESC_TABLE_SHIFT;
	u64 lmask = (PAGE_SIZE << lshift) - 1;

	start &= PAGE_MASK;
	pa &= PAGE_MASK;

	/* Advance tbl to the entry that covers start */
	tbl += (start >> (lshift + PAGE_SHIFT)) % PTRS_PER_PTE;

	/*
	 * Set the right block/page bits for this level unless we are
	 * clearing the mapping
	 */
	if (protval)
		protval |= (level == 2) ? PMD_TYPE_SECT : PTE_TYPE_PAGE;

	while (start < end) {
		u64 next = min((start | lmask) + 1, PAGE_ALIGN(end));

		if (level < 2 || (level == 2 && (start | next | pa) & lmask)) {
			/*
			 * This chunk needs a finer grained mapping. Create a
			 * table mapping if necessary and recurse.
			 */
			if (pte_none(*tbl)) {
				*tbl = __pte(__phys_to_pte_val(*pte) |
					     PMD_TYPE_TABLE | PMD_TABLE_UXN);
				*pte += PTRS_PER_PTE * sizeof(pte_t);
			}
			map_range(pte, start, next, pa, prot, level + 1,
				  (pte_t *)(__pte_to_phys(*tbl) + va_offset),
				  may_use_cont, va_offset);
		} else {
			/*
			 * Start a contiguous range if start and pa are
			 * suitably aligned
			 */
			if (((start | pa) & cmask) == 0 && may_use_cont)
				protval |= PTE_CONT;

			/*
			 * Clear the contiguous attribute if the remaining
			 * range does not cover a contiguous block
			 */
			if ((end & ~cmask) <= start)
				protval &= ~PTE_CONT;

			/* Put down a block or page mapping */
			*tbl = __pte(__phys_to_pte_val(pa) | protval);
		}
		pa += next - start;
		start = next;
		tbl++;
	}
}
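To make the chunking arithmetic concrete, the following standalone userspace sketch reproduces just the lshift/lmask computation and the "next" stepping from the loop above. It assumes a 4 KiB translation granule; DEMO_PAGE_SHIFT and DEMO_TABLE_SHIFT are local stand-ins invented for this example, and the sample range is assumed page-aligned (so the PAGE_ALIGN() in the real code is elided):

#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for the kernel constants (4 KiB granule assumed) */
#define DEMO_PAGE_SHIFT		12
#define DEMO_PAGE_SIZE		(1ULL << DEMO_PAGE_SHIFT)
#define DEMO_TABLE_SHIFT	9	/* 512 entries per table */

int main(void)
{
	/* Coverage of one entry at each level, mirroring lshift/lmask */
	for (int level = 0; level <= 3; level++) {
		int lshift = (3 - level) * DEMO_TABLE_SHIFT;
		uint64_t lmask = (DEMO_PAGE_SIZE << lshift) - 1;

		printf("level %d: one entry covers 0x%llx bytes\n",
		       level, (unsigned long long)(lmask + 1));
	}

	/* Walk a 5 MiB range at level 2, as the while loop would */
	uint64_t start = 0x40100000, end = start + 5 * 1024 * 1024;
	uint64_t lmask = (DEMO_PAGE_SIZE << DEMO_TABLE_SHIFT) - 1; /* 2 MiB - 1 */

	while (start < end) {
		uint64_t next = (start | lmask) + 1;	/* next 2 MiB boundary */

		if (next > end)
			next = end;
		printf("chunk: 0x%llx - 0x%llx\n",
		       (unsigned long long)start, (unsigned long long)next);
		start = next;
	}
	return 0;
}

The walk prints a misaligned 1 MiB head chunk followed by two full 2 MiB chunks; in map_range itself, a chunk that is not block-aligned at level 2 takes the recursion branch instead, so that head would be mapped with level 3 page entries.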
asmlinkage phys_addr_t __init create_init_idmap(pgd_t *pg_dir, ptdesc_t clrmask)
{
	phys_addr_t ptep = (phys_addr_t)pg_dir + PAGE_SIZE; /* MMU is off */
	pgprot_t text_prot = PAGE_KERNEL_ROX;
	pgprot_t data_prot = PAGE_KERNEL;

	pgprot_val(text_prot) &= ~clrmask;
	pgprot_val(data_prot) &= ~clrmask;

	/* MMU is off; pointer casts to phys_addr_t are safe */
	map_range(&ptep, (u64)_stext, (u64)__initdata_begin,
		  (phys_addr_t)_stext, text_prot, IDMAP_ROOT_LEVEL,
		  (pte_t *)pg_dir, false, 0);
	map_range(&ptep, (u64)__initdata_begin, (u64)_end,
		  (phys_addr_t)__initdata_begin, data_prot, IDMAP_ROOT_LEVEL,
		  (pte_t *)pg_dir, false, 0);

	return ptep;
}
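Note that the page-table pool starts one page past pg_dir (the root table occupies that first page) and the return value is the physical address just past the last table page consumed, so a caller can tell how much of the pool was used. The asmlinkage annotation suggests the real caller is early assembly; the sketch below is a purely hypothetical C caller showing one way the return value could be checked against a fixed budget. INIT_IDMAP_PAGES and early_idmap_example are invented names for this illustration, not kernel symbols:

/* Hypothetical caller sketch; assumes an invented INIT_IDMAP_PAGES budget */
#define INIT_IDMAP_PAGES	8

static void __init early_idmap_example(pgd_t *pg_dir)
{
	phys_addr_t pool_end = create_init_idmap(pg_dir, 0);
	phys_addr_t budget_end = (phys_addr_t)pg_dir +
				 INIT_IDMAP_PAGES * PAGE_SIZE;

	BUG_ON(pool_end > budget_end);	/* page-table pool overflowed */
}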