Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Merge branch 'for-next/lpa2-prep' into for-next/core
* for-next/lpa2-prep:
  arm64: mm: get rid of kimage_vaddr global variable
  arm64: mm: Take potential load offset into account when KASLR is off
  arm64: kernel: Disable latent_entropy GCC plugin in early C runtime
  arm64: Add ARM64_HAS_LPA2 CPU capability
  arm64/mm: Add FEAT_LPA2 specific ID_AA64MMFR0.TGRAN[2]
  arm64/mm: Update tlb invalidation routines for FEAT_LPA2
  arm64/mm: Add lpa2_is_enabled() kvm_lpa2_is_enabled() stubs
  arm64/mm: Modify range-based tlbi to decrement scale
@@ -820,6 +820,11 @@ static inline bool system_supports_tlb_range(void)
 	return alternative_has_cap_unlikely(ARM64_HAS_TLB_RANGE);
 }
 
+static inline bool system_supports_lpa2(void)
+{
+	return cpus_have_final_cap(ARM64_HAS_LPA2);
+}
+
 int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
 bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
 
@@ -37,27 +37,12 @@
 
 /*
- * If KASLR is enabled, then an offset K is added to the kernel address
- * space. The bottom 21 bits of this offset are zero to guarantee 2MB
- * alignment for PA and VA.
- *
- * For each pagetable level of the swapper, we know that the shift will
- * be larger than 21 (for the 4KB granule case we use section maps thus
- * the smallest shift is actually 30) thus there is the possibility that
- * KASLR can increase the number of pagetable entries by 1, so we make
- * room for this extra entry.
- *
- * Note KASLR cannot increase the number of required entries for a level
- * by more than one because it increments both the virtual start and end
- * addresses equally (the extra entry comes from the case where the end
- * address is just pushed over a boundary and the start address isn't).
+ * A relocatable kernel may execute from an address that differs from the one at
+ * which it was linked. In the worst case, its runtime placement may intersect
+ * with two adjacent PGDIR entries, which means that an additional page table
+ * may be needed at each subordinate level.
 */
-
-#ifdef CONFIG_RANDOMIZE_BASE
-#define EARLY_KASLR	(1)
-#else
-#define EARLY_KASLR	(0)
-#endif
+#define EXTRA_PAGE	__is_defined(CONFIG_RELOCATABLE)
 
 #define SPAN_NR_ENTRIES(vstart, vend, shift) \
 	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1)
@@ -83,7 +68,7 @@
 	+ EARLY_PGDS((vstart), (vend), add)	/* each PGDIR needs a next level page table */	\
 	+ EARLY_PUDS((vstart), (vend), add)	/* each PUD needs a next level page table */	\
 	+ EARLY_PMDS((vstart), (vend), add))	/* each PMD needs a next level page table */
-#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end, EARLY_KASLR))
+#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end, EXTRA_PAGE))
 
 /* the initial ID map may need two extra pages if it needs to be extended */
 #if VA_BITS < 48
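The arithmetic behind the comment above is easy to check in isolation. The following user-space sketch (hypothetical values, not kernel code) evaluates the SPAN_NR_ENTRIES() formula for the same fixed-size region at two placements, showing how moving the image can push its end across a 1GB (level shift 30) boundary and require one extra entry at that level, which is what EXTRA_PAGE budgets for:

    #include <stdio.h>

    /* Same formula as the kernel macro: number of level entries spanned */
    #define SPAN_NR_ENTRIES(vstart, vend, shift) \
            ((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1)

    int main(void)
    {
            unsigned long long size  = 46ULL << 20;                 /* 46MB image, example only */
            unsigned long long shift = 30;                          /* 1GB per entry (4K, PUD level) */
            unsigned long long link  = 0xffff800080000000ULL;       /* example link address */
            unsigned long long moved = link + (1022ULL << 20);      /* relocated 1022MB higher */

            printf("entries at link address : %llu\n",
                   SPAN_NR_ENTRIES(link, link + size, shift));      /* prints 1 */
            printf("entries after relocation: %llu\n",
                   SPAN_NR_ENTRIES(moved, moved + size, shift));    /* prints 2 */
            return 0;
    }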
@@ -25,6 +25,8 @@
 #define KVM_PGTABLE_MIN_BLOCK_LEVEL	2U
 #endif
 
+#define kvm_lpa2_is_enabled()		false
+
 static inline u64 kvm_get_parange(u64 mmfr0)
 {
 	u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
@@ -182,6 +182,7 @@
 #include <linux/types.h>
 #include <asm/boot.h>
 #include <asm/bug.h>
+#include <asm/sections.h>
 
 #if VA_BITS > 48
 extern u64 vabits_actual;
@@ -193,15 +194,12 @@ extern s64 memstart_addr;
 /* PHYS_OFFSET - the physical address of the start of memory. */
 #define PHYS_OFFSET ({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
 
-/* the virtual base of the kernel image */
-extern u64			kimage_vaddr;
-
 /* the offset between the kernel virtual and physical mappings */
 extern u64			kimage_voffset;
 
 static inline unsigned long kaslr_offset(void)
 {
-	return kimage_vaddr - KIMAGE_VADDR;
+	return (u64)&_text - KIMAGE_VADDR;
 }
 
 #ifdef CONFIG_RANDOMIZE_BASE
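With the kimage_vaddr variable gone, the randomization offset is recovered purely from the linker symbol _text: the runtime virtual address of the image minus the address it was linked at. A user-space illustration of the same subtraction with made-up addresses (hypothetical values, not a kernel interface):

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical numbers, for illustration only. */
            unsigned long long kimage_vaddr_link = 0xffff800080000000ULL; /* KIMAGE_VADDR (link time) */
            unsigned long long text_runtime      = 0xffff800091a00000ULL; /* &_text after relocation */

            /* Same computation as the new kaslr_offset(): runtime minus link address. */
            unsigned long long offset = text_runtime - kimage_vaddr_link;

            printf("kaslr_offset = 0x%llx\n", offset);   /* 0x11a00000, 2MB aligned */
            return 0;
    }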
@@ -71,6 +71,8 @@ extern bool arm64_use_ng_mappings;
 #define PTE_MAYBE_NG		(arm64_use_ng_mappings ? PTE_NG : 0)
 #define PMD_MAYBE_NG		(arm64_use_ng_mappings ? PMD_SECT_NG : 0)
 
+#define lpa2_is_enabled()	false
+
 /*
  * If we have userspace only BTI we don't want to mark kernel pages
  * guarded even if the system does support BTI.
@@ -871,10 +871,12 @@
 
 /* id_aa64mmfr0 */
 #define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN		0x0
+#define ID_AA64MMFR0_EL1_TGRAN4_LPA2			ID_AA64MMFR0_EL1_TGRAN4_52_BIT
 #define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX		0x7
 #define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN		0x0
 #define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX		0x7
 #define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN		0x1
+#define ID_AA64MMFR0_EL1_TGRAN16_LPA2			ID_AA64MMFR0_EL1_TGRAN16_52_BIT
 #define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX		0xf
 
 #define ARM64_MIN_PARANGE_BITS		32
@@ -882,6 +884,7 @@
 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT	0x0
 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE		0x1
 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN		0x2
+#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2		0x3
 #define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX		0x7
 
 #ifdef CONFIG_ARM64_PA_BITS_52
@@ -892,11 +895,13 @@
 
 #if defined(CONFIG_ARM64_4K_PAGES)
 #define ID_AA64MMFR0_EL1_TGRAN_SHIFT		ID_AA64MMFR0_EL1_TGRAN4_SHIFT
+#define ID_AA64MMFR0_EL1_TGRAN_LPA2		ID_AA64MMFR0_EL1_TGRAN4_52_BIT
 #define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN	ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN
 #define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX	ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX
 #define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT		ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT
 #elif defined(CONFIG_ARM64_16K_PAGES)
 #define ID_AA64MMFR0_EL1_TGRAN_SHIFT		ID_AA64MMFR0_EL1_TGRAN16_SHIFT
+#define ID_AA64MMFR0_EL1_TGRAN_LPA2		ID_AA64MMFR0_EL1_TGRAN16_52_BIT
 #define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN	ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN
 #define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX	ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX
 #define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT		ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT
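The new TGRAN_2_SUPPORTED_LPA2 value slots into the existing encoding of the stage-2 granule fields. A small, purely illustrative decoder (hypothetical helper, not a kernel API) spells out how the values defined above are interpreted:

    #include <stdio.h>

    /* Interpretation of a 4-bit TGRANx_2 field value, matching the
     * ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_* constants added above. */
    static const char *tgran_2_meaning(unsigned int field)
    {
            switch (field) {
            case 0x0: return "follow the stage-1 TGRANx field";        /* ..._DEFAULT */
            case 0x1: return "granule not supported at stage 2";        /* ..._NONE    */
            case 0x2: return "granule supported at stage 2";            /* ..._MIN     */
            case 0x3: return "granule supported at stage 2 with LPA2";  /* ..._LPA2    */
            default:  return field <= 0x7 ? "other/reserved value" : "out of range";
            }
    }

    int main(void)
    {
            for (unsigned int f = 0; f <= 3; f++)
                    printf("TGRAN_2 = 0x%x -> %s\n", f, tgran_2_meaning(f));
            return 0;
    }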
@@ -22,15 +22,15 @@ static void tlb_flush(struct mmu_gather *tlb);
 #include <asm-generic/tlb.h>
 
 /*
- * get the tlbi levels in arm64.  Default value is 0 if more than one
- * of cleared_* is set or neither is set.
- * Arm64 doesn't support p4ds now.
+ * get the tlbi levels in arm64.  Default value is TLBI_TTL_UNKNOWN if more than
+ * one of cleared_* is set or neither is set - this elides the level hinting to
+ * the hardware.
 */
 static inline int tlb_get_level(struct mmu_gather *tlb)
 {
 	/* The TTL field is only valid for the leaf entry. */
 	if (tlb->freed_tables)
-		return 0;
+		return TLBI_TTL_UNKNOWN;
 
 	if (tlb->cleared_ptes && !(tlb->cleared_pmds ||
 				   tlb->cleared_puds ||
@@ -47,7 +47,12 @@ static inline int tlb_get_level(struct mmu_gather *tlb)
 				   tlb->cleared_p4ds))
 		return 1;
 
-	return 0;
+	if (tlb->cleared_p4ds && !(tlb->cleared_ptes ||
+				   tlb->cleared_pmds ||
+				   tlb->cleared_puds))
+		return 0;
+
+	return TLBI_TTL_UNKNOWN;
 }
 
 static inline void tlb_flush(struct mmu_gather *tlb)
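The value returned here is the TTL hint for the leaf level that was actually touched: cleared_ptes maps to level 3, cleared_pmds to 2, cleared_puds to 1, and (new with this series) cleared_p4ds to 0, with TLBI_TTL_UNKNOWN whenever the information is ambiguous. A self-contained sketch of the same decision logic, using a hypothetical stand-in for mmu_gather:

    #include <stdio.h>
    #include <stdbool.h>
    #include <limits.h>

    #define TLBI_TTL_UNKNOWN INT_MAX          /* same sentinel as the kernel header */

    /* Minimal stand-in for the relevant mmu_gather bits (illustration only). */
    struct gather_example {
            bool freed_tables;
            bool cleared_ptes, cleared_pmds, cleared_puds, cleared_p4ds;
    };

    static int get_level(const struct gather_example *g)
    {
            if (g->freed_tables)
                    return TLBI_TTL_UNKNOWN;  /* hint only valid for leaf entries */
            if (g->cleared_ptes && !(g->cleared_pmds || g->cleared_puds || g->cleared_p4ds))
                    return 3;
            if (g->cleared_pmds && !(g->cleared_ptes || g->cleared_puds || g->cleared_p4ds))
                    return 2;
            if (g->cleared_puds && !(g->cleared_ptes || g->cleared_pmds || g->cleared_p4ds))
                    return 1;
            if (g->cleared_p4ds && !(g->cleared_ptes || g->cleared_pmds || g->cleared_puds))
                    return 0;
            return TLBI_TTL_UNKNOWN;          /* ambiguous: elide the hint */
    }

    int main(void)
    {
            struct gather_example only_ptes = { .cleared_ptes = true };
            struct gather_example mixed     = { .cleared_ptes = true, .cleared_pmds = true };

            printf("only ptes -> %d\n", get_level(&only_ptes));  /* 3 */
            printf("mixed     -> %d\n", get_level(&mixed));      /* TLBI_TTL_UNKNOWN */
            return 0;
    }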
@@ -94,19 +94,22 @@ static inline unsigned long get_trans_granule(void)
 * When ARMv8.4-TTL exists, TLBI operations take an additional hint for
 * the level at which the invalidation must take place. If the level is
 * wrong, no invalidation may take place. In the case where the level
- * cannot be easily determined, a 0 value for the level parameter will
- * perform a non-hinted invalidation.
+ * cannot be easily determined, the value TLBI_TTL_UNKNOWN will perform
+ * a non-hinted invalidation. Any provided level outside the hint range
+ * will also cause fall-back to non-hinted invalidation.
 *
 * For Stage-2 invalidation, use the level values provided to that effect
 * in asm/stage2_pgtable.h.
 */
 #define TLBI_TTL_MASK		GENMASK_ULL(47, 44)
 
+#define TLBI_TTL_UNKNOWN	INT_MAX
+
 #define __tlbi_level(op, addr, level) do {				\
 	u64 arg = addr;							\
 									\
 	if (alternative_has_cap_unlikely(ARM64_HAS_ARMv8_4_TTL) &&	\
-	    level) {							\
+	    level >= 0 && level <= 3) {					\
 		u64 ttl = level & 3;					\
 		ttl |= get_trans_granule() << 2;			\
 		arg &= ~TLBI_TTL_MASK;					\
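The TTL hint packed into bits 47:44 of the TLBI operand is a two-bit translation-granule code followed by the two-bit level, i.e. ttl = (granule_code << 2) | level, and an out-of-range level now simply leaves the operand unhinted. A stand-alone sketch of that packing (hypothetical helper name, granule codes 4K=1/16K=2/64K=3 as used by get_trans_granule()):

    #include <stdio.h>
    #include <stdint.h>

    /* Translation granule codes used in the TTL hint (4K=1, 16K=2, 64K=3). */
    enum { TG_4K = 1, TG_16K = 2, TG_64K = 3 };

    /* Compose the 4-bit TTL value and place it in bits 47:44 of a TLBI
     * operand, mirroring what __tlbi_level() does (simplified sketch). */
    static uint64_t set_ttl(uint64_t arg, unsigned int granule, int level)
    {
            uint64_t ttl;

            if (level < 0 || level > 3)
                    return arg;                     /* out of range: leave unhinted */

            ttl  = ((uint64_t)granule << 2) | (level & 3);
            arg &= ~(0xfULL << 44);                 /* TLBI_TTL_MASK, bits 47:44 */
            arg |= ttl << 44;
            return arg;
    }

    int main(void)
    {
            uint64_t op = 0;

            /* 4K granule, level-3 (PTE) leaf invalidation hint -> TTL = 0b0111 */
            op = set_ttl(op, TG_4K, 3);
            printf("TTL bits = 0x%llx\n", (unsigned long long)((op >> 44) & 0xf)); /* 0x7 */
            return 0;
    }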
@@ -122,28 +125,34 @@ static inline unsigned long get_trans_granule(void)
 } while (0)
 
 /*
- * This macro creates a properly formatted VA operand for the TLB RANGE.
- * The value bit assignments are:
+ * This macro creates a properly formatted VA operand for the TLB RANGE. The
+ * value bit assignments are:
 *
 *		+----------+------+-------+-------+-------+----------------------+
 *		|   ASID   |  TG  | SCALE |  NUM  |  TTL  |        BADDR         |
 *		+-----------------+-------+-------+-------+----------------------+
 *		|63      48|47  46|45   44|43   39|38   37|36                   0|
 *
- * The address range is determined by below formula:
- * [BADDR, BADDR + (NUM + 1) * 2^(5*SCALE + 1) * PAGESIZE)
+ * The address range is determined by below formula: [BADDR, BADDR + (NUM + 1) *
+ * 2^(5*SCALE + 1) * PAGESIZE)
+ *
+ * Note that the first argument, baddr, is pre-shifted; If LPA2 is in use, BADDR
+ * holds addr[52:16]. Else BADDR holds page number. See for example ARM DDI
+ * 0487J.a section C5.5.60 "TLBI VAE1IS, TLBI VAE1ISNXS, TLB Invalidate by VA,
+ * EL1, Inner Shareable".
 *
 */
-#define __TLBI_VADDR_RANGE(addr, asid, scale, num, ttl)			\
-	({								\
-		unsigned long __ta = (addr) >> PAGE_SHIFT;		\
-		__ta &= GENMASK_ULL(36, 0);				\
-		__ta |= (unsigned long)(ttl) << 37;			\
-		__ta |= (unsigned long)(num) << 39;			\
-		__ta |= (unsigned long)(scale) << 44;			\
-		__ta |= get_trans_granule() << 46;			\
-		__ta |= (unsigned long)(asid) << 48;			\
-		__ta;							\
+#define __TLBI_VADDR_RANGE(baddr, asid, scale, num, ttl)		\
+	({								\
+		unsigned long __ta = (baddr);				\
+		unsigned long __ttl = (ttl >= 1 && ttl <= 3) ? ttl : 0;	\
+		__ta &= GENMASK_ULL(36, 0);				\
+		__ta |= __ttl << 37;					\
+		__ta |= (unsigned long)(num) << 39;			\
+		__ta |= (unsigned long)(scale) << 44;			\
+		__ta |= get_trans_granule() << 46;			\
+		__ta |= (unsigned long)(asid) << 48;			\
+		__ta;							\
 	})
 
 /* These macros are used by the TLBI RANGE feature. */
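The span formula in the comment is easy to sanity-check numerically. The sketch below (hypothetical helper names, 4K pages assumed, BADDR already shifted by the caller as the comment requires) packs the BADDR/TTL/NUM/SCALE/TG/ASID fields the same way as the macro and prints how many pages one (scale, num) pair covers, i.e. (num + 1) * 2^(5*scale + 1):

    #include <stdio.h>
    #include <stdint.h>

    /* Pages covered by one range operation: (num + 1) * 2^(5*scale + 1) */
    static uint64_t range_pages(unsigned int num, unsigned int scale)
    {
            return (uint64_t)(num + 1) << (5 * scale + 1);
    }

    /* Pack the TLBI range operand fields as the macro above does
     * (TG fixed to the 4K granule code for this example). */
    static uint64_t tlbi_range_operand(uint64_t baddr, uint16_t asid,
                                       unsigned int scale, unsigned int num,
                                       unsigned int ttl)
    {
            uint64_t ta = baddr & ((1ULL << 37) - 1);   /* BADDR, bits 36:0 */

            ta |= (uint64_t)(ttl & 0x3)   << 37;        /* TTL   */
            ta |= (uint64_t)(num & 0x1f)  << 39;        /* NUM   */
            ta |= (uint64_t)(scale & 0x3) << 44;        /* SCALE */
            ta |= 1ULL                    << 46;        /* TG: 4K granule code */
            ta |= (uint64_t)asid          << 48;        /* ASID  */
            return ta;
    }

    int main(void)
    {
            /* scale = 3, num = 30 covers (30 + 1) * 2^16 = 2031616 pages */
            printf("pages(scale=3, num=30) = %llu\n",
                   (unsigned long long)range_pages(30, 3));
            printf("operand = 0x%016llx\n",
                   (unsigned long long)tlbi_range_operand(0x123456, 0x42, 3, 30, 3));
            return 0;
    }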
@@ -216,12 +225,16 @@ static inline unsigned long get_trans_granule(void)
 *	CPUs, ensuring that any walk-cache entries associated with the
 *	translation are also invalidated.
 *
- *	__flush_tlb_range(vma, start, end, stride, last_level)
+ *	__flush_tlb_range(vma, start, end, stride, last_level, tlb_level)
 *	Invalidate the virtual-address range '[start, end)' on all
 *	CPUs for the user address space corresponding to 'vma->mm'.
 *	The invalidation operations are issued at a granularity
 *	determined by 'stride' and only affect any walk-cache entries
- *	if 'last_level' is equal to false.
+ *	if 'last_level' is equal to false. tlb_level is the level at
+ *	which the invalidation must take place. If the level is wrong,
+ *	no invalidation may take place. In the case where the level
+ *	cannot be easily determined, the value TLBI_TTL_UNKNOWN will
+ *	perform a non-hinted invalidation.
 *
 *
 *	Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented
@@ -345,34 +358,44 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 * @tlb_level:	Translation Table level hint, if known
 * @tlbi_user:	If 'true', call an additional __tlbi_user()
 *		(typically for user ASIDs). 'false' for IPA instructions
+ * @lpa2:	If 'true', the lpa2 scheme is used as set out below
 *
 * When the CPU does not support TLB range operations, flush the TLB
 * entries one by one at the granularity of 'stride'. If the TLB
 * range ops are supported, then:
 *
- * 1. If 'pages' is odd, flush the first page through non-range
- *    operations;
+ * 1. If FEAT_LPA2 is in use, the start address of a range operation must be
+ *    64KB aligned, so flush pages one by one until the alignment is reached
+ *    using the non-range operations. This step is skipped if LPA2 is not in
+ *    use.
 *
- * 2. For remaining pages: the minimum range granularity is decided
- *    by 'scale', so multiple range TLBI operations may be required.
- *    Start from scale = 0, flush the corresponding number of pages
- *    ((num+1)*2^(5*scale+1) starting from 'addr'), then increase it
- *    until no pages left.
+ * 2. The minimum range granularity is decided by 'scale', so multiple range
+ *    TLBI operations may be required. Start from scale = 3, flush the largest
+ *    possible number of pages ((num+1)*2^(5*scale+1)) that fit into the
+ *    requested range, then decrement scale and continue until one or zero pages
+ *    are left. We must start from highest scale to ensure 64KB start alignment
+ *    is maintained in the LPA2 case.
+ *
+ * 3. If there is 1 page remaining, flush it through non-range operations. Range
+ *    operations can only span an even number of pages. We save this for last to
+ *    ensure 64KB start alignment is maintained for the LPA2 case.
 *
 * Note that certain ranges can be represented by either num = 31 and
 * scale or num = 0 and scale + 1. The loop below favours the latter
 * since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
 */
 #define __flush_tlb_range_op(op, start, pages, stride,			\
-				asid, tlb_level, tlbi_user)		\
+				asid, tlb_level, tlbi_user, lpa2)	\
 do {									\
 	int num = 0;							\
-	int scale = 0;							\
+	int scale = 3;							\
+	int shift = lpa2 ? 16 : PAGE_SHIFT;				\
 	unsigned long addr;						\
 									\
 	while (pages > 0) {						\
 		if (!system_supports_tlb_range() ||			\
-		    pages % 2 == 1) {					\
+		    pages == 1 ||					\
+		    (lpa2 && start != ALIGN(start, SZ_64K))) {		\
 			addr = __TLBI_VADDR(start, asid);		\
 			__tlbi_level(op, addr, tlb_level);		\
 			if (tlbi_user)					\
@@ -384,20 +407,20 @@ do {									\
 									\
 		num = __TLBI_RANGE_NUM(pages, scale);			\
 		if (num >= 0) {						\
-			addr = __TLBI_VADDR_RANGE(start, asid, scale,	\
-						  num, tlb_level);	\
+			addr = __TLBI_VADDR_RANGE(start >> shift, asid,	\
+						scale, num, tlb_level);	\
 			__tlbi(r##op, addr);				\
 			if (tlbi_user)					\
 				__tlbi_user(r##op, addr);		\
 			start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT;	\
 			pages -= __TLBI_RANGE_PAGES(num, scale);	\
 		}							\
-		scale++;						\
+		scale--;						\
 	}								\
 } while (0)
 
 #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level)	\
-	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false)
+	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false, kvm_lpa2_is_enabled());
 
 static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
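The effect of starting at scale = 3 and decrementing is easiest to see in a standalone simulation. The sketch below (user-space, hypothetical, 4K pages, range support assumed, LPA2 off so the 64KB pre-alignment step never triggers) mimics the loop structure of __flush_tlb_range_op: it emits the largest range op that fits at each scale, falls back to a single-page op when exactly one page is left, and reports how the requested span was decomposed.

    #include <stdio.h>

    /* Pages covered by one range op at (num, scale): (num + 1) * 2^(5*scale + 1) */
    static unsigned long range_pages(int num, int scale)
    {
            return (unsigned long)(num + 1) << (5 * scale + 1);
    }

    /* Simplified stand-in for __TLBI_RANGE_NUM(): largest usable 'num' for this
     * scale, or -1 if the remaining range is too small for a range op here. */
    static int range_num(unsigned long pages, int scale)
    {
            unsigned long n = pages >> (5 * scale + 1);

            if (n > 31)
                    n = 31;
            return (int)n - 1;
    }

    int main(void)
    {
            unsigned long pages = 7001;   /* hypothetical number of 4K pages to flush */
            int scale = 3;

            while (pages > 0) {
                    if (pages == 1) {             /* leftover page: non-range op */
                            printf("single-page TLBI (1 page)\n");
                            pages -= 1;
                            continue;
                    }

                    int num = range_num(pages, scale);

                    if (num >= 0) {               /* largest range op that fits */
                            unsigned long chunk = range_pages(num, scale);

                            printf("range TLBI: scale=%d num=%d -> %lu pages\n",
                                   scale, num, chunk);
                            pages -= chunk;
                    }
                    scale--;                      /* decrement, as the new macro does */
            }
            return 0;
    }

For 7001 pages this prints range ops at scales 2, 1 and 0 (6144 + 832 + 24 pages) followed by one single-page invalidation, illustrating why descending through the scales keeps each range op's start aligned.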
@@ -427,9 +450,11 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	asid = ASID(vma->vm_mm);
 
 	if (last_level)
-		__flush_tlb_range_op(vale1is, start, pages, stride, asid, tlb_level, true);
+		__flush_tlb_range_op(vale1is, start, pages, stride, asid,
+				     tlb_level, true, lpa2_is_enabled());
 	else
-		__flush_tlb_range_op(vae1is, start, pages, stride, asid, tlb_level, true);
+		__flush_tlb_range_op(vae1is, start, pages, stride, asid,
+				     tlb_level, true, lpa2_is_enabled());
 
 	dsb(ish);
 	mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
@@ -441,9 +466,10 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 	/*
	 * We cannot use leaf-only invalidation here, since we may be invalidating
	 * table entries as part of collapsing hugepages or moving page tables.
-	 * Set the tlb_level to 0 because we can not get enough information here.
+	 * Set the tlb_level to TLBI_TTL_UNKNOWN because we can not get enough
+	 * information here.
	 */
-	__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
+	__flush_tlb_range(vma, start, end, PAGE_SIZE, false, TLBI_TTL_UNKNOWN);
 }
 
 static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -1739,6 +1739,39 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 	return !meltdown_safe;
 }
 
+#if defined(ID_AA64MMFR0_EL1_TGRAN_LPA2) && defined(ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2)
+static bool has_lpa2_at_stage1(u64 mmfr0)
+{
+	unsigned int tgran;
+
+	tgran = cpuid_feature_extract_unsigned_field(mmfr0,
+					ID_AA64MMFR0_EL1_TGRAN_SHIFT);
+	return tgran == ID_AA64MMFR0_EL1_TGRAN_LPA2;
+}
+
+static bool has_lpa2_at_stage2(u64 mmfr0)
+{
+	unsigned int tgran;
+
+	tgran = cpuid_feature_extract_unsigned_field(mmfr0,
+					ID_AA64MMFR0_EL1_TGRAN_2_SHIFT);
+	return tgran == ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2;
+}
+
+static bool has_lpa2(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	u64 mmfr0;
+
+	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+	return has_lpa2_at_stage1(mmfr0) && has_lpa2_at_stage2(mmfr0);
+}
+#else
+static bool has_lpa2(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	return false;
+}
+#endif
+
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 #define KPTI_NG_TEMP_VA		(-(1UL << PMD_SHIFT))
 
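The detection above requires LPA2 to be reported at both stage 1 and stage 2 of translation. A standalone sketch of the same check for the 4K granule; the field positions (TGran4 at bits [31:28], TGran4_2 at bits [43:40]) are quoted from the Arm ARM, and the sample register value is purely illustrative:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* 4-bit unsigned field extraction, analogous to
     * cpuid_feature_extract_unsigned_field(). */
    static unsigned int field(uint64_t reg, unsigned int shift)
    {
            return (reg >> shift) & 0xf;
    }

    #define TGRAN4_SHIFT    28      /* ID_AA64MMFR0_EL1.TGran4 */
    #define TGRAN4_2_SHIFT  40      /* ID_AA64MMFR0_EL1.TGran4_2 */
    #define TGRAN4_LPA2     0x1     /* 4K granule supports 52-bit addresses (stage 1) */
    #define TGRAN4_2_LPA2   0x3     /* 4K granule supports 52-bit addresses at stage 2 */

    static bool has_lpa2_example(uint64_t mmfr0)
    {
            return field(mmfr0, TGRAN4_SHIFT) == TGRAN4_LPA2 &&
                   field(mmfr0, TGRAN4_2_SHIFT) == TGRAN4_2_LPA2;
    }

    int main(void)
    {
            /* Hypothetical register value with both fields advertising LPA2. */
            uint64_t mmfr0 = ((uint64_t)TGRAN4_LPA2 << TGRAN4_SHIFT) |
                             ((uint64_t)TGRAN4_2_LPA2 << TGRAN4_2_SHIFT);

            printf("LPA2 at stage 1 and 2: %s\n", has_lpa2_example(mmfr0) ? "yes" : "no");
            return 0;
    }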
@@ -2696,6 +2729,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.matches = has_cpuid_feature,
 		ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, EVT, IMP)
 	},
+	{
+		.desc = "52-bit Virtual Addressing for KVM (LPA2)",
+		.capability = ARM64_HAS_LPA2,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_lpa2,
+	},
 	{},
 };
 
@@ -482,7 +482,7 @@ SYM_FUNC_START_LOCAL(__primary_switched)
 
 	str_l	x21, __fdt_pointer, x5		// Save FDT pointer
 
-	ldr_l	x4, kimage_vaddr		// Save the offset between
+	adrp	x4, _text			// Save the offset between
 	sub	x4, x4, x0			// the kernel virtual and
 	str_l	x4, kimage_voffset, x5		// physical mappings
 
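kimage_voffset records the difference between the kernel image's virtual mapping and its physical load address, so virt-to-phys conversions for image addresses reduce to a subtraction. A user-space illustration with made-up addresses (hypothetical values, not a kernel interface):

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical addresses, for illustration only. */
            unsigned long long text_virt = 0xffff800080000000ULL; /* runtime VA of _text */
            unsigned long long text_phys = 0x0000000040200000ULL; /* PA the image was loaded at */

            /* What the head.S sequence above stores: VA of _text minus its PA. */
            unsigned long long kimage_voffset = text_virt - text_phys;

            /* A kernel-image address can then be converted either way. */
            unsigned long long some_virt = text_virt + 0x123000;
            unsigned long long some_phys = some_virt - kimage_voffset;

            printf("kimage_voffset = 0x%llx\n", kimage_voffset);
            printf("VA 0x%llx -> PA 0x%llx\n", some_virt, some_phys);
            return 0;
    }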
@@ -3,6 +3,7 @@
 
 KBUILD_CFLAGS	:= $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) -fpie \
		   -Os -DDISABLE_BRANCH_PROFILING $(DISABLE_STACKLEAK_PLUGIN) \
+		   $(DISABLE_LATENT_ENTROPY_PLUGIN) \
		   $(call cc-option,-mbranch-protection=none) \
		   -I$(srctree)/scripts/dtc/libfdt -fno-stack-protector \
		   -include $(srctree)/include/linux/hidden.h \
@@ -52,9 +52,6 @@ u64 vabits_actual __ro_after_init = VA_BITS_MIN;
 EXPORT_SYMBOL(vabits_actual);
 #endif
 
-u64 kimage_vaddr __ro_after_init = (u64)&_text;
-EXPORT_SYMBOL(kimage_vaddr);
-
 u64 kimage_voffset __ro_after_init;
 EXPORT_SYMBOL(kimage_voffset);
 
@@ -37,6 +37,7 @@ HAS_GIC_PRIO_MASKING
 HAS_GIC_PRIO_RELAXED_SYNC
 HAS_HCX
 HAS_LDAPR
+HAS_LPA2
 HAS_LSE_ATOMICS
 HAS_MOPS
 HAS_NESTED_VIRT