mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-12-27 11:06:41 -05:00
Merge branches 'for-next/livepatch', 'for-next/user-contig-bbml2', 'for-next/misc', 'for-next/acpi', 'for-next/debug-entry', 'for-next/feat_mte_tagged_far', 'for-next/kselftest', 'for-next/mdscr-cleanup' and 'for-next/vmap-stack', remote-tracking branch 'arm64/for-next/perf' into for-next/core
* arm64/for-next/perf: (23 commits) drivers/perf: hisi: Support PMUs with no interrupt drivers/perf: hisi: Relax the event number check of v2 PMUs drivers/perf: hisi: Add support for HiSilicon SLLC v3 PMU driver drivers/perf: hisi: Use ACPI driver_data to retrieve SLLC PMU information drivers/perf: hisi: Add support for HiSilicon DDRC v3 PMU driver drivers/perf: hisi: Simplify the probe process for each DDRC version perf/arm-ni: Support sharing IRQs within an NI instance perf/arm-ni: Consolidate CPU affinity handling perf/cxlpmu: Fix typos in cxl_pmu.c comments and documentation perf/cxlpmu: Remove unintended newline from IRQ name format string perf/cxlpmu: Fix devm_kcalloc() argument order in cxl_pmu_probe() perf: arm_spe: Relax period restriction perf: arm_pmuv3: Add support for the Branch Record Buffer Extension (BRBE) KVM: arm64: nvhe: Disable branch generation in nVHE guests arm64: Handle BRBE booting requirements arm64/sysreg: Add BRBE registers and fields perf/arm: Add missing .suppress_bind_attrs perf/arm-cmn: Reduce stack usage during discovery perf: imx9_perf: make the read-only array mask static const perf/arm-cmn: Broaden module description for wider interconnect support ... * for-next/livepatch: : Support for HAVE_LIVEPATCH on arm64 arm64: Kconfig: Keep selects somewhat alphabetically ordered arm64: Implement HAVE_LIVEPATCH arm64: stacktrace: Implement arch_stack_walk_reliable() arm64: stacktrace: Check kretprobe_find_ret_addr() return value arm64/module: Use text-poke API for late relocations. * for-next/user-contig-bbml2: : Optimise the TLBI when folding/unfolding contigous PTEs on hardware with BBML2 and no TLB conflict aborts arm64/mm: Elide tlbi in contpte_convert() under BBML2 iommu/arm: Add BBM Level 2 smmu feature arm64: Add BBM Level 2 cpu feature arm64: cpufeature: Introduce MATCH_ALL_EARLY_CPUS capability type * for-next/misc: : Miscellaneous arm64 patches arm64/gcs: task_gcs_el0_enable() should use passed task arm64: signal: Remove ISB when resetting POR_EL0 arm64/mm: Drop redundant addr increment in set_huge_pte_at() arm64: Mark kernel as tainted on SAE and SError panic arm64/gcs: Don't call gcs_free() when releasing task_struct arm64: fix unnecessary rebuilding when CONFIG_DEBUG_EFI=y arm64/mm: Optimize loop to reduce redundant operations of contpte_ptep_get arm64: pi: use 'targets' instead of extra-y in Makefile * for-next/acpi: : Various ACPI arm64 changes ACPI: Suppress misleading SPCR console message when SPCR table is absent ACPI: Return -ENODEV from acpi_parse_spcr() when SPCR support is disabled * for-next/debug-entry: : Simplify the debug exception entry path arm64: debug: remove debug exception registration infrastructure arm64: debug: split bkpt32 exception entry arm64: debug: split brk64 exception entry arm64: debug: split hardware watchpoint exception entry arm64: debug: split single stepping exception entry arm64: debug: refactor reinstall_suspended_bps() arm64: debug: split hardware breakpoint exception entry arm64: entry: Add entry and exit functions for debug exceptions arm64: debug: remove break/step handler registration infrastructure arm64: debug: call step handlers statically arm64: debug: call software breakpoint handlers statically arm64: refactor aarch32_break_handler() arm64: debug: clean up single_step_handler logic * for-next/feat_mte_tagged_far: : Support for reporting the non-address bits during a synchronous MTE tag check fault kselftest/arm64/mte: Add mtefar tests on check_mmap_options kselftest/arm64/mte: Refactor check_mmap_option 
test kselftest/arm64/mte: Add verification for address tag in signal handler kselftest/arm64/mte: Add address tag related macro and function kselftest/arm64/mte: Check MTE_FAR feature is supported kselftest/arm64/mte: Register mte signal handler with SA_EXPOSE_TAGBITS kselftest/arm64: Add MTE_FAR hwcap test KVM: arm64: Expose FEAT_MTE_TAGGED_FAR feature to guest arm64: Report address tag when FEAT_MTE_TAGGED_FAR is supported arm64/cpufeature: Add FEAT_MTE_TAGGED_FAR feature * for-next/kselftest: : Kselftest updates for arm64 kselftest/arm64: Handle attempts to disable SM on SME only systems kselftest/arm64: Fix SVE write data generation for SME only systems kselftest/arm64: Test SME on SME only systems in fp-ptrace kselftest/arm64: Test FPSIMD format data writes via NT_ARM_SVE in fp-ptrace kselftest/arm64: Allow sve-ptrace to run on SME only systems kselftest/arm4: Provide local defines for AT_HWCAP3 kselftest/arm64: Specify SVE data when testing VL set in sve-ptrace kselftest/arm64: Fix test for streaming FPSIMD write in sve-ptrace kselftest/arm64: Fix check for setting new VLs in sve-ptrace kselftest/arm64: Convert tpidr2 test to use kselftest.h * for-next/mdscr-cleanup: : Drop redundant DBG_MDSCR_* macros KVM: selftests: Change MDSCR_EL1 register holding variables as uint64_t arm64/debug: Drop redundant DBG_MDSCR_* macros * for-next/vmap-stack: : Force VMAP_STACK on arm64 arm64: remove CONFIG_VMAP_STACK checks from entry code arm64: remove CONFIG_VMAP_STACK checks from SDEI stack handling arm64: remove CONFIG_VMAP_STACK checks from stacktrace overflow logic arm64: remove CONFIG_VMAP_STACK conditionals from traps overflow stack arm64: remove CONFIG_VMAP_STACK conditionals from irq stack setup arm64: Remove CONFIG_VMAP_STACK conditionals from THREAD_SHIFT and THREAD_ALIGN arm64: efi: Remove CONFIG_VMAP_STACK check arm64: Mandate VMAP_STACK arm64: efi: Fix KASAN false positive for EFI runtime stack arm64/ptrace: Fix stack-out-of-bounds read in regs_get_kernel_stack_nth() arm64/gcs: Don't call gcs_free() during flush_gcs() arm64: Restrict pagetable teardown to avoid false warning docs: arm64: Fix ICC_SRE_EL2 register typo in booting.rst
This commit is contained in:
@@ -234,7 +234,7 @@ Before jumping into the kernel, the following conditions must be met:
|
||||
|
||||
- If the kernel is entered at EL1:
|
||||
|
||||
- ICC.SRE_EL2.Enable (bit 3) must be initialised to 0b1
|
||||
- ICC_SRE_EL2.Enable (bit 3) must be initialised to 0b1
|
||||
- ICC_SRE_EL2.SRE (bit 0) must be initialised to 0b1.
|
||||
|
||||
- The DT or ACPI tables must describe a GICv3 interrupt controller.
|
||||
|
||||
@@ -435,6 +435,9 @@ HWCAP2_SME_SF8DP4
|
||||
HWCAP2_POE
|
||||
Functionality implied by ID_AA64MMFR3_EL1.S1POE == 0b0001.
|
||||
|
||||
HWCAP3_MTE_FAR
|
||||
Functionality implied by ID_AA64PFR2_EL1.MTEFAR == 0b0001.
|
||||
|
||||
4. Unused AT_HWCAP bits
|
||||
-----------------------
|
||||
|
||||
|
||||
@@ -60,11 +60,12 @@ that signal handlers in applications making use of tags cannot rely
|
||||
on the tag information for user virtual addresses being maintained
|
||||
in these fields unless the flag was set.
|
||||
|
||||
Due to architecture limitations, bits 63:60 of the fault address
|
||||
are not preserved in response to synchronous tag check faults
|
||||
(SEGV_MTESERR) even if SA_EXPOSE_TAGBITS was set. Applications should
|
||||
treat the values of these bits as undefined in order to accommodate
|
||||
future architecture revisions which may preserve the bits.
|
||||
If FEAT_MTE_TAGGED_FAR (Armv8.9) is supported, bits 63:60 of the fault address
|
||||
are preserved in response to synchronous tag check faults (SEGV_MTESERR)
|
||||
otherwise not preserved even if SA_EXPOSE_TAGBITS was set.
|
||||
Applications should interpret the values of these bits based on
|
||||
the support for the HWCAP3_MTE_FAR. If the support is not present,
|
||||
the values of these bits should be considered as undefined otherwise valid.
|
||||
|
||||
For signals raised in response to watchpoint debug exceptions, the
|
||||
tag information will be preserved regardless of the SA_EXPOSE_TAGBITS
|
||||
|
||||
@@ -234,6 +234,7 @@ config ARM64
|
||||
select HAVE_HW_BREAKPOINT if PERF_EVENTS
|
||||
select HAVE_IOREMAP_PROT
|
||||
select HAVE_IRQ_TIME_ACCOUNTING
|
||||
select HAVE_LIVEPATCH
|
||||
select HAVE_MOD_ARCH_SPECIFIC
|
||||
select HAVE_NMI
|
||||
select HAVE_PERF_EVENTS
|
||||
@@ -242,6 +243,7 @@ config ARM64
|
||||
select HAVE_PERF_USER_STACK_DUMP
|
||||
select HAVE_PREEMPT_DYNAMIC_KEY
|
||||
select HAVE_REGS_AND_STACK_ACCESS_API
|
||||
select HAVE_RELIABLE_STACKTRACE
|
||||
select HAVE_POSIX_CPU_TIMERS_TASK_WORK
|
||||
select HAVE_FUNCTION_ARG_ACCESS_API
|
||||
select MMU_GATHER_RCU_TABLE_FREE
|
||||
@@ -279,6 +281,7 @@ config ARM64
|
||||
select HAVE_SOFTIRQ_ON_OWN_STACK
|
||||
select USER_STACKTRACE_SUPPORT
|
||||
select VDSO_GETRANDOM
|
||||
select VMAP_STACK
|
||||
help
|
||||
ARM 64-bit (AArch64) Linux support.
|
||||
|
||||
@@ -2499,3 +2502,4 @@ source "drivers/acpi/Kconfig"
|
||||
|
||||
source "arch/arm64/kvm/Kconfig"
|
||||
|
||||
source "kernel/livepatch/Kconfig"
|
||||
|
||||
@@ -53,7 +53,7 @@
|
||||
.macro disable_step_tsk, flgs, tmp
|
||||
tbz \flgs, #TIF_SINGLESTEP, 9990f
|
||||
mrs \tmp, mdscr_el1
|
||||
bic \tmp, \tmp, #DBG_MDSCR_SS
|
||||
bic \tmp, \tmp, #MDSCR_EL1_SS
|
||||
msr mdscr_el1, \tmp
|
||||
isb // Take effect before a subsequent clear of DAIF.D
|
||||
9990:
|
||||
@@ -63,7 +63,7 @@
|
||||
.macro enable_step_tsk, flgs, tmp
|
||||
tbz \flgs, #TIF_SINGLESTEP, 9990f
|
||||
mrs \tmp, mdscr_el1
|
||||
orr \tmp, \tmp, #DBG_MDSCR_SS
|
||||
orr \tmp, \tmp, #MDSCR_EL1_SS
|
||||
msr mdscr_el1, \tmp
|
||||
9990:
|
||||
.endm
|
||||
|
||||
@@ -275,6 +275,14 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
|
||||
#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU ((u16)BIT(5))
|
||||
/* Panic when a conflict is detected */
|
||||
#define ARM64_CPUCAP_PANIC_ON_CONFLICT ((u16)BIT(6))
|
||||
/*
|
||||
* When paired with SCOPE_LOCAL_CPU, all early CPUs must satisfy the
|
||||
* condition. This is different from SCOPE_SYSTEM where the check is performed
|
||||
* only once at the end of the SMP boot on the sanitised ID registers.
|
||||
* SCOPE_SYSTEM is not suitable for cases where the capability depends on
|
||||
* properties local to a CPU like MIDR_EL1.
|
||||
*/
|
||||
#define ARM64_CPUCAP_MATCH_ALL_EARLY_CPUS ((u16)BIT(7))
|
||||
|
||||
/*
|
||||
* CPU errata workarounds that need to be enabled at boot time if one or
|
||||
@@ -304,6 +312,16 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
|
||||
(ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
|
||||
ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU | \
|
||||
ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
|
||||
/*
|
||||
* CPU feature detected at boot time and present on all early CPUs. Late CPUs
|
||||
* are permitted to have the feature even if it hasn't been enabled, although
|
||||
* the feature will not be used by Linux in this case. If all early CPUs have
|
||||
* the feature, then every late CPU must have it.
|
||||
*/
|
||||
#define ARM64_CPUCAP_EARLY_LOCAL_CPU_FEATURE \
|
||||
(ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
|
||||
ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU | \
|
||||
ARM64_CPUCAP_MATCH_ALL_EARLY_CPUS)
|
||||
|
||||
/*
|
||||
* CPU feature detected at boot time, on one or more CPUs. A late CPU
|
||||
@@ -391,6 +409,11 @@ static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
|
||||
return cap->type & ARM64_CPUCAP_SCOPE_MASK;
|
||||
}
|
||||
|
||||
static inline bool cpucap_match_all_early_cpus(const struct arm64_cpu_capabilities *cap)
|
||||
{
|
||||
return cap->type & ARM64_CPUCAP_MATCH_ALL_EARLY_CPUS;
|
||||
}
|
||||
|
||||
/*
|
||||
* Generic helper for handling capabilities with multiple (match,enable) pairs
|
||||
* of call backs, sharing the same capability bit.
|
||||
@@ -848,6 +871,11 @@ static inline bool system_supports_pmuv3(void)
|
||||
return cpus_have_final_cap(ARM64_HAS_PMUV3);
|
||||
}
|
||||
|
||||
static inline bool system_supports_bbml2_noabort(void)
|
||||
{
|
||||
return alternative_has_cap_unlikely(ARM64_HAS_BBML2_NOABORT);
|
||||
}
|
||||
|
||||
int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
|
||||
bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
|
||||
|
||||
|
||||
@@ -13,14 +13,8 @@
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
/* Low-level stepping controls. */
|
||||
#define DBG_MDSCR_SS (1 << 0)
|
||||
#define DBG_SPSR_SS (1 << 21)
|
||||
|
||||
/* MDSCR_EL1 enabling bits */
|
||||
#define DBG_MDSCR_KDE (1 << 13)
|
||||
#define DBG_MDSCR_MDE (1 << 15)
|
||||
#define DBG_MDSCR_MASK ~(DBG_MDSCR_KDE | DBG_MDSCR_MDE)
|
||||
|
||||
#define DBG_ESR_EVT(x) (((x) >> 27) & 0x7)
|
||||
|
||||
/* AArch64 */
|
||||
@@ -62,30 +56,6 @@ struct task_struct;
|
||||
#define DBG_HOOK_HANDLED 0
|
||||
#define DBG_HOOK_ERROR 1
|
||||
|
||||
struct step_hook {
|
||||
struct list_head node;
|
||||
int (*fn)(struct pt_regs *regs, unsigned long esr);
|
||||
};
|
||||
|
||||
void register_user_step_hook(struct step_hook *hook);
|
||||
void unregister_user_step_hook(struct step_hook *hook);
|
||||
|
||||
void register_kernel_step_hook(struct step_hook *hook);
|
||||
void unregister_kernel_step_hook(struct step_hook *hook);
|
||||
|
||||
struct break_hook {
|
||||
struct list_head node;
|
||||
int (*fn)(struct pt_regs *regs, unsigned long esr);
|
||||
u16 imm;
|
||||
u16 mask; /* These bits are ignored when comparing with imm */
|
||||
};
|
||||
|
||||
void register_user_break_hook(struct break_hook *hook);
|
||||
void unregister_user_break_hook(struct break_hook *hook);
|
||||
|
||||
void register_kernel_break_hook(struct break_hook *hook);
|
||||
void unregister_kernel_break_hook(struct break_hook *hook);
|
||||
|
||||
u8 debug_monitors_arch(void);
|
||||
|
||||
enum dbg_active_el {
|
||||
@@ -108,17 +78,15 @@ void kernel_rewind_single_step(struct pt_regs *regs);
|
||||
void kernel_fastforward_single_step(struct pt_regs *regs);
|
||||
|
||||
#ifdef CONFIG_HAVE_HW_BREAKPOINT
|
||||
int reinstall_suspended_bps(struct pt_regs *regs);
|
||||
bool try_step_suspended_breakpoints(struct pt_regs *regs);
|
||||
#else
|
||||
static inline int reinstall_suspended_bps(struct pt_regs *regs)
|
||||
static inline bool try_step_suspended_breakpoints(struct pt_regs *regs)
|
||||
{
|
||||
return -ENODEV;
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
int aarch32_break_handler(struct pt_regs *regs);
|
||||
|
||||
void debug_traps_init(void);
|
||||
bool try_handle_aarch32_break(struct pt_regs *regs);
|
||||
|
||||
#endif /* __ASSEMBLY */
|
||||
#endif /* __ASM_DEBUG_MONITORS_H */
|
||||
|
||||
@@ -59,8 +59,20 @@ void do_el0_bti(struct pt_regs *regs);
|
||||
void do_el1_bti(struct pt_regs *regs, unsigned long esr);
|
||||
void do_el0_gcs(struct pt_regs *regs, unsigned long esr);
|
||||
void do_el1_gcs(struct pt_regs *regs, unsigned long esr);
|
||||
void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr,
|
||||
#ifdef CONFIG_HAVE_HW_BREAKPOINT
|
||||
void do_breakpoint(unsigned long esr, struct pt_regs *regs);
|
||||
void do_watchpoint(unsigned long addr, unsigned long esr,
|
||||
struct pt_regs *regs);
|
||||
#else
|
||||
static inline void do_breakpoint(unsigned long esr, struct pt_regs *regs) {}
|
||||
static inline void do_watchpoint(unsigned long addr, unsigned long esr,
|
||||
struct pt_regs *regs) {}
|
||||
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
|
||||
void do_el0_softstep(unsigned long esr, struct pt_regs *regs);
|
||||
void do_el1_softstep(unsigned long esr, struct pt_regs *regs);
|
||||
void do_el0_brk64(unsigned long esr, struct pt_regs *regs);
|
||||
void do_el1_brk64(unsigned long esr, struct pt_regs *regs);
|
||||
void do_bkpt32(unsigned long esr, struct pt_regs *regs);
|
||||
void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs);
|
||||
void do_sve_acc(unsigned long esr, struct pt_regs *regs);
|
||||
void do_sme_acc(unsigned long esr, struct pt_regs *regs);
|
||||
|
||||
@@ -58,7 +58,7 @@ static inline u64 gcsss2(void)
|
||||
|
||||
static inline bool task_gcs_el0_enabled(struct task_struct *task)
|
||||
{
|
||||
return current->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE;
|
||||
return task->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE;
|
||||
}
|
||||
|
||||
void gcs_set_el0_mode(struct task_struct *task);
|
||||
|
||||
@@ -176,6 +176,7 @@
|
||||
#define KERNEL_HWCAP_POE __khwcap2_feature(POE)
|
||||
|
||||
#define __khwcap3_feature(x) (const_ilog2(HWCAP3_ ## x) + 128)
|
||||
#define KERNEL_HWCAP_MTE_FAR __khwcap3_feature(MTE_FAR)
|
||||
|
||||
/*
|
||||
* This yields a mask that user programs can use to figure out what
|
||||
|
||||
@@ -24,6 +24,18 @@ static inline void arch_kgdb_breakpoint(void)
|
||||
extern void kgdb_handle_bus_error(void);
|
||||
extern int kgdb_fault_expected;
|
||||
|
||||
int kgdb_brk_handler(struct pt_regs *regs, unsigned long esr);
|
||||
int kgdb_compiled_brk_handler(struct pt_regs *regs, unsigned long esr);
|
||||
#ifdef CONFIG_KGDB
|
||||
int kgdb_single_step_handler(struct pt_regs *regs, unsigned long esr);
|
||||
#else
|
||||
static inline int kgdb_single_step_handler(struct pt_regs *regs,
|
||||
unsigned long esr)
|
||||
{
|
||||
return DBG_HOOK_ERROR;
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
/*
|
||||
|
||||
@@ -41,4 +41,12 @@ void __kretprobe_trampoline(void);
|
||||
void __kprobes *trampoline_probe_handler(struct pt_regs *regs);
|
||||
|
||||
#endif /* CONFIG_KPROBES */
|
||||
|
||||
int __kprobes kprobe_brk_handler(struct pt_regs *regs,
|
||||
unsigned long esr);
|
||||
int __kprobes kprobe_ss_brk_handler(struct pt_regs *regs,
|
||||
unsigned long esr);
|
||||
int __kprobes kretprobe_brk_handler(struct pt_regs *regs,
|
||||
unsigned long esr);
|
||||
|
||||
#endif /* _ARM_KPROBES_H */
|
||||
|
||||
@@ -118,7 +118,7 @@
|
||||
* VMAP'd stacks are allocated at page granularity, so we must ensure that such
|
||||
* stacks are a multiple of page size.
|
||||
*/
|
||||
#if defined(CONFIG_VMAP_STACK) && (MIN_THREAD_SHIFT < PAGE_SHIFT)
|
||||
#if (MIN_THREAD_SHIFT < PAGE_SHIFT)
|
||||
#define THREAD_SHIFT PAGE_SHIFT
|
||||
#else
|
||||
#define THREAD_SHIFT MIN_THREAD_SHIFT
|
||||
@@ -135,11 +135,7 @@
|
||||
* checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
|
||||
* assembly.
|
||||
*/
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
#define THREAD_ALIGN (2 * THREAD_SIZE)
|
||||
#else
|
||||
#define THREAD_ALIGN THREAD_SIZE
|
||||
#endif
|
||||
|
||||
#define IRQ_STACK_SIZE THREAD_SIZE
|
||||
|
||||
|
||||
@@ -59,7 +59,6 @@ static inline bool on_task_stack(const struct task_struct *tsk,
|
||||
|
||||
#define on_thread_stack() (on_task_stack(current, current_stack_pointer, 1))
|
||||
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
|
||||
|
||||
static inline struct stack_info stackinfo_get_overflow(void)
|
||||
@@ -72,11 +71,8 @@ static inline struct stack_info stackinfo_get_overflow(void)
|
||||
.high = high,
|
||||
};
|
||||
}
|
||||
#else
|
||||
#define stackinfo_get_overflow() stackinfo_get_unknown()
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_ARM_SDE_INTERFACE) && defined(CONFIG_VMAP_STACK)
|
||||
#if defined(CONFIG_ARM_SDE_INTERFACE)
|
||||
DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
|
||||
DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
|
||||
|
||||
|
||||
@@ -25,10 +25,6 @@ void arm64_notify_die(const char *str, struct pt_regs *regs,
|
||||
int signo, int sicode, unsigned long far,
|
||||
unsigned long err);
|
||||
|
||||
void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned long,
|
||||
struct pt_regs *),
|
||||
int sig, int code, const char *name);
|
||||
|
||||
struct mm_struct;
|
||||
extern void __show_regs(struct pt_regs *);
|
||||
|
||||
|
||||
@@ -70,6 +70,7 @@ void arch_setup_new_exec(void);
|
||||
#define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */
|
||||
#define TIF_SECCOMP 11 /* syscall secure computing */
|
||||
#define TIF_SYSCALL_EMU 12 /* syscall emulation active */
|
||||
#define TIF_PATCH_PENDING 13 /* pending live patching update */
|
||||
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
|
||||
#define TIF_FREEZE 19
|
||||
#define TIF_RESTORE_SIGMASK 20
|
||||
@@ -96,6 +97,7 @@ void arch_setup_new_exec(void);
|
||||
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
|
||||
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
|
||||
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
|
||||
#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING)
|
||||
#define _TIF_UPROBE (1 << TIF_UPROBE)
|
||||
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
|
||||
#define _TIF_32BIT (1 << TIF_32BIT)
|
||||
@@ -107,7 +109,8 @@ void arch_setup_new_exec(void);
|
||||
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \
|
||||
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
|
||||
_TIF_UPROBE | _TIF_MTE_ASYNC_FAULT | \
|
||||
_TIF_NOTIFY_SIGNAL | _TIF_SIGPENDING)
|
||||
_TIF_NOTIFY_SIGNAL | _TIF_SIGPENDING | \
|
||||
_TIF_PATCH_PENDING)
|
||||
|
||||
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
|
||||
_TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
|
||||
|
||||
@@ -29,6 +29,12 @@ void arm64_force_sig_fault_pkey(unsigned long far, const char *str, int pkey);
|
||||
void arm64_force_sig_mceerr(int code, unsigned long far, short lsb, const char *str);
|
||||
void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far, const char *str);
|
||||
|
||||
int bug_brk_handler(struct pt_regs *regs, unsigned long esr);
|
||||
int cfi_brk_handler(struct pt_regs *regs, unsigned long esr);
|
||||
int reserved_fault_brk_handler(struct pt_regs *regs, unsigned long esr);
|
||||
int kasan_brk_handler(struct pt_regs *regs, unsigned long esr);
|
||||
int ubsan_brk_handler(struct pt_regs *regs, unsigned long esr);
|
||||
|
||||
int early_brk64(unsigned long addr, unsigned long esr, struct pt_regs *regs);
|
||||
|
||||
/*
|
||||
|
||||
@@ -28,4 +28,15 @@ struct arch_uprobe {
|
||||
bool simulate;
|
||||
};
|
||||
|
||||
int uprobe_brk_handler(struct pt_regs *regs, unsigned long esr);
|
||||
#ifdef CONFIG_UPROBES
|
||||
int uprobe_single_step_handler(struct pt_regs *regs, unsigned long esr);
|
||||
#else
|
||||
static inline int uprobe_single_step_handler(struct pt_regs *regs,
|
||||
unsigned long esr)
|
||||
{
|
||||
return DBG_HOOK_ERROR;
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
@@ -143,5 +143,6 @@
|
||||
/*
|
||||
* HWCAP3 flags - for AT_HWCAP3
|
||||
*/
|
||||
#define HWCAP3_MTE_FAR (1UL << 0)
|
||||
|
||||
#endif /* _UAPI__ASM_HWCAP_H */
|
||||
|
||||
@@ -81,7 +81,7 @@ obj-y += head.o
|
||||
always-$(KBUILD_BUILTIN) += vmlinux.lds
|
||||
|
||||
ifeq ($(CONFIG_DEBUG_EFI),y)
|
||||
AFLAGS_head.o += -DVMLINUX_PATH="\"$(realpath $(objtree)/vmlinux)\""
|
||||
AFLAGS_head.o += -DVMLINUX_PATH="\"$(abspath vmlinux)\""
|
||||
endif
|
||||
|
||||
# for cleaning
|
||||
|
||||
@@ -197,6 +197,8 @@ static int __init acpi_fadt_sanity_check(void)
|
||||
*/
|
||||
void __init acpi_boot_table_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Enable ACPI instead of device tree unless
|
||||
* - ACPI has been disabled explicitly (acpi=off), or
|
||||
@@ -250,10 +252,12 @@ void __init acpi_boot_table_init(void)
|
||||
* behaviour, use acpi=nospcr to disable console in ACPI SPCR
|
||||
* table as default serial console.
|
||||
*/
|
||||
acpi_parse_spcr(earlycon_acpi_spcr_enable,
|
||||
ret = acpi_parse_spcr(earlycon_acpi_spcr_enable,
|
||||
!param_acpi_nospcr);
|
||||
pr_info("Use ACPI SPCR as default console: %s\n",
|
||||
param_acpi_nospcr ? "No" : "Yes");
|
||||
if (!ret || param_acpi_nospcr || !IS_ENABLED(CONFIG_ACPI_SPCR_TABLE))
|
||||
pr_info("Use ACPI SPCR as default console: No\n");
|
||||
else
|
||||
pr_info("Use ACPI SPCR as default console: Yes\n");
|
||||
|
||||
if (IS_ENABLED(CONFIG_ACPI_BGRT))
|
||||
acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);
|
||||
|
||||
@@ -320,6 +320,7 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
|
||||
|
||||
static const struct arm64_ftr_bits ftr_id_aa64pfr2[] = {
|
||||
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR2_EL1_FPMR_SHIFT, 4, 0),
|
||||
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR2_EL1_MTEFAR_SHIFT, 4, ID_AA64PFR2_EL1_MTEFAR_NI),
|
||||
ARM64_FTR_END,
|
||||
};
|
||||
|
||||
@@ -2213,6 +2214,38 @@ static bool hvhe_possible(const struct arm64_cpu_capabilities *entry,
|
||||
return arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_HVHE);
|
||||
}
|
||||
|
||||
static bool has_bbml2_noabort(const struct arm64_cpu_capabilities *caps, int scope)
|
||||
{
|
||||
/*
|
||||
* We want to allow usage of BBML2 in as wide a range of kernel contexts
|
||||
* as possible. This list is therefore an allow-list of known-good
|
||||
* implementations that both support BBML2 and additionally, fulfill the
|
||||
* extra constraint of never generating TLB conflict aborts when using
|
||||
* the relaxed BBML2 semantics (such aborts make use of BBML2 in certain
|
||||
* kernel contexts difficult to prove safe against recursive aborts).
|
||||
*
|
||||
* Note that implementations can only be considered "known-good" if their
|
||||
* implementors attest to the fact that the implementation never raises
|
||||
* TLB conflict aborts for BBML2 mapping granularity changes.
|
||||
*/
|
||||
static const struct midr_range supports_bbml2_noabort_list[] = {
|
||||
MIDR_REV_RANGE(MIDR_CORTEX_X4, 0, 3, 0xf),
|
||||
MIDR_REV_RANGE(MIDR_NEOVERSE_V3, 0, 2, 0xf),
|
||||
{}
|
||||
};
|
||||
|
||||
/* Does our cpu guarantee to never raise TLB conflict aborts? */
|
||||
if (!is_midr_in_range_list(supports_bbml2_noabort_list))
|
||||
return false;
|
||||
|
||||
/*
|
||||
* We currently ignore the ID_AA64MMFR2_EL1 register, and only care
|
||||
* about whether the MIDR check passes.
|
||||
*/
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ARM64_PAN
|
||||
static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
|
||||
{
|
||||
@@ -2874,6 +2907,13 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
|
||||
.matches = has_cpuid_feature,
|
||||
ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, MTE, MTE3)
|
||||
},
|
||||
{
|
||||
.desc = "FAR on MTE Tag Check Fault",
|
||||
.capability = ARM64_MTE_FAR,
|
||||
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
|
||||
.matches = has_cpuid_feature,
|
||||
ARM64_CPUID_FIELDS(ID_AA64PFR2_EL1, MTEFAR, IMP)
|
||||
},
|
||||
#endif /* CONFIG_ARM64_MTE */
|
||||
{
|
||||
.desc = "RCpc load-acquire (LDAPR)",
|
||||
@@ -2980,6 +3020,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
|
||||
.matches = has_cpuid_feature,
|
||||
ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, EVT, IMP)
|
||||
},
|
||||
{
|
||||
.desc = "BBM Level 2 without TLB conflict abort",
|
||||
.capability = ARM64_HAS_BBML2_NOABORT,
|
||||
.type = ARM64_CPUCAP_EARLY_LOCAL_CPU_FEATURE,
|
||||
.matches = has_bbml2_noabort,
|
||||
},
|
||||
{
|
||||
.desc = "52-bit Virtual Addressing for KVM (LPA2)",
|
||||
.capability = ARM64_HAS_LPA2,
|
||||
@@ -3211,6 +3257,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
|
||||
#ifdef CONFIG_ARM64_MTE
|
||||
HWCAP_CAP(ID_AA64PFR1_EL1, MTE, MTE2, CAP_HWCAP, KERNEL_HWCAP_MTE),
|
||||
HWCAP_CAP(ID_AA64PFR1_EL1, MTE, MTE3, CAP_HWCAP, KERNEL_HWCAP_MTE3),
|
||||
HWCAP_CAP(ID_AA64PFR2_EL1, MTEFAR, IMP, CAP_HWCAP, KERNEL_HWCAP_MTE_FAR),
|
||||
#endif /* CONFIG_ARM64_MTE */
|
||||
HWCAP_CAP(ID_AA64MMFR0_EL1, ECV, IMP, CAP_HWCAP, KERNEL_HWCAP_ECV),
|
||||
HWCAP_CAP(ID_AA64MMFR1_EL1, AFP, IMP, CAP_HWCAP, KERNEL_HWCAP_AFP),
|
||||
@@ -3370,18 +3417,49 @@ static void update_cpu_capabilities(u16 scope_mask)
|
||||
|
||||
scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
|
||||
for (i = 0; i < ARM64_NCAPS; i++) {
|
||||
bool match_all = false;
|
||||
bool caps_set = false;
|
||||
bool boot_cpu = false;
|
||||
|
||||
caps = cpucap_ptrs[i];
|
||||
if (!caps || !(caps->type & scope_mask) ||
|
||||
cpus_have_cap(caps->capability) ||
|
||||
!caps->matches(caps, cpucap_default_scope(caps)))
|
||||
if (!caps || !(caps->type & scope_mask))
|
||||
continue;
|
||||
|
||||
if (caps->desc && !caps->cpus)
|
||||
match_all = cpucap_match_all_early_cpus(caps);
|
||||
caps_set = cpus_have_cap(caps->capability);
|
||||
boot_cpu = scope_mask & SCOPE_BOOT_CPU;
|
||||
|
||||
/*
|
||||
* Unless it's a match-all CPUs feature, avoid probing if
|
||||
* already detected.
|
||||
*/
|
||||
if (!match_all && caps_set)
|
||||
continue;
|
||||
|
||||
/*
|
||||
* A match-all CPUs capability is only set when probing the
|
||||
* boot CPU. It may be cleared subsequently if not detected on
|
||||
* secondary ones.
|
||||
*/
|
||||
if (match_all && !caps_set && !boot_cpu)
|
||||
continue;
|
||||
|
||||
if (!caps->matches(caps, cpucap_default_scope(caps))) {
|
||||
if (match_all)
|
||||
__clear_bit(caps->capability, system_cpucaps);
|
||||
continue;
|
||||
}
|
||||
|
||||
/*
|
||||
* Match-all CPUs capabilities are logged later when the
|
||||
* system capabilities are finalised.
|
||||
*/
|
||||
if (!match_all && caps->desc && !caps->cpus)
|
||||
pr_info("detected: %s\n", caps->desc);
|
||||
|
||||
__set_bit(caps->capability, system_cpucaps);
|
||||
|
||||
if ((scope_mask & SCOPE_BOOT_CPU) && (caps->type & SCOPE_BOOT_CPU))
|
||||
if (boot_cpu && (caps->type & SCOPE_BOOT_CPU))
|
||||
set_bit(caps->capability, boot_cpucaps);
|
||||
}
|
||||
}
|
||||
@@ -3782,17 +3860,24 @@ static void __init setup_system_capabilities(void)
|
||||
enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
|
||||
apply_alternatives_all();
|
||||
|
||||
/*
|
||||
* Log any cpucaps with a cpumask as these aren't logged by
|
||||
* update_cpu_capabilities().
|
||||
*/
|
||||
for (int i = 0; i < ARM64_NCAPS; i++) {
|
||||
const struct arm64_cpu_capabilities *caps = cpucap_ptrs[i];
|
||||
|
||||
if (caps && caps->cpus && caps->desc &&
|
||||
cpumask_any(caps->cpus) < nr_cpu_ids)
|
||||
if (!caps || !caps->desc)
|
||||
continue;
|
||||
|
||||
/*
|
||||
* Log any cpucaps with a cpumask as these aren't logged by
|
||||
* update_cpu_capabilities().
|
||||
*/
|
||||
if (caps->cpus && cpumask_any(caps->cpus) < nr_cpu_ids)
|
||||
pr_info("detected: %s on CPU%*pbl\n",
|
||||
caps->desc, cpumask_pr_args(caps->cpus));
|
||||
|
||||
/* Log match-all CPUs capabilities */
|
||||
if (cpucap_match_all_early_cpus(caps) &&
|
||||
cpus_have_cap(caps->capability))
|
||||
pr_info("detected: %s\n", caps->desc);
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@@ -160,6 +160,7 @@ static const char *const hwcap_str[] = {
|
||||
[KERNEL_HWCAP_SME_SFEXPA] = "smesfexpa",
|
||||
[KERNEL_HWCAP_SME_STMOP] = "smestmop",
|
||||
[KERNEL_HWCAP_SME_SMOP4] = "smesmop4",
|
||||
[KERNEL_HWCAP_MTE_FAR] = "mtefar",
|
||||
};
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
|
||||
@@ -21,8 +21,12 @@
|
||||
#include <asm/cputype.h>
|
||||
#include <asm/daifflags.h>
|
||||
#include <asm/debug-monitors.h>
|
||||
#include <asm/exception.h>
|
||||
#include <asm/kgdb.h>
|
||||
#include <asm/kprobes.h>
|
||||
#include <asm/system_misc.h>
|
||||
#include <asm/traps.h>
|
||||
#include <asm/uprobes.h>
|
||||
|
||||
/* Determine debug architecture. */
|
||||
u8 debug_monitors_arch(void)
|
||||
@@ -34,7 +38,7 @@ u8 debug_monitors_arch(void)
|
||||
/*
|
||||
* MDSCR access routines.
|
||||
*/
|
||||
static void mdscr_write(u32 mdscr)
|
||||
static void mdscr_write(u64 mdscr)
|
||||
{
|
||||
unsigned long flags;
|
||||
flags = local_daif_save();
|
||||
@@ -43,7 +47,7 @@ static void mdscr_write(u32 mdscr)
|
||||
}
|
||||
NOKPROBE_SYMBOL(mdscr_write);
|
||||
|
||||
static u32 mdscr_read(void)
|
||||
static u64 mdscr_read(void)
|
||||
{
|
||||
return read_sysreg(mdscr_el1);
|
||||
}
|
||||
@@ -79,16 +83,16 @@ static DEFINE_PER_CPU(int, kde_ref_count);
|
||||
|
||||
void enable_debug_monitors(enum dbg_active_el el)
|
||||
{
|
||||
u32 mdscr, enable = 0;
|
||||
u64 mdscr, enable = 0;
|
||||
|
||||
WARN_ON(preemptible());
|
||||
|
||||
if (this_cpu_inc_return(mde_ref_count) == 1)
|
||||
enable = DBG_MDSCR_MDE;
|
||||
enable = MDSCR_EL1_MDE;
|
||||
|
||||
if (el == DBG_ACTIVE_EL1 &&
|
||||
this_cpu_inc_return(kde_ref_count) == 1)
|
||||
enable |= DBG_MDSCR_KDE;
|
||||
enable |= MDSCR_EL1_KDE;
|
||||
|
||||
if (enable && debug_enabled) {
|
||||
mdscr = mdscr_read();
|
||||
@@ -100,16 +104,16 @@ NOKPROBE_SYMBOL(enable_debug_monitors);
|
||||
|
||||
void disable_debug_monitors(enum dbg_active_el el)
|
||||
{
|
||||
u32 mdscr, disable = 0;
|
||||
u64 mdscr, disable = 0;
|
||||
|
||||
WARN_ON(preemptible());
|
||||
|
||||
if (this_cpu_dec_return(mde_ref_count) == 0)
|
||||
disable = ~DBG_MDSCR_MDE;
|
||||
disable = ~MDSCR_EL1_MDE;
|
||||
|
||||
if (el == DBG_ACTIVE_EL1 &&
|
||||
this_cpu_dec_return(kde_ref_count) == 0)
|
||||
disable &= ~DBG_MDSCR_KDE;
|
||||
disable &= ~MDSCR_EL1_KDE;
|
||||
|
||||
if (disable) {
|
||||
mdscr = mdscr_read();
|
||||
@@ -156,74 +160,6 @@ NOKPROBE_SYMBOL(clear_user_regs_spsr_ss);
|
||||
#define set_regs_spsr_ss(r) set_user_regs_spsr_ss(&(r)->user_regs)
|
||||
#define clear_regs_spsr_ss(r) clear_user_regs_spsr_ss(&(r)->user_regs)
|
||||
|
||||
static DEFINE_SPINLOCK(debug_hook_lock);
|
||||
static LIST_HEAD(user_step_hook);
|
||||
static LIST_HEAD(kernel_step_hook);
|
||||
|
||||
static void register_debug_hook(struct list_head *node, struct list_head *list)
|
||||
{
|
||||
spin_lock(&debug_hook_lock);
|
||||
list_add_rcu(node, list);
|
||||
spin_unlock(&debug_hook_lock);
|
||||
|
||||
}
|
||||
|
||||
static void unregister_debug_hook(struct list_head *node)
|
||||
{
|
||||
spin_lock(&debug_hook_lock);
|
||||
list_del_rcu(node);
|
||||
spin_unlock(&debug_hook_lock);
|
||||
synchronize_rcu();
|
||||
}
|
||||
|
||||
void register_user_step_hook(struct step_hook *hook)
|
||||
{
|
||||
register_debug_hook(&hook->node, &user_step_hook);
|
||||
}
|
||||
|
||||
void unregister_user_step_hook(struct step_hook *hook)
|
||||
{
|
||||
unregister_debug_hook(&hook->node);
|
||||
}
|
||||
|
||||
void register_kernel_step_hook(struct step_hook *hook)
|
||||
{
|
||||
register_debug_hook(&hook->node, &kernel_step_hook);
|
||||
}
|
||||
|
||||
void unregister_kernel_step_hook(struct step_hook *hook)
|
||||
{
|
||||
unregister_debug_hook(&hook->node);
|
||||
}
|
||||
|
||||
/*
|
||||
* Call registered single step handlers
|
||||
* There is no Syndrome info to check for determining the handler.
|
||||
* So we call all the registered handlers, until the right handler is
|
||||
* found which returns zero.
|
||||
*/
|
||||
static int call_step_hook(struct pt_regs *regs, unsigned long esr)
|
||||
{
|
||||
struct step_hook *hook;
|
||||
struct list_head *list;
|
||||
int retval = DBG_HOOK_ERROR;
|
||||
|
||||
list = user_mode(regs) ? &user_step_hook : &kernel_step_hook;
|
||||
|
||||
/*
|
||||
* Since single-step exception disables interrupt, this function is
|
||||
* entirely not preemptible, and we can use rcu list safely here.
|
||||
*/
|
||||
list_for_each_entry_rcu(hook, list, node) {
|
||||
retval = hook->fn(regs, esr);
|
||||
if (retval == DBG_HOOK_HANDLED)
|
||||
break;
|
||||
}
|
||||
|
||||
return retval;
|
||||
}
|
||||
NOKPROBE_SYMBOL(call_step_hook);
|
||||
|
||||
static void send_user_sigtrap(int si_code)
|
||||
{
|
||||
struct pt_regs *regs = current_pt_regs();
|
||||
@@ -238,105 +174,110 @@ static void send_user_sigtrap(int si_code)
|
||||
"User debug trap");
|
||||
}
|
||||
|
||||
static int single_step_handler(unsigned long unused, unsigned long esr,
|
||||
struct pt_regs *regs)
|
||||
/*
|
||||
* We have already unmasked interrupts and enabled preemption
|
||||
* when calling do_el0_softstep() from entry-common.c.
|
||||
*/
|
||||
void do_el0_softstep(unsigned long esr, struct pt_regs *regs)
|
||||
{
|
||||
bool handler_found = false;
|
||||
if (uprobe_single_step_handler(regs, esr) == DBG_HOOK_HANDLED)
|
||||
return;
|
||||
|
||||
send_user_sigtrap(TRAP_TRACE);
|
||||
/*
|
||||
* If we are stepping a pending breakpoint, call the hw_breakpoint
|
||||
* handler first.
|
||||
* ptrace will disable single step unless explicitly
|
||||
* asked to re-enable it. For other clients, it makes
|
||||
* sense to leave it enabled (i.e. rewind the controls
|
||||
* to the active-not-pending state).
|
||||
*/
|
||||
if (!reinstall_suspended_bps(regs))
|
||||
return 0;
|
||||
user_rewind_single_step(current);
|
||||
}
|
||||
|
||||
if (!handler_found && call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
|
||||
handler_found = true;
|
||||
void do_el1_softstep(unsigned long esr, struct pt_regs *regs)
|
||||
{
|
||||
if (kgdb_single_step_handler(regs, esr) == DBG_HOOK_HANDLED)
|
||||
return;
|
||||
|
||||
if (!handler_found && user_mode(regs)) {
|
||||
send_user_sigtrap(TRAP_TRACE);
|
||||
pr_warn("Unexpected kernel single-step exception at EL1\n");
|
||||
/*
|
||||
* Re-enable stepping since we know that we will be
|
||||
* returning to regs.
|
||||
*/
|
||||
set_regs_spsr_ss(regs);
|
||||
}
|
||||
NOKPROBE_SYMBOL(do_el1_softstep);
|
||||
|
||||
/*
|
||||
* ptrace will disable single step unless explicitly
|
||||
* asked to re-enable it. For other clients, it makes
|
||||
* sense to leave it enabled (i.e. rewind the controls
|
||||
* to the active-not-pending state).
|
||||
*/
|
||||
user_rewind_single_step(current);
|
||||
} else if (!handler_found) {
|
||||
pr_warn("Unexpected kernel single-step exception at EL1\n");
|
||||
/*
|
||||
* Re-enable stepping since we know that we will be
|
||||
* returning to regs.
|
||||
*/
|
||||
set_regs_spsr_ss(regs);
|
||||
static int call_el1_break_hook(struct pt_regs *regs, unsigned long esr)
|
||||
{
|
||||
if (esr_brk_comment(esr) == BUG_BRK_IMM)
|
||||
return bug_brk_handler(regs, esr);
|
||||
|
||||
if (IS_ENABLED(CONFIG_CFI_CLANG) && esr_is_cfi_brk(esr))
|
||||
return cfi_brk_handler(regs, esr);
|
||||
|
||||
if (esr_brk_comment(esr) == FAULT_BRK_IMM)
|
||||
return reserved_fault_brk_handler(regs, esr);
|
||||
|
||||
if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
|
||||
(esr_brk_comment(esr) & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
|
||||
return kasan_brk_handler(regs, esr);
|
||||
|
||||
if (IS_ENABLED(CONFIG_UBSAN_TRAP) && esr_is_ubsan_brk(esr))
|
||||
return ubsan_brk_handler(regs, esr);
|
||||
|
||||
if (IS_ENABLED(CONFIG_KGDB)) {
|
||||
if (esr_brk_comment(esr) == KGDB_DYN_DBG_BRK_IMM)
|
||||
return kgdb_brk_handler(regs, esr);
|
||||
if (esr_brk_comment(esr) == KGDB_COMPILED_DBG_BRK_IMM)
|
||||
return kgdb_compiled_brk_handler(regs, esr);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
NOKPROBE_SYMBOL(single_step_handler);
|
||||
|
||||
static LIST_HEAD(user_break_hook);
|
||||
static LIST_HEAD(kernel_break_hook);
|
||||
|
||||
void register_user_break_hook(struct break_hook *hook)
|
||||
{
|
||||
register_debug_hook(&hook->node, &user_break_hook);
|
||||
}
|
||||
|
||||
void unregister_user_break_hook(struct break_hook *hook)
|
||||
{
|
||||
unregister_debug_hook(&hook->node);
|
||||
}
|
||||
|
||||
void register_kernel_break_hook(struct break_hook *hook)
|
||||
{
|
||||
register_debug_hook(&hook->node, &kernel_break_hook);
|
||||
}
|
||||
|
||||
void unregister_kernel_break_hook(struct break_hook *hook)
|
||||
{
|
||||
unregister_debug_hook(&hook->node);
|
||||
}
|
||||
|
||||
static int call_break_hook(struct pt_regs *regs, unsigned long esr)
|
||||
{
|
||||
struct break_hook *hook;
|
||||
struct list_head *list;
|
||||
|
||||
list = user_mode(regs) ? &user_break_hook : &kernel_break_hook;
|
||||
|
||||
/*
|
||||
* Since brk exception disables interrupt, this function is
|
||||
* entirely not preemptible, and we can use rcu list safely here.
|
||||
*/
|
||||
list_for_each_entry_rcu(hook, list, node) {
|
||||
if ((esr_brk_comment(esr) & ~hook->mask) == hook->imm)
|
||||
return hook->fn(regs, esr);
|
||||
if (IS_ENABLED(CONFIG_KPROBES)) {
|
||||
if (esr_brk_comment(esr) == KPROBES_BRK_IMM)
|
||||
return kprobe_brk_handler(regs, esr);
|
||||
if (esr_brk_comment(esr) == KPROBES_BRK_SS_IMM)
|
||||
return kprobe_ss_brk_handler(regs, esr);
|
||||
}
|
||||
|
||||
if (IS_ENABLED(CONFIG_KRETPROBES) &&
|
||||
esr_brk_comment(esr) == KRETPROBES_BRK_IMM)
|
||||
return kretprobe_brk_handler(regs, esr);
|
||||
|
||||
return DBG_HOOK_ERROR;
|
||||
}
|
||||
NOKPROBE_SYMBOL(call_break_hook);
|
||||
NOKPROBE_SYMBOL(call_el1_break_hook);
|
||||
|
||||
static int brk_handler(unsigned long unused, unsigned long esr,
|
||||
struct pt_regs *regs)
|
||||
/*
|
||||
* We have already unmasked interrupts and enabled preemption
|
||||
* when calling do_el0_brk64() from entry-common.c.
|
||||
*/
|
||||
void do_el0_brk64(unsigned long esr, struct pt_regs *regs)
|
||||
{
|
||||
if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
|
||||
return 0;
|
||||
if (IS_ENABLED(CONFIG_UPROBES) &&
|
||||
esr_brk_comment(esr) == UPROBES_BRK_IMM &&
|
||||
uprobe_brk_handler(regs, esr) == DBG_HOOK_HANDLED)
|
||||
return;
|
||||
|
||||
if (user_mode(regs)) {
|
||||
send_user_sigtrap(TRAP_BRKPT);
|
||||
} else {
|
||||
pr_warn("Unexpected kernel BRK exception at EL1\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
send_user_sigtrap(TRAP_BRKPT);
|
||||
}
|
||||
NOKPROBE_SYMBOL(brk_handler);
|
||||
|
||||
int aarch32_break_handler(struct pt_regs *regs)
|
||||
void do_el1_brk64(unsigned long esr, struct pt_regs *regs)
|
||||
{
|
||||
if (call_el1_break_hook(regs, esr) == DBG_HOOK_HANDLED)
|
||||
return;
|
||||
|
||||
die("Oops - BRK", regs, esr);
|
||||
}
|
||||
NOKPROBE_SYMBOL(do_el1_brk64);
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
void do_bkpt32(unsigned long esr, struct pt_regs *regs)
|
||||
{
|
||||
arm64_notify_die("aarch32 BKPT", regs, SIGTRAP, TRAP_BRKPT, regs->pc, esr);
|
||||
}
|
||||
#endif /* CONFIG_COMPAT */
|
||||
|
||||
bool try_handle_aarch32_break(struct pt_regs *regs)
|
||||
{
|
||||
u32 arm_instr;
|
||||
u16 thumb_instr;
|
||||
@@ -344,7 +285,7 @@ int aarch32_break_handler(struct pt_regs *regs)
|
||||
void __user *pc = (void __user *)instruction_pointer(regs);
|
||||
|
||||
if (!compat_user_mode(regs))
|
||||
return -EFAULT;
|
||||
return false;
|
||||
|
||||
if (compat_thumb_mode(regs)) {
|
||||
/* get 16-bit Thumb instruction */
|
||||
@@ -368,20 +309,12 @@ int aarch32_break_handler(struct pt_regs *regs)
|
||||
}
|
||||
|
||||
if (!bp)
|
||||
return -EFAULT;
|
||||
return false;
|
||||
|
||||
send_user_sigtrap(TRAP_BRKPT);
|
||||
return 0;
|
||||
}
|
||||
NOKPROBE_SYMBOL(aarch32_break_handler);
|
||||
|
||||
void __init debug_traps_init(void)
|
||||
{
|
||||
hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP,
|
||||
TRAP_TRACE, "single-step handler");
|
||||
hook_debug_fault_code(DBG_ESR_EVT_BRK, brk_handler, SIGTRAP,
|
||||
TRAP_BRKPT, "BRK handler");
|
||||
return true;
|
||||
}
|
||||
NOKPROBE_SYMBOL(try_handle_aarch32_break);
|
||||
|
||||
/* Re-enable single step for syscall restarting. */
|
||||
void user_rewind_single_step(struct task_struct *task)
|
||||
@@ -415,7 +348,7 @@ void kernel_enable_single_step(struct pt_regs *regs)
|
||||
{
|
||||
WARN_ON(!irqs_disabled());
|
||||
set_regs_spsr_ss(regs);
|
||||
mdscr_write(mdscr_read() | DBG_MDSCR_SS);
|
||||
mdscr_write(mdscr_read() | MDSCR_EL1_SS);
|
||||
enable_debug_monitors(DBG_ACTIVE_EL1);
|
||||
}
|
||||
NOKPROBE_SYMBOL(kernel_enable_single_step);
|
||||
@@ -423,7 +356,7 @@ NOKPROBE_SYMBOL(kernel_enable_single_step);
|
||||
void kernel_disable_single_step(void)
|
||||
{
|
||||
WARN_ON(!irqs_disabled());
|
||||
mdscr_write(mdscr_read() & ~DBG_MDSCR_SS);
|
||||
mdscr_write(mdscr_read() & ~MDSCR_EL1_SS);
|
||||
disable_debug_monitors(DBG_ACTIVE_EL1);
|
||||
}
|
||||
NOKPROBE_SYMBOL(kernel_disable_single_step);
|
||||
@@ -431,7 +364,7 @@ NOKPROBE_SYMBOL(kernel_disable_single_step);
|
||||
int kernel_active_single_step(void)
|
||||
{
|
||||
WARN_ON(!irqs_disabled());
|
||||
return mdscr_read() & DBG_MDSCR_SS;
|
||||
return mdscr_read() & MDSCR_EL1_SS;
|
||||
}
|
||||
NOKPROBE_SYMBOL(kernel_active_single_step);
|
||||
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
|
||||
#include <asm/efi.h>
|
||||
#include <asm/stacktrace.h>
|
||||
#include <asm/vmap_stack.h>
|
||||
|
||||
static bool region_is_misaligned(const efi_memory_desc_t *md)
|
||||
{
|
||||
@@ -214,9 +215,8 @@ static int __init arm64_efi_rt_init(void)
|
||||
if (!efi_enabled(EFI_RUNTIME_SERVICES))
|
||||
return 0;
|
||||
|
||||
p = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, GFP_KERNEL,
|
||||
NUMA_NO_NODE, &&l);
|
||||
l: if (!p) {
|
||||
p = arch_alloc_vmap_stack(THREAD_SIZE, NUMA_NO_NODE);
|
||||
if (!p) {
|
||||
pr_warn("Failed to allocate EFI runtime stack\n");
|
||||
clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
#include <linux/context_tracking.h>
|
||||
#include <linux/kasan.h>
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/livepatch.h>
|
||||
#include <linux/lockdep.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/resume_user_mode.h>
|
||||
@@ -144,6 +145,9 @@ static void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
|
||||
(void __user *)NULL, current);
|
||||
}
|
||||
|
||||
if (thread_flags & _TIF_PATCH_PENDING)
|
||||
klp_update_patch_state(current);
|
||||
|
||||
if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
|
||||
do_signal(regs);
|
||||
|
||||
@@ -344,7 +348,7 @@ static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
|
||||
|
||||
static void cortex_a76_erratum_1463225_svc_handler(void)
|
||||
{
|
||||
u32 reg, val;
|
||||
u64 reg, val;
|
||||
|
||||
if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
|
||||
return;
|
||||
@@ -354,7 +358,7 @@ static void cortex_a76_erratum_1463225_svc_handler(void)
|
||||
|
||||
__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
|
||||
reg = read_sysreg(mdscr_el1);
|
||||
val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
|
||||
val = reg | MDSCR_EL1_SS | MDSCR_EL1_KDE;
|
||||
write_sysreg(val, mdscr_el1);
|
||||
asm volatile("msr daifclr, #8");
|
||||
isb();
|
||||
@@ -441,6 +445,28 @@ static __always_inline void fpsimd_syscall_exit(void)
|
||||
__this_cpu_write(fpsimd_last_state.to_save, FP_STATE_CURRENT);
|
||||
}
|
||||
|
||||
/*
|
||||
* In debug exception context, we explicitly disable preemption despite
|
||||
* having interrupts disabled.
|
||||
* This serves two purposes: it makes it much less likely that we would
|
||||
* accidentally schedule in exception context and it will force a warning
|
||||
* if we somehow manage to schedule by accident.
|
||||
*/
|
||||
static void debug_exception_enter(struct pt_regs *regs)
|
||||
{
|
||||
preempt_disable();
|
||||
|
||||
/* This code is a bit fragile. Test it. */
|
||||
RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
|
||||
}
|
||||
NOKPROBE_SYMBOL(debug_exception_enter);
|
||||
|
||||
static void debug_exception_exit(struct pt_regs *regs)
|
||||
{
|
||||
preempt_enable_no_resched();
|
||||
}
|
||||
NOKPROBE_SYMBOL(debug_exception_exit);
|
||||
|
||||
UNHANDLED(el1t, 64, sync)
|
||||
UNHANDLED(el1t, 64, irq)
|
||||
UNHANDLED(el1t, 64, fiq)
|
||||
@@ -504,13 +530,51 @@ static void noinstr el1_mops(struct pt_regs *regs, unsigned long esr)
|
||||
exit_to_kernel_mode(regs);
|
||||
}
|
||||
|
||||
static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
|
||||
static void noinstr el1_breakpt(struct pt_regs *regs, unsigned long esr)
|
||||
{
|
||||
arm64_enter_el1_dbg(regs);
|
||||
debug_exception_enter(regs);
|
||||
do_breakpoint(esr, regs);
|
||||
debug_exception_exit(regs);
|
||||
arm64_exit_el1_dbg(regs);
|
||||
}
|
||||
|
||||
static void noinstr el1_softstp(struct pt_regs *regs, unsigned long esr)
|
||||
{
|
||||
arm64_enter_el1_dbg(regs);
|
||||
if (!cortex_a76_erratum_1463225_debug_handler(regs)) {
|
||||
debug_exception_enter(regs);
|
||||
/*
|
||||
* After handling a breakpoint, we suspend the breakpoint
|
||||
* and use single-step to move to the next instruction.
|
||||
* If we are stepping a suspended breakpoint there's nothing more to do:
|
||||
* the single-step is complete.
|
||||
*/
|
||||
if (!try_step_suspended_breakpoints(regs))
|
||||
do_el1_softstep(esr, regs);
|
||||
debug_exception_exit(regs);
|
||||
}
|
||||
arm64_exit_el1_dbg(regs);
|
||||
}
|
||||
|
||||
static void noinstr el1_watchpt(struct pt_regs *regs, unsigned long esr)
|
||||
{
|
||||
/* Watchpoints are the only debug exception to write FAR_EL1 */
|
||||
unsigned long far = read_sysreg(far_el1);
|
||||
|
||||
arm64_enter_el1_dbg(regs);
|
||||
if (!cortex_a76_erratum_1463225_debug_handler(regs))
|
||||
do_debug_exception(far, esr, regs);
|
||||
debug_exception_enter(regs);
|
||||
do_watchpoint(far, esr, regs);
|
||||
debug_exception_exit(regs);
|
||||
arm64_exit_el1_dbg(regs);
|
||||
}
|
||||
|
||||
static void noinstr el1_brk64(struct pt_regs *regs, unsigned long esr)
|
||||
{
|
||||
arm64_enter_el1_dbg(regs);
|
||||
debug_exception_enter(regs);
|
||||
do_el1_brk64(esr, regs);
|
||||
debug_exception_exit(regs);
|
||||
arm64_exit_el1_dbg(regs);
|
||||
}
|
||||
|
||||
@@ -553,10 +617,16 @@ asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
|
||||
el1_mops(regs, esr);
|
||||
break;
|
||||
case ESR_ELx_EC_BREAKPT_CUR:
|
||||
el1_breakpt(regs, esr);
|
||||
break;
|
||||
case ESR_ELx_EC_SOFTSTP_CUR:
|
||||
el1_softstp(regs, esr);
|
||||
break;
|
||||
case ESR_ELx_EC_WATCHPT_CUR:
|
||||
el1_watchpt(regs, esr);
|
||||
break;
|
||||
case ESR_ELx_EC_BRK64:
|
||||
el1_dbg(regs, esr);
|
||||
el1_brk64(regs, esr);
|
||||
break;
|
||||
case ESR_ELx_EC_FPAC:
|
||||
el1_fpac(regs, esr);
|
||||
@@ -747,17 +817,59 @@ static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
|
||||
exit_to_user_mode(regs);
|
||||
}
|
||||
|
||||
static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
|
||||
static void noinstr el0_breakpt(struct pt_regs *regs, unsigned long esr)
|
||||
{
|
||||
/* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
|
||||
if (!is_ttbr0_addr(regs->pc))
|
||||
arm64_apply_bp_hardening();
|
||||
|
||||
enter_from_user_mode(regs);
|
||||
debug_exception_enter(regs);
|
||||
do_breakpoint(esr, regs);
|
||||
debug_exception_exit(regs);
|
||||
local_daif_restore(DAIF_PROCCTX);
|
||||
exit_to_user_mode(regs);
|
||||
}
|
||||
|
||||
static void noinstr el0_softstp(struct pt_regs *regs, unsigned long esr)
|
||||
{
|
||||
if (!is_ttbr0_addr(regs->pc))
|
||||
arm64_apply_bp_hardening();
|
||||
|
||||
enter_from_user_mode(regs);
|
||||
/*
|
||||
* After handling a breakpoint, we suspend the breakpoint
|
||||
* and use single-step to move to the next instruction.
|
||||
* If we are stepping a suspended breakpoint there's nothing more to do:
|
||||
* the single-step is complete.
|
||||
*/
|
||||
if (!try_step_suspended_breakpoints(regs)) {
|
||||
local_daif_restore(DAIF_PROCCTX);
|
||||
do_el0_softstep(esr, regs);
|
||||
}
|
||||
exit_to_user_mode(regs);
|
||||
}
|
||||
|
||||
static void noinstr el0_watchpt(struct pt_regs *regs, unsigned long esr)
|
||||
{
|
||||
/* Watchpoints are the only debug exception to write FAR_EL1 */
|
||||
unsigned long far = read_sysreg(far_el1);
|
||||
|
||||
enter_from_user_mode(regs);
|
||||
do_debug_exception(far, esr, regs);
|
||||
debug_exception_enter(regs);
|
||||
do_watchpoint(far, esr, regs);
|
||||
debug_exception_exit(regs);
|
||||
local_daif_restore(DAIF_PROCCTX);
|
||||
exit_to_user_mode(regs);
|
||||
}
|
||||
|
||||
static void noinstr el0_brk64(struct pt_regs *regs, unsigned long esr)
|
||||
{
|
||||
enter_from_user_mode(regs);
|
||||
local_daif_restore(DAIF_PROCCTX);
|
||||
do_el0_brk64(esr, regs);
|
||||
exit_to_user_mode(regs);
|
||||
}
|
||||
|
||||
static void noinstr el0_svc(struct pt_regs *regs)
|
||||
{
|
||||
enter_from_user_mode(regs);
|
||||
@@ -826,10 +938,16 @@ asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
|
||||
el0_gcs(regs, esr);
|
||||
break;
|
||||
case ESR_ELx_EC_BREAKPT_LOW:
|
||||
el0_breakpt(regs, esr);
|
||||
break;
|
||||
case ESR_ELx_EC_SOFTSTP_LOW:
|
||||
el0_softstp(regs, esr);
|
||||
break;
|
||||
case ESR_ELx_EC_WATCHPT_LOW:
|
||||
el0_watchpt(regs, esr);
|
||||
break;
|
||||
case ESR_ELx_EC_BRK64:
|
||||
el0_dbg(regs, esr);
|
||||
el0_brk64(regs, esr);
|
||||
break;
|
||||
case ESR_ELx_EC_FPAC:
|
||||
el0_fpac(regs, esr);
|
||||
@@ -912,6 +1030,14 @@ static void noinstr el0_svc_compat(struct pt_regs *regs)
|
||||
exit_to_user_mode(regs);
|
||||
}
|
||||
|
||||
static void noinstr el0_bkpt32(struct pt_regs *regs, unsigned long esr)
|
||||
{
|
||||
enter_from_user_mode(regs);
|
||||
local_daif_restore(DAIF_PROCCTX);
|
||||
do_bkpt32(esr, regs);
|
||||
exit_to_user_mode(regs);
|
||||
}
|
||||
|
||||
asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
|
||||
{
|
||||
unsigned long esr = read_sysreg(esr_el1);
|
||||
@@ -946,10 +1072,16 @@ asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
|
||||
el0_cp15(regs, esr);
|
||||
break;
|
||||
case ESR_ELx_EC_BREAKPT_LOW:
|
||||
el0_breakpt(regs, esr);
|
||||
break;
|
||||
case ESR_ELx_EC_SOFTSTP_LOW:
|
||||
el0_softstp(regs, esr);
|
||||
break;
|
||||
case ESR_ELx_EC_WATCHPT_LOW:
|
||||
el0_watchpt(regs, esr);
|
||||
break;
|
||||
case ESR_ELx_EC_BKPT32:
|
||||
el0_dbg(regs, esr);
|
||||
el0_bkpt32(regs, esr);
|
||||
break;
|
||||
default:
|
||||
el0_inv(regs, esr);
|
||||
@@ -977,7 +1109,6 @@ UNHANDLED(el0t, 32, fiq)
|
||||
UNHANDLED(el0t, 32, error)
|
||||
#endif /* CONFIG_COMPAT */
|
||||
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
|
||||
{
|
||||
unsigned long esr = read_sysreg(esr_el1);
|
||||
@@ -986,7 +1117,6 @@ asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
|
||||
arm64_enter_nmi(regs);
|
||||
panic_bad_stack(regs, esr, far);
|
||||
}
|
||||
#endif /* CONFIG_VMAP_STACK */
|
||||
|
||||
#ifdef CONFIG_ARM_SDE_INTERFACE
|
||||
asmlinkage noinstr unsigned long
|
||||
|
||||
@@ -55,7 +55,6 @@
|
||||
.endif
|
||||
|
||||
sub sp, sp, #PT_REGS_SIZE
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
/*
|
||||
* Test whether the SP has overflowed, without corrupting a GPR.
|
||||
* Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
|
||||
@@ -97,7 +96,6 @@
|
||||
/* We were already on the overflow stack. Restore sp/x0 and carry on. */
|
||||
sub sp, sp, x0
|
||||
mrs x0, tpidrro_el0
|
||||
#endif
|
||||
b el\el\ht\()_\regsize\()_\label
|
||||
.org .Lventry_start\@ + 128 // Did we overflow the ventry slot?
|
||||
.endm
@@ -540,7 +538,6 @@ SYM_CODE_START(vectors)
 	kernel_ventry	0, t, 32, error		// Error 32-bit EL0
 SYM_CODE_END(vectors)

-#ifdef CONFIG_VMAP_STACK
 SYM_CODE_START_LOCAL(__bad_stack)
 	/*
 	 * We detected an overflow in kernel_ventry, which switched to the
@@ -568,7 +565,6 @@ SYM_CODE_START_LOCAL(__bad_stack)
 	bl	handle_bad_stack
 	ASM_BUG()
 SYM_CODE_END(__bad_stack)
-#endif /* CONFIG_VMAP_STACK */


 .macro entry_handler el:req, ht:req, regsize:req, label:req
@@ -1003,7 +999,6 @@ SYM_CODE_START(__sdei_asm_handler)
 1:	adr_this_cpu	dst=x5, sym=sdei_active_critical_event, tmp=x6
 2:	str	x19, [x5]

-#ifdef CONFIG_VMAP_STACK
 	/*
 	 * entry.S may have been using sp as a scratch register, find whether
 	 * this is a normal or critical event and switch to the appropriate
@@ -1016,7 +1011,6 @@ SYM_CODE_START(__sdei_asm_handler)
 2:	mov	x6, #SDEI_STACK_SIZE
 	add	x5, x5, x6
 	mov	sp, x5
-#endif

 #ifdef CONFIG_SHADOW_CALL_STACK
 	/* Use a separate shadow call stack for normal and critical events */

@@ -22,6 +22,7 @@
 #include <asm/current.h>
 #include <asm/debug-monitors.h>
 #include <asm/esr.h>
+#include <asm/exception.h>
 #include <asm/hw_breakpoint.h>
 #include <asm/traps.h>
 #include <asm/cputype.h>
@@ -618,8 +619,7 @@ NOKPROBE_SYMBOL(toggle_bp_registers);
 /*
  * Debug exception handlers.
  */
-static int breakpoint_handler(unsigned long unused, unsigned long esr,
-			      struct pt_regs *regs)
+void do_breakpoint(unsigned long esr, struct pt_regs *regs)
 {
 	int i, step = 0, *kernel_step;
 	u32 ctrl_reg;
@@ -662,7 +662,7 @@ static int breakpoint_handler(unsigned long unused, unsigned long esr,
 	}

 	if (!step)
-		return 0;
+		return;

 	if (user_mode(regs)) {
 		debug_info->bps_disabled = 1;
@@ -670,7 +670,7 @@ static int breakpoint_handler(unsigned long unused, unsigned long esr,

 		/* If we're already stepping a watchpoint, just return. */
 		if (debug_info->wps_disabled)
-			return 0;
+			return;

 		if (test_thread_flag(TIF_SINGLESTEP))
 			debug_info->suspended_step = 1;
@@ -681,7 +681,7 @@ static int breakpoint_handler(unsigned long unused, unsigned long esr,
 		kernel_step = this_cpu_ptr(&stepping_kernel_bp);

 		if (*kernel_step != ARM_KERNEL_STEP_NONE)
-			return 0;
+			return;

 		if (kernel_active_single_step()) {
 			*kernel_step = ARM_KERNEL_STEP_SUSPEND;
@@ -690,10 +690,8 @@ static int breakpoint_handler(unsigned long unused, unsigned long esr,
 			kernel_enable_single_step(regs);
 		}
 	}
-
-	return 0;
 }
-NOKPROBE_SYMBOL(breakpoint_handler);
+NOKPROBE_SYMBOL(do_breakpoint);

 /*
  * Arm64 hardware does not always report a watchpoint hit address that matches
@@ -752,8 +750,7 @@ static int watchpoint_report(struct perf_event *wp, unsigned long addr,
 	return step;
 }

-static int watchpoint_handler(unsigned long addr, unsigned long esr,
-			      struct pt_regs *regs)
+void do_watchpoint(unsigned long addr, unsigned long esr, struct pt_regs *regs)
 {
 	int i, step = 0, *kernel_step, access, closest_match = 0;
 	u64 min_dist = -1, dist;
@@ -808,7 +805,7 @@ static int watchpoint_handler(unsigned long addr, unsigned long esr,
 	rcu_read_unlock();

 	if (!step)
-		return 0;
+		return;

 	/*
 	 * We always disable EL0 watchpoints because the kernel can
@@ -821,7 +818,7 @@ static int watchpoint_handler(unsigned long addr, unsigned long esr,

 		/* If we're already stepping a breakpoint, just return. */
 		if (debug_info->bps_disabled)
-			return 0;
+			return;

 		if (test_thread_flag(TIF_SINGLESTEP))
 			debug_info->suspended_step = 1;
@@ -832,7 +829,7 @@ static int watchpoint_handler(unsigned long addr, unsigned long esr,
 		kernel_step = this_cpu_ptr(&stepping_kernel_bp);

 		if (*kernel_step != ARM_KERNEL_STEP_NONE)
-			return 0;
+			return;

 		if (kernel_active_single_step()) {
 			*kernel_step = ARM_KERNEL_STEP_SUSPEND;
@@ -841,44 +838,41 @@ static int watchpoint_handler(unsigned long addr, unsigned long esr,
 			kernel_enable_single_step(regs);
 		}
 	}
-
-	return 0;
 }
-NOKPROBE_SYMBOL(watchpoint_handler);
+NOKPROBE_SYMBOL(do_watchpoint);

 /*
  * Handle single-step exception.
  */
-int reinstall_suspended_bps(struct pt_regs *regs)
+bool try_step_suspended_breakpoints(struct pt_regs *regs)
 {
 	struct debug_info *debug_info = &current->thread.debug;
-	int handled_exception = 0, *kernel_step;
-
-	kernel_step = this_cpu_ptr(&stepping_kernel_bp);
+	int *kernel_step = this_cpu_ptr(&stepping_kernel_bp);
+	bool handled_exception = false;

 	/*
-	 * Called from single-step exception handler.
-	 * Return 0 if execution can resume, 1 if a SIGTRAP should be
-	 * reported.
+	 * Called from single-step exception entry.
+	 * Return true if we stepped a breakpoint and can resume execution,
+	 * false if we need to handle a single-step.
 	 */
 	if (user_mode(regs)) {
 		if (debug_info->bps_disabled) {
 			debug_info->bps_disabled = 0;
 			toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 1);
-			handled_exception = 1;
+			handled_exception = true;
 		}

 		if (debug_info->wps_disabled) {
 			debug_info->wps_disabled = 0;
 			toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);
-			handled_exception = 1;
+			handled_exception = true;
 		}

 		if (handled_exception) {
 			if (debug_info->suspended_step) {
 				debug_info->suspended_step = 0;
 				/* Allow exception handling to fall-through. */
-				handled_exception = 0;
+				handled_exception = false;
 			} else {
 				user_disable_single_step(current);
 			}
@@ -892,17 +886,17 @@ int reinstall_suspended_bps(struct pt_regs *regs)

 		if (*kernel_step != ARM_KERNEL_STEP_SUSPEND) {
 			kernel_disable_single_step();
-			handled_exception = 1;
+			handled_exception = true;
 		} else {
-			handled_exception = 0;
+			handled_exception = false;
 		}

 		*kernel_step = ARM_KERNEL_STEP_NONE;
 	}

-	return !handled_exception;
+	return handled_exception;
 }
-NOKPROBE_SYMBOL(reinstall_suspended_bps);
+NOKPROBE_SYMBOL(try_step_suspended_breakpoints);
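
With the int-returning reinstall_suspended_bps() gone, a single-step entry path can consume the new helper directly. A hedged sketch of such a caller; the names around try_step_suspended_breakpoints() are illustrative assumptions, not the series' actual entry code:

/* Hypothetical single-step entry consumer (names are illustrative). */
static void handle_el0_softstep(struct pt_regs *regs, unsigned long esr)
{
	/* A suspended breakpoint was stepped; just resume execution. */
	if (try_step_suspended_breakpoints(regs))
		return;

	/* Otherwise treat it as a genuine single-step and raise SIGTRAP. */
	arm64_force_sig_fault(SIGTRAP, TRAP_TRACE,
			      instruction_pointer(regs), "single-step");
}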

 /*
  * Context-switcher for restoring suspended breakpoints.
@@ -987,12 +981,6 @@ static int __init arch_hw_breakpoint_init(void)
 	pr_info("found %d breakpoint and %d watchpoint registers.\n",
 		core_num_brps, core_num_wrps);

-	/* Register debug fault handlers. */
-	hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
-			      TRAP_HWBKPT, "hw-breakpoint handler");
-	hook_debug_fault_code(DBG_ESR_EVT_HWWP, watchpoint_handler, SIGTRAP,
-			      TRAP_HWBKPT, "hw-watchpoint handler");
-
 	/*
 	 * Reset the breakpoint resources. We assume that a halting
 	 * debugger will leave the world in a nice state for us.

@@ -51,7 +51,6 @@ static void init_irq_scs(void)
 		scs_alloc(early_cpu_to_node(cpu));
 }

-#ifdef CONFIG_VMAP_STACK
 static void __init init_irq_stacks(void)
 {
 	int cpu;
@@ -62,18 +61,6 @@ static void __init init_irq_stacks(void)
 		per_cpu(irq_stack_ptr, cpu) = p;
 	}
 }
-#else
-/* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */
-DEFINE_PER_CPU_ALIGNED(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack);
-
-static void init_irq_stacks(void)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu)
-		per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack, cpu);
-}
-#endif

 #ifndef CONFIG_PREEMPT_RT
 static void ____do_softirq(struct pt_regs *regs)

@@ -234,23 +234,23 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,
 	return err;
 }

-static int kgdb_brk_fn(struct pt_regs *regs, unsigned long esr)
+int kgdb_brk_handler(struct pt_regs *regs, unsigned long esr)
 {
 	kgdb_handle_exception(1, SIGTRAP, 0, regs);
 	return DBG_HOOK_HANDLED;
 }
-NOKPROBE_SYMBOL(kgdb_brk_fn)
+NOKPROBE_SYMBOL(kgdb_brk_handler)

-static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned long esr)
+int kgdb_compiled_brk_handler(struct pt_regs *regs, unsigned long esr)
 {
 	compiled_break = 1;
 	kgdb_handle_exception(1, SIGTRAP, 0, regs);

 	return DBG_HOOK_HANDLED;
 }
-NOKPROBE_SYMBOL(kgdb_compiled_brk_fn);
+NOKPROBE_SYMBOL(kgdb_compiled_brk_handler);

-static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned long esr)
+int kgdb_single_step_handler(struct pt_regs *regs, unsigned long esr)
 {
 	if (!kgdb_single_step)
 		return DBG_HOOK_ERROR;
@@ -258,21 +258,7 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned long esr)
 	kgdb_handle_exception(0, SIGTRAP, 0, regs);
 	return DBG_HOOK_HANDLED;
 }
-NOKPROBE_SYMBOL(kgdb_step_brk_fn);
-
-static struct break_hook kgdb_brkpt_hook = {
-	.fn = kgdb_brk_fn,
-	.imm = KGDB_DYN_DBG_BRK_IMM,
-};
-
-static struct break_hook kgdb_compiled_brkpt_hook = {
-	.fn = kgdb_compiled_brk_fn,
-	.imm = KGDB_COMPILED_DBG_BRK_IMM,
-};
-
-static struct step_hook kgdb_step_hook = {
-	.fn = kgdb_step_brk_fn
-};
+NOKPROBE_SYMBOL(kgdb_single_step_handler);

 static int __kgdb_notify(struct die_args *args, unsigned long cmd)
 {
@@ -311,15 +297,7 @@ static struct notifier_block kgdb_notifier = {
  */
 int kgdb_arch_init(void)
 {
-	int ret = register_die_notifier(&kgdb_notifier);
-
-	if (ret != 0)
-		return ret;
-
-	register_kernel_break_hook(&kgdb_brkpt_hook);
-	register_kernel_break_hook(&kgdb_compiled_brkpt_hook);
-	register_kernel_step_hook(&kgdb_step_hook);
-	return 0;
+	return register_die_notifier(&kgdb_notifier);
 }

 /*
@@ -329,9 +307,6 @@ int kgdb_arch_init(void)
  */
 void kgdb_arch_exit(void)
 {
-	unregister_kernel_break_hook(&kgdb_brkpt_hook);
-	unregister_kernel_break_hook(&kgdb_compiled_brkpt_hook);
-	unregister_kernel_step_hook(&kgdb_step_hook);
 	unregister_die_notifier(&kgdb_notifier);
 }


@@ -23,6 +23,7 @@
 #include <asm/insn.h>
 #include <asm/scs.h>
 #include <asm/sections.h>
+#include <asm/text-patching.h>

 enum aarch64_reloc_op {
 	RELOC_OP_NONE,
@@ -48,7 +49,17 @@ static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
 	return 0;
 }

-static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
+#define WRITE_PLACE(place, val, mod) do {				\
+	__typeof__(val) __val = (val);					\
+									\
+	if (mod->state == MODULE_STATE_UNFORMED)			\
+		*(place) = __val;					\
+	else								\
+		aarch64_insn_copy(place, &(__val), sizeof(*place));	\
+} while (0)
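
WRITE_PLACE() is the crux of the text-poke conversion: an UNFORMED module's text is still writable, so a plain store suffices, while late (livepatch) relocations must go through the instruction patching API. A sketch of the same logic as a function, for a single 32-bit slot, relying only on mod->state and aarch64_insn_copy() as used by the macro above:

static void write_relocated_insn(__le32 *place, __le32 insn,
				 struct module *mod)
{
	if (mod->state == MODULE_STATE_UNFORMED)
		*place = insn;					/* text still writable */
	else
		aarch64_insn_copy(place, &insn, sizeof(insn));	/* text-poke */
}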

+static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len,
+		      struct module *me)
 {
 	s64 sval = do_reloc(op, place, val);

@@ -66,7 +77,7 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)

 	switch (len) {
 	case 16:
-		*(s16 *)place = sval;
+		WRITE_PLACE((s16 *)place, sval, me);
 		switch (op) {
 		case RELOC_OP_ABS:
 			if (sval < 0 || sval > U16_MAX)
@@ -82,7 +93,7 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
 		}
 		break;
 	case 32:
-		*(s32 *)place = sval;
+		WRITE_PLACE((s32 *)place, sval, me);
 		switch (op) {
 		case RELOC_OP_ABS:
 			if (sval < 0 || sval > U32_MAX)
@@ -98,7 +109,7 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
 		}
 		break;
 	case 64:
-		*(s64 *)place = sval;
+		WRITE_PLACE((s64 *)place, sval, me);
 		break;
 	default:
 		pr_err("Invalid length (%d) for data relocation\n", len);
@@ -113,7 +124,8 @@ enum aarch64_insn_movw_imm_type {
 };

 static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
-			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
+			   int lsb, enum aarch64_insn_movw_imm_type imm_type,
+			   struct module *me)
 {
 	u64 imm;
 	s64 sval;
@@ -145,7 +157,7 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,

 	/* Update the instruction with the new encoding. */
 	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
-	*place = cpu_to_le32(insn);
+	WRITE_PLACE(place, cpu_to_le32(insn), me);

 	if (imm > U16_MAX)
 		return -ERANGE;
@@ -154,7 +166,8 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
 }

 static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
-			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
+			  int lsb, int len, enum aarch64_insn_imm_type imm_type,
+			  struct module *me)
 {
 	u64 imm, imm_mask;
 	s64 sval;
@@ -170,7 +183,7 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,

 	/* Update the instruction's immediate field. */
 	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
-	*place = cpu_to_le32(insn);
+	WRITE_PLACE(place, cpu_to_le32(insn), me);

 	/*
 	 * Extract the upper value bits (including the sign bit) and
@@ -189,17 +202,17 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
 }

 static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
-			   __le32 *place, u64 val)
+			   __le32 *place, u64 val, struct module *me)
 {
 	u32 insn;

 	if (!is_forbidden_offset_for_adrp(place))
 		return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
-				      AARCH64_INSN_IMM_ADR);
+				      AARCH64_INSN_IMM_ADR, me);

 	/* patch ADRP to ADR if it is in range */
 	if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
-			    AARCH64_INSN_IMM_ADR)) {
+			    AARCH64_INSN_IMM_ADR, me)) {
 		insn = le32_to_cpu(*place);
 		insn &= ~BIT(31);
 	} else {
@@ -211,7 +224,7 @@ static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
 					  AARCH64_INSN_BRANCH_NOLINK);
 	}

-	*place = cpu_to_le32(insn);
+	WRITE_PLACE(place, cpu_to_le32(insn), me);
 	return 0;
 }

@@ -255,23 +268,23 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 		/* Data relocations. */
 		case R_AARCH64_ABS64:
 			overflow_check = false;
-			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
+			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64, me);
 			break;
 		case R_AARCH64_ABS32:
-			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
+			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32, me);
 			break;
 		case R_AARCH64_ABS16:
-			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
+			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16, me);
 			break;
 		case R_AARCH64_PREL64:
 			overflow_check = false;
-			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
+			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64, me);
 			break;
 		case R_AARCH64_PREL32:
-			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
+			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32, me);
 			break;
 		case R_AARCH64_PREL16:
-			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
+			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16, me);
 			break;

 		/* MOVW instruction relocations. */
@@ -280,88 +293,88 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 			fallthrough;
 		case R_AARCH64_MOVW_UABS_G0:
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
-					      AARCH64_INSN_IMM_MOVKZ);
+					      AARCH64_INSN_IMM_MOVKZ, me);
 			break;
 		case R_AARCH64_MOVW_UABS_G1_NC:
 			overflow_check = false;
 			fallthrough;
 		case R_AARCH64_MOVW_UABS_G1:
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
-					      AARCH64_INSN_IMM_MOVKZ);
+					      AARCH64_INSN_IMM_MOVKZ, me);
 			break;
 		case R_AARCH64_MOVW_UABS_G2_NC:
 			overflow_check = false;
 			fallthrough;
 		case R_AARCH64_MOVW_UABS_G2:
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
-					      AARCH64_INSN_IMM_MOVKZ);
+					      AARCH64_INSN_IMM_MOVKZ, me);
 			break;
 		case R_AARCH64_MOVW_UABS_G3:
 			/* We're using the top bits so we can't overflow. */
 			overflow_check = false;
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
-					      AARCH64_INSN_IMM_MOVKZ);
+					      AARCH64_INSN_IMM_MOVKZ, me);
 			break;
 		case R_AARCH64_MOVW_SABS_G0:
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
-					      AARCH64_INSN_IMM_MOVNZ);
+					      AARCH64_INSN_IMM_MOVNZ, me);
 			break;
 		case R_AARCH64_MOVW_SABS_G1:
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
-					      AARCH64_INSN_IMM_MOVNZ);
+					      AARCH64_INSN_IMM_MOVNZ, me);
 			break;
 		case R_AARCH64_MOVW_SABS_G2:
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
-					      AARCH64_INSN_IMM_MOVNZ);
+					      AARCH64_INSN_IMM_MOVNZ, me);
 			break;
 		case R_AARCH64_MOVW_PREL_G0_NC:
 			overflow_check = false;
 			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
-					      AARCH64_INSN_IMM_MOVKZ);
+					      AARCH64_INSN_IMM_MOVKZ, me);
 			break;
 		case R_AARCH64_MOVW_PREL_G0:
 			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
-					      AARCH64_INSN_IMM_MOVNZ);
+					      AARCH64_INSN_IMM_MOVNZ, me);
 			break;
 		case R_AARCH64_MOVW_PREL_G1_NC:
 			overflow_check = false;
 			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
-					      AARCH64_INSN_IMM_MOVKZ);
+					      AARCH64_INSN_IMM_MOVKZ, me);
 			break;
 		case R_AARCH64_MOVW_PREL_G1:
 			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
-					      AARCH64_INSN_IMM_MOVNZ);
+					      AARCH64_INSN_IMM_MOVNZ, me);
 			break;
 		case R_AARCH64_MOVW_PREL_G2_NC:
 			overflow_check = false;
 			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
-					      AARCH64_INSN_IMM_MOVKZ);
+					      AARCH64_INSN_IMM_MOVKZ, me);
 			break;
 		case R_AARCH64_MOVW_PREL_G2:
 			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
-					      AARCH64_INSN_IMM_MOVNZ);
+					      AARCH64_INSN_IMM_MOVNZ, me);
 			break;
 		case R_AARCH64_MOVW_PREL_G3:
 			/* We're using the top bits so we can't overflow. */
 			overflow_check = false;
 			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
-					      AARCH64_INSN_IMM_MOVNZ);
+					      AARCH64_INSN_IMM_MOVNZ, me);
 			break;

 		/* Immediate instruction relocations. */
 		case R_AARCH64_LD_PREL_LO19:
 			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
-					     AARCH64_INSN_IMM_19);
+					     AARCH64_INSN_IMM_19, me);
 			break;
 		case R_AARCH64_ADR_PREL_LO21:
 			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
-					     AARCH64_INSN_IMM_ADR);
+					     AARCH64_INSN_IMM_ADR, me);
 			break;
 		case R_AARCH64_ADR_PREL_PG_HI21_NC:
 			overflow_check = false;
 			fallthrough;
 		case R_AARCH64_ADR_PREL_PG_HI21:
-			ovf = reloc_insn_adrp(me, sechdrs, loc, val);
+			ovf = reloc_insn_adrp(me, sechdrs, loc, val, me);
 			if (ovf && ovf != -ERANGE)
 				return ovf;
 			break;
@@ -369,46 +382,46 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 		case R_AARCH64_LDST8_ABS_LO12_NC:
 			overflow_check = false;
 			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
-					     AARCH64_INSN_IMM_12);
+					     AARCH64_INSN_IMM_12, me);
 			break;
 		case R_AARCH64_LDST16_ABS_LO12_NC:
 			overflow_check = false;
 			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
-					     AARCH64_INSN_IMM_12);
+					     AARCH64_INSN_IMM_12, me);
 			break;
 		case R_AARCH64_LDST32_ABS_LO12_NC:
 			overflow_check = false;
 			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
-					     AARCH64_INSN_IMM_12);
+					     AARCH64_INSN_IMM_12, me);
 			break;
 		case R_AARCH64_LDST64_ABS_LO12_NC:
 			overflow_check = false;
 			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
-					     AARCH64_INSN_IMM_12);
+					     AARCH64_INSN_IMM_12, me);
 			break;
 		case R_AARCH64_LDST128_ABS_LO12_NC:
 			overflow_check = false;
 			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
-					     AARCH64_INSN_IMM_12);
+					     AARCH64_INSN_IMM_12, me);
 			break;
 		case R_AARCH64_TSTBR14:
 			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
-					     AARCH64_INSN_IMM_14);
+					     AARCH64_INSN_IMM_14, me);
 			break;
 		case R_AARCH64_CONDBR19:
 			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
-					     AARCH64_INSN_IMM_19);
+					     AARCH64_INSN_IMM_19, me);
 			break;
 		case R_AARCH64_JUMP26:
 		case R_AARCH64_CALL26:
 			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
-					     AARCH64_INSN_IMM_26);
+					     AARCH64_INSN_IMM_26, me);
 			if (ovf == -ERANGE) {
 				val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
 				if (!val)
 					return -ENOEXEC;
 				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
-						     26, AARCH64_INSN_IMM_26);
+						     26, AARCH64_INSN_IMM_26, me);
 			}
 			break;


@@ -41,4 +41,4 @@ obj-y := idreg-override.pi.o \
 obj-$(CONFIG_RELOCATABLE)	+= relocate.pi.o
 obj-$(CONFIG_RANDOMIZE_BASE)	+= kaslr_early.pi.o
 obj-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS)	+= patch-scs.pi.o
-extra-y := $(patsubst %.pi.o,%.o,$(obj-y))
+targets := $(patsubst %.pi.o,%.o,$(obj-y))

@@ -292,8 +292,8 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
 	return 0;
 }

-static int __kprobes
-kprobe_breakpoint_handler(struct pt_regs *regs, unsigned long esr)
+int __kprobes
+kprobe_brk_handler(struct pt_regs *regs, unsigned long esr)
 {
 	struct kprobe *p, *cur_kprobe;
 	struct kprobe_ctlblk *kcb;
@@ -336,13 +336,8 @@ kprobe_breakpoint_handler(struct pt_regs *regs, unsigned long esr)
 	return DBG_HOOK_HANDLED;
 }

-static struct break_hook kprobes_break_hook = {
-	.imm = KPROBES_BRK_IMM,
-	.fn = kprobe_breakpoint_handler,
-};
-
-static int __kprobes
-kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned long esr)
+int __kprobes
+kprobe_ss_brk_handler(struct pt_regs *regs, unsigned long esr)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 	unsigned long addr = instruction_pointer(regs);
@@ -360,13 +355,8 @@ kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned long esr)
 	return DBG_HOOK_ERROR;
 }

-static struct break_hook kprobes_break_ss_hook = {
-	.imm = KPROBES_BRK_SS_IMM,
-	.fn = kprobe_breakpoint_ss_handler,
-};
-
-static int __kprobes
-kretprobe_breakpoint_handler(struct pt_regs *regs, unsigned long esr)
+int __kprobes
+kretprobe_brk_handler(struct pt_regs *regs, unsigned long esr)
 {
 	if (regs->pc != (unsigned long)__kretprobe_trampoline)
 		return DBG_HOOK_ERROR;
@@ -375,11 +365,6 @@ kretprobe_breakpoint_handler(struct pt_regs *regs, unsigned long esr)
 	return DBG_HOOK_HANDLED;
 }

-static struct break_hook kretprobes_break_hook = {
-	.imm = KRETPROBES_BRK_IMM,
-	.fn = kretprobe_breakpoint_handler,
-};
-
 /*
  * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
  * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
@@ -422,9 +407,5 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)

 int __init arch_init_kprobes(void)
 {
-	register_kernel_break_hook(&kprobes_break_hook);
-	register_kernel_break_hook(&kprobes_break_ss_hook);
-	register_kernel_break_hook(&kretprobes_break_hook);
-
 	return 0;
 }

@@ -12,7 +12,7 @@
 SYM_CODE_START(__kretprobe_trampoline)
 	/*
 	 * Trigger a breakpoint exception. The PC will be adjusted by
-	 * kretprobe_breakpoint_handler(), and no subsequent instructions will
+	 * kretprobe_brk_handler(), and no subsequent instructions will
 	 * be executed from the trampoline.
 	 */
 	brk #KRETPROBES_BRK_IMM

@@ -173,7 +173,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self,
 	return NOTIFY_DONE;
 }

-static int uprobe_breakpoint_handler(struct pt_regs *regs,
+int uprobe_brk_handler(struct pt_regs *regs,
 				     unsigned long esr)
 {
 	if (uprobe_pre_sstep_notifier(regs))
@@ -182,7 +182,7 @@ static int uprobe_breakpoint_handler(struct pt_regs *regs,
 	return DBG_HOOK_ERROR;
 }

-static int uprobe_single_step_handler(struct pt_regs *regs,
+int uprobe_single_step_handler(struct pt_regs *regs,
 				      unsigned long esr)
 {
 	struct uprobe_task *utask = current->utask;
@@ -194,23 +194,3 @@ static int uprobe_single_step_handler(struct pt_regs *regs,
 	return DBG_HOOK_ERROR;
 }
-
-/* uprobe breakpoint handler hook */
-static struct break_hook uprobes_break_hook = {
-	.imm = UPROBES_BRK_IMM,
-	.fn = uprobe_breakpoint_handler,
-};
-
-/* uprobe single step handler hook */
-static struct step_hook uprobes_step_hook = {
-	.fn = uprobe_single_step_handler,
-};
-
-static int __init arch_init_uprobes(void)
-{
-	register_user_break_hook(&uprobes_break_hook);
-	register_user_step_hook(&uprobes_step_hook);
-
-	return 0;
-}
-
-device_initcall(arch_init_uprobes);

@@ -288,7 +288,9 @@ static void flush_gcs(void)
 	if (!system_supports_gcs())
 		return;

-	gcs_free(current);
+	current->thread.gcspr_el0 = 0;
+	current->thread.gcs_base = 0;
+	current->thread.gcs_size = 0;
 	current->thread.gcs_el0_mode = 0;
 	write_sysreg_s(GCSCRE0_EL1_nTR, SYS_GCSCRE0_EL1);
 	write_sysreg_s(0, SYS_GCSPR_EL0);
@@ -305,13 +307,13 @@ static int copy_thread_gcs(struct task_struct *p,
 	p->thread.gcs_base = 0;
 	p->thread.gcs_size = 0;

+	p->thread.gcs_el0_mode = current->thread.gcs_el0_mode;
+	p->thread.gcs_el0_locked = current->thread.gcs_el0_locked;
+
 	gcs = gcs_alloc_thread_stack(p, args);
 	if (IS_ERR_VALUE(gcs))
 		return PTR_ERR((void *)gcs);

-	p->thread.gcs_el0_mode = current->thread.gcs_el0_mode;
-	p->thread.gcs_el0_locked = current->thread.gcs_el0_locked;
-
 	return 0;
 }

@@ -339,7 +341,6 @@ void flush_thread(void)
 void arch_release_task_struct(struct task_struct *tsk)
 {
 	fpsimd_release_task(tsk);
-	gcs_free(tsk);
 }

 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)

@@ -141,7 +141,7 @@ unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)

 	addr += n;
 	if (regs_within_kernel_stack(regs, (unsigned long)addr))
-		return *addr;
+		return READ_ONCE_NOCHECK(*addr);
 	else
 		return 0;
 }
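
The READ_ONCE_NOCHECK() switch matters because the word being read lives on the traced task's kernel stack rather than in an object the sanitizers can reason about. A sketch, assuming only that READ_ONCE_NOCHECK() is READ_ONCE() minus the KASAN/KCSAN instrumentation:

/*
 * Peek one word from another task's kernel stack. The bounds check is
 * the caller's job (regs_within_kernel_stack() above); the NOCHECK
 * read keeps KASAN from flagging the out-of-object access.
 */
static unsigned long peek_stack_word(unsigned long *addr)
{
	return READ_ONCE_NOCHECK(*addr);
}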

@@ -34,10 +34,8 @@ unsigned long sdei_exit_mode;
 DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
 DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);

-#ifdef CONFIG_VMAP_STACK
 DEFINE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
 DEFINE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
-#endif

 DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
 DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);
@@ -65,8 +63,7 @@ static void free_sdei_stacks(void)
 {
 	int cpu;

-	if (!IS_ENABLED(CONFIG_VMAP_STACK))
-		return;
+	BUILD_BUG_ON(!IS_ENABLED(CONFIG_VMAP_STACK));

 	for_each_possible_cpu(cpu) {
 		_free_sdei_stack(&sdei_stack_normal_ptr, cpu);
@@ -91,8 +88,7 @@ static int init_sdei_stacks(void)
 	int cpu;
 	int err = 0;

-	if (!IS_ENABLED(CONFIG_VMAP_STACK))
-		return 0;
+	BUILD_BUG_ON(!IS_ENABLED(CONFIG_VMAP_STACK));

 	for_each_possible_cpu(cpu) {
 		err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu);

@@ -95,8 +95,11 @@ static void save_reset_user_access_state(struct user_access_state *ua_state)

 		ua_state->por_el0 = read_sysreg_s(SYS_POR_EL0);
 		write_sysreg_s(por_enable_all, SYS_POR_EL0);
-		/* Ensure that any subsequent uaccess observes the updated value */
-		isb();
+		/*
+		 * No ISB required as we can tolerate spurious Overlay faults -
+		 * the fault handler will check again based on the new value
+		 * of POR_EL0.
+		 */
 	}
 }


@@ -152,6 +152,8 @@ kunwind_recover_return_address(struct kunwind_state *state)
 			orig_pc = kretprobe_find_ret_addr(state->task,
 							  (void *)state->common.fp,
 							  &state->kr_cur);
+			if (!orig_pc)
+				return -EINVAL;
 			state->common.pc = orig_pc;
 			state->flags.kretprobe = 1;
 		}
@@ -277,21 +279,24 @@ kunwind_next(struct kunwind_state *state)

 typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);

-static __always_inline void
+static __always_inline int
 do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
 	   void *cookie)
 {
-	if (kunwind_recover_return_address(state))
-		return;
+	int ret;
+
+	ret = kunwind_recover_return_address(state);
+	if (ret)
+		return ret;

 	while (1) {
-		int ret;
-
 		if (!consume_state(state, cookie))
-			break;
+			return -EINVAL;
 		ret = kunwind_next(state);
+		if (ret == -ENOENT)
+			return 0;
 		if (ret < 0)
-			break;
+			return ret;
 	}
 }

@@ -324,7 +329,7 @@ do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
 		 : stackinfo_get_unknown();		\
 	})

-static __always_inline void
+static __always_inline int
 kunwind_stack_walk(kunwind_consume_fn consume_state,
 		   void *cookie, struct task_struct *task,
 		   struct pt_regs *regs)
@@ -332,10 +337,8 @@ kunwind_stack_walk(kunwind_consume_fn consume_state,
 	struct stack_info stacks[] = {
 		stackinfo_get_task(task),
 		STACKINFO_CPU(irq),
-#if defined(CONFIG_VMAP_STACK)
 		STACKINFO_CPU(overflow),
-#endif
-#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
+#if defined(CONFIG_ARM_SDE_INTERFACE)
 		STACKINFO_SDEI(normal),
 		STACKINFO_SDEI(critical),
 #endif
@@ -352,7 +355,7 @@ kunwind_stack_walk(kunwind_consume_fn consume_state,

 	if (regs) {
 		if (task != current)
-			return;
+			return -EINVAL;
 		kunwind_init_from_regs(&state, regs);
 	} else if (task == current) {
 		kunwind_init_from_caller(&state);
@@ -360,7 +363,7 @@ kunwind_stack_walk(kunwind_consume_fn consume_state,
 		kunwind_init_from_task(&state, task);
 	}

-	do_kunwind(&state, consume_state, cookie);
+	return do_kunwind(&state, consume_state, cookie);
 }

 struct kunwind_consume_entry_data {
@@ -387,6 +390,36 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
 	kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
 }

+static __always_inline bool
+arch_reliable_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
+{
+	/*
+	 * At an exception boundary we can reliably consume the saved PC. We do
+	 * not know whether the LR was live when the exception was taken, and
+	 * so we cannot perform the next unwind step reliably.
+	 *
+	 * All that matters is whether the *entire* unwind is reliable, so give
+	 * up as soon as we hit an exception boundary.
+	 */
+	if (state->source == KUNWIND_SOURCE_REGS_PC)
+		return false;
+
+	return arch_kunwind_consume_entry(state, cookie);
+}
+
+noinline noinstr int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+					      void *cookie,
+					      struct task_struct *task)
+{
+	struct kunwind_consume_entry_data data = {
+		.consume_entry = consume_entry,
+		.cookie = cookie,
+	};
+
+	return kunwind_stack_walk(arch_reliable_kunwind_consume_entry, &data,
+				  task, NULL);
+}
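
arch_stack_walk_reliable() is what HAVE_RELIABLE_STACKTRACE consumers such as livepatch sit on top of. A hedged usage sketch; only arch_stack_walk_reliable() and the stack_trace_consume_fn convention come from the code above, the rest is illustrative:

/* Count the entries of a reliable unwind; a negative return from
 * arch_stack_walk_reliable() means the walk could not be guaranteed. */
static bool count_entry(void *cookie, unsigned long pc)
{
	(*(unsigned int *)cookie)++;
	return true;			/* keep walking */
}

static int count_reliable_entries(struct task_struct *task,
				  unsigned int *nr)
{
	*nr = 0;
	return arch_stack_walk_reliable(count_entry, nr, task);
}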

 struct bpf_unwind_consume_entry_data {
 	bool (*consume_entry)(void *cookie, u64 ip, u64 sp, u64 fp);
 	void *cookie;

@@ -454,7 +454,7 @@ void do_el0_undef(struct pt_regs *regs, unsigned long esr)
 	u32 insn;

 	/* check for AArch32 breakpoint instructions */
-	if (!aarch32_break_handler(regs))
+	if (try_handle_aarch32_break(regs))
 		return;

 	if (user_insn_read(regs, &insn))
@@ -894,8 +894,6 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr)
 		      "Bad EL0 synchronous exception");
 }

-#ifdef CONFIG_VMAP_STACK
-
 DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
 	__aligned(16);

@@ -927,10 +925,10 @@ void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigne
 	nmi_panic(NULL, "kernel stack overflow");
 	cpu_park_loop();
 }
-#endif

 void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr)
 {
+	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
 	console_verbose();

 	pr_crit("SError Interrupt on CPU%d, code 0x%016lx -- %s\n",
@@ -987,7 +985,7 @@ void do_serror(struct pt_regs *regs, unsigned long esr)
 int is_valid_bugaddr(unsigned long addr)
 {
 	/*
-	 * bug_handler() only called for BRK #BUG_BRK_IMM.
+	 * bug_brk_handler() only called for BRK #BUG_BRK_IMM.
 	 * So the answer is trivial -- any spurious instances with no
 	 * bug table entry will be rejected by report_bug() and passed
 	 * back to the debug-monitors code and handled as a fatal
@@ -997,7 +995,7 @@ int is_valid_bugaddr(unsigned long addr)
 }
 #endif

-static int bug_handler(struct pt_regs *regs, unsigned long esr)
+int bug_brk_handler(struct pt_regs *regs, unsigned long esr)
 {
 	switch (report_bug(regs->pc, regs)) {
 	case BUG_TRAP_TYPE_BUG:
@@ -1017,13 +1015,8 @@ static int bug_handler(struct pt_regs *regs, unsigned long esr)
 	return DBG_HOOK_HANDLED;
 }

-static struct break_hook bug_break_hook = {
-	.fn = bug_handler,
-	.imm = BUG_BRK_IMM,
-};
-
 #ifdef CONFIG_CFI_CLANG
-static int cfi_handler(struct pt_regs *regs, unsigned long esr)
+int cfi_brk_handler(struct pt_regs *regs, unsigned long esr)
 {
 	unsigned long target;
 	u32 type;
@@ -1046,15 +1039,9 @@ static int cfi_handler(struct pt_regs *regs, unsigned long esr)
 	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
 	return DBG_HOOK_HANDLED;
 }

-static struct break_hook cfi_break_hook = {
-	.fn = cfi_handler,
-	.imm = CFI_BRK_IMM_BASE,
-	.mask = CFI_BRK_IMM_MASK,
-};
 #endif /* CONFIG_CFI_CLANG */

-static int reserved_fault_handler(struct pt_regs *regs, unsigned long esr)
+int reserved_fault_brk_handler(struct pt_regs *regs, unsigned long esr)
 {
 	pr_err("%s generated an invalid instruction at %pS!\n",
 	       "Kernel text patching",
@@ -1064,11 +1051,6 @@ static int reserved_fault_handler(struct pt_regs *regs, unsigned long esr)
 	return DBG_HOOK_ERROR;
 }

-static struct break_hook fault_break_hook = {
-	.fn = reserved_fault_handler,
-	.imm = FAULT_BRK_IMM,
-};
-
 #ifdef CONFIG_KASAN_SW_TAGS

 #define KASAN_ESR_RECOVER	0x20
@@ -1076,7 +1058,7 @@ static struct break_hook fault_break_hook = {
 #define KASAN_ESR_SIZE_MASK	0x0f
 #define KASAN_ESR_SIZE(esr)	(1 << ((esr) & KASAN_ESR_SIZE_MASK))

-static int kasan_handler(struct pt_regs *regs, unsigned long esr)
+int kasan_brk_handler(struct pt_regs *regs, unsigned long esr)
 {
 	bool recover = esr & KASAN_ESR_RECOVER;
 	bool write = esr & KASAN_ESR_WRITE;
@@ -1107,62 +1089,12 @@ static int kasan_handler(struct pt_regs *regs, unsigned long esr)
 	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
 	return DBG_HOOK_HANDLED;
 }

-static struct break_hook kasan_break_hook = {
-	.fn = kasan_handler,
-	.imm = KASAN_BRK_IMM,
-	.mask = KASAN_BRK_MASK,
-};
 #endif

 #ifdef CONFIG_UBSAN_TRAP
-static int ubsan_handler(struct pt_regs *regs, unsigned long esr)
+int ubsan_brk_handler(struct pt_regs *regs, unsigned long esr)
 {
 	die(report_ubsan_failure(esr & UBSAN_BRK_MASK), regs, esr);
 	return DBG_HOOK_HANDLED;
 }

-static struct break_hook ubsan_break_hook = {
-	.fn = ubsan_handler,
-	.imm = UBSAN_BRK_IMM,
-	.mask = UBSAN_BRK_MASK,
-};
 #endif

-/*
- * Initial handler for AArch64 BRK exceptions
- * This handler only used until debug_traps_init().
- */
-int __init early_brk64(unsigned long addr, unsigned long esr,
-		       struct pt_regs *regs)
-{
-#ifdef CONFIG_CFI_CLANG
-	if (esr_is_cfi_brk(esr))
-		return cfi_handler(regs, esr) != DBG_HOOK_HANDLED;
-#endif
-#ifdef CONFIG_KASAN_SW_TAGS
-	if ((esr_brk_comment(esr) & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
-		return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
-#endif
-#ifdef CONFIG_UBSAN_TRAP
-	if (esr_is_ubsan_brk(esr))
-		return ubsan_handler(regs, esr) != DBG_HOOK_HANDLED;
-#endif
-	return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
-}
-
-void __init trap_init(void)
-{
-	register_kernel_break_hook(&bug_break_hook);
-#ifdef CONFIG_CFI_CLANG
-	register_kernel_break_hook(&cfi_break_hook);
-#endif
-	register_kernel_break_hook(&fault_break_hook);
-#ifdef CONFIG_KASAN_SW_TAGS
-	register_kernel_break_hook(&kasan_break_hook);
-#endif
-#ifdef CONFIG_UBSAN_TRAP
-	register_kernel_break_hook(&ubsan_break_hook);
-#endif
-	debug_traps_init();
-}
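
With the break_hook lists gone, BRK dispatch becomes a static switch on the ESR comment immediate, in the same shape the removed early_brk64() already used. The dispatcher below is a hedged reconstruction, not the series' actual code; only the renamed handlers and the esr_* predicates come from this diff:

static int dispatch_brk64(struct pt_regs *regs, unsigned long esr)
{
#ifdef CONFIG_CFI_CLANG
	if (esr_is_cfi_brk(esr))
		return cfi_brk_handler(regs, esr);
#endif
#ifdef CONFIG_KASAN_SW_TAGS
	if ((esr_brk_comment(esr) & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
		return kasan_brk_handler(regs, esr);
#endif
#ifdef CONFIG_UBSAN_TRAP
	if (esr_is_ubsan_brk(esr))
		return ubsan_brk_handler(regs, esr);
#endif
	return bug_brk_handler(regs, esr);
}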

@@ -1617,8 +1617,8 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
 		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac);
 		break;
 	case SYS_ID_AA64PFR2_EL1:
-		/* We only expose FPMR */
-		val &= ID_AA64PFR2_EL1_FPMR;
+		val &= ID_AA64PFR2_EL1_FPMR |
+		       (kvm_has_mte(vcpu->kvm) ? ID_AA64PFR2_EL1_MTEFAR : 0);
 		break;
 	case SYS_ID_AA64ISAR1_EL1:
 		if (!vcpu_has_ptrauth(vcpu))
@@ -2876,7 +2876,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 					ID_AA64PFR1_EL1_MPAM_frac |
 					ID_AA64PFR1_EL1_RAS_frac |
 					ID_AA64PFR1_EL1_MTE)),
-	ID_WRITABLE(ID_AA64PFR2_EL1, ID_AA64PFR2_EL1_FPMR),
+	ID_WRITABLE(ID_AA64PFR2_EL1,
+		    ID_AA64PFR2_EL1_FPMR |
+		    ID_AA64PFR2_EL1_MTEFAR),
 	ID_UNALLOCATED(4,3),
 	ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
 	ID_HIDDEN(ID_AA64SMFR0_EL1),

@@ -68,7 +68,144 @@ static void contpte_convert(struct mm_struct *mm, unsigned long addr,
 		pte = pte_mkyoung(pte);
 	}

-	__flush_tlb_range(&vma, start_addr, addr, PAGE_SIZE, true, 3);
+	/*
+	 * On eliding the __tlb_flush_range() under BBML2+noabort:
+	 *
+	 * NOTE: Instead of using N=16 as the contiguous block length, we use
+	 *       N=4 for clarity.
+	 *
+	 * NOTE: 'n' and 'c' are used to denote the "contiguous bit" being
+	 *       unset and set, respectively.
+	 *
+	 * We worry about two cases where contiguous bit is used:
+	 *  - When folding N smaller non-contiguous ptes as 1 contiguous block.
+	 *  - When unfolding a contiguous block into N smaller non-contiguous ptes.
+	 *
+	 * Currently, the BBML0 folding case looks as follows:
+	 *
+	 * 0) Initial page-table layout:
+	 *
+	 *   +----+----+----+----+
+	 *   |RO,n|RO,n|RO,n|RW,n| <--- last page being set as RO
+	 *   +----+----+----+----+
+	 *
+	 * 1) Aggregate AF + dirty flags using __ptep_get_and_clear():
+	 *
+	 *   +----+----+----+----+
+	 *   |  0 |  0 |  0 |  0 |
+	 *   +----+----+----+----+
+	 *
+	 * 2) __flush_tlb_range():
+	 *
+	 *   |____ tlbi + dsb ____|
+	 *
+	 * 3) __set_ptes() to repaint contiguous block:
+	 *
+	 *   +----+----+----+----+
+	 *   |RO,c|RO,c|RO,c|RO,c|
+	 *   +----+----+----+----+
+	 *
+	 * 4) The kernel will eventually __flush_tlb() for changed page:
+	 *
+	 *                  |____| <--- tlbi + dsb
+	 *
+	 * As expected, the intermediate tlbi+dsb ensures that other PEs
+	 * only ever see an invalid (0) entry, or the new contiguous TLB entry.
+	 * The final tlbi+dsb will always throw away the newly installed
+	 * contiguous TLB entry, which is a micro-optimisation opportunity,
+	 * but does not affect correctness.
+	 *
+	 * In the BBML2 case, the change is avoiding the intermediate tlbi+dsb.
+	 * This means a few things, but notably other PEs will still "see" any
+	 * stale cached TLB entries. This could lead to a "contiguous bit
+	 * misprogramming" issue until the final tlbi+dsb of the changed page,
+	 * which would clear out both the stale (RW,n) entry and the new (RO,c)
+	 * contiguous entry installed in its place.
+	 *
+	 * What this is saying, is the following:
+	 *
+	 *   +----+----+----+----+
+	 *   |RO,n|RO,n|RO,n|RW,n| <--- old page tables, all non-contiguous
+	 *   +----+----+----+----+
+	 *
+	 *   +----+----+----+----+
+	 *   |RO,c|RO,c|RO,c|RO,c| <--- new page tables, all contiguous
+	 *   +----+----+----+----+
+	 *                /\
+	 *                ||
+	 *
+	 * If both the old single (RW,n) and new contiguous (RO,c) TLB entries
+	 * are present, and a write is made to this address, do we fault or
+	 * is the write permitted (via amalgamation)?
+	 *
+	 * The relevant Arm ARM DDI 0487L.a requirements are RNGLXZ and RJQQTC,
+	 * and together state that when BBML1 or BBML2 are implemented, either
+	 * a TLB conflict abort is raised (which we expressly forbid), or will
+	 * "produce an OA, access permissions, and memory attributes that are
+	 * consistent with any of the programmed translation table values".
+	 *
+	 * That is to say, will either raise a TLB conflict, or produce one of
+	 * the cached TLB entries, but never amalgamate.
+	 *
+	 * Thus, as the page tables are only considered "consistent" after
+	 * the final tlbi+dsb (which evicts both the single stale (RW,n) TLB
+	 * entry as well as the new contiguous (RO,c) TLB entry), omitting the
+	 * initial tlbi+dsb is correct.
+	 *
+	 * It is also important to note that at the end of the BBML2 folding
+	 * case, we are still left with potentially all N TLB entries still
+	 * cached (the N-1 non-contiguous ptes, and the single contiguous
+	 * block). However, over time, natural TLB pressure will cause the
+	 * non-contiguous pte TLB entries to be flushed, leaving only the
+	 * contiguous block TLB entry. This means that omitting the tlbi+dsb is
+	 * not only correct, but also keeps our eventual performance benefits.
+	 *
+	 * For the unfolding case, BBML0 looks as follows:
+	 *
+	 * 0) Initial page-table layout:
+	 *
+	 *   +----+----+----+----+
+	 *   |RW,c|RW,c|RW,c|RW,c| <--- last page being set as RO
+	 *   +----+----+----+----+
+	 *
+	 * 1) Aggregate AF + dirty flags using __ptep_get_and_clear():
+	 *
+	 *   +----+----+----+----+
+	 *   |  0 |  0 |  0 |  0 |
+	 *   +----+----+----+----+
+	 *
+	 * 2) __flush_tlb_range():
+	 *
+	 *   |____ tlbi + dsb ____|
+	 *
+	 * 3) __set_ptes() to repaint as non-contiguous:
+	 *
+	 *   +----+----+----+----+
+	 *   |RW,n|RW,n|RW,n|RW,n|
+	 *   +----+----+----+----+
+	 *
+	 * 4) Update changed page permissions:
+	 *
+	 *   +----+----+----+----+
+	 *   |RW,n|RW,n|RW,n|RO,n| <--- last page permissions set
+	 *   +----+----+----+----+
+	 *
+	 * 5) The kernel will eventually __flush_tlb() for changed page:
+	 *
+	 *                  |____| <--- tlbi + dsb
+	 *
+	 * For BBML2, we again remove the intermediate tlbi+dsb. Here, there
+	 * are no issues, as the final tlbi+dsb covering the changed page is
	 * guaranteed to remove the original large contiguous (RW,c) TLB entry,
+	 * as well as the intermediate (RW,n) TLB entry; the next access will
+	 * install the new (RO,n) TLB entry and the page tables are only
+	 * considered "consistent" after the final tlbi+dsb, so software must
+	 * be prepared for this inconsistency prior to finishing the mm dance
+	 * regardless.
+	 */
+
+	if (!system_supports_bbml2_noabort())
+		__flush_tlb_range(&vma, start_addr, addr, PAGE_SIZE, true, 3);

 	__set_ptes(mm, start_addr, start_ptep, pte, CONT_PTES);
 }
@@ -169,17 +306,46 @@ pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte)
 	for (i = 0; i < CONT_PTES; i++, ptep++) {
 		pte = __ptep_get(ptep);

-		if (pte_dirty(pte))
+		if (pte_dirty(pte)) {
 			orig_pte = pte_mkdirty(orig_pte);
+			for (; i < CONT_PTES; i++, ptep++) {
+				pte = __ptep_get(ptep);
+				if (pte_young(pte)) {
+					orig_pte = pte_mkyoung(orig_pte);
+					break;
+				}
+			}
+			break;
+		}

-		if (pte_young(pte))
+		if (pte_young(pte)) {
 			orig_pte = pte_mkyoung(orig_pte);
+			i++;
+			ptep++;
+			for (; i < CONT_PTES; i++, ptep++) {
+				pte = __ptep_get(ptep);
+				if (pte_dirty(pte)) {
+					orig_pte = pte_mkdirty(orig_pte);
+					break;
+				}
+			}
+			break;
+		}
 	}

 	return orig_pte;
 }
 EXPORT_SYMBOL_GPL(contpte_ptep_get);
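
The rewritten loop is a short-circuit scan: once one of the two software flags is found, only the other is still searched for, and the walk stops as soon as both are known rather than always touching every entry. A standalone model of the access pattern (CONT_PTES is 16 with 4K pages; the struct here is illustrative):

#include <stdbool.h>

struct sw_flags {
	bool dirty;
	bool young;
};

/* Aggregate dirty/young over n entries, stopping once both are set. */
static struct sw_flags scan_flags(const struct sw_flags *pte, int n)
{
	struct sw_flags agg = { false, false };

	for (int i = 0; i < n; i++) {
		agg.dirty |= pte[i].dirty;
		agg.young |= pte[i].young;
		if (agg.dirty && agg.young)
			break;		/* nothing left to learn */
	}
	return agg;
}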

+static inline bool contpte_is_consistent(pte_t pte, unsigned long pfn,
+					 pgprot_t orig_prot)
+{
+	pgprot_t prot = pte_pgprot(pte_mkold(pte_mkclean(pte)));
+
+	return pte_valid_cont(pte) && pte_pfn(pte) == pfn &&
+	       pgprot_val(prot) == pgprot_val(orig_prot);
+}
+
 pte_t contpte_ptep_get_lockless(pte_t *orig_ptep)
 {
 	/*
@@ -202,7 +368,6 @@ pte_t contpte_ptep_get_lockless(pte_t *orig_ptep)
 	pgprot_t orig_prot;
 	unsigned long pfn;
 	pte_t orig_pte;
-	pgprot_t prot;
 	pte_t *ptep;
 	pte_t pte;
 	int i;
@@ -219,18 +384,44 @@ pte_t contpte_ptep_get_lockless(pte_t *orig_ptep)

 	for (i = 0; i < CONT_PTES; i++, ptep++, pfn++) {
 		pte = __ptep_get(ptep);
-		prot = pte_pgprot(pte_mkold(pte_mkclean(pte)));

-		if (!pte_valid_cont(pte) ||
-		    pte_pfn(pte) != pfn ||
-		    pgprot_val(prot) != pgprot_val(orig_prot))
+		if (!contpte_is_consistent(pte, pfn, orig_prot))
 			goto retry;

-		if (pte_dirty(pte))
+		if (pte_dirty(pte)) {
 			orig_pte = pte_mkdirty(orig_pte);
+			for (; i < CONT_PTES; i++, ptep++, pfn++) {
+				pte = __ptep_get(ptep);
+
+				if (!contpte_is_consistent(pte, pfn, orig_prot))
+					goto retry;
+
+				if (pte_young(pte)) {
+					orig_pte = pte_mkyoung(orig_pte);
+					break;
+				}
+			}
+			break;
+		}

-		if (pte_young(pte))
+		if (pte_young(pte)) {
 			orig_pte = pte_mkyoung(orig_pte);
+			i++;
+			ptep++;
+			pfn++;
+			for (; i < CONT_PTES; i++, ptep++, pfn++) {
+				pte = __ptep_get(ptep);
+
+				if (!contpte_is_consistent(pte, pfn, orig_prot))
+					goto retry;
+
+				if (pte_dirty(pte)) {
+					orig_pte = pte_mkdirty(orig_pte);
+					break;
+				}
+			}
+			break;
+		}
 	}

 	return orig_pte;

@@ -53,18 +53,12 @@ struct fault_info {
 };

 static const struct fault_info fault_info[];
-static struct fault_info debug_fault_info[];

 static inline const struct fault_info *esr_to_fault_info(unsigned long esr)
 {
 	return fault_info + (esr & ESR_ELx_FSC);
 }

-static inline const struct fault_info *esr_to_debug_fault_info(unsigned long esr)
-{
-	return debug_fault_info + DBG_ESR_EVT(esr);
-}
-
 static void data_abort_decode(unsigned long esr)
 {
 	unsigned long iss2 = ESR_ELx_ISS2(esr);
@@ -826,6 +820,7 @@ static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs)
 		 */
 		siaddr = untagged_addr(far);
 	}
+	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
 	arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr);

 	return 0;
@@ -837,9 +832,12 @@ static int do_tag_check_fault(unsigned long far, unsigned long esr,
 	/*
 	 * The architecture specifies that bits 63:60 of FAR_EL1 are UNKNOWN
 	 * for tag check faults. Set them to corresponding bits in the untagged
-	 * address.
+	 * address if ARM64_MTE_FAR isn't supported.
+	 * Otherwise, bits 63:60 of FAR_EL1 are not UNKNOWN.
 	 */
-	far = (__untagged_addr(far) & ~MTE_TAG_MASK) | (far & MTE_TAG_MASK);
+	if (!cpus_have_cap(ARM64_MTE_FAR))
+		far = (__untagged_addr(far) & ~MTE_TAG_MASK) | (far & MTE_TAG_MASK);

 	do_bad_area(far, esr, regs);
 	return 0;
 }
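
On CPUs without FEAT_MTE_FAR the top nibble of FAR_EL1 is UNKNOWN for tag check faults and has to be synthesised, while bits 59:56 still carry the logical tag. A worked model of that fixup for a TTBR1 (kernel) address; the address form and the simplified __untagged_addr() stand-in are assumptions:

#include <stdint.h>

#define MTE_TAG_SHIFT	56
#define MTE_TAG_MASK	(0xfULL << MTE_TAG_SHIFT)

static uint64_t fixup_tag_check_far(uint64_t far)
{
	/* Model of __untagged_addr() for a kernel VA: bits 63:60 -> 0b1111. */
	uint64_t untagged = far | (0xfULL << 60);

	/* Keep the MTE tag (bits 59:56), take the defined top bits (63:60). */
	return (untagged & ~MTE_TAG_MASK) | (far & MTE_TAG_MASK);
}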
@@ -938,75 +936,6 @@ void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs)
 }
 NOKPROBE_SYMBOL(do_sp_pc_abort);

-/*
- * __refdata because early_brk64 is __init, but the reference to it is
- * clobbered at arch_initcall time.
- * See traps.c and debug-monitors.c:debug_traps_init().
- */
-static struct fault_info __refdata debug_fault_info[] = {
-	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware breakpoint"	},
-	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware single-step"	},
-	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware watchpoint"	},
-	{ do_bad,	SIGKILL,	SI_KERNEL,	"unknown 3"		},
-	{ do_bad,	SIGTRAP,	TRAP_BRKPT,	"aarch32 BKPT"		},
-	{ do_bad,	SIGKILL,	SI_KERNEL,	"aarch32 vector catch"	},
-	{ early_brk64,	SIGTRAP,	TRAP_BRKPT,	"aarch64 BRK"		},
-	{ do_bad,	SIGKILL,	SI_KERNEL,	"unknown 7"		},
-};
-
-void __init hook_debug_fault_code(int nr,
-				  int (*fn)(unsigned long, unsigned long, struct pt_regs *),
-				  int sig, int code, const char *name)
-{
-	BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info));
-
-	debug_fault_info[nr].fn		= fn;
-	debug_fault_info[nr].sig	= sig;
-	debug_fault_info[nr].code	= code;
-	debug_fault_info[nr].name	= name;
-}
-
-/*
- * In debug exception context, we explicitly disable preemption despite
- * having interrupts disabled.
- * This serves two purposes: it makes it much less likely that we would
- * accidentally schedule in exception context and it will force a warning
- * if we somehow manage to schedule by accident.
- */
-static void debug_exception_enter(struct pt_regs *regs)
-{
-	preempt_disable();
-
-	/* This code is a bit fragile. Test it. */
-	RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
-}
-NOKPROBE_SYMBOL(debug_exception_enter);
-
-static void debug_exception_exit(struct pt_regs *regs)
-{
-	preempt_enable_no_resched();
-}
-NOKPROBE_SYMBOL(debug_exception_exit);
-
-void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr,
-			struct pt_regs *regs)
-{
-	const struct fault_info *inf = esr_to_debug_fault_info(esr);
-	unsigned long pc = instruction_pointer(regs);
-
-	debug_exception_enter(regs);
-
-	if (user_mode(regs) && !is_ttbr0_addr(pc))
-		arm64_apply_bp_hardening();
-
-	if (inf->fn(addr_if_watchpoint, esr, regs)) {
-		arm64_notify_die(inf->name, regs, inf->sig, inf->code, pc, esr);
-	}
-
-	debug_exception_exit(regs);
-}
-NOKPROBE_SYMBOL(do_debug_exception);
-
 /*
  * Used during anonymous page fault handling.
  */

@@ -157,12 +157,6 @@ void gcs_free(struct task_struct *task)
 	if (!system_supports_gcs())
 		return;

-	/*
-	 * When fork() with CLONE_VM fails, the child (tsk) already
-	 * has a GCS allocated, and exit_thread() calls this function
-	 * to free it. In this case the parent (current) and the
-	 * child share the same mm struct.
-	 */
 	if (!task->mm || task->mm != current->mm)
 		return;


@@ -225,7 +225,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 	ncontig = num_contig_ptes(sz, &pgsize);

 	if (!pte_present(pte)) {
-		for (i = 0; i < ncontig; i++, ptep++, addr += pgsize)
+		for (i = 0; i < ncontig; i++, ptep++)
 			__set_ptes_anysz(mm, ptep, pte, 1, pgsize);
 		return;
 	}

@@ -1305,7 +1305,8 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
 	next = addr;
 	end = addr + PUD_SIZE;
 	do {
-		pmd_free_pte_page(pmdp, next);
+		if (pmd_present(pmdp_get(pmdp)))
+			pmd_free_pte_page(pmdp, next);
 	} while (pmdp++, next += PMD_SIZE, next != end);

 	pud_clear(pudp);

@@ -454,7 +454,7 @@ SYM_FUNC_START(__cpu_setup)
 	dsb	nsh

 	msr	cpacr_el1, xzr			// Reset cpacr_el1
-	mov	x1, #1 << 12			// Reset mdscr_el1 and disable
+	mov	x1, MDSCR_EL1_TDCC		// Reset mdscr_el1 and disable
 	msr	mdscr_el1, x1			// access to the DCC from EL0
 	reset_pmuserenr_el0 x1			// Disable PMU access from EL0
 	reset_amuserenr_el0 x1			// Disable AMU access from EL0

@@ -45,6 +45,7 @@ HAS_LPA2
 HAS_LSE_ATOMICS
 HAS_MOPS
 HAS_NESTED_VIRT
+HAS_BBML2_NOABORT
 HAS_PAN
 HAS_PMUV3
 HAS_S1PIE
@@ -68,6 +69,7 @@ MPAM
 MPAM_HCR
 MTE
 MTE_ASYMM
+MTE_FAR
 SME
 SME_FA64
 SME2

@@ -36,7 +36,7 @@ aflags-zboot-header-$(EFI_ZBOOT_FORWARD_CFI) := \
			-DPE_DLL_CHAR_EX=IMAGE_DLLCHARACTERISTICS_EX_FORWARD_CFI_COMPAT

 AFLAGS_zboot-header.o += -DMACHINE_TYPE=IMAGE_FILE_MACHINE_$(EFI_ZBOOT_MACH_TYPE) \
-			 -DZBOOT_EFI_PATH="\"$(realpath $(obj)/vmlinuz.efi.elf)\"" \
+			 -DZBOOT_EFI_PATH="\"$(abspath $(obj)/vmlinuz.efi.elf)\"" \
			 -DZBOOT_SIZE_LEN=$(zboot-size-len-y) \
			 -DCOMP_TYPE="\"$(comp-type-y)\"" \
			 $(aflags-zboot-header-y)

@@ -220,6 +220,9 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
 		feat_mask |= ARM_SMMU_FEAT_VAX;
 	}

+	if (system_supports_bbml2_noabort())
+		feat_mask |= ARM_SMMU_FEAT_BBML2;
+
 	if ((smmu->features & feat_mask) != feat_mask)
 		return false;


@@ -4457,6 +4457,9 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
 	if (FIELD_GET(IDR3_FWB, reg))
 		smmu->features |= ARM_SMMU_FEAT_S2FWB;

+	if (FIELD_GET(IDR3_BBM, reg) == 2)
+		smmu->features |= ARM_SMMU_FEAT_BBML2;
+
 	/* IDR5 */
 	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);


@@ -60,6 +60,7 @@ struct arm_smmu_device;
 #define ARM_SMMU_IDR3			0xc
 #define IDR3_FWB			(1 << 8)
 #define IDR3_RIL			(1 << 10)
+#define IDR3_BBM			GENMASK(12, 11)

 #define ARM_SMMU_IDR5			0x14
 #define IDR5_STALL_MAX			GENMASK(31, 16)
@@ -755,6 +756,7 @@ struct arm_smmu_device {
 #define ARM_SMMU_FEAT_HA		(1 << 21)
 #define ARM_SMMU_FEAT_HD		(1 << 22)
 #define ARM_SMMU_FEAT_S2FWB		(1 << 23)
+#define ARM_SMMU_FEAT_BBML2		(1 << 24)
 	u32				features;

 #define ARM_SMMU_OPT_SKIP_PREFETCH	(1 << 0)
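
The probe keys off a two-bit register field, so GENMASK()/FIELD_GET() do the decode: IDR3 bits 12:11 hold the break-before-make level and only level 2 sets the feature. A minimal sketch of the same check:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define IDR3_BBM	GENMASK(12, 11)

/* True when IDR3 advertises break-before-make level 2 (bits 12:11 == 2). */
static bool idr3_has_bbml2(u32 idr3)
{
	return FIELD_GET(IDR3_BBM, idr3) == 2;
}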

@@ -1503,7 +1503,7 @@ int acpi_parse_spcr(bool enable_earlycon, bool enable_console);
 #else
 static inline int acpi_parse_spcr(bool enable_earlycon, bool enable_console)
 {
-	return 0;
+	return -ENODEV;
 }
 #endif
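
Returning -ENODEV from the stub means callers can no longer mistake "SPCR support compiled out" for "SPCR parsed successfully", which is what allowed the misleading console message. A hedged sketch of the caller-side distinction this enables; the function name and log string are assumptions:

static void setup_spcr_console(void)
{
	int ret = acpi_parse_spcr(true, true);

	if (ret == -ENODEV)
		return;			/* no SPCR support or table: stay quiet */
	if (ret)
		pr_warn("SPCR parsing failed: %d\n", ret);
}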


@@ -12,4 +12,4 @@ $(OUTPUT)/syscall-abi: syscall-abi.c syscall-abi-asm.S
 $(OUTPUT)/tpidr2: tpidr2.c
	$(CC) -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \
		-static -include ../../../../include/nolibc/nolibc.h \
-		-ffreestanding -Wall $^ -o $@ -lgcc
+		-I../.. -ffreestanding -Wall $^ -o $@ -lgcc

@@ -21,6 +21,10 @@

 #define TESTS_PER_HWCAP 3

+#ifndef AT_HWCAP3
+#define AT_HWCAP3 29
+#endif
+
 /*
  * Function expected to generate exception when the feature is not
  * supported and return when it is supported. If the specific exception
@@ -1098,6 +1102,12 @@ static const struct hwcap_data {
 		.sigill_fn = hbc_sigill,
 		.sigill_reliable = true,
 	},
+	{
+		.name = "MTE_FAR",
+		.at_hwcap = AT_HWCAP3,
+		.hwcap_bit = HWCAP3_MTE_FAR,
+		.cpuinfo = "mtefar",
+	},
 };
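
The new entry relies on the third hwcap word, exposed through AT_HWCAP3 (defined above as 29 when libc headers lack it). Userspace probes it the same way as AT_HWCAP; a hedged sketch, with HWCAP3_MTE_FAR taken to come from the kernel uapi headers:

#include <stdbool.h>
#include <sys/auxv.h>

#ifndef AT_HWCAP3
#define AT_HWCAP3 29
#endif

static bool have_mte_far(void)
{
	return getauxval(AT_HWCAP3) & HWCAP3_MTE_FAR;
}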
|
||||
|
||||
typedef void (*sighandler_fn)(int, siginfo_t *, void *);
|
||||
|
||||
@@ -3,31 +3,12 @@
 #include <linux/sched.h>
 #include <linux/wait.h>
 
+#include "kselftest.h"
+
 #define SYS_TPIDR2 "S3_3_C13_C0_5"
 
-#define EXPECTED_TESTS 5
-
-static void putstr(const char *str)
-{
-	write(1, str, strlen(str));
-}
-
-static void putnum(unsigned int num)
-{
-	char c;
-
-	if (num / 10)
-		putnum(num / 10);
-
-	c = '0' + (num % 10);
-	write(1, &c, 1);
-}
-
-static int tests_run;
-static int tests_passed;
-static int tests_failed;
-static int tests_skipped;
-
 static void set_tpidr2(uint64_t val)
 {
 	asm volatile (
@@ -50,20 +31,6 @@ static uint64_t get_tpidr2(void)
 	return val;
 }
 
-static void print_summary(void)
-{
-	if (tests_passed + tests_failed + tests_skipped != EXPECTED_TESTS)
-		putstr("# UNEXPECTED TEST COUNT: ");
-
-	putstr("# Totals: pass:");
-	putnum(tests_passed);
-	putstr(" fail:");
-	putnum(tests_failed);
-	putstr(" xfail:0 xpass:0 skip:");
-	putnum(tests_skipped);
-	putstr(" error:0\n");
-}
-
 /* Processes should start with TPIDR2 == 0 */
 static int default_value(void)
 {
@@ -105,9 +72,8 @@ static int write_fork_read(void)
 	if (newpid == 0) {
 		/* In child */
 		if (get_tpidr2() != oldpid) {
-			putstr("# TPIDR2 changed in child: ");
-			putnum(get_tpidr2());
-			putstr("\n");
+			ksft_print_msg("TPIDR2 changed in child: %llx\n",
+				       get_tpidr2());
 			exit(0);
 		}
 
@@ -115,14 +81,12 @@ static int write_fork_read(void)
 		if (get_tpidr2() == getpid()) {
 			exit(1);
 		} else {
-			putstr("# Failed to set TPIDR2 in child\n");
+			ksft_print_msg("Failed to set TPIDR2 in child\n");
 			exit(0);
 		}
 	}
 	if (newpid < 0) {
-		putstr("# fork() failed: -");
-		putnum(-newpid);
-		putstr("\n");
+		ksft_print_msg("fork() failed: %d\n", newpid);
 		return 0;
 	}
 
@@ -132,23 +96,22 @@ static int write_fork_read(void)
 		if (waiting < 0) {
 			if (errno == EINTR)
 				continue;
-			putstr("# waitpid() failed: ");
-			putnum(errno);
-			putstr("\n");
+			ksft_print_msg("waitpid() failed: %d\n", errno);
 			return 0;
 		}
 		if (waiting != newpid) {
-			putstr("# waitpid() returned wrong PID\n");
+			ksft_print_msg("waitpid() returned wrong PID: %d != %d\n",
+				       waiting, newpid);
 			return 0;
 		}
 
 		if (!WIFEXITED(status)) {
-			putstr("# child did not exit\n");
+			ksft_print_msg("child did not exit\n");
 			return 0;
 		}
 
 		if (getpid() != get_tpidr2()) {
-			putstr("# TPIDR2 corrupted in parent\n");
+			ksft_print_msg("TPIDR2 corrupted in parent\n");
 			return 0;
 		}
@@ -188,35 +151,32 @@ static int write_clone_read(void)
 
 	stack = malloc(__STACK_SIZE);
 	if (!stack) {
-		putstr("# malloc() failed\n");
+		ksft_print_msg("malloc() failed\n");
 		return 0;
 	}
 
 	ret = sys_clone(CLONE_VM, (unsigned long)stack + __STACK_SIZE,
 			&parent_tid, 0, &child_tid);
 	if (ret == -1) {
-		putstr("# clone() failed\n");
-		putnum(errno);
-		putstr("\n");
+		ksft_print_msg("clone() failed: %d\n", errno);
 		return 0;
 	}
 
 	if (ret == 0) {
 		/* In child */
 		if (get_tpidr2() != 0) {
-			putstr("# TPIDR2 non-zero in child: ");
-			putnum(get_tpidr2());
-			putstr("\n");
+			ksft_print_msg("TPIDR2 non-zero in child: %llx\n",
+				       get_tpidr2());
 			exit(0);
 		}
 
 		if (gettid() == 0)
-			putstr("# Child TID==0\n");
+			ksft_print_msg("Child TID==0\n");
 		set_tpidr2(gettid());
 		if (get_tpidr2() == gettid()) {
 			exit(1);
 		} else {
-			putstr("# Failed to set TPIDR2 in child\n");
+			ksft_print_msg("Failed to set TPIDR2 in child\n");
 			exit(0);
 		}
 	}
@@ -227,25 +187,22 @@ static int write_clone_read(void)
 		if (waiting < 0) {
 			if (errno == EINTR)
 				continue;
-			putstr("# wait4() failed: ");
-			putnum(errno);
-			putstr("\n");
+			ksft_print_msg("wait4() failed: %d\n", errno);
 			return 0;
 		}
 		if (waiting != ret) {
-			putstr("# wait4() returned wrong PID ");
-			putnum(waiting);
-			putstr("\n");
+			ksft_print_msg("wait4() returned wrong PID %d\n",
+				       waiting);
 			return 0;
 		}
 
 		if (!WIFEXITED(status)) {
-			putstr("# child did not exit\n");
+			ksft_print_msg("child did not exit\n");
 			return 0;
 		}
 
 		if (parent != get_tpidr2()) {
-			putstr("# TPIDR2 corrupted in parent\n");
+			ksft_print_msg("TPIDR2 corrupted in parent\n");
 			return 0;
 		}
@@ -253,35 +210,14 @@ static int write_clone_read(void)
 	}
 }
 
-#define run_test(name)			\
-	if (name()) {			\
-		tests_passed++;		\
-	} else {			\
-		tests_failed++;		\
-		putstr("not ");		\
-	}				\
-	putstr("ok ");			\
-	putnum(++tests_run);		\
-	putstr(" " #name "\n");
-
-#define skip_test(name)			\
-	tests_skipped++;		\
-	putstr("ok ");			\
-	putnum(++tests_run);		\
-	putstr(" # SKIP " #name "\n");
-
 int main(int argc, char **argv)
 {
 	int ret;
 
-	putstr("TAP version 13\n");
-	putstr("1..");
-	putnum(EXPECTED_TESTS);
-	putstr("\n");
+	ksft_print_header();
+	ksft_set_plan(5);
 
-	putstr("# PID: ");
-	putnum(getpid());
-	putstr("\n");
+	ksft_print_msg("PID: %d\n", getpid());
 
 	/*
 	 * This test is run with nolibc which doesn't support hwcap and
@@ -290,23 +226,21 @@ int main(int argc, char **argv)
 	 */
 	ret = open("/proc/sys/abi/sme_default_vector_length", O_RDONLY, 0);
 	if (ret >= 0) {
-		run_test(default_value);
-		run_test(write_read);
-		run_test(write_sleep_read);
-		run_test(write_fork_read);
-		run_test(write_clone_read);
+		ksft_test_result(default_value(), "default_value\n");
+		ksft_test_result(write_read(), "write_read\n");
+		ksft_test_result(write_sleep_read(), "write_sleep_read\n");
+		ksft_test_result(write_fork_read(), "write_fork_read\n");
+		ksft_test_result(write_clone_read(), "write_clone_read\n");
 
 	} else {
-		putstr("# SME support not present\n");
-
-		skip_test(default_value);
-		skip_test(write_read);
-		skip_test(write_sleep_read);
-		skip_test(write_fork_read);
-		skip_test(write_clone_read);
+		ksft_print_msg("SME support not present\n");
+
+		ksft_test_result_skip("default_value\n");
+		ksft_test_result_skip("write_read\n");
+		ksft_test_result_skip("write_sleep_read\n");
+		ksft_test_result_skip("write_fork_read\n");
+		ksft_test_result_skip("write_clone_read\n");
 	}
 
-	print_summary();
-
-	return 0;
+	ksft_finished();
 }
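The conversion above is the stock kselftest TAP pattern; stripped to a skeleton it looks like this (a sketch only, my_check is hypothetical):

#include "kselftest.h"

static int my_check(void)
{
	return 1;	/* non-zero means pass, matching the tests above */
}

int main(void)
{
	ksft_print_header();	/* emits the TAP version line */
	ksft_set_plan(1);	/* emits the "1..N" plan */

	ksft_test_result(my_check(), "my_check\n");

	ksft_finished();	/* prints totals and exits with the right code */
}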
@@ -1061,11 +1061,31 @@ static bool sve_write_supported(struct test_config *config)
 		if (config->sme_vl_in != config->sme_vl_expected) {
 			return false;
 		}
+
+		if (!sve_supported())
+			return false;
 	}
 
 	return true;
 }
 
+static bool sve_write_fpsimd_supported(struct test_config *config)
+{
+	if (!sve_supported())
+		return false;
+
+	if ((config->svcr_in & SVCR_ZA) != (config->svcr_expected & SVCR_ZA))
+		return false;
+
+	if (config->svcr_expected & SVCR_SM)
+		return false;
+
+	if (config->sme_vl_in != config->sme_vl_expected)
+		return false;
+
+	return true;
+}
+
 static void fpsimd_write_expected(struct test_config *config)
 {
 	int vl;
@@ -1134,6 +1154,9 @@ static void sve_write_expected(struct test_config *config)
 	int vl = vl_expected(config);
 	int sme_vq = __sve_vq_from_vl(config->sme_vl_expected);
 
+	if (!vl)
+		return;
+
 	fill_random(z_expected, __SVE_ZREGS_SIZE(__sve_vq_from_vl(vl)));
 	fill_random(p_expected, __SVE_PREGS_SIZE(__sve_vq_from_vl(vl)));
 
@@ -1152,7 +1175,7 @@ static void sve_write_expected(struct test_config *config)
 	}
 }
 
-static void sve_write(pid_t child, struct test_config *config)
+static void sve_write_sve(pid_t child, struct test_config *config)
 {
 	struct user_sve_header *sve;
 	struct iovec iov;
@@ -1161,6 +1184,9 @@ static void sve_write(pid_t child, struct test_config *config)
 	vl = vl_expected(config);
 	vq = __sve_vq_from_vl(vl);
 
+	if (!vl)
+		return;
+
 	iov.iov_len = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
 	iov.iov_base = malloc(iov.iov_len);
 	if (!iov.iov_base) {
@@ -1195,6 +1221,45 @@ static void sve_write(pid_t child, struct test_config *config)
 	free(iov.iov_base);
 }
 
+static void sve_write_fpsimd(pid_t child, struct test_config *config)
+{
+	struct user_sve_header *sve;
+	struct user_fpsimd_state *fpsimd;
+	struct iovec iov;
+	int ret, vl, vq;
+
+	vl = vl_expected(config);
+	vq = __sve_vq_from_vl(vl);
+
+	if (!vl)
+		return;
+
+	iov.iov_len = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq,
+							  SVE_PT_REGS_FPSIMD);
+	iov.iov_base = malloc(iov.iov_len);
+	if (!iov.iov_base) {
+		ksft_print_msg("Failed allocating %lu byte SVE write buffer\n",
+			       iov.iov_len);
+		return;
+	}
+	memset(iov.iov_base, 0, iov.iov_len);
+
+	sve = iov.iov_base;
+	sve->size = iov.iov_len;
+	sve->flags = SVE_PT_REGS_FPSIMD;
+	sve->vl = vl;
+
+	fpsimd = iov.iov_base + SVE_PT_REGS_OFFSET;
+	memcpy(&fpsimd->vregs, v_expected, sizeof(v_expected));
+
+	ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_SVE, &iov);
+	if (ret != 0)
+		ksft_print_msg("Failed to write SVE: %s (%d)\n",
+			       strerror(errno), errno);
+
+	free(iov.iov_base);
+}
+
 static bool za_write_supported(struct test_config *config)
 {
 	if ((config->svcr_in & SVCR_SM) != (config->svcr_expected & SVCR_SM))
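A tracer can check which layout the kernel currently reports for NT_ARM_SVE by reading the header back and masking the flags; a sketch against the standard SVE ptrace UAPI (error handling minimal, function name hypothetical):

#include <sys/ptrace.h>
#include <sys/uio.h>
#include <asm/ptrace.h>
#include <linux/elf.h>

/* Returns SVE_PT_REGS_FPSIMD or SVE_PT_REGS_SVE, or -1 on error */
static int sve_regs_format(pid_t child)
{
	struct user_sve_header header;
	struct iovec iov = {
		.iov_base = &header,
		.iov_len = sizeof(header),
	};

	/* A short read covering only the header is sufficient here */
	if (ptrace(PTRACE_GETREGSET, child, NT_ARM_SVE, &iov) != 0)
		return -1;

	return header.flags & SVE_PT_REGS_MASK;
}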
@@ -1386,7 +1451,13 @@ static struct test_definition sve_test_defs[] = {
 		.name = "SVE write",
 		.supported = sve_write_supported,
 		.set_expected_values = sve_write_expected,
-		.modify_values = sve_write,
+		.modify_values = sve_write_sve,
+	},
+	{
+		.name = "SVE write FPSIMD format",
+		.supported = sve_write_fpsimd_supported,
+		.set_expected_values = fpsimd_write_expected,
+		.modify_values = sve_write_fpsimd,
 	},
 };
 
@@ -1607,7 +1678,7 @@ int main(void)
 	 * Run the test set if there is no SVE or SME, with those we
 	 * have to pick a VL for each run.
 	 */
-	if (!sve_supported()) {
+	if (!sve_supported() && !sme_supported()) {
 		test_config.sve_vl_in = 0;
 		test_config.sve_vl_expected = 0;
 		test_config.sme_vl_in = 0;
 
@@ -170,7 +170,7 @@ static void ptrace_set_get_inherit(pid_t child, const struct vec_type *type)
 	memset(&sve, 0, sizeof(sve));
 	sve.size = sizeof(sve);
 	sve.vl = sve_vl_from_vq(SVE_VQ_MIN);
-	sve.flags = SVE_PT_VL_INHERIT;
+	sve.flags = SVE_PT_VL_INHERIT | SVE_PT_REGS_SVE;
 	ret = set_sve(child, type, &sve);
 	if (ret != 0) {
 		ksft_test_result_fail("Failed to set %s SVE_PT_VL_INHERIT\n",
@@ -235,6 +235,7 @@ static void ptrace_set_get_vl(pid_t child, const struct vec_type *type,
 	/* Set the VL by doing a set with no register payload */
 	memset(&sve, 0, sizeof(sve));
 	sve.size = sizeof(sve);
+	sve.flags = SVE_PT_REGS_SVE;
 	sve.vl = vl;
 	ret = set_sve(child, type, &sve);
 	if (ret != 0) {
@@ -253,7 +254,7 @@ static void ptrace_set_get_vl(pid_t child, const struct vec_type *type,
 		return;
 	}
 
-	ksft_test_result(new_sve->vl = prctl_vl, "Set %s VL %u\n",
+	ksft_test_result(new_sve->vl == prctl_vl, "Set %s VL %u\n",
 			 type->name, vl);
 
 	free(new_sve);
@@ -301,8 +302,10 @@ static void ptrace_sve_fpsimd(pid_t child, const struct vec_type *type)
 			p[j] = j;
 	}
 
+	/* This should only succeed for SVE */
 	ret = set_sve(child, type, sve);
-	ksft_test_result(ret == 0, "%s FPSIMD set via SVE: %d\n",
+	ksft_test_result((type->regset == NT_ARM_SVE) == (ret == 0),
+			 "%s FPSIMD set via SVE: %d\n",
 			 type->name, ret);
 	if (ret)
 		goto out;
@@ -750,9 +753,6 @@ int main(void)
 	ksft_print_header();
 	ksft_set_plan(EXPECTED_TESTS);
 
-	if (!(getauxval(AT_HWCAP) & HWCAP_SVE))
-		ksft_exit_skip("SVE not available\n");
-
 	child = fork();
 	if (!child)
 		return do_child();
 
@@ -415,7 +415,7 @@ int main(int argc, char *argv[])
 		return err;
 
 	/* Register SIGSEGV handler */
-	mte_register_signal(SIGSEGV, mte_default_handler);
+	mte_register_signal(SIGSEGV, mte_default_handler, false);
 
 	/* Set test plan */
 	ksft_set_plan(20);
 
@@ -160,8 +160,8 @@ int main(int argc, char *argv[])
 		return err;
 
 	/* Register SIGSEGV handler */
-	mte_register_signal(SIGSEGV, mte_default_handler);
-	mte_register_signal(SIGBUS, mte_default_handler);
+	mte_register_signal(SIGSEGV, mte_default_handler, false);
+	mte_register_signal(SIGBUS, mte_default_handler, false);
 
 	/* Set test plan */
 	ksft_set_plan(12);
 
@@ -235,8 +235,8 @@ int main(int argc, char *argv[])
 		return err;
 
 	/* Register signal handlers */
-	mte_register_signal(SIGBUS, mte_default_handler);
-	mte_register_signal(SIGSEGV, mte_default_handler);
+	mte_register_signal(SIGBUS, mte_default_handler, false);
+	mte_register_signal(SIGSEGV, mte_default_handler, false);
 
 	allocate_hugetlb();
 
@@ -141,8 +141,8 @@ int main(int argc, char *argv[])
 		return KSFT_FAIL;
 	}
 	/* Register signal handlers */
-	mte_register_signal(SIGBUS, mte_default_handler);
-	mte_register_signal(SIGSEGV, mte_default_handler);
+	mte_register_signal(SIGBUS, mte_default_handler, false);
+	mte_register_signal(SIGSEGV, mte_default_handler, false);
 
 	/* Set test plan */
 	ksft_set_plan(4);
 
@@ -3,6 +3,7 @@
 
 #define _GNU_SOURCE
 
+#include <assert.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <signal.h>
@@ -23,6 +24,26 @@
 #define OVERFLOW MT_GRANULE_SIZE
 #define TAG_CHECK_ON		0
 #define TAG_CHECK_OFF		1
+#define ATAG_CHECK_ON		1
+#define ATAG_CHECK_OFF		0
+
+#define TEST_NAME_MAX		256
+
+enum mte_mem_check_type {
+	CHECK_ANON_MEM = 0,
+	CHECK_FILE_MEM = 1,
+	CHECK_CLEAR_PROT_MTE = 2,
+};
+
+struct check_mmap_testcase {
+	int check_type;
+	int mem_type;
+	int mte_sync;
+	int mapping;
+	int tag_check;
+	int atag_check;
+	bool enable_tco;
+};
 
 static size_t page_size;
 static int sizes[] = {
@@ -30,8 +51,14 @@ static int sizes[] = {
 	/* page size - 1*/ 0, /* page_size */ 0, /* page size + 1 */ 0
 };
 
-static int check_mte_memory(char *ptr, int size, int mode, int tag_check)
+static int check_mte_memory(char *ptr, int size, int mode, int tag_check, int atag_check)
 {
+	if (!mtefar_support && atag_check == ATAG_CHECK_ON)
+		return KSFT_SKIP;
+
+	if (atag_check == ATAG_CHECK_ON)
+		ptr = mte_insert_atag(ptr);
+
 	mte_initialize_current_context(mode, (uintptr_t)ptr, size);
 	memset(ptr, '1', size);
 	mte_wait_after_trig();
@@ -57,7 +84,7 @@ static int check_mte_memory(char *ptr, int size, int mode, int tag_check)
 	return KSFT_PASS;
 }
 
-static int check_anonymous_memory_mapping(int mem_type, int mode, int mapping, int tag_check)
+static int check_anonymous_memory_mapping(int mem_type, int mode, int mapping, int tag_check, int atag_check)
 {
 	char *ptr, *map_ptr;
 	int run, result, map_size;
@@ -79,16 +106,16 @@ static int check_anonymous_memory_mapping(int mem_type, int mode, int mapping, i
 			munmap((void *)map_ptr, map_size);
 			return KSFT_FAIL;
 		}
-		result = check_mte_memory(ptr, sizes[run], mode, tag_check);
+		result = check_mte_memory(ptr, sizes[run], mode, tag_check, atag_check);
 		mte_clear_tags((void *)ptr, sizes[run]);
 		mte_free_memory((void *)map_ptr, map_size, mem_type, false);
-		if (result == KSFT_FAIL)
-			return KSFT_FAIL;
+		if (result != KSFT_PASS)
+			return result;
 	}
 	return KSFT_PASS;
 }
 
-static int check_file_memory_mapping(int mem_type, int mode, int mapping, int tag_check)
+static int check_file_memory_mapping(int mem_type, int mode, int mapping, int tag_check, int atag_check)
 {
 	char *ptr, *map_ptr;
 	int run, fd, map_size;
@@ -117,17 +144,17 @@ static int check_file_memory_mapping(int mem_type, int mode, int mapping, int ta
 			close(fd);
 			return KSFT_FAIL;
 		}
-		result = check_mte_memory(ptr, sizes[run], mode, tag_check);
+		result = check_mte_memory(ptr, sizes[run], mode, tag_check, atag_check);
 		mte_clear_tags((void *)ptr, sizes[run]);
 		munmap((void *)map_ptr, map_size);
 		close(fd);
-		if (result == KSFT_FAIL)
-			break;
+		if (result != KSFT_PASS)
+			return result;
 	}
-	return result;
+	return KSFT_PASS;
 }
 
-static int check_clear_prot_mte_flag(int mem_type, int mode, int mapping)
+static int check_clear_prot_mte_flag(int mem_type, int mode, int mapping, int atag_check)
 {
 	char *ptr, *map_ptr;
 	int run, prot_flag, result, fd, map_size;
@@ -150,7 +177,7 @@ static int check_clear_prot_mte_flag(int mem_type, int mode, int mapping)
 			ksft_print_msg("FAIL: mprotect not ignoring clear PROT_MTE property\n");
 			return KSFT_FAIL;
 		}
-		result = check_mte_memory(ptr, sizes[run], mode, TAG_CHECK_ON);
+		result = check_mte_memory(ptr, sizes[run], mode, TAG_CHECK_ON, atag_check);
 		mte_free_memory_tag_range((void *)ptr, sizes[run], mem_type, UNDERFLOW, OVERFLOW);
 		if (result != KSFT_PASS)
 			return KSFT_FAIL;
@@ -174,19 +201,414 @@ static int check_clear_prot_mte_flag(int mem_type, int mode, int mapping)
 			close(fd);
 			return KSFT_FAIL;
 		}
-		result = check_mte_memory(ptr, sizes[run], mode, TAG_CHECK_ON);
+		result = check_mte_memory(ptr, sizes[run], mode, TAG_CHECK_ON, atag_check);
 		mte_free_memory_tag_range((void *)ptr, sizes[run], mem_type, UNDERFLOW, OVERFLOW);
 		close(fd);
 		if (result != KSFT_PASS)
-			return KSFT_FAIL;
+			return result;
 	}
 	return KSFT_PASS;
 }
 
+const char *format_test_name(struct check_mmap_testcase *tc)
+{
+	static char test_name[TEST_NAME_MAX];
+	const char *check_type_str;
+	const char *mem_type_str;
+	const char *sync_str;
+	const char *mapping_str;
+	const char *tag_check_str;
+	const char *atag_check_str;
+
+	switch (tc->check_type) {
+	case CHECK_ANON_MEM:
+		check_type_str = "anonymous memory";
+		break;
+	case CHECK_FILE_MEM:
+		check_type_str = "file memory";
+		break;
+	case CHECK_CLEAR_PROT_MTE:
+		check_type_str = "clear PROT_MTE flags";
+		break;
+	default:
+		assert(0);
+		break;
+	}
+
+	switch (tc->mem_type) {
+	case USE_MMAP:
+		mem_type_str = "mmap";
+		break;
+	case USE_MPROTECT:
+		mem_type_str = "mmap/mprotect";
+		break;
+	default:
+		assert(0);
+		break;
+	}
+
+	switch (tc->mte_sync) {
+	case MTE_NONE_ERR:
+		sync_str = "no error";
+		break;
+	case MTE_SYNC_ERR:
+		sync_str = "sync error";
+		break;
+	case MTE_ASYNC_ERR:
+		sync_str = "async error";
+		break;
+	default:
+		assert(0);
+		break;
+	}
+
+	switch (tc->mapping) {
+	case MAP_SHARED:
+		mapping_str = "shared";
+		break;
+	case MAP_PRIVATE:
+		mapping_str = "private";
+		break;
+	default:
+		assert(0);
+		break;
+	}
+
+	switch (tc->tag_check) {
+	case TAG_CHECK_ON:
+		tag_check_str = "tag check on";
+		break;
+	case TAG_CHECK_OFF:
+		tag_check_str = "tag check off";
+		break;
+	default:
+		assert(0);
+		break;
+	}
+
+	switch (tc->atag_check) {
+	case ATAG_CHECK_ON:
+		atag_check_str = "with address tag [63:60]";
+		break;
+	case ATAG_CHECK_OFF:
+		atag_check_str = "without address tag [63:60]";
+		break;
+	default:
+		assert(0);
+		break;
+	}
+
+	snprintf(test_name, sizeof(test_name),
+		 "Check %s with %s mapping, %s mode, %s memory and %s (%s)\n",
+		 check_type_str, mapping_str, sync_str, mem_type_str,
+		 tag_check_str, atag_check_str);
+
+	return test_name;
+}
+
 int main(int argc, char *argv[])
 {
-	int err;
+	int err, i;
 	int item = ARRAY_SIZE(sizes);
+	struct check_mmap_testcase test_cases[] = {
+		{
+			.check_type = CHECK_ANON_MEM,
+			.mem_type = USE_MMAP,
+			.mte_sync = MTE_SYNC_ERR,
+			.mapping = MAP_PRIVATE,
+			.tag_check = TAG_CHECK_OFF,
+			.atag_check = ATAG_CHECK_OFF,
+			.enable_tco = true,
+		},
+		{
+			.check_type = CHECK_FILE_MEM,
+			.mem_type = USE_MPROTECT,
+			.mte_sync = MTE_SYNC_ERR,
+			.mapping = MAP_PRIVATE,
+			.tag_check = TAG_CHECK_OFF,
+			.atag_check = ATAG_CHECK_OFF,
+			.enable_tco = true,
+		},
+		{
+			.check_type = CHECK_ANON_MEM,
+			.mem_type = USE_MMAP,
+			.mte_sync = MTE_NONE_ERR,
+			.mapping = MAP_PRIVATE,
+			.tag_check = TAG_CHECK_OFF,
+			.atag_check = ATAG_CHECK_OFF,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_FILE_MEM,
+			.mem_type = USE_MPROTECT,
+			.mte_sync = MTE_NONE_ERR,
+			.mapping = MAP_PRIVATE,
+			.tag_check = TAG_CHECK_OFF,
+			.atag_check = ATAG_CHECK_OFF,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_ANON_MEM,
+			.mem_type = USE_MMAP,
+			.mte_sync = MTE_SYNC_ERR,
+			.mapping = MAP_PRIVATE,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_OFF,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_ANON_MEM,
+			.mem_type = USE_MPROTECT,
+			.mte_sync = MTE_SYNC_ERR,
+			.mapping = MAP_PRIVATE,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_OFF,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_ANON_MEM,
+			.mem_type = USE_MMAP,
+			.mte_sync = MTE_SYNC_ERR,
+			.mapping = MAP_SHARED,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_OFF,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_ANON_MEM,
+			.mem_type = USE_MPROTECT,
+			.mte_sync = MTE_SYNC_ERR,
+			.mapping = MAP_SHARED,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_OFF,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_ANON_MEM,
+			.mem_type = USE_MMAP,
+			.mte_sync = MTE_ASYNC_ERR,
+			.mapping = MAP_PRIVATE,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_OFF,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_ANON_MEM,
+			.mem_type = USE_MPROTECT,
+			.mte_sync = MTE_ASYNC_ERR,
+			.mapping = MAP_PRIVATE,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_OFF,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_ANON_MEM,
+			.mem_type = USE_MMAP,
+			.mte_sync = MTE_ASYNC_ERR,
+			.mapping = MAP_SHARED,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_OFF,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_ANON_MEM,
+			.mem_type = USE_MPROTECT,
+			.mte_sync = MTE_ASYNC_ERR,
+			.mapping = MAP_SHARED,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_OFF,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_FILE_MEM,
+			.mem_type = USE_MMAP,
+			.mte_sync = MTE_SYNC_ERR,
+			.mapping = MAP_PRIVATE,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_OFF,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_FILE_MEM,
+			.mem_type = USE_MPROTECT,
+			.mte_sync = MTE_SYNC_ERR,
+			.mapping = MAP_PRIVATE,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_OFF,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_FILE_MEM,
+			.mem_type = USE_MMAP,
+			.mte_sync = MTE_SYNC_ERR,
+			.mapping = MAP_SHARED,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_OFF,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_FILE_MEM,
+			.mem_type = USE_MPROTECT,
+			.mte_sync = MTE_SYNC_ERR,
+			.mapping = MAP_SHARED,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_OFF,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_FILE_MEM,
+			.mem_type = USE_MMAP,
+			.mte_sync = MTE_ASYNC_ERR,
+			.mapping = MAP_PRIVATE,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_OFF,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_FILE_MEM,
+			.mem_type = USE_MPROTECT,
+			.mte_sync = MTE_ASYNC_ERR,
+			.mapping = MAP_PRIVATE,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_OFF,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_FILE_MEM,
+			.mem_type = USE_MMAP,
+			.mte_sync = MTE_ASYNC_ERR,
+			.mapping = MAP_SHARED,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_OFF,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_FILE_MEM,
+			.mem_type = USE_MPROTECT,
+			.mte_sync = MTE_ASYNC_ERR,
+			.mapping = MAP_SHARED,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_OFF,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_CLEAR_PROT_MTE,
+			.mem_type = USE_MMAP,
+			.mte_sync = MTE_SYNC_ERR,
+			.mapping = MAP_PRIVATE,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_OFF,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_CLEAR_PROT_MTE,
+			.mem_type = USE_MPROTECT,
+			.mte_sync = MTE_SYNC_ERR,
+			.mapping = MAP_PRIVATE,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_OFF,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_ANON_MEM,
+			.mem_type = USE_MMAP,
+			.mte_sync = MTE_SYNC_ERR,
+			.mapping = MAP_PRIVATE,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_ON,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_ANON_MEM,
+			.mem_type = USE_MPROTECT,
+			.mte_sync = MTE_SYNC_ERR,
+			.mapping = MAP_PRIVATE,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_ON,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_ANON_MEM,
+			.mem_type = USE_MMAP,
+			.mte_sync = MTE_SYNC_ERR,
+			.mapping = MAP_SHARED,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_ON,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_ANON_MEM,
+			.mem_type = USE_MPROTECT,
+			.mte_sync = MTE_SYNC_ERR,
+			.mapping = MAP_SHARED,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_ON,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_FILE_MEM,
+			.mem_type = USE_MMAP,
+			.mte_sync = MTE_SYNC_ERR,
+			.mapping = MAP_PRIVATE,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_ON,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_FILE_MEM,
+			.mem_type = USE_MPROTECT,
+			.mte_sync = MTE_SYNC_ERR,
+			.mapping = MAP_PRIVATE,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_ON,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_FILE_MEM,
+			.mem_type = USE_MMAP,
+			.mte_sync = MTE_SYNC_ERR,
+			.mapping = MAP_SHARED,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_ON,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_FILE_MEM,
+			.mem_type = USE_MPROTECT,
+			.mte_sync = MTE_SYNC_ERR,
+			.mapping = MAP_SHARED,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_ON,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_FILE_MEM,
+			.mem_type = USE_MMAP,
+			.mte_sync = MTE_ASYNC_ERR,
+			.mapping = MAP_PRIVATE,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_ON,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_CLEAR_PROT_MTE,
+			.mem_type = USE_MMAP,
+			.mte_sync = MTE_SYNC_ERR,
+			.mapping = MAP_PRIVATE,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_ON,
+			.enable_tco = false,
+		},
+		{
+			.check_type = CHECK_CLEAR_PROT_MTE,
+			.mem_type = USE_MPROTECT,
+			.mte_sync = MTE_SYNC_ERR,
+			.mapping = MAP_PRIVATE,
+			.tag_check = TAG_CHECK_ON,
+			.atag_check = ATAG_CHECK_ON,
+			.enable_tco = false,
+		},
+	};
 
 	err = mte_default_setup();
 	if (err)
@@ -200,64 +622,49 @@ int main(int argc, char *argv[])
 	sizes[item - 2] = page_size;
 	sizes[item - 1] = page_size + 1;
 
-	/* Register signal handlers */
-	mte_register_signal(SIGBUS, mte_default_handler);
-	mte_register_signal(SIGSEGV, mte_default_handler);
-
 	/* Set test plan */
-	ksft_set_plan(22);
+	ksft_set_plan(ARRAY_SIZE(test_cases));
 
-	mte_enable_pstate_tco();
-	evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_OFF),
-		      "Check anonymous memory with private mapping, sync error mode, mmap memory and tag check off\n");
-	evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_OFF),
-		      "Check file memory with private mapping, sync error mode, mmap/mprotect memory and tag check off\n");
-
-	mte_disable_pstate_tco();
-	evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_NONE_ERR, MAP_PRIVATE, TAG_CHECK_OFF),
-		      "Check anonymous memory with private mapping, no error mode, mmap memory and tag check off\n");
-	evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_NONE_ERR, MAP_PRIVATE, TAG_CHECK_OFF),
-		      "Check file memory with private mapping, no error mode, mmap/mprotect memory and tag check off\n");
-
-	evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
-		      "Check anonymous memory with private mapping, sync error mode, mmap memory and tag check on\n");
-	evaluate_test(check_anonymous_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
-		      "Check anonymous memory with private mapping, sync error mode, mmap/mprotect memory and tag check on\n");
-	evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
-		      "Check anonymous memory with shared mapping, sync error mode, mmap memory and tag check on\n");
-	evaluate_test(check_anonymous_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
-		      "Check anonymous memory with shared mapping, sync error mode, mmap/mprotect memory and tag check on\n");
-	evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
-		      "Check anonymous memory with private mapping, async error mode, mmap memory and tag check on\n");
-	evaluate_test(check_anonymous_memory_mapping(USE_MPROTECT, MTE_ASYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
-		      "Check anonymous memory with private mapping, async error mode, mmap/mprotect memory and tag check on\n");
-	evaluate_test(check_anonymous_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
-		      "Check anonymous memory with shared mapping, async error mode, mmap memory and tag check on\n");
-	evaluate_test(check_anonymous_memory_mapping(USE_MPROTECT, MTE_ASYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
-		      "Check anonymous memory with shared mapping, async error mode, mmap/mprotect memory and tag check on\n");
-
-	evaluate_test(check_file_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
-		      "Check file memory with private mapping, sync error mode, mmap memory and tag check on\n");
-	evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
-		      "Check file memory with private mapping, sync error mode, mmap/mprotect memory and tag check on\n");
-	evaluate_test(check_file_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
-		      "Check file memory with shared mapping, sync error mode, mmap memory and tag check on\n");
-	evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
-		      "Check file memory with shared mapping, sync error mode, mmap/mprotect memory and tag check on\n");
-	evaluate_test(check_file_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
-		      "Check file memory with private mapping, async error mode, mmap memory and tag check on\n");
-	evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_ASYNC_ERR, MAP_PRIVATE, TAG_CHECK_ON),
-		      "Check file memory with private mapping, async error mode, mmap/mprotect memory and tag check on\n");
-	evaluate_test(check_file_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
-		      "Check file memory with shared mapping, async error mode, mmap memory and tag check on\n");
-	evaluate_test(check_file_memory_mapping(USE_MPROTECT, MTE_ASYNC_ERR, MAP_SHARED, TAG_CHECK_ON),
-		      "Check file memory with shared mapping, async error mode, mmap/mprotect memory and tag check on\n");
-
-	evaluate_test(check_clear_prot_mte_flag(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE),
-		      "Check clear PROT_MTE flags with private mapping, sync error mode and mmap memory\n");
-	evaluate_test(check_clear_prot_mte_flag(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE),
-		      "Check clear PROT_MTE flags with private mapping and sync error mode and mmap/mprotect memory\n");
+	for (i = 0 ; i < ARRAY_SIZE(test_cases); i++) {
+		/* Register signal handlers */
+		mte_register_signal(SIGBUS, mte_default_handler,
+				    test_cases[i].atag_check == ATAG_CHECK_ON);
+		mte_register_signal(SIGSEGV, mte_default_handler,
+				    test_cases[i].atag_check == ATAG_CHECK_ON);
+
+		if (test_cases[i].enable_tco)
+			mte_enable_pstate_tco();
+		else
+			mte_disable_pstate_tco();
+
+		switch (test_cases[i].check_type) {
+		case CHECK_ANON_MEM:
+			evaluate_test(check_anonymous_memory_mapping(test_cases[i].mem_type,
+								     test_cases[i].mte_sync,
+								     test_cases[i].mapping,
+								     test_cases[i].tag_check,
+								     test_cases[i].atag_check),
+				      format_test_name(&test_cases[i]));
+			break;
+		case CHECK_FILE_MEM:
+			evaluate_test(check_file_memory_mapping(test_cases[i].mem_type,
+								test_cases[i].mte_sync,
+								test_cases[i].mapping,
+								test_cases[i].tag_check,
+								test_cases[i].atag_check),
+				      format_test_name(&test_cases[i]));
+			break;
+		case CHECK_CLEAR_PROT_MTE:
+			evaluate_test(check_clear_prot_mte_flag(test_cases[i].mem_type,
+								test_cases[i].mte_sync,
+								test_cases[i].mapping,
+								test_cases[i].atag_check),
+				      format_test_name(&test_cases[i]));
+			break;
+		default:
+			exit(KSFT_FAIL);
+		}
+	}
 
 	mte_restore_setup();
 	ksft_print_cnts();
@@ -12,6 +12,10 @@
 
 #include "kselftest.h"
 
+#ifndef AT_HWCAP3
+#define AT_HWCAP3 29
+#endif
+
 static int set_tagged_addr_ctrl(int val)
 {
 	int ret;
 
@@ -180,7 +180,7 @@ int main(int argc, char *argv[])
 		return err;
 
 	/* Register SIGSEGV handler */
-	mte_register_signal(SIGSEGV, mte_default_handler);
+	mte_register_signal(SIGSEGV, mte_default_handler, false);
 
 	/* Set test plan */
 	ksft_set_plan(4);
 
@@ -211,7 +211,7 @@ int main(int argc, char *argv[])
 		return err;
 
 	/* Register signal handlers */
-	mte_register_signal(SIGSEGV, mte_default_handler);
+	mte_register_signal(SIGSEGV, mte_default_handler, false);
 
 	/* Set test plan */
 	ksft_set_plan(64);
 
@@ -6,6 +6,7 @@
 #include <signal.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <time.h>
 #include <unistd.h>
 
 #include <linux/auxvec.h>
@@ -19,20 +20,38 @@
 #include "mte_common_util.h"
 #include "mte_def.h"
 
+#ifndef SA_EXPOSE_TAGBITS
+#define SA_EXPOSE_TAGBITS 0x00000800
+#endif
+
 #define INIT_BUFFER_SIZE 256
 
 struct mte_fault_cxt cur_mte_cxt;
+bool mtefar_support;
 static unsigned int mte_cur_mode;
 static unsigned int mte_cur_pstate_tco;
 
 void mte_default_handler(int signum, siginfo_t *si, void *uc)
 {
+	struct sigaction sa;
 	unsigned long addr = (unsigned long)si->si_addr;
+	unsigned char si_tag, si_atag;
+
+	sigaction(signum, NULL, &sa);
+
+	if (sa.sa_flags & SA_EXPOSE_TAGBITS) {
+		si_tag = MT_FETCH_TAG(addr);
+		si_atag = MT_FETCH_ATAG(addr);
+		addr = MT_CLEAR_TAGS(addr);
+	} else {
+		si_tag = 0;
+		si_atag = 0;
+	}
 
 	if (signum == SIGSEGV) {
 #ifdef DEBUG
-		ksft_print_msg("INFO: SIGSEGV signal at pc=%lx, fault addr=%lx, si_code=%lx\n",
-			       ((ucontext_t *)uc)->uc_mcontext.pc, addr, si->si_code);
+		ksft_print_msg("INFO: SIGSEGV signal at pc=%lx, fault addr=%lx, si_code=%lx, si_tag=%x, si_atag=%x\n",
+			       ((ucontext_t *)uc)->uc_mcontext.pc, addr, si->si_code, si_tag, si_atag);
 #endif
 		if (si->si_code == SEGV_MTEAERR) {
 			if (cur_mte_cxt.trig_si_code == si->si_code)
@@ -45,13 +64,18 @@ void mte_default_handler(int signum, siginfo_t *si, void *uc)
 		}
 		/* Compare the context for precise error */
 		else if (si->si_code == SEGV_MTESERR) {
+			if ((!mtefar_support && si_atag) || (si_atag != MT_FETCH_ATAG(cur_mte_cxt.trig_addr))) {
+				ksft_print_msg("Invalid MTE synchronous exception caught for address tag! si_tag=%x, si_atag: %x\n", si_tag, si_atag);
+				exit(KSFT_FAIL);
+			}
+
 			if (cur_mte_cxt.trig_si_code == si->si_code &&
 			    ((cur_mte_cxt.trig_range >= 0 &&
-			      addr >= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
-			      addr <= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)) ||
+			      addr >= MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) &&
+			      addr <= (MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)) ||
 			     (cur_mte_cxt.trig_range < 0 &&
-			      addr <= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
-			      addr >= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)))) {
+			      addr <= MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) &&
+			      addr >= (MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)))) {
 				cur_mte_cxt.fault_valid = true;
 				/* Adjust the pc by 4 */
 				((ucontext_t *)uc)->uc_mcontext.pc += 4;
@@ -67,11 +91,11 @@ void mte_default_handler(int signum, siginfo_t *si, void *uc)
 		ksft_print_msg("INFO: SIGBUS signal at pc=%llx, fault addr=%lx, si_code=%x\n",
 			       ((ucontext_t *)uc)->uc_mcontext.pc, addr, si->si_code);
 		if ((cur_mte_cxt.trig_range >= 0 &&
-		     addr >= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
-		     addr <= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)) ||
+		     addr >= MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) &&
+		     addr <= (MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range)) ||
 		    (cur_mte_cxt.trig_range < 0 &&
-		     addr <= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) &&
-		     addr >= (MT_CLEAR_TAG(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range))) {
+		     addr <= MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) &&
+		     addr >= (MT_CLEAR_TAGS(cur_mte_cxt.trig_addr) + cur_mte_cxt.trig_range))) {
 			cur_mte_cxt.fault_valid = true;
 			/* Adjust the pc by 4 */
 			((ucontext_t *)uc)->uc_mcontext.pc += 4;
@@ -79,12 +103,17 @@ void mte_default_handler(int signum, siginfo_t *si, void *uc)
 	}
 }
 
-void mte_register_signal(int signal, void (*handler)(int, siginfo_t *, void *))
+void mte_register_signal(int signal, void (*handler)(int, siginfo_t *, void *),
+			 bool export_tags)
 {
 	struct sigaction sa;
 
 	sa.sa_sigaction = handler;
 	sa.sa_flags = SA_SIGINFO;
+
+	if (export_tags && signal == SIGSEGV)
+		sa.sa_flags |= SA_EXPOSE_TAGBITS;
+
 	sigemptyset(&sa.sa_mask);
 	sigaction(signal, &sa, NULL);
 }
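Callers that want the tag bits preserved in si_addr just pass true for the new argument; a usage sketch (function names hypothetical, helpers from the mte_def.h hunks below):

static void example_segv_handler(int sig, siginfo_t *si, void *uc)
{
	unsigned long addr = (unsigned long)si->si_addr;

	/* With SA_EXPOSE_TAGBITS the MTE tag and the address tag
	 * survive in si_addr and can be split out again. */
	ksft_print_msg("fault at %lx, tag %x, atag %x\n",
		       MT_CLEAR_TAGS(addr),
		       (unsigned int)MT_FETCH_TAG(addr),
		       (unsigned int)MT_FETCH_ATAG(addr));
}

static void example_register(void)
{
	/* true => SA_EXPOSE_TAGBITS is set on the SIGSEGV sigaction */
	mte_register_signal(SIGSEGV, example_segv_handler, true);
}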
@@ -120,6 +149,19 @@ void mte_clear_tags(void *ptr, size_t size)
 	mte_clear_tag_address_range(ptr, size);
 }
 
+void *mte_insert_atag(void *ptr)
+{
+	unsigned char atag;
+
+	atag = mtefar_support ? (random() % MT_ATAG_MASK) + 1 : 0;
+	return (void *)MT_SET_ATAG((unsigned long)ptr, atag);
+}
+
+void *mte_clear_atag(void *ptr)
+{
+	return (void *)MT_CLEAR_ATAG((unsigned long)ptr);
+}
+
 static void *__mte_allocate_memory_range(size_t size, int mem_type, int mapping,
 					 size_t range_before, size_t range_after,
 					 bool tags, int fd)
@@ -316,12 +358,18 @@ int mte_switch_mode(int mte_option, unsigned long incl_mask)
 int mte_default_setup(void)
 {
 	unsigned long hwcaps2 = getauxval(AT_HWCAP2);
+	unsigned long hwcaps3 = getauxval(AT_HWCAP3);
 	unsigned long en = 0;
 	int ret;
 
+	/* To generate random address tag */
+	srandom(time(NULL));
+
 	if (!(hwcaps2 & HWCAP2_MTE))
 		ksft_exit_skip("MTE features unavailable\n");
 
+	mtefar_support = !!(hwcaps3 & HWCAP3_MTE_FAR);
+
 	/* Get current mte mode */
 	ret = prctl(PR_GET_TAGGED_ADDR_CTRL, en, 0, 0, 0);
 	if (ret < 0) {
@@ -37,10 +37,12 @@ struct mte_fault_cxt {
 };
 
 extern struct mte_fault_cxt cur_mte_cxt;
+extern bool mtefar_support;
 
 /* MTE utility functions */
 void mte_default_handler(int signum, siginfo_t *si, void *uc);
-void mte_register_signal(int signal, void (*handler)(int, siginfo_t *, void *));
+void mte_register_signal(int signal, void (*handler)(int, siginfo_t *, void *),
+			 bool export_tags);
 void mte_wait_after_trig(void);
 void *mte_allocate_memory(size_t size, int mem_type, int mapping, bool tags);
 void *mte_allocate_memory_tag_range(size_t size, int mem_type, int mapping,
@@ -54,6 +56,8 @@ void mte_free_memory_tag_range(void *ptr, size_t size, int mem_type,
 			       size_t range_before, size_t range_after);
 void *mte_insert_tags(void *ptr, size_t size);
 void mte_clear_tags(void *ptr, size_t size);
+void *mte_insert_atag(void *ptr);
+void *mte_clear_atag(void *ptr);
 int mte_default_setup(void);
 void mte_restore_setup(void);
 int mte_switch_mode(int mte_option, unsigned long incl_mask);
 
@@ -42,6 +42,8 @@
 #define MT_TAG_COUNT		16
 #define MT_INCLUDE_TAG_MASK	0xFFFF
 #define MT_EXCLUDE_TAG_MASK	0x0
+#define MT_ATAG_SHIFT		60
+#define MT_ATAG_MASK		0xFUL
 
 #define MT_ALIGN_GRANULE	(MT_GRANULE_SIZE - 1)
 #define MT_CLEAR_TAG(x)		((x) & ~(MT_TAG_MASK << MT_TAG_SHIFT))
@@ -49,6 +51,12 @@
 #define MT_FETCH_TAG(x)		((x >> MT_TAG_SHIFT) & (MT_TAG_MASK))
 #define MT_ALIGN_UP(x)		((x + MT_ALIGN_GRANULE) & ~(MT_ALIGN_GRANULE))
 
+#define MT_CLEAR_ATAG(x)	((x) & ~(MT_TAG_MASK << MT_ATAG_SHIFT))
+#define MT_SET_ATAG(x, y)	((x) | (((y) & MT_ATAG_MASK) << MT_ATAG_SHIFT))
+#define MT_FETCH_ATAG(x)	((x >> MT_ATAG_SHIFT) & (MT_ATAG_MASK))
+
+#define MT_CLEAR_TAGS(x)	(MT_CLEAR_ATAG(MT_CLEAR_TAG(x)))
+
 #define MT_PSTATE_TCO_SHIFT	25
 #define MT_PSTATE_TCO_MASK	~(0x1 << MT_PSTATE_TCO_SHIFT)
 #define MT_PSTATE_TCO_EN	1
 
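As a worked example of the new address-tag macros (pointer value made up for illustration): with MT_ATAG_SHIFT of 60 the address tag occupies bits 63:60, so setting tag 0x5 just ORs the nibble into the top of the pointer and MT_FETCH_ATAG() recovers it:

/* Illustration only, exercising the MT_*ATAG() helpers */
unsigned long p      = 0x0000ffff12345678UL;
unsigned long tagged = MT_SET_ATAG(p, 0x5UL);	/* 0x5000ffff12345678 */

/* MT_FETCH_ATAG(tagged) == 0x5, MT_CLEAR_ATAG(tagged) == p again,
 * and MT_CLEAR_TAGS() strips the MTE tag in bits 59:56 as well. */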
@@ -140,7 +140,7 @@ static void enable_os_lock(void)
 
 static void enable_monitor_debug_exceptions(void)
 {
-	uint32_t mdscr;
+	uint64_t mdscr;
 
 	asm volatile("msr daifclr, #8");
 
@@ -223,7 +223,7 @@ void install_hw_bp_ctx(uint8_t addr_bp, uint8_t ctx_bp, uint64_t addr,
 
 static void install_ss(void)
 {
-	uint32_t mdscr;
+	uint64_t mdscr;
 
 	asm volatile("msr daifclr, #8");