Files
linux/arch/arm64/include/asm/exception.h
Jinjie Ruan b3cf07851b arm64: entry: Switch to generic IRQ entry
Currently, x86, RISC-V and LoongArch use the generic entry code, which
makes maintainers' work easier and the code more elegant. Start
converting arm64 to the generic entry infrastructure in kernel/entry/*
by switching it to generic IRQ entry, which removes 100+ lines of
duplicated code. arm64 will switch over to generic entry completely in
a later series.

The changes are below:
 - Replace *enter_from/exit_to_kernel_mode() with wrappers around the
   generic irqentry_enter/exit(), as their code and functionality are
   almost identical.

 - Define ARCH_EXIT_TO_USER_MODE_WORK and implement
   arch_exit_to_user_mode_work() to handle the arm64-specific thread
   flags _TIF_MTE_ASYNC_FAULT and _TIF_FOREIGN_FPSTATE (see the first
   sketch after this list). With that in place, remove
   *enter_from/exit_to_user_mode() and use the generic
   enter_from/exit_to_user_mode(), as they are otherwise identical.

 - Remove arm64_enter/exit_nmi() and use the generic
   irqentry_nmi_enter/exit(), which are exactly the same; this also
   allows the temporary arm64 version of irqentry_state to be removed.

 - Remove PREEMPT_DYNAMIC code, as generic irqentry_exit_cond_resched()
   has the same functionality.

 - Implement arch_irqentry_exit_need_resched() using the checks from
   arm64_preempt_schedule_irq(), which lets arm64 apply its
   architecture-specific conditions before preemption (see the second
   sketch after this list).
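
As a rough illustration of the ARCH_EXIT_TO_USER_MODE_WORK item: the
generic entry layer folds that mask into its exit-to-user work loop and
invokes the arch_exit_to_user_mode_work() hook for the architecture
bits. A minimal sketch of the arm64 side follows; the handler bodies
paraphrase the flag handling formerly done in do_notify_resume() and
are illustrative, not the verbatim upstream code.

/* Sketch of arm64's hooks into the generic exit-to-user work loop. */
#define ARCH_EXIT_TO_USER_MODE_WORK \
	(_TIF_MTE_ASYNC_FAULT | _TIF_FOREIGN_FPSTATE)

static __always_inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
							unsigned long ti_work)
{
	/*
	 * Report an asynchronous MTE tag-check fault to the task as a
	 * SIGSEGV with SEGV_MTEAERR, as do_notify_resume() used to.
	 */
	if (ti_work & _TIF_MTE_ASYNC_FAULT) {
		clear_thread_flag(TIF_MTE_ASYNC_FAULT);
		send_sig_fault(SIGSEGV, SEGV_MTEAERR, (void __user *)NULL,
			       current);
	}

	/* Reload this task's FPSIMD/SVE state if the CPU's copy is stale. */
	if (ti_work & _TIF_FOREIGN_FPSTATE)
		fpsimd_restore_current_state();
}
/* Tell the generic header that an arch override is provided. */
#define arch_exit_to_user_mode_work arch_exit_to_user_mode_work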
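
Likewise for the preemption item: the generic
irqentry_exit_cond_resched() path can consult
arch_irqentry_exit_need_resched() before rescheduling. A hedged sketch
of the arm64 implementation, carrying over the checks from
arm64_preempt_schedule_irq() (paraphrased, not the exact upstream
code):

/* Sketch: arm64's veto on IRQ-exit preemption. */
static inline bool arch_irqentry_exit_need_resched(void)
{
	/*
	 * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when
	 * GIC priority masking is used the GIC irqchip driver will clear
	 * DAIF.IF using gic_arch_enable_irqs() for normal IRQs. If
	 * anything is set in DAIF we must have handled an NMI, so skip
	 * preemption.
	 */
	if (system_uses_irq_prio_masking() && read_sysreg(daif))
		return false;

	/*
	 * Preempting a task from an IRQ means we leave copies of PSTATE
	 * on the stack. cpufeature's enable calls may modify PSTATE, but
	 * resuming one of these preempted tasks would undo those changes.
	 * Only allow preemption once cpufeatures have been finalized.
	 */
	if (!system_capabilities_finalized())
		return false;

	return true;
}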

Tested-by: Ada Couprie Diaz <ada.coupriediaz@arm.com>
Suggested-by: Ada Couprie Diaz <ada.coupriediaz@arm.com>
Suggested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
2025-09-11 15:55:35 +01:00

95 lines
3.8 KiB
C

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/exception.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_EXCEPTION_H
#define __ASM_EXCEPTION_H

#include <asm/esr.h>
#include <asm/ptrace.h>

#include <linux/interrupt.h>

#define __exception_irq_entry	__irq_entry

/* Convert a deferred SError's DISR_EL1 value into ESR_ELx syndrome format. */
static inline unsigned long disr_to_esr(u64 disr)
{
	unsigned long esr = ESR_ELx_EC_SERROR << ESR_ELx_EC_SHIFT;

	if ((disr & DISR_EL1_IDS) == 0)
		esr |= (disr & DISR_EL1_ESR_MASK);
	else
		esr |= (disr & ESR_ELx_ISS_MASK);

	return esr;
}
asmlinkage void __noreturn handle_bad_stack(struct pt_regs *regs);
asmlinkage void el1t_64_sync_handler(struct pt_regs *regs);
asmlinkage void el1t_64_irq_handler(struct pt_regs *regs);
asmlinkage void el1t_64_fiq_handler(struct pt_regs *regs);
asmlinkage void el1t_64_error_handler(struct pt_regs *regs);
asmlinkage void el1h_64_sync_handler(struct pt_regs *regs);
asmlinkage void el1h_64_irq_handler(struct pt_regs *regs);
asmlinkage void el1h_64_fiq_handler(struct pt_regs *regs);
asmlinkage void el1h_64_error_handler(struct pt_regs *regs);
asmlinkage void el0t_64_sync_handler(struct pt_regs *regs);
asmlinkage void el0t_64_irq_handler(struct pt_regs *regs);
asmlinkage void el0t_64_fiq_handler(struct pt_regs *regs);
asmlinkage void el0t_64_error_handler(struct pt_regs *regs);
asmlinkage void el0t_32_sync_handler(struct pt_regs *regs);
asmlinkage void el0t_32_irq_handler(struct pt_regs *regs);
asmlinkage void el0t_32_fiq_handler(struct pt_regs *regs);
asmlinkage void el0t_32_error_handler(struct pt_regs *regs);
asmlinkage void call_on_irq_stack(struct pt_regs *regs,
				  void (*func)(struct pt_regs *));
asmlinkage void asm_exit_to_user_mode(struct pt_regs *regs);

void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs);
void do_el0_undef(struct pt_regs *regs, unsigned long esr);
void do_el1_undef(struct pt_regs *regs, unsigned long esr);
void do_el0_bti(struct pt_regs *regs);
void do_el1_bti(struct pt_regs *regs, unsigned long esr);
void do_el0_gcs(struct pt_regs *regs, unsigned long esr);
void do_el1_gcs(struct pt_regs *regs, unsigned long esr);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
void do_breakpoint(unsigned long esr, struct pt_regs *regs);
void do_watchpoint(unsigned long addr, unsigned long esr,
		   struct pt_regs *regs);
#else
static inline void do_breakpoint(unsigned long esr, struct pt_regs *regs) {}
static inline void do_watchpoint(unsigned long addr, unsigned long esr,
				 struct pt_regs *regs) {}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
void do_el0_softstep(unsigned long esr, struct pt_regs *regs);
void do_el1_softstep(unsigned long esr, struct pt_regs *regs);
void do_el0_brk64(unsigned long esr, struct pt_regs *regs);
void do_el1_brk64(unsigned long esr, struct pt_regs *regs);
void do_bkpt32(unsigned long esr, struct pt_regs *regs);
void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs);
void do_sve_acc(unsigned long esr, struct pt_regs *regs);
void do_sme_acc(unsigned long esr, struct pt_regs *regs);
void do_fpsimd_exc(unsigned long esr, struct pt_regs *regs);
void do_el0_sys(unsigned long esr, struct pt_regs *regs);
void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs);
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr);
void do_el0_cp15(unsigned long esr, struct pt_regs *regs);
int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs);
void do_el0_svc(struct pt_regs *regs);
void do_el0_svc_compat(struct pt_regs *regs);
void do_el0_fpac(struct pt_regs *regs, unsigned long esr);
void do_el1_fpac(struct pt_regs *regs, unsigned long esr);
void do_el0_mops(struct pt_regs *regs, unsigned long esr);
void do_el1_mops(struct pt_regs *regs, unsigned long esr);
void do_serror(struct pt_regs *regs, unsigned long esr);
void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far);

#endif	/* __ASM_EXCEPTION_H */