Files
linux/arch/arm64/include/asm/preempt.h
Jinjie Ruan b3cf07851b arm64: entry: Switch to generic IRQ entry
Currently, x86, RISC-V and LoongArch use the generic entry code, which
makes maintainers' work easier and the code more elegant. Start converting
arm64 to use the generic entry infrastructure from kernel/entry/* by
switching it to generic IRQ entry, which removes 100+ lines of duplicate
code. arm64 will switch completely to generic entry in a later series.

The changes are below:
 - Remove *enter_from/exit_to_kernel_mode(), and wrap with generic
   irqentry_enter/exit() as their code and functionality are almost
   identical.

 - Define ARCH_EXIT_TO_USER_MODE_WORK and implement
   arch_exit_to_user_mode_work() to check the arm64-specific thread flags
   _TIF_MTE_ASYNC_FAULT and _TIF_FOREIGN_FPSTATE. This also allows
   removing *enter_from/exit_to_user_mode() and wrapping with the generic
   enter_from/exit_to_user_mode(), because they are exactly the same.

 - Remove arm64_enter/exit_nmi() and use the generic
   irqentry_nmi_enter/exit() because they're exactly the same, so the
   temporary arm64 version of irqentry_state can also be removed.

 - Remove PREEMPT_DYNAMIC code, as generic irqentry_exit_cond_resched()
   has the same functionality.

 - Implement arch_irqentry_exit_need_resched() with
   arm64_preempt_schedule_irq() for arm64, which allows it to perform its
   architecture-specific checks (a rough sketch of these hooks follows
   this list).
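
For illustration, the hooks named above might end up shaped roughly like the
sketch below. The file placement, includes and helper bodies are assumptions
(the MTE and FPSIMD handling is modelled on arm64's existing
do_notify_resume() logic); this is not a copy of the patch:

/*
 * Hypothetical sketch of an arm64 <asm/entry-common.h>, simplified and
 * illustrative only.
 */
#ifndef __ASM_ENTRY_COMMON_H
#define __ASM_ENTRY_COMMON_H

#include <linux/sched/signal.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/fpsimd.h>

/* Extra TIF bits the generic exit-to-user work loop must handle on arm64. */
#define ARCH_EXIT_TO_USER_MODE_WORK (_TIF_MTE_ASYNC_FAULT | _TIF_FOREIGN_FPSTATE)

static __always_inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
                                                        unsigned long ti_work)
{
        if (ti_work & _TIF_MTE_ASYNC_FAULT) {
                /* Deliver the pending asynchronous MTE tag-check fault. */
                clear_thread_flag(TIF_MTE_ASYNC_FAULT);
                send_sig_fault(SIGSEGV, SEGV_MTEAERR, (void __user *)NULL, current);
        }

        if (ti_work & _TIF_FOREIGN_FPSTATE)
                /* Reload this task's FPSIMD/SVE state before returning. */
                fpsimd_restore_current_state();
}
#define arch_exit_to_user_mode_work arch_exit_to_user_mode_work

/*
 * The generic IRQ-exit preemption path only reschedules if this returns
 * true; a simplified stand-in for arm64_preempt_schedule_irq()'s checks.
 */
static inline bool arch_irqentry_exit_need_resched(void)
{
        return system_capabilities_finalized();
}
#define arch_irqentry_exit_need_resched arch_irqentry_exit_need_resched

#endif /* __ASM_ENTRY_COMMON_H */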

Tested-by: Ada Couprie Diaz <ada.coupriediaz@arm.com>
Suggested-by: Ada Couprie Diaz <ada.coupriediaz@arm.com>
Suggested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
2025-09-11 15:55:35 +01:00


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <linux/thread_info.h>

#define PREEMPT_NEED_RESCHED BIT(32)
#define PREEMPT_ENABLED (PREEMPT_NEED_RESCHED)
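
/*
 * thread_info packs the preempt state into a single 64-bit preempt_count:
 * the low 32 bits hold the count itself and the upper 32 bits hold the
 * need_resched flag, whose lowest bit is PREEMPT_NEED_RESCHED (BIT(32)).
 * The flag is stored inverted (0 = reschedule needed), so the whole
 * 64-bit value reads as zero exactly when the task is preemptible and a
 * reschedule is pending.
 */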

static inline int preempt_count(void)
{
        return READ_ONCE(current_thread_info()->preempt.count);
}

static inline void preempt_count_set(u64 pc)
{
        /* Preserve existing value of PREEMPT_NEED_RESCHED */
        WRITE_ONCE(current_thread_info()->preempt.count, pc);
}

#define init_task_preempt_count(p) do { \
        task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
        task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)
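
/*
 * Because need_resched is stored inverted, "set" writes 0 and "clear"
 * writes 1 below; test_preempt_need_resched() undoes the inversion.
 */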
static inline void set_preempt_need_resched(void)
{
        current_thread_info()->preempt.need_resched = 0;
}

static inline void clear_preempt_need_resched(void)
{
        current_thread_info()->preempt.need_resched = 1;
}

static inline bool test_preempt_need_resched(void)
{
        return !current_thread_info()->preempt.need_resched;
}

static inline void __preempt_count_add(int val)
{
        u32 pc = READ_ONCE(current_thread_info()->preempt.count);
        pc += val;
        WRITE_ONCE(current_thread_info()->preempt.count, pc);
}

static inline void __preempt_count_sub(int val)
{
        u32 pc = READ_ONCE(current_thread_info()->preempt.count);
        pc -= val;
        WRITE_ONCE(current_thread_info()->preempt.count, pc);
}

static inline bool __preempt_count_dec_and_test(void)
{
        struct thread_info *ti = current_thread_info();
        u64 pc = READ_ONCE(ti->preempt_count);

        /* Update only the count field, leaving need_resched unchanged */
        WRITE_ONCE(ti->preempt.count, --pc);

        /*
         * If we wrote back all zeroes, then we're preemptible and in
         * need of a reschedule. Otherwise, we need to reload the
         * preempt_count in case the need_resched flag was cleared by an
         * interrupt occurring between the non-atomic READ_ONCE/WRITE_ONCE
         * pair.
         */
        return !pc || !READ_ONCE(ti->preempt_count);
}
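
/*
 * A single 64-bit comparison covers both fields: pc can only equal
 * preempt_offset if the count matches and the upper (need_resched) word
 * is zero, i.e. a reschedule is pending.
 */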
static inline bool should_resched(int preempt_offset)
{
        u64 pc = READ_ONCE(current_thread_info()->preempt_count);
        return pc == preempt_offset;
}

#ifdef CONFIG_PREEMPTION

void preempt_schedule(void);
void preempt_schedule_notrace(void);

#ifdef CONFIG_PREEMPT_DYNAMIC

void dynamic_preempt_schedule(void);
#define __preempt_schedule() dynamic_preempt_schedule()
void dynamic_preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() dynamic_preempt_schedule_notrace()

#else /* CONFIG_PREEMPT_DYNAMIC */

#define __preempt_schedule() preempt_schedule()
#define __preempt_schedule_notrace() preempt_schedule_notrace()

#endif /* CONFIG_PREEMPT_DYNAMIC */
#endif /* CONFIG_PREEMPTION */

#endif /* __ASM_PREEMPT_H */