x86/percpu: Move top_of_stack to percpu hot section

Move the current task's top-of-stack pointer out of struct pcpu_hot and
into its own cache-hot per-CPU variable, cpu_current_top_of_stack.
No functional change.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Uros Bizjak <ubizjak@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20250303165246.2175811-9-brgerst@gmail.com
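The shape of the change, condensed from the hunks below (a sketch drawn
from the diff itself, not the full headers): top_of_stack stops being a
member of struct pcpu_hot and becomes a standalone cache-hot per-CPU
variable, so both C and assembly now reference it by its own symbol.

	/* Before: reached through the pcpu_hot struct. */
	struct pcpu_hot {
		struct task_struct	*current_task;
		unsigned long		top_of_stack;	/* removed by this patch */
	};

	/* After: a standalone per-CPU variable in the cache-hot section. */
	DECLARE_PER_CPU_CACHE_HOT(unsigned long, cpu_current_top_of_stack);
	DEFINE_PER_CPU_CACHE_HOT(unsigned long, cpu_current_top_of_stack) = TOP_OF_INIT_STACK;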
@@ -1153,7 +1153,7 @@ SYM_CODE_START(asm_exc_nmi)
	 * is using the thread stack right now, so it's safe for us to use it.
	 */
	movl	%esp, %ebx
-	movl	PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %esp
+	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	call	exc_nmi
	movl	%ebx, %esp
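For readers skimming the asm hunks: PER_CPU_VAR() only supplies the
per-CPU segment override, so the rename swaps the addressed symbol, not
the addressing mode. A simplified sketch of what the two operands reduce
to (illustrative only; the real macro also handles relocation suffixes):

	/*
	 * Illustrative expansion (%fs is the per-CPU segment on 32-bit,
	 * %gs on 64-bit):
	 *
	 *   old: movl %fs:(pcpu_hot + X86_top_of_stack), %esp
	 *        -- struct base plus an asm-offsets constant
	 *   new: movl %fs:cpu_current_top_of_stack, %esp
	 *        -- standalone per-CPU symbol, addressed directly
	 */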
@@ -1217,7 +1217,7 @@ SYM_CODE_START(rewind_stack_and_make_dead)
	/* Prevent any naive code from trying to unwind to our caller. */
	xorl	%ebp, %ebp

-	movl	PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %esi
+	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esi
	leal	-TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp

	call	make_task_dead
@@ -92,7 +92,7 @@ SYM_CODE_START(entry_SYSCALL_64)
	/* tss.sp2 is scratch space. */
	movq	%rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
-	movq	PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
+	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

 SYM_INNER_LABEL(entry_SYSCALL_64_safe_stack, SYM_L_GLOBAL)
	ANNOTATE_NOENDBR
@@ -1168,7 +1168,7 @@ SYM_CODE_START(asm_exc_nmi)
	FENCE_SWAPGS_USER_ENTRY
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
	movq	%rsp, %rdx
-	movq	PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
+	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
	UNWIND_HINT_IRET_REGS base=%rdx offset=8
	pushq	5*8(%rdx)	/* pt_regs->ss */
	pushq	4*8(%rdx)	/* pt_regs->rsp */
@@ -1486,7 +1486,7 @@ SYM_CODE_START_NOALIGN(rewind_stack_and_make_dead)
	/* Prevent any naive code from trying to unwind to our caller. */
	xorl	%ebp, %ebp

-	movq	PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rax
+	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rax
	leaq	-PTREGS_SIZE(%rax), %rsp
	UNWIND_HINT_REGS
@@ -57,7 +57,7 @@ SYM_CODE_START(entry_SYSENTER_compat)
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
	popq	%rax

-	movq	PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
+	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS		/* pt_regs->ss */
@@ -193,7 +193,7 @@ SYM_CODE_START(entry_SYSCALL_compat)
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp

	/* Switch to the kernel stack */
-	movq	PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
+	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

 SYM_INNER_LABEL(entry_SYSCALL_compat_safe_stack, SYM_L_GLOBAL)
	ANNOTATE_NOENDBR
@@ -14,7 +14,6 @@ struct task_struct;

 struct pcpu_hot {
	struct task_struct	*current_task;
-	unsigned long		top_of_stack;
 };

 DECLARE_PER_CPU_CACHE_HOT(struct pcpu_hot, pcpu_hot);
@@ -551,7 +551,7 @@ do { \
  * it is accessed while this_cpu_read_stable() allows the value to be cached.
  * this_cpu_read_stable() is more efficient and can be used if its value
  * is guaranteed to be valid across CPUs. The current users include
- * pcpu_hot.current_task and pcpu_hot.top_of_stack, both of which are
+ * pcpu_hot.current_task and cpu_current_top_of_stack, both of which are
  * actually per-thread variables implemented as per-CPU variables and
  * thus stable for the duration of the respective task.
  */
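The comment above is the crux of why this variable may use the relaxed
accessor. A hedged usage sketch of the two read flavors (example_counter
and both function names are hypothetical, invented for illustration):

	DECLARE_PER_CPU(unsigned int, example_counter);	/* hypothetical */

	static inline unsigned int read_volatile_counter(void)
	{
		/* this_cpu_read(): reloaded at every use, for values that
		 * can change underneath the caller. */
		return this_cpu_read(example_counter);
	}

	static __always_inline unsigned long read_stable_top(void)
	{
		/* this_cpu_read_stable(): the compiler may cache/hoist the
		 * load, which is safe here because the value is per-thread
		 * and constant for the lifetime of the running task. */
		return this_cpu_read_stable(cpu_current_top_of_stack);
	}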
@@ -422,6 +422,11 @@ DECLARE_PER_CPU_CACHE_HOT(bool, hardirq_stack_inuse);
 DECLARE_PER_CPU_CACHE_HOT(struct irq_stack *, softirq_stack_ptr);
 #endif

+DECLARE_PER_CPU_CACHE_HOT(unsigned long, cpu_current_top_of_stack);
+/* const-qualified alias provided by the linker. */
+DECLARE_PER_CPU_CACHE_HOT(const unsigned long __percpu_seg_override,
+			  const_cpu_current_top_of_stack);
+
 #ifdef CONFIG_X86_64
 static inline unsigned long cpu_kernelmode_gs_base(int cpu)
 {
@@ -547,9 +552,9 @@ static __always_inline unsigned long current_top_of_stack(void)
	 * entry trampoline.
	 */
	if (IS_ENABLED(CONFIG_USE_X86_SEG_SUPPORT))
-		return this_cpu_read_const(const_pcpu_hot.top_of_stack);
+		return this_cpu_read_const(const_cpu_current_top_of_stack);

-	return this_cpu_read_stable(pcpu_hot.top_of_stack);
+	return this_cpu_read_stable(cpu_current_top_of_stack);
 }

 static __always_inline bool on_thread_stack(void)
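For context, on_thread_stack() (the next function in this file) is a
typical consumer of current_top_of_stack(). The pattern looks like the
sketch below (the name example_on_thread_stack is ours; the distance
from the top of the task stack to the current stack pointer is within
THREAD_SIZE exactly when we are running on the thread stack):

	static __always_inline bool example_on_thread_stack(void)
	{
		return (unsigned long)(current_top_of_stack() -
				       current_stack_pointer) < THREAD_SIZE;
	}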
@@ -107,7 +107,6 @@ static void __used common(void)
	OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
	OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
	OFFSET(TSS_sp2, tss_struct, x86_tss.sp2);
-	OFFSET(X86_top_of_stack, pcpu_hot, top_of_stack);
	OFFSET(X86_current_task, pcpu_hot, current_task);
 #if IS_ENABLED(CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64)
	/* Offset for fields in aria_ctx */
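Dropping the OFFSET() entry is the natural consequence of the variable
leaving the struct: asm-offsets exists to export struct-member offsets
as assembler-visible constants, and a standalone per-CPU symbol needs
none. A simplified sketch of the mechanism, under the assumption that
OFFSET() follows the usual kbuild-style DEFINE() shape:

	#include <linux/stddef.h>	/* offsetof */

	/* Sketch: the build scrapes these .ascii markers out of the
	 * generated assembly and turns them into #defines in
	 * asm-offsets.h. */
	#define DEFINE(sym, val) \
		asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
	#define OFFSET(sym, str, mem) \
		DEFINE(sym, offsetof(struct str, mem))

	/* The removed line once yielded roughly
	 *   #define X86_top_of_stack <offsetof(struct pcpu_hot, top_of_stack)>
	 * which assembly consumed as "pcpu_hot + X86_top_of_stack". With
	 * the member gone, assembly names cpu_current_top_of_stack directly. */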
@@ -2066,7 +2066,6 @@ __setup("setcpuid=", setup_setcpuid);

 DEFINE_PER_CPU_CACHE_HOT(struct pcpu_hot, pcpu_hot) = {
	.current_task	= &init_task,
-	.top_of_stack	= TOP_OF_INIT_STACK,
 };
 EXPORT_PER_CPU_SYMBOL(pcpu_hot);
 EXPORT_PER_CPU_SYMBOL(const_pcpu_hot);
@@ -2074,6 +2073,8 @@ EXPORT_PER_CPU_SYMBOL(const_pcpu_hot);
 DEFINE_PER_CPU_CACHE_HOT(int, __preempt_count) = INIT_PREEMPT_COUNT;
 EXPORT_PER_CPU_SYMBOL(__preempt_count);

+DEFINE_PER_CPU_CACHE_HOT(unsigned long, cpu_current_top_of_stack) = TOP_OF_INIT_STACK;
+
 #ifdef CONFIG_X86_64
 /*
  * Note: Do not make this dependant on CONFIG_MITIGATION_CALL_DEPTH_TRACKING
@@ -190,13 +190,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
	arch_end_context_switch(next_p);

	/*
-	 * Reload esp0 and pcpu_hot.top_of_stack. This changes
+	 * Reload esp0 and cpu_current_top_of_stack. This changes
	 * current_thread_info(). Refresh the SYSENTER configuration in
	 * case prev or next is vm86.
	 */
	update_task_stack(next_p);
	refresh_sysenter_cs(next);
-	this_cpu_write(pcpu_hot.top_of_stack,
+	this_cpu_write(cpu_current_top_of_stack,
		       (unsigned long)task_stack_page(next_p) +
		       THREAD_SIZE);
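The open-coded task_stack_page(next_p) + THREAD_SIZE here is the 32-bit
spelling of the value; the 64-bit path in the next hunk gets it from
task_top_of_stack(). A sketch of the relationship (the helper name is
ours, and we assume TOP_OF_KERNEL_STACK_PADDING of zero, as on 64-bit):

	/* Sketch: with TOP_OF_KERNEL_STACK_PADDING == 0, task_top_of_stack()
	 * reduces to the same expression the 32-bit code writes inline. */
	static inline unsigned long sketch_task_top_of_stack(struct task_struct *p)
	{
		return (unsigned long)task_stack_page(p) + THREAD_SIZE;
	}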
@@ -669,7 +669,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
	 * Switch the PDA and FPU contexts.
	 */
	raw_cpu_write(pcpu_hot.current_task, next_p);
-	raw_cpu_write(pcpu_hot.top_of_stack, task_top_of_stack(next_p));
+	raw_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));

	switch_fpu_finish(next_p);
@@ -832,7 +832,7 @@ int common_cpu_up(unsigned int cpu, struct task_struct *idle)

 #ifdef CONFIG_X86_32
	/* Stack for startup_32 can be just as for start_secondary onwards */
-	per_cpu(pcpu_hot.top_of_stack, cpu) = task_top_of_stack(idle);
+	per_cpu(cpu_current_top_of_stack, cpu) = task_top_of_stack(idle);
 #endif
	return 0;
 }
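One detail worth noting in this hunk: per_cpu(var, cpu) addresses a
specific CPU's copy by explicit index, unlike the this_cpu_*() and
raw_cpu_*() forms in the earlier hunks, which go through the per-CPU
segment of the executing CPU. A sketch of the distinction (example_var
and example_accessors are hypothetical):

	DECLARE_PER_CPU(unsigned long, example_var);	/* hypothetical */

	static void example_accessors(int cpu)
	{
		/* Local copy, via the running CPU's per-CPU segment: */
		this_cpu_write(example_var, 1);

		/* A named CPU's copy, via its per-CPU offset -- this is how
		 * common_cpu_up() seeds the not-yet-running 'cpu': */
		per_cpu(example_var, cpu) = 2;
	}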
@@ -44,6 +44,7 @@ ENTRY(phys_startup_64)

 jiffies = jiffies_64;
 const_pcpu_hot = pcpu_hot;
+const_cpu_current_top_of_stack = cpu_current_top_of_stack;

 #if defined(CONFIG_X86_64)
 /*
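This linker-script line pairs with the const-qualified declaration added
in the processor.h hunk: aliasing the symbol gives C a read-only view of
the same per-CPU storage, which is what lets this_cpu_read_const() treat
the load as foldable. A sketch of the pattern, assembled from the hunks
above:

	/*
	 * Two names for one per-CPU slot:
	 *
	 *   linker script:  const_cpu_current_top_of_stack = cpu_current_top_of_stack;
	 *   C declaration:  DECLARE_PER_CPU_CACHE_HOT(const unsigned long
	 *                       __percpu_seg_override,
	 *                       const_cpu_current_top_of_stack);
	 *
	 * Writers (__switch_to, common_cpu_up) use the writable name; the
	 * CONFIG_USE_X86_SEG_SUPPORT branch of current_top_of_stack() reads
	 * through the const alias so the compiler may cache the value for
	 * the duration of the task.
	 */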