Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-12-27 12:21:22 -05:00)
Merge tag 'loongarch-fixes-6.15-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
Pull LoongArch fixes from Huacai Chen:
 "Add a missing Kconfig option, fix some bugs in exception handlers,
  memory management and KVM"

* tag 'loongarch-fixes-6.15-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
  LoongArch: KVM: Fix PMU pass-through issue if VM exits to host finally
  LoongArch: KVM: Fully clear some CSRs when VM reboot
  LoongArch: KVM: Fix multiple typos of KVM code
  LoongArch: Return NULL from huge_pte_offset() for invalid PMD
  LoongArch: Remove a bogus reference to ZONE_DMA
  LoongArch: Handle fp, lsx, lasx and lbt assembly symbols
  LoongArch: Make do_xyz() exception handlers more robust
  LoongArch: Make regs_irqs_disabled() more clear
  LoongArch: Select ARCH_USE_MEMTEST
@@ -73,6 +73,7 @@ config LOONGARCH
 	select ARCH_SUPPORTS_RT
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_CMPXCHG_LOCKREF
+	select ARCH_USE_MEMTEST
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USE_QUEUED_SPINLOCKS
 	select ARCH_WANT_DEFAULT_BPF_JIT
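Selecting ARCH_USE_MEMTEST only makes the generic early memory tester (mm/memtest.c) available; it still has to be requested on the kernel command line. As a purely illustrative example, a boot entry asking for four test passes would include:

	memtest=4

The chosen pass count is arbitrary here; the option is ignored on kernels built without this Kconfig selection.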
@@ -22,22 +22,29 @@
 struct sigcontext;
 
 #define kernel_fpu_available() cpu_has_fpu
-extern void kernel_fpu_begin(void);
-extern void kernel_fpu_end(void);
-
-extern void _init_fpu(unsigned int);
-extern void _save_fp(struct loongarch_fpu *);
-extern void _restore_fp(struct loongarch_fpu *);
+
+void kernel_fpu_begin(void);
+void kernel_fpu_end(void);
+
+asmlinkage void _init_fpu(unsigned int);
+asmlinkage void _save_fp(struct loongarch_fpu *);
+asmlinkage void _restore_fp(struct loongarch_fpu *);
+asmlinkage int _save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
+asmlinkage int _restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
 
-extern void _save_lsx(struct loongarch_fpu *fpu);
-extern void _restore_lsx(struct loongarch_fpu *fpu);
-extern void _init_lsx_upper(void);
-extern void _restore_lsx_upper(struct loongarch_fpu *fpu);
+asmlinkage void _save_lsx(struct loongarch_fpu *fpu);
+asmlinkage void _restore_lsx(struct loongarch_fpu *fpu);
+asmlinkage void _init_lsx_upper(void);
+asmlinkage void _restore_lsx_upper(struct loongarch_fpu *fpu);
+asmlinkage int _save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+asmlinkage int _restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
 
-extern void _save_lasx(struct loongarch_fpu *fpu);
-extern void _restore_lasx(struct loongarch_fpu *fpu);
-extern void _init_lasx_upper(void);
-extern void _restore_lasx_upper(struct loongarch_fpu *fpu);
+asmlinkage void _save_lasx(struct loongarch_fpu *fpu);
+asmlinkage void _restore_lasx(struct loongarch_fpu *fpu);
+asmlinkage void _init_lasx_upper(void);
+asmlinkage void _restore_lasx_upper(struct loongarch_fpu *fpu);
+asmlinkage int _save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+asmlinkage int _restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
 
 static inline void enable_lsx(void);
 static inline void disable_lsx(void);
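The kernel_fpu_available()/kernel_fpu_begin()/kernel_fpu_end() trio declared in this header is the interface for touching the FPU from kernel context. A minimal usage sketch follows; the function name and the work inside the critical section are invented for illustration and are not part of this patch:

	#include <asm/fpu.h>

	static void example_fpu_user(void)	/* hypothetical caller */
	{
		if (!kernel_fpu_available())
			return;

		kernel_fpu_begin();	/* takes ownership of the FPU, preserving any live FP context */
		/* ... FP/SIMD work goes here ... */
		kernel_fpu_end();	/* gives the FPU back and restores the previous state */
	}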
@@ -12,9 +12,13 @@
 #include <asm/loongarch.h>
 #include <asm/processor.h>
 
-extern void _init_lbt(void);
-extern void _save_lbt(struct loongarch_lbt *);
-extern void _restore_lbt(struct loongarch_lbt *);
+asmlinkage void _init_lbt(void);
+asmlinkage void _save_lbt(struct loongarch_lbt *);
+asmlinkage void _restore_lbt(struct loongarch_lbt *);
+asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
+asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
+asmlinkage int _save_ftop_context(void __user *ftop);
+asmlinkage int _restore_ftop_context(void __user *ftop);
 
 static inline int is_lbt_enabled(void)
 {
@@ -33,9 +33,9 @@ struct pt_regs {
 	unsigned long __last[];
 } __aligned(8);
 
-static inline int regs_irqs_disabled(struct pt_regs *regs)
+static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
 {
-	return arch_irqs_disabled_flags(regs->csr_prmd);
+	return !(regs->csr_prmd & CSR_PRMD_PIE);
 }
 
 static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
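The traps.c hunks further below use this helper to snapshot the interrupted context's PRMD.PIE bit once at entry and to restore it on exit. Stripped down to the bare pattern, a handler looks roughly like the sketch here; the handler name is hypothetical and the body is elided for illustration:

	asmlinkage void noinstr do_example(struct pt_regs *regs)	/* hypothetical handler */
	{
		bool pie = regs_irqs_disabled(regs);	/* snapshot PRMD.PIE once */
		irqentry_state_t state = irqentry_enter(regs);

		if (!pie)
			local_irq_enable();	/* interrupts were enabled in the interrupted context */

		/* ... handle the exception ... */

		if (!pie)
			local_irq_disable();	/* put the IRQ state back the way we found it */

		irqentry_exit(regs, state);
	}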
@@ -458,6 +458,7 @@ SYM_FUNC_START(_save_fp_context)
 	li.w	a0, 0			# success
 	jr	ra
 SYM_FUNC_END(_save_fp_context)
+EXPORT_SYMBOL_GPL(_save_fp_context)
 
 /*
  * a0: fpregs
@@ -471,6 +472,7 @@ SYM_FUNC_START(_restore_fp_context)
 	li.w	a0, 0			# success
 	jr	ra
 SYM_FUNC_END(_restore_fp_context)
+EXPORT_SYMBOL_GPL(_restore_fp_context)
 
 /*
  * a0: fpregs
@@ -484,6 +486,7 @@ SYM_FUNC_START(_save_lsx_context)
 	li.w	a0, 0			# success
 	jr	ra
 SYM_FUNC_END(_save_lsx_context)
+EXPORT_SYMBOL_GPL(_save_lsx_context)
 
 /*
  * a0: fpregs
@@ -497,6 +500,7 @@ SYM_FUNC_START(_restore_lsx_context)
 	li.w	a0, 0			# success
 	jr	ra
 SYM_FUNC_END(_restore_lsx_context)
+EXPORT_SYMBOL_GPL(_restore_lsx_context)
 
 /*
  * a0: fpregs
@@ -510,6 +514,7 @@ SYM_FUNC_START(_save_lasx_context)
 	li.w	a0, 0			# success
 	jr	ra
 SYM_FUNC_END(_save_lasx_context)
+EXPORT_SYMBOL_GPL(_save_lasx_context)
 
 /*
  * a0: fpregs
@@ -523,6 +528,7 @@ SYM_FUNC_START(_restore_lasx_context)
 	li.w	a0, 0			# success
 	jr	ra
 SYM_FUNC_END(_restore_lasx_context)
+EXPORT_SYMBOL_GPL(_restore_lasx_context)
 
 .L_fpu_fault:
 	li.w	a0, -EFAULT		# failure
@@ -90,6 +90,7 @@ SYM_FUNC_START(_save_lbt_context)
 	li.w	a0, 0			# success
 	jr	ra
 SYM_FUNC_END(_save_lbt_context)
+EXPORT_SYMBOL_GPL(_save_lbt_context)
 
 /*
  * a0: scr
@@ -110,6 +111,7 @@ SYM_FUNC_START(_restore_lbt_context)
 	li.w	a0, 0			# success
 	jr	ra
 SYM_FUNC_END(_restore_lbt_context)
+EXPORT_SYMBOL_GPL(_restore_lbt_context)
 
 /*
  * a0: ftop
@@ -120,6 +122,7 @@ SYM_FUNC_START(_save_ftop_context)
 	li.w	a0, 0			# success
 	jr	ra
 SYM_FUNC_END(_save_ftop_context)
+EXPORT_SYMBOL_GPL(_save_ftop_context)
 
 /*
  * a0: ftop
@@ -150,6 +153,7 @@ SYM_FUNC_START(_restore_ftop_context)
 	li.w	a0, 0			# success
 	jr	ra
 SYM_FUNC_END(_restore_ftop_context)
+EXPORT_SYMBOL_GPL(_restore_ftop_context)
 
 .L_lbt_fault:
 	li.w	a0, -EFAULT		# failure
@@ -51,27 +51,6 @@
 #define lock_lbt_owner()	({ preempt_disable(); pagefault_disable(); })
 #define unlock_lbt_owner()	({ pagefault_enable(); preempt_enable(); })
 
-/* Assembly functions to move context to/from the FPU */
-extern asmlinkage int
-_save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
-extern asmlinkage int
-_restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
-extern asmlinkage int
-_save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
-extern asmlinkage int
-_restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
-extern asmlinkage int
-_save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
-extern asmlinkage int
-_restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
-
-#ifdef CONFIG_CPU_HAS_LBT
-extern asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
-extern asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
-extern asmlinkage int _save_ftop_context(void __user *ftop);
-extern asmlinkage int _restore_ftop_context(void __user *ftop);
-#endif
-
 struct rt_sigframe {
 	struct siginfo rs_info;
 	struct ucontext rs_uctx;
@@ -553,9 +553,10 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs)
 	die_if_kernel("Kernel ale access", regs);
 	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
 #else
+	bool pie = regs_irqs_disabled(regs);
 	unsigned int *pc;
 
-	if (regs->csr_prmd & CSR_PRMD_PIE)
+	if (!pie)
 		local_irq_enable();
 
 	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);
@@ -582,7 +583,7 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs)
 	die_if_kernel("Kernel ale access", regs);
 	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
 out:
-	if (regs->csr_prmd & CSR_PRMD_PIE)
+	if (!pie)
 		local_irq_disable();
 #endif
 	irqentry_exit(regs, state);
@@ -621,12 +622,13 @@ static void bug_handler(struct pt_regs *regs)
 asmlinkage void noinstr do_bce(struct pt_regs *regs)
 {
 	bool user = user_mode(regs);
+	bool pie = regs_irqs_disabled(regs);
 	unsigned long era = exception_era(regs);
 	u64 badv = 0, lower = 0, upper = ULONG_MAX;
 	union loongarch_instruction insn;
 	irqentry_state_t state = irqentry_enter(regs);
 
-	if (regs->csr_prmd & CSR_PRMD_PIE)
+	if (!pie)
 		local_irq_enable();
 
 	current->thread.trap_nr = read_csr_excode();
@@ -692,7 +694,7 @@ asmlinkage void noinstr do_bce(struct pt_regs *regs)
 	force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper);
 
 out:
-	if (regs->csr_prmd & CSR_PRMD_PIE)
+	if (!pie)
 		local_irq_disable();
 
 	irqentry_exit(regs, state);
@@ -710,11 +712,12 @@ asmlinkage void noinstr do_bce(struct pt_regs *regs)
 asmlinkage void noinstr do_bp(struct pt_regs *regs)
 {
 	bool user = user_mode(regs);
+	bool pie = regs_irqs_disabled(regs);
 	unsigned int opcode, bcode;
 	unsigned long era = exception_era(regs);
 	irqentry_state_t state = irqentry_enter(regs);
 
-	if (regs->csr_prmd & CSR_PRMD_PIE)
+	if (!pie)
 		local_irq_enable();
 
 	if (__get_inst(&opcode, (u32 *)era, user))
@@ -780,7 +783,7 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
 	}
 
 out:
-	if (regs->csr_prmd & CSR_PRMD_PIE)
+	if (!pie)
 		local_irq_disable();
 
 	irqentry_exit(regs, state);
@@ -1015,6 +1018,7 @@ static void init_restore_lbt(void)
 
 asmlinkage void noinstr do_lbt(struct pt_regs *regs)
 {
+	bool pie = regs_irqs_disabled(regs);
 	irqentry_state_t state = irqentry_enter(regs);
 
 	/*
@@ -1024,7 +1028,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs)
 	 * (including the user using 'MOVGR2GCSR' to turn on TM, which
 	 * will not trigger the BTE), we need to check PRMD first.
 	 */
-	if (regs->csr_prmd & CSR_PRMD_PIE)
+	if (!pie)
 		local_irq_enable();
 
 	if (!cpu_has_lbt) {
@@ -1038,7 +1042,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs)
 	preempt_enable();
 
 out:
-	if (regs->csr_prmd & CSR_PRMD_PIE)
+	if (!pie)
 		local_irq_disable();
 
 	irqentry_exit(regs, state);
@@ -111,7 +111,7 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
 		ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		if (unlikely(ret)) {
-			kvm_err("%s: : read date from addr %llx failed\n", __func__, addr);
+			kvm_err("%s: : read data from addr %llx failed\n", __func__, addr);
 			return ret;
 		}
 		/* Construct the mask by scanning the bit 27-30 */
@@ -127,7 +127,7 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
 	ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	if (unlikely(ret))
-		kvm_err("%s: : write date to addr %llx failed\n", __func__, addr);
+		kvm_err("%s: : write data to addr %llx failed\n", __func__, addr);
 
 	return ret;
 }
@@ -296,10 +296,10 @@ int kvm_arch_enable_virtualization_cpu(void)
 	/*
 	 * Enable virtualization features granting guest direct control of
 	 * certain features:
-	 * GCI=2:       Trap on init or unimplement cache instruction.
+	 * GCI=2:       Trap on init or unimplemented cache instruction.
 	 * TORU=0:      Trap on Root Unimplement.
 	 * CACTRL=1:    Root control cache.
-	 * TOP=0:       Trap on Previlege.
+	 * TOP=0:       Trap on Privilege.
 	 * TOE=0:       Trap on Exception.
 	 * TIT=0:       Trap on Timer.
 	 */
@@ -294,6 +294,7 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
 		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
 
 	if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
+		kvm_lose_pmu(vcpu);
 		/* make sure the vcpu mode has been written */
 		smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
 		local_irq_enable();
@@ -902,6 +903,13 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
 		vcpu->arch.st.guest_addr = 0;
 		memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
 		memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
+
+		/*
+		 * When vCPU reset, clear the ESTAT and GINTC registers
+		 * Other CSR registers are cleared with function _kvm_setcsr().
+		 */
+		kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0);
+		kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0);
 		break;
 	default:
 		ret = -EINVAL;
@@ -47,7 +47,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
 				pmd = pmd_offset(pud, addr);
 		}
 	}
-	return (pte_t *) pmd;
+	return pmd_none(pmdp_get(pmd)) ? NULL : (pte_t *) pmd;
 }
 
 uint64_t pmd_to_entrylo(unsigned long pmd_val)
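The point of returning NULL here is that callers treat a non-NULL result as a slot they may inspect, while NULL means "nothing mapped at this address". A simplified caller-side sketch, not taken from the tree and with a made-up helper name, assuming a PMD-sized hugepage:

	#include <linux/hugetlb.h>

	static bool example_is_mapped(struct mm_struct *mm, unsigned long addr)	/* hypothetical helper */
	{
		pte_t *ptep = huge_pte_offset(mm, addr, PMD_SIZE);

		if (!ptep)
			return false;	/* with the fix, an empty PMD now lands in this branch */

		/* only a non-NULL slot may be dereferenced further */
		return true;
	}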
@@ -65,9 +65,6 @@ void __init paging_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
-#ifdef CONFIG_ZONE_DMA
-	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
-#endif
 #ifdef CONFIG_ZONE_DMA32
 	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
 #endif