Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-12-27 08:45:26 -05:00)
Merge tag 'x86_urgent_for_v6.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Borislav Petkov:

 - Fix an interrupt vector setup race which leads to a non-functioning
   device

 - Add new Intel CPU models *and* a family: 0x12. Finally. Yippie! :-)

* tag 'x86_urgent_for_v6.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/irq: Plug vector setup race
  x86/cpu: Add new Intel CPU model numbers for Wildcatlake and Novalake
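For context, the race the first patch closes can be played back in miniature outside the kernel. The sketch below is illustrative only: vector_irq, VECTOR_SHUTDOWN and VECTOR_UNUSED borrow the kernel's names, while the macro values, table size, nic_desc and main() are invented for the demonstration. It walks the exact interleaving described in the new comment in arch/x86/kernel/irq.c below and ends with the freshly installed descriptor wiped from the vector slot.

/*
 * User-space model of the vector setup race (illustrative only; not
 * kernel code). Only the names vector_irq, VECTOR_SHUTDOWN and
 * VECTOR_UNUSED mirror the kernel; everything else is made up.
 */
#include <stdio.h>

struct irq_desc { const char *name; };

#define VECTOR_UNUSED    ((struct irq_desc *)NULL)
#define VECTOR_SHUTDOWN  ((struct irq_desc *)-2L)

static struct irq_desc *vector_irq[256];        /* stands in for the per-CPU table */
static struct irq_desc nic_desc = { .name = "nic" };

int main(void)
{
	int vec = 33;
	struct irq_desc *d;

	/* CPU0: free_irq() tears the vector down. */
	vector_irq[vec] = VECTOR_SHUTDOWN;

	/* CPU1: a stale interrupt is already pending; the handler samples the slot. */
	d = vector_irq[vec];

	/* CPU0: request_irq() hands out the same vector again and installs the desc. */
	vector_irq[vec] = &nic_desc;

	/*
	 * CPU1, pre-patch behaviour: no vector_lock held, so it "cleans up"
	 * based on the stale sample and wipes the descriptor that was just
	 * installed. The device's interrupts are lost from here on.
	 */
	if (d == VECTOR_SHUTDOWN)
		vector_irq[vec] = VECTOR_UNUSED;

	printf("slot %d: %p, expected %p (&nic_desc)\n",
	       vec, (void *)vector_irq[vec], (void *)&nic_desc);
	return 0;
}

Compiled with any C compiler, the final printf shows the slot reset to NULL even though the descriptor had already been reinstalled, which is the "non-functioning device" symptom mentioned above.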
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -92,8 +92,6 @@ struct irq_cfg {
 
 extern struct irq_cfg *irq_cfg(unsigned int irq);
 extern struct irq_cfg *irqd_cfg(struct irq_data *irq_data);
-extern void lock_vector_lock(void);
-extern void unlock_vector_lock(void);
 #ifdef CONFIG_SMP
 extern void vector_schedule_cleanup(struct irq_cfg *);
 extern void irq_complete_move(struct irq_cfg *cfg);
@@ -101,12 +99,16 @@ extern void irq_complete_move(struct irq_cfg *cfg);
 static inline void vector_schedule_cleanup(struct irq_cfg *c) { }
 static inline void irq_complete_move(struct irq_cfg *c) { }
 #endif
 
 extern void apic_ack_edge(struct irq_data *data);
-#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+
+#ifdef CONFIG_X86_LOCAL_APIC
+extern void lock_vector_lock(void);
+extern void unlock_vector_lock(void);
+#else
 static inline void lock_vector_lock(void) {}
 static inline void unlock_vector_lock(void) {}
-#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+#endif
 
 /* Statistics */
 extern atomic_t irq_err_count;
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -150,6 +150,11 @@
 #define INTEL_PANTHERLAKE_L	IFM(6, 0xCC) /* Cougar Cove / Crestmont */
 
+#define INTEL_WILDCATLAKE_L	IFM(6, 0xD5)
+
+#define INTEL_NOVALAKE		IFM(18, 0x01)
+#define INTEL_NOVALAKE_L	IFM(18, 0x03)
+
 /* "Small Core" Processors (Atom/E-Core) */
 
 #define INTEL_ATOM_BONNELL	IFM(6, 0x1C) /* Diamondville, Pineview */
 
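The new defines above are what the "new Intel CPU models and a family: 0x12" bullet refers to: INTEL_NOVALAKE and INTEL_NOVALAKE_L are the first family 18 (0x12) entries. The snippet below is a sketch of how such model macros are typically consumed, assuming the usual x86_match_cpu()/X86_MATCH_VFM() helpers; the table and function names are invented and are not part of this commit.

/* Illustrative consumer of the new model defines; not part of the commit. */
#include <linux/types.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

static const struct x86_cpu_id example_cpu_ids[] = {
	X86_MATCH_VFM(INTEL_WILDCATLAKE_L,	NULL),
	X86_MATCH_VFM(INTEL_NOVALAKE,		NULL),	/* family 18 == 0x12 */
	X86_MATCH_VFM(INTEL_NOVALAKE_L,		NULL),
	{}
};

static bool example_cpu_is_supported(void)
{
	/* Returns the matching entry or NULL for the running CPU. */
	return x86_match_cpu(example_cpu_ids) != NULL;
}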
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -256,26 +256,59 @@ static __always_inline void handle_irq(struct irq_desc *desc,
 		__handle_irq(desc, regs);
 }
 
-static __always_inline int call_irq_handler(int vector, struct pt_regs *regs)
+static struct irq_desc *reevaluate_vector(int vector)
 {
-	struct irq_desc *desc;
-	int ret = 0;
+	struct irq_desc *desc = __this_cpu_read(vector_irq[vector]);
+
+	if (!IS_ERR_OR_NULL(desc))
+		return desc;
+
+	if (desc == VECTOR_UNUSED)
+		pr_emerg_ratelimited("No irq handler for %d.%u\n", smp_processor_id(), vector);
+	else
+		__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
+	return NULL;
+}
+
+static __always_inline bool call_irq_handler(int vector, struct pt_regs *regs)
+{
+	struct irq_desc *desc = __this_cpu_read(vector_irq[vector]);
 
-	desc = __this_cpu_read(vector_irq[vector]);
 	if (likely(!IS_ERR_OR_NULL(desc))) {
 		handle_irq(desc, regs);
-	} else {
-		ret = -EINVAL;
-		if (desc == VECTOR_UNUSED) {
-			pr_emerg_ratelimited("%s: %d.%u No irq handler for vector\n",
-					     __func__, smp_processor_id(),
-					     vector);
-		} else {
-			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
-		}
+		return true;
 	}
 
-	return ret;
+	/*
+	 * Reevaluate with vector_lock held to prevent a race against
+	 * request_irq() setting up the vector:
+	 *
+	 * CPU0                                 CPU1
+	 *                                      interrupt is raised in APIC IRR
+	 *                                      but not handled
+	 * free_irq()
+	 *   per_cpu(vector_irq, CPU1)[vector] = VECTOR_SHUTDOWN;
+	 *
+	 * request_irq()                        common_interrupt()
+	 *                                        d = this_cpu_read(vector_irq[vector]);
+	 *
+	 *   per_cpu(vector_irq, CPU1)[vector] = desc;
+	 *
+	 *                                        if (d == VECTOR_SHUTDOWN)
+	 *                                          this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
+	 *
+	 * This requires that the same vector on the same target CPU is
+	 * handed out or that a spurious interrupt hits that CPU/vector.
+	 */
+	lock_vector_lock();
+	desc = reevaluate_vector(vector);
+	unlock_vector_lock();
+
+	if (!desc)
+		return false;
+
+	handle_irq(desc, regs);
+	return true;
 }
 
 /*
@@ -289,7 +322,7 @@ DEFINE_IDTENTRY_IRQ(common_interrupt)
 	/* entry code tells RCU that we're not quiescent. Check it. */
 	RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");
 
-	if (unlikely(call_irq_handler(vector, regs)))
+	if (unlikely(!call_irq_handler(vector, regs)))
 		apic_eoi();
 
 	set_irq_regs(old_regs);
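To tie the patched flow back to the user-space model shown above the diff: instead of acting on a stale, lockless read, the new path re-reads the slot under vector_lock, so a descriptor installed by a concurrent request_irq() is found and handled rather than overwritten, and only a genuinely dead slot is reset. The helpers below are made-up stand-ins that mirror the shape of reevaluate_vector() and the tail of the new call_irq_handler(); a pthread mutex stands in for vector_lock, and none of this is kernel code.

/* User-space model of the patched path (illustrative only). */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct irq_desc { const char *name; };

#define VECTOR_UNUSED    ((struct irq_desc *)NULL)
#define VECTOR_SHUTDOWN  ((struct irq_desc *)-2L)

static pthread_mutex_t vector_lock_model = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors the shape of reevaluate_vector(); caller holds the lock. */
static struct irq_desc *reevaluate_slot(struct irq_desc **slot)
{
	struct irq_desc *d = *slot;

	if (d != VECTOR_UNUSED && d != VECTOR_SHUTDOWN)
		return d;               /* request_irq() won the race: handle it */
	if (d == VECTOR_SHUTDOWN)
		*slot = VECTOR_UNUSED;  /* genuinely torn down: reset the slot */
	return NULL;                    /* nothing to handle; the kernel would EOI */
}

/* Mirrors the tail of the new call_irq_handler(). */
static bool handle_stale_slot(struct irq_desc **slot)
{
	struct irq_desc *d;

	pthread_mutex_lock(&vector_lock_model);
	d = reevaluate_slot(slot);
	pthread_mutex_unlock(&vector_lock_model);

	if (!d)
		return false;           /* kernel: apic_eoi() and move on */
	printf("handling %s\n", d->name);
	return true;
}

int main(void)
{
	struct irq_desc nic_desc = { .name = "nic" };
	struct irq_desc *slot, *stale;

	slot = VECTOR_SHUTDOWN;         /* CPU0: free_irq() */
	stale = slot;                   /* CPU1: initial lockless read */
	(void)stale;                    /* the patched path no longer acts on it */
	slot = &nic_desc;               /* CPU0: request_irq() reuses the vector */

	/* CPU1: re-read under the lock instead of trusting the stale value. */
	return handle_stale_slot(&slot) ? 0 : 1;
}

Where the first sketch ended with the slot wiped, this one finds and handles nic_desc, which is exactly the behavioural difference the x86/irq patch introduces.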