rqspinlock: Add basic support for CONFIG_PARAVIRT
We ripped out PV and virtualization related bits from rqspinlock in an
earlier commit; however, a fair lock performs poorly within a virtual
machine when the lock holder is preempted. As such, retain the
virt_spin_lock fallback to a test-and-set lock, but with timeout and
deadlock detection. We can do this by simply depending on the
resilient_tas_spin_lock implementation from the previous patch.

We don't integrate support for CONFIG_PARAVIRT_SPINLOCKS yet, as that
requires more involved algorithmic changes and introduces more
complexity. It can be done when the need arises in the future.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20250316040541.108729-15-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit ecbd804752 (parent c9102a68c0)
committed by Alexei Starovoitov
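For readers unfamiliar with the fallback the message describes, below is a
minimal standalone sketch (userspace C11, compile with -std=c11; every name
is invented for illustration, and this is not the kernel's
resilient_tas_spin_lock, which additionally performs deadlock detection) of
a test-and-set lock that gives up with -ETIMEDOUT instead of spinning
forever when the holder is preempted:

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

/* 0.25 s, matching the default timeout noted in the generic header below. */
#define SPIN_TIMEOUT_NS (250ULL * 1000 * 1000)

static unsigned long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Try to take the lock; 0 on success, -ETIMEDOUT once the deadline passes. */
static int tas_spin_lock_timeout(atomic_int *lock)
{
	unsigned long long deadline = now_ns() + SPIN_TIMEOUT_NS;
	int expected;

	do {
		expected = 0;
		if (atomic_compare_exchange_weak_explicit(lock, &expected, 1,
							  memory_order_acquire,
							  memory_order_relaxed))
			return 0;
	} while (now_ns() < deadline);

	return -ETIMEDOUT;
}

int main(void)
{
	atomic_int lock = 0;

	atomic_store(&lock, 1);		/* simulate a holder that never runs */
	printf("%d\n", tas_spin_lock_timeout(&lock));	/* -ETIMEDOUT */
	atomic_store(&lock, 0);
	printf("%d\n", tas_spin_lock_timeout(&lock));	/* 0 */
	return 0;
}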
arch/x86/include/asm/rqspinlock.h (new file, 33 lines)
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_RQSPINLOCK_H
+#define _ASM_X86_RQSPINLOCK_H
+
+#include <asm/paravirt.h>
+
+#ifdef CONFIG_PARAVIRT
+DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
+
+#define resilient_virt_spin_lock_enabled resilient_virt_spin_lock_enabled
+static __always_inline bool resilient_virt_spin_lock_enabled(void)
+{
+	return static_branch_likely(&virt_spin_lock_key);
+}
+
+#ifdef CONFIG_QUEUED_SPINLOCKS
+typedef struct qspinlock rqspinlock_t;
+#else
+typedef struct rqspinlock rqspinlock_t;
+#endif
+extern int resilient_tas_spin_lock(rqspinlock_t *lock);
+
+#define resilient_virt_spin_lock resilient_virt_spin_lock
+static inline int resilient_virt_spin_lock(rqspinlock_t *lock)
+{
+	return resilient_tas_spin_lock(lock);
+}
+
+#endif /* CONFIG_PARAVIRT */
+
+#include <asm-generic/rqspinlock.h>
+
+#endif /* _ASM_X86_RQSPINLOCK_H */
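The header above only helps if something turns virt_spin_lock_key on. That
key is pre-existing x86 paravirt infrastructure rather than part of this
patch; from memory of arch/x86/kernel/paravirt.c (an assumption, not shown
in this diff), arch setup code enables it when the CPU reports a hypervisor,
roughly:

/* Sketch from memory, not part of this patch: the key defaults to false
 * and is enabled only in guests, so resilient_virt_spin_lock_enabled()
 * patches down to a no-op branch on bare metal.
 */
void __init native_pv_lock_init(void)
{
	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		static_branch_enable(&virt_spin_lock_key);
}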
include/asm-generic/rqspinlock.h

@@ -35,6 +35,20 @@ extern int resilient_tas_spin_lock(rqspinlock_t *lock);
 extern int resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val);
 #endif
 
+#ifndef resilient_virt_spin_lock_enabled
+static __always_inline bool resilient_virt_spin_lock_enabled(void)
+{
+	return false;
+}
+#endif
+
+#ifndef resilient_virt_spin_lock
+static __always_inline int resilient_virt_spin_lock(rqspinlock_t *lock)
+{
+	return 0;
+}
+#endif
+
 /*
  * Default timeout for waiting loops is 0.25 seconds
  */
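The pairing between this hunk and the x86 header relies on a common kernel
idiom: the arch header defines a macro with the same name as its override,
and the generic header supplies a fallback only when that macro is absent.
A tiny self-contained illustration (both names invented):

#include <stdio.h>

/* "arch" side: provide the real implementation and announce it by
 * defining a macro with the same name. */
#define my_hook my_hook
static inline int my_hook(void) { return 1; }

/* "generic" side: define the fallback only if no override exists.
 * Skipped here because my_hook is already defined above. */
#ifndef my_hook
static inline int my_hook(void) { return 0; }
#endif

int main(void)
{
	printf("%d\n", my_hook());	/* prints 1: the "arch" version won */
	return 0;
}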
kernel/bpf/rqspinlock.c

@@ -352,6 +352,9 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 
 	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
 
+	if (resilient_virt_spin_lock_enabled())
+		return resilient_virt_spin_lock(lock);
+
 	RES_INIT_TIMEOUT(ts);
 
 	/*
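The three added lines mean that, in a guest with the key enabled, the queued
slowpath is bypassed entirely and callers inherit the TAS fallback's error
contract. A hedged sketch of the resulting call shape (the -ETIMEDOUT and
-EDEADLK values are my reading of the series' convention, and rqsl_example()
is invented; this fragment only compiles inside the kernel tree):

/* Assumes: 0 means the lock is held; a negative errno (-ETIMEDOUT on
 * timeout, -EDEADLK on detected deadlock) means it is NOT held and the
 * protected data must not be touched.
 */
static int rqsl_example(rqspinlock_t *lock, u32 val)
{
	int ret = resilient_queued_spin_lock_slowpath(lock, val);

	if (ret)
		return ret;	/* acquisition failed; propagate the error */
	/* ... critical section, then release the lock ... */
	return 0;
}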