Merge tag 'locking-core-2020-06-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
 "The biggest change to core locking facilities in this cycle is the
  introduction of local_lock_t - this primitive comes from the -rt project
  and identifies CPU-local locking dependencies normally handled opaquely
  behind preempt_disable() or local_irq_save/disable() critical sections.

  The generated code on mainline kernels doesn't change as a result, but
  there are still benefits: improved debugging and better documentation of
  data structure accesses.

  The new local_lock_t primitives are introduced and then utilized in a
  couple of kernel subsystems. No change in functionality is intended.

  There are also other, smaller changes and cleanups"

* tag 'locking-core-2020-06-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  zram: Use local lock to protect per-CPU data
  zram: Allocate struct zcomp_strm as per-CPU memory
  connector/cn_proc: Protect send_msg() with a local lock
  squashfs: Make use of local lock in multi_cpu decompressor
  mm/swap: Use local_lock for protection
  radix-tree: Use local_lock for protection
  locking: Introduce local_lock()
  locking/lockdep: Replace zero-length array with flexible-array
  locking/rtmutex: Remove unused rt_mutex_cmpxchg_relaxed()
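To make the shift described above concrete, here is a minimal before/after sketch. It is not code from this series; the scratch_buf/scratch_pcpu names are hypothetical, and it assumes ordinary per-CPU data touched only from task context:

/* Hypothetical per-CPU scratch data, for illustration only. */
#include <linux/percpu.h>
#include <linux/local_lock.h>

struct scratch_buf { char data[128]; };

/* Before: protection is an anonymous preempt_disable() section. */
static DEFINE_PER_CPU(struct scratch_buf, scratch);

static void use_scratch_preempt(void)
{
	struct scratch_buf *buf;

	preempt_disable();		/* locking rule is invisible to lockdep */
	buf = this_cpu_ptr(&scratch);
	/* ... operate on buf ... */
	preempt_enable();
}

/* After: the same protection, expressed as a named local lock. */
struct scratch_pcpu {
	local_lock_t		lock;
	struct scratch_buf	buf;
};
static DEFINE_PER_CPU(struct scratch_pcpu, scratch_pcpu) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void use_scratch_local_lock(void)
{
	struct scratch_pcpu *s;

	local_lock(&scratch_pcpu.lock);	/* preempt_disable() + lockdep annotation */
	s = this_cpu_ptr(&scratch_pcpu);
	/* ... operate on s->buf ... */
	local_unlock(&scratch_pcpu.lock);
}

On a mainline kernel without lock debugging the two versions generate the same code; the difference is that the locking rule is now named, documented at the data structure, and visible to lockdep.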
include/linux/idr.h
@@ -171,7 +171,7 @@ static inline bool idr_is_empty(const struct idr *idr)
  */
 static inline void idr_preload_end(void)
 {
-	preempt_enable();
+	local_unlock(&radix_tree_preloads.lock);
 }
 
 /**
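The conversion is invisible to idr users: idr_preload() still pins the task to a CPU (now by taking radix_tree_preloads.lock) and idr_preload_end() drops that pin. A generic caller sketch, not code from this commit (my_idr, my_idr_lock and register_obj are hypothetical):

/* Hypothetical idr_preload() caller; the pattern is unchanged by this patch. */
static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_idr_lock);

static int register_obj(void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);	/* preallocates nodes, takes the local lock */
	spin_lock(&my_idr_lock);
	id = idr_alloc(&my_idr, obj, 0, 0, GFP_NOWAIT);
	spin_unlock(&my_idr_lock);
	idr_preload_end();		/* now local_unlock(), was preempt_enable() */

	return id;
}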
include/linux/local_lock.h (new file, 54 lines)
@@ -0,0 +1,54 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LOCAL_LOCK_H
#define _LINUX_LOCAL_LOCK_H

#include <linux/local_lock_internal.h>

/**
 * local_lock_init - Runtime initialize a lock instance
 */
#define local_lock_init(lock)		__local_lock_init(lock)

/**
 * local_lock - Acquire a per CPU local lock
 * @lock:	The lock variable
 */
#define local_lock(lock)		__local_lock(lock)

/**
 * local_lock_irq - Acquire a per CPU local lock and disable interrupts
 * @lock:	The lock variable
 */
#define local_lock_irq(lock)		__local_lock_irq(lock)

/**
 * local_lock_irqsave - Acquire a per CPU local lock, save and disable
 *			 interrupts
 * @lock:	The lock variable
 * @flags:	Storage for interrupt flags
 */
#define local_lock_irqsave(lock, flags)				\
	__local_lock_irqsave(lock, flags)

/**
 * local_unlock - Release a per CPU local lock
 * @lock:	The lock variable
 */
#define local_unlock(lock)		__local_unlock(lock)

/**
 * local_unlock_irq - Release a per CPU local lock and enable interrupts
 * @lock:	The lock variable
 */
#define local_unlock_irq(lock)		__local_unlock_irq(lock)

/**
 * local_unlock_irqrestore - Release a per CPU local lock and restore
 *			      interrupt flags
 * @lock:	The lock variable
 * @flags:	Interrupt flags to restore
 */
#define local_unlock_irqrestore(lock, flags)			\
	__local_unlock_irqrestore(lock, flags)

#endif
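A minimal usage sketch of the API above, assuming hypothetical per-CPU statistics (my_stats) updated from both task and interrupt context, which is what the _irqsave/_irqrestore variants are for:

/* Hypothetical per-CPU counters touched from task and IRQ context. */
#include <linux/percpu.h>
#include <linux/local_lock.h>

struct my_stats {
	local_lock_t	lock;
	u64		events;
};

static DEFINE_PER_CPU(struct my_stats, my_stats) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void my_stats_inc(void)
{
	unsigned long flags;

	/* Disables interrupts locally and records the acquisition for lockdep. */
	local_lock_irqsave(&my_stats.lock, flags);
	this_cpu_inc(my_stats.events);
	local_unlock_irqrestore(&my_stats.lock, flags);
}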
include/linux/local_lock_internal.h (new file, 90 lines)
@@ -0,0 +1,90 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LOCAL_LOCK_H
# error "Do not include directly, include linux/local_lock.h"
#endif

#include <linux/percpu-defs.h>
#include <linux/lockdep.h>

typedef struct {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
	struct task_struct	*owner;
#endif
} local_lock_t;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LL_DEP_MAP_INIT(lockname)			\
	.dep_map = {					\
		.name = #lockname,			\
		.wait_type_inner = LD_WAIT_CONFIG,	\
	}
#else
# define LL_DEP_MAP_INIT(lockname)
#endif

#define INIT_LOCAL_LOCK(lockname)	{ LL_DEP_MAP_INIT(lockname) }

#define __local_lock_init(lock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
	lockdep_init_map_wait(&(lock)->dep_map, #lock, &__key, 0, LD_WAIT_CONFIG);\
} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void local_lock_acquire(local_lock_t *l)
{
	lock_map_acquire(&l->dep_map);
	DEBUG_LOCKS_WARN_ON(l->owner);
	l->owner = current;
}

static inline void local_lock_release(local_lock_t *l)
{
	DEBUG_LOCKS_WARN_ON(l->owner != current);
	l->owner = NULL;
	lock_map_release(&l->dep_map);
}

#else /* CONFIG_DEBUG_LOCK_ALLOC */
static inline void local_lock_acquire(local_lock_t *l) { }
static inline void local_lock_release(local_lock_t *l) { }
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */

#define __local_lock(lock)					\
	do {							\
		preempt_disable();				\
		local_lock_acquire(this_cpu_ptr(lock));		\
	} while (0)

#define __local_lock_irq(lock)					\
	do {							\
		local_irq_disable();				\
		local_lock_acquire(this_cpu_ptr(lock));		\
	} while (0)

#define __local_lock_irqsave(lock, flags)			\
	do {							\
		local_irq_save(flags);				\
		local_lock_acquire(this_cpu_ptr(lock));		\
	} while (0)

#define __local_unlock(lock)					\
	do {							\
		local_lock_release(this_cpu_ptr(lock));		\
		preempt_enable();				\
	} while (0)

#define __local_unlock_irq(lock)				\
	do {							\
		local_lock_release(this_cpu_ptr(lock));		\
		local_irq_enable();				\
	} while (0)

#define __local_unlock_irqrestore(lock, flags)			\
	do {							\
		local_lock_release(this_cpu_ptr(lock));		\
		local_irq_restore(flags);			\
	} while (0)
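The merge message's claim that generated code does not change follows directly from the definitions above: without CONFIG_DEBUG_LOCK_ALLOC, local_lock_t is an empty struct and local_lock_acquire()/local_lock_release() are empty inlines. A manual expansion, reusing the hypothetical my_stats variable from the earlier sketch:

/* local_lock(&my_stats.lock) with lock debugging disabled expands to: */
do {
	preempt_disable();
	local_lock_acquire(this_cpu_ptr(&my_stats.lock));	/* empty inline, optimized away */
} while (0);

/* local_unlock_irqrestore(&my_stats.lock, flags) expands to: */
do {
	local_lock_release(this_cpu_ptr(&my_stats.lock));	/* empty inline, optimized away */
	local_irq_restore(flags);
} while (0);

With lock debugging enabled, the same macros additionally feed lockdep (lock_map_acquire/lock_map_release) and track the owning task.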
include/linux/radix-tree.h
@@ -16,11 +16,20 @@
 #include <linux/spinlock.h>
 #include <linux/types.h>
 #include <linux/xarray.h>
+#include <linux/local_lock.h>
 
 /* Keep unconverted code working */
 #define radix_tree_root		xarray
 #define radix_tree_node		xa_node
 
+struct radix_tree_preload {
+	local_lock_t lock;
+	unsigned nr;
+	/* nodes->parent points to next preallocated node */
+	struct radix_tree_node *nodes;
+};
+DECLARE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
+
 /*
  * The bottom two bits of the slot determine how the remaining bits in the
  * slot are interpreted:
@@ -245,7 +254,7 @@ int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);
 
 static inline void radix_tree_preload_end(void)
 {
-	preempt_enable();
+	local_unlock(&radix_tree_preloads.lock);
 }
 
 void __rcu **idr_get_free(struct radix_tree_root *root,
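The local_unlock() in radix_tree_preload_end() pairs with an acquire on the preload path in lib/radix-tree.c, which this series converts from preempt_disable() to the new lock. That side is not part of this excerpt; an abridged sketch of __radix_tree_preload() (error handling and GFP-flag details trimmed) shows the pairing:

/* Abridged sketch of the acquire side in lib/radix-tree.c. */
static int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
{
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;
	int ret = -ENOMEM;

	local_lock(&radix_tree_preloads.lock);
	rtp = this_cpu_ptr(&radix_tree_preloads);
	while (rtp->nr < nr) {
		/* Drop the lock while a sleeping allocation is possible. */
		local_unlock(&radix_tree_preloads.lock);
		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
		if (!node)
			goto out;
		local_lock(&radix_tree_preloads.lock);
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr < nr) {
			node->parent = rtp->nodes;
			rtp->nodes = node;
			rtp->nr++;
		} else {
			kmem_cache_free(radix_tree_node_cachep, node);
		}
	}
	ret = 0;
out:
	/* On success the lock stays held; radix_tree_preload_end() releases it. */
	return ret;
}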
include/linux/swap.h
@@ -337,6 +337,7 @@ extern void activate_page(struct page *);
 extern void mark_page_accessed(struct page *);
 extern void lru_add_drain(void);
 extern void lru_add_drain_cpu(int cpu);
+extern void lru_add_drain_cpu_zone(struct zone *zone);
 extern void lru_add_drain_all(void);
 extern void rotate_reclaimable_page(struct page *page);
 extern void deactivate_file_page(struct page *page);
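The new lru_add_drain_cpu_zone() declaration belongs to the "mm/swap: Use local_lock for protection" commit in this series, which also moves the per-CPU LRU pagevecs under a local_lock_t. A heavily simplified sketch of that conversion (field list abridged, not the verbatim upstream code):

/* Simplified sketch of the mm/swap.c side of this series. */
struct lru_pvecs {
	local_lock_t	lock;
	struct pagevec	lru_add;
	/* ... further pagevecs (deactivate, lazyfree, ...) ... */
};

static DEFINE_PER_CPU(struct lru_pvecs, lru_pvecs) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void __lru_cache_add(struct page *page)
{
	struct pagevec *pvec;

	local_lock(&lru_pvecs.lock);
	pvec = this_cpu_ptr(&lru_pvecs.lru_add);
	get_page(page);
	if (!pagevec_add(pvec, page) || PageCompound(page))
		__pagevec_lru_add(pvec);
	local_unlock(&lru_pvecs.lock);
}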