Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
mm/slab: simplify SLAB_* flag handling
SLUB is the only remaining allocator. We can therefore get rid of the
logic for allocator-specific flags:

* Merge SLAB_CACHE_FLAGS into SLAB_CORE_FLAGS.

* Remove CACHE_CREATE_MASK and instead mask out SLAB_DEBUG_FLAGS if
  !CONFIG_SLUB_DEBUG. SLAB_DEBUG_FLAGS is now defined unconditionally
  (no impact on existing code, which ignores it if !CONFIG_SLUB_DEBUG).

* Define SLAB_FLAGS_PERMITTED in terms of SLAB_CORE_FLAGS and
  SLAB_DEBUG_FLAGS (no functional change).

While at it also remove misleading comments that suggest that multiple
allocators are available.

Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
commit 12f4888c9d
parent dfd3df31c9
committed by Vlastimil Babka
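Before the diff itself, here is a minimal userspace sketch of the flag
handling this commit arrives at. It is illustrative only: the flag values,
the DEBUG_SUPPORTED toggle and create_cache() are made-up stand-ins; in the
kernel the masks live in mm/slab.h and the checks in
__kmem_cache_create_args().

/*
 * Sketch of the post-change flag handling (not kernel code).
 * All names and values below are illustrative stand-ins.
 */
#include <stdio.h>

#define SLAB_CORE_FLAGS		0x00ffu	/* always-valid flags (made up) */
#define SLAB_DEBUG_FLAGS	0x0f00u	/* debug-only flags (made up) */
#define SLAB_FLAGS_PERMITTED	(SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS)

#define DEBUG_SUPPORTED		0	/* stand-in for CONFIG_SLUB_DEBUG */

static int create_cache(unsigned int flags)
{
	/* Debug flags are silently masked out when debug support is off... */
	if (!DEBUG_SUPPORTED)
		flags &= ~SLAB_DEBUG_FLAGS;

	/* ...but genuinely unknown flags are still rejected outright. */
	if (flags & ~SLAB_FLAGS_PERMITTED)
		return -1;	/* -EINVAL in the kernel */

	printf("cache created with flags 0x%04x\n", flags);
	return 0;
}

int main(void)
{
	create_cache(0x0001 | 0x0100);	/* core + debug bit: debug bit dropped */
	create_cache(0x1000);		/* unknown bit: rejected */
	return 0;
}

Note that the ordering matters: the masking runs before the
SLAB_FLAGS_PERMITTED check, so a debug flag passed on a !CONFIG_SLUB_DEBUG
kernel is dropped rather than rejected, matching the old CACHE_CREATE_MASK
behaviour.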
mm/slab.h        | 32 (+5, -27)
mm/slab_common.c | 11 (+2, -9)

mm/slab.h:
@@ -457,39 +457,17 @@ static inline bool is_kmalloc_normal(struct kmem_cache *s)
 	return !(s->flags & (SLAB_CACHE_DMA|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT));
 }
 
-/* Legal flag mask for kmem_cache_create(), for various configurations */
 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
 			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
-			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
+			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS | \
+			 SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
+			 SLAB_TEMPORARY | SLAB_ACCOUNT | \
+			 SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)
 
-#ifdef CONFIG_SLUB_DEBUG
 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
 			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
-#else
-#define SLAB_DEBUG_FLAGS (0)
-#endif
 
-#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
-			  SLAB_TEMPORARY | SLAB_ACCOUNT | \
-			  SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)
-
-/* Common flags available with current configuration */
-#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
-
-/* Common flags permitted for kmem_cache_create */
-#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
-			      SLAB_RED_ZONE | \
-			      SLAB_POISON | \
-			      SLAB_STORE_USER | \
-			      SLAB_TRACE | \
-			      SLAB_CONSISTENCY_CHECKS | \
-			      SLAB_NOLEAKTRACE | \
-			      SLAB_RECLAIM_ACCOUNT | \
-			      SLAB_TEMPORARY | \
-			      SLAB_ACCOUNT | \
-			      SLAB_KMALLOC | \
-			      SLAB_NO_MERGE | \
-			      SLAB_NO_USER_FLAGS)
+#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS)
 
 bool __kmem_cache_empty(struct kmem_cache *);
 int __kmem_cache_shutdown(struct kmem_cache *);
mm/slab_common.c:

@@ -298,6 +298,8 @@ struct kmem_cache *__kmem_cache_create_args(const char *name,
 		static_branch_enable(&slub_debug_enabled);
 	if (flags & SLAB_STORE_USER)
 		stack_depot_init();
+#else
+	flags &= ~SLAB_DEBUG_FLAGS;
 #endif
 
 	mutex_lock(&slab_mutex);
@@ -307,20 +309,11 @@ struct kmem_cache *__kmem_cache_create_args(const char *name,
 		goto out_unlock;
 	}
 
-	/* Refuse requests with allocator specific flags */
 	if (flags & ~SLAB_FLAGS_PERMITTED) {
 		err = -EINVAL;
 		goto out_unlock;
 	}
 
-	/*
-	 * Some allocators will constraint the set of valid flags to a subset
-	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
-	 * case, and we'll just provide them with a sanitized version of the
-	 * passed flags.
-	 */
-	flags &= CACHE_CREATE_MASK;
-
 	/* Fail closed on bad usersize of useroffset values. */
 	if (!IS_ENABLED(CONFIG_HARDENED_USERCOPY) ||
 	    WARN_ON(!args->usersize && args->useroffset) ||