mm/slab: mark alloc tags empty for sheaves allocated with __GFP_NO_OBJ_EXT
alloc_empty_sheaf() allocates sheaves from SLAB_KMALLOC caches using
__GFP_NO_OBJ_EXT to avoid recursion. However, it does not mark their
allocation tags empty before freeing, which results in a warning when
CONFIG_MEM_ALLOC_PROFILING_DEBUG is set. Fix this by marking the
allocation tags of such sheaves as empty.

The problem was technically introduced in commit 4c0a17e283 but only
becomes possible to hit with commit 913ffd3a1b.

Fixes: 4c0a17e283 ("slab: prevent recursive kmalloc() in alloc_empty_sheaf()")
Fixes: 913ffd3a1b ("slab: handle kmalloc sheaves bootstrap")
Reported-by: David Wang <00107082@163.com>
Closes: https://lore.kernel.org/all/20260223155128.3849-1-00107082@163.com/
Analyzed-by: Harry Yoo <harry.yoo@oracle.com>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Tested-by: Harry Yoo <harry.yoo@oracle.com>
Tested-by: David Wang <00107082@163.com>
Link: https://patch.msgid.link/20260225163407.2218712-1-surenb@google.com
Signed-off-by: Vlastimil Babka (SUSE) <vbabka@kernel.org>
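For readers unfamiliar with allocation profiling, the following standalone C toy (all names illustrative, not kernel API) models the invariant this fix restores: under the debug config, a NULL allocation tag on the free path is treated as a profiling bug unless the object was explicitly marked as expected to be untagged, which is what marking the tag CODETAG_EMPTY does.

#include <stdio.h>

#define TAG_EMPTY ((void *)1)	/* stand-in for the kernel's CODETAG_EMPTY */

struct obj {
	void *tag;	/* NULL: profiling never tagged this allocation */
};

/* Models the CONFIG_MEM_ALLOC_PROFILING_DEBUG check on the free path. */
static void debug_check_on_free(const struct obj *o)
{
	if (o->tag == NULL)
		fprintf(stderr, "WARNING: object freed with NULL alloc tag\n");
}

int main(void)
{
	struct obj sheaf = { .tag = NULL };	/* kmalloc'd with __GFP_NO_OBJ_EXT */

	debug_check_on_free(&sheaf);	/* before the fix: warns */

	sheaf.tag = TAG_EMPTY;		/* the fix: mark the tag "expected empty" */
	debug_check_on_free(&sheaf);	/* after the fix: silent */
	return 0;
}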
committed by Vlastimil Babka (SUSE)
parent 021ca6b670
commit f3ec502b67
include/linux/gfp_types.h
@@ -139,6 +139,8 @@ enum {
  * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
  *
  * %__GFP_NO_OBJ_EXT causes slab allocation to have no object extension.
+ * mark_obj_codetag_empty() should be called upon freeing for objects allocated
+ * with this flag to indicate that their NULL tags are expected and normal.
  */
 #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
 #define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)
mm/slab.h
@@ -290,14 +290,14 @@ static inline void *nearest_obj(struct kmem_cache *cache,
 
 /* Determine object index from a given position */
 static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
-					  void *addr, void *obj)
+					  void *addr, const void *obj)
 {
 	return reciprocal_divide(kasan_reset_tag(obj) - addr,
 				 cache->reciprocal_size);
 }
 
 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
-					const struct slab *slab, void *obj)
+					const struct slab *slab, const void *obj)
 {
 	if (is_kfence_address(obj))
 		return 0;
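The hunk above only adds const qualification, so that the new mark_obj_codetag_empty(const void *obj) in mm/slub.c can call obj_to_index() without casting away const. The index itself is the object's offset within the slab divided by the object size, with the division replaced by a precomputed multiply-and-shift (cache->reciprocal_size). Below is a userspace sketch of that trick, mirroring the arithmetic of the kernel's lib/reciprocal_div.c; it assumes a divisor of at least 2, and the values in main() are illustrative.

#include <stdint.h>
#include <stdio.h>

struct reciprocal_value { uint32_t m; uint8_t sh1, sh2; };

/* Same arithmetic as the kernel's reciprocal_divide() */
static uint32_t reciprocal_divide(uint32_t a, struct reciprocal_value R)
{
	uint32_t t = (uint32_t)(((uint64_t)a * R.m) >> 32);

	return (t + ((a - t) >> R.sh1)) >> R.sh2;
}

/* Mirrors lib/reciprocal_div.c; assumes d >= 2 */
static struct reciprocal_value reciprocal_value(uint32_t d)
{
	int l = 32 - __builtin_clz(d - 1);		/* fls(d - 1) */
	uint64_t m = ((1ULL << 32) * ((1ULL << l) - d)) / d + 1;

	return (struct reciprocal_value){
		.m = (uint32_t)m,
		.sh1 = l > 1 ? 1 : (uint8_t)l,		/* min(l, 1) */
		.sh2 = l > 1 ? (uint8_t)(l - 1) : 0,	/* max(l - 1, 0) */
	};
}

int main(void)
{
	/* 256-byte objects: the object at offset 1536 has index 6 */
	struct reciprocal_value R = reciprocal_value(256);

	printf("%u\n", reciprocal_divide(1536, R));
	return 0;
}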
mm/slub.c
@@ -2041,18 +2041,18 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
 
 #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
 
-static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
+static inline void mark_obj_codetag_empty(const void *obj)
 {
-	struct slab *obj_exts_slab;
+	struct slab *obj_slab;
 	unsigned long slab_exts;
 
-	obj_exts_slab = virt_to_slab(obj_exts);
-	slab_exts = slab_obj_exts(obj_exts_slab);
+	obj_slab = virt_to_slab(obj);
+	slab_exts = slab_obj_exts(obj_slab);
 	if (slab_exts) {
 		get_slab_obj_exts(slab_exts);
-		unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
-						 obj_exts_slab, obj_exts);
-		struct slabobj_ext *ext = slab_obj_ext(obj_exts_slab,
+		unsigned int offs = obj_to_index(obj_slab->slab_cache,
+						 obj_slab, obj);
+		struct slabobj_ext *ext = slab_obj_ext(obj_slab,
 						       slab_exts, offs);
 
 		if (unlikely(is_codetag_empty(&ext->ref))) {
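The renamed helper now takes an arbitrary object pointer rather than a slabobj_ext vector: it finds the slab containing the object, looks up that slab's extension vector, and addresses this object's slot. The hunk cuts off before the function's tail, so the toy below is only a hedged model: names are illustrative, plain arrays stand in for virt_to_slab() and slab_obj_exts(), and the early return on an already-empty tag is an assumption based on the visible is_codetag_empty() check.

#include <stdint.h>
#include <stdio.h>

enum { OBJ_SIZE = 256, OBJS_PER_SLAB = 16 };

struct ext { int empty; };	/* stand-in for slabobj_ext's tag reference */

struct toy_slab {
	char objects[OBJS_PER_SLAB][OBJ_SIZE];
	struct ext *exts;	/* stand-in for the slab_obj_exts() vector */
};

static void toy_mark_obj_codetag_empty(struct toy_slab *slab, const void *obj)
{
	unsigned int offs;
	struct ext *ext;

	if (!slab->exts)	/* slab has no extension vector: nothing to mark */
		return;

	/* obj_to_index(): offset within the slab divided by object size */
	offs = (unsigned int)(((uintptr_t)obj - (uintptr_t)slab->objects) / OBJ_SIZE);
	ext = &slab->exts[offs];

	if (ext->empty)		/* is_codetag_empty(): assumed early return */
		return;
	ext->empty = 1;		/* set_codetag_empty() */
}

int main(void)
{
	struct ext exts[OBJS_PER_SLAB] = {{ 0 }};
	struct toy_slab slab = { .exts = exts };

	toy_mark_obj_codetag_empty(&slab, &slab.objects[3]);
	printf("slot 3 marked empty: %d\n", exts[3].empty);	/* prints 1 */
	return 0;
}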
@@ -2090,7 +2090,7 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
 
 #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
 
-static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
+static inline void mark_obj_codetag_empty(const void *obj) {}
 static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; }
 static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
 					       struct slabobj_ext *vec, unsigned int objects) {}
@@ -2211,7 +2211,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 	 * assign slabobj_exts in parallel. In this case the existing
 	 * objcg vector should be reused.
 	 */
-	mark_objexts_empty(vec);
+	mark_obj_codetag_empty(vec);
 	if (unlikely(!allow_spin))
 		kfree_nolock(vec);
 	else
@@ -2254,7 +2254,7 @@ static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
 	 * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that
 	 * the extension for obj_exts is expected to be NULL.
 	 */
-	mark_objexts_empty(obj_exts);
+	mark_obj_codetag_empty(obj_exts);
 	if (allow_spin)
 		kfree(obj_exts);
 	else
@@ -2312,6 +2312,10 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
 
 #else /* CONFIG_SLAB_OBJ_EXT */
 
+static inline void mark_obj_codetag_empty(const void *obj)
+{
+}
+
 static inline void init_slab_obj_exts(struct slab *slab)
 {
 }
@@ -2783,6 +2787,15 @@ static inline struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s,
 
 static void free_empty_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf)
 {
+	/*
+	 * If the sheaf was created with __GFP_NO_OBJ_EXT flag then its
+	 * corresponding extension is NULL and alloc_tag_sub() will throw a
+	 * warning, therefore replace NULL with CODETAG_EMPTY to indicate
+	 * that the extension for this sheaf is expected to be NULL.
+	 */
+	if (s->flags & SLAB_KMALLOC)
+		mark_obj_codetag_empty(sheaf);
+
 	kfree(sheaf);
 
 	stat(s, SHEAF_FREE);
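The net effect matches the comment the patch adds to gfp_types.h: any allocation made with __GFP_NO_OBJ_EXT must be paired with mark_obj_codetag_empty() on the free path. A sketch of that pattern follows; the surrounding function is illustrative, while the identifiers are the ones used by this patch.

/* Sketch only: pairing rule for __GFP_NO_OBJ_EXT allocations. */
static int example_alloc_free(size_t size)
{
	void *obj = kmalloc(size, GFP_KERNEL | __GFP_NO_OBJ_EXT);

	if (!obj)
		return -ENOMEM;

	/* ... use obj; profiling never attached a tag to it ... */

	mark_obj_codetag_empty(obj);	/* NULL tag is expected and normal */
	kfree(obj);
	return 0;
}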