From bd53ce4da252ddb1ae4257c164f80aea3d8ab90c Mon Sep 17 00:00:00 2001
From: Miaohe Lin
Date: Thu, 17 Feb 2022 16:58:42 +0800
Subject: [PATCH 1/6] mm/slob: make kmem_cache_boot static

kmem_cache_boot is never accessed outside slob.c. Make it static.

Signed-off-by: Miaohe Lin
Acked-by: David Rientjes
Signed-off-by: Vlastimil Babka
Link: https://lore.kernel.org/r/20220217085842.29032-1-linmiaohe@huawei.com
---
 mm/slob.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/slob.c b/mm/slob.c
index 60c5842215f1..1179bcad2df8 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -708,7 +708,7 @@ int __kmem_cache_shrink(struct kmem_cache *d)
 	return 0;
 }
 
-struct kmem_cache kmem_cache_boot = {
+static struct kmem_cache kmem_cache_boot = {
 	.name = "kmem_cache",
 	.size = sizeof(struct kmem_cache),
 	.flags = SLAB_PANIC,

From 7d6b6cc355378a66044cbd25ccee4153bf60dea9 Mon Sep 17 00:00:00 2001
From: Miaohe Lin
Date: Thu, 17 Feb 2022 17:16:09 +0800
Subject: [PATCH 2/6] mm/slab_common: use helper function is_power_of_2()

Use helper function is_power_of_2() to check if KMALLOC_MIN_SIZE is a
power of two. Minor readability improvement.

Signed-off-by: Miaohe Lin
Signed-off-by: Vlastimil Babka
Link: https://lore.kernel.org/r/20220217091609.8214-1-linmiaohe@huawei.com
---
 mm/slab_common.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/slab_common.c b/mm/slab_common.c
index 23f2ab0713b7..6ee64d6208b3 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -807,7 +807,7 @@ void __init setup_kmalloc_cache_index_table(void)
 	unsigned int i;
 
 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
-		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
+		!is_power_of_2(KMALLOC_MIN_SIZE));
 
 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
 		unsigned int elem = size_index_elem(i);

From d1d28bd9a0f896c7d2f7147ca631f86c49b16d7f Mon Sep 17 00:00:00 2001
From: Lianjie Zhang
Date: Sun, 6 Mar 2022 15:38:18 +0800
Subject: [PATCH 3/6] mm/slub: use helper macro __ATTR_XX_MODE for SLAB_ATTR(_RO)

This allows more concise code, and VERIFY_OCTAL_PERMISSIONS() can help
validate any future change.

Signed-off-by: Lianjie Zhang
Acked-by: David Rientjes
Signed-off-by: Vlastimil Babka
Link: https://lore.kernel.org/r/20220306073818.15089-1-zhanglianjie@uniontech.com
---
 mm/slub.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 261474092e43..d33aada17985 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5344,12 +5344,10 @@ struct slab_attribute {
 };
 
 #define SLAB_ATTR_RO(_name) \
-	static struct slab_attribute _name##_attr = \
-	__ATTR(_name, 0400, _name##_show, NULL)
+	static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400)
 
 #define SLAB_ATTR(_name) \
-	static struct slab_attribute _name##_attr = \
-	__ATTR(_name, 0600, _name##_show, _name##_store)
+	static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600)
 
 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
 {

From ae44d81d502756cb97e7b698207083967d6f20a3 Mon Sep 17 00:00:00 2001
From: Miaohe Lin
Date: Wed, 9 Mar 2022 17:20:36 +0800
Subject: [PATCH 4/6] mm/slub: remove forced_order parameter in calculate_sizes

Since commit 32a6f409b693 ("mm, slub: remove runtime allocation order
changes"), forced_order is always -1. Remove this unneeded parameter to
simplify the code.

Signed-off-by: Miaohe Lin
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka
Link: https://lore.kernel.org/r/20220309092036.50844-1-linmiaohe@huawei.com
---
 mm/slub.c | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index d33aada17985..498b56f66f5e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4046,7 +4046,7 @@ static void set_cpu_partial(struct kmem_cache *s)
  * calculate_sizes() determines the order and the distribution of data within
  * a slab object.
  */
-static int calculate_sizes(struct kmem_cache *s, int forced_order)
+static int calculate_sizes(struct kmem_cache *s)
 {
 	slab_flags_t flags = s->flags;
 	unsigned int size = s->object_size;
@@ -4150,10 +4150,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	size = ALIGN(size, s->align);
 	s->size = size;
 	s->reciprocal_size = reciprocal_value(size);
-	if (forced_order >= 0)
-		order = forced_order;
-	else
-		order = calculate_order(size);
+	order = calculate_order(size);
 
 	if ((int)order < 0)
 		return 0;
@@ -4189,7 +4186,7 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
 	s->random = get_random_long();
 #endif
 
-	if (!calculate_sizes(s, -1))
+	if (!calculate_sizes(s))
 		goto error;
 	if (disable_higher_order_debug) {
 		/*
@@ -4199,7 +4196,7 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
 		if (get_order(s->size) > get_order(s->object_size)) {
 			s->flags &= ~DEBUG_METADATA_FLAGS;
 			s->offset = 0;
-			if (!calculate_sizes(s, -1))
+			if (!calculate_sizes(s))
 				goto error;
 		}
 	}

From 382627824afb09cd86192f4b649fca8c5bfe5f40 Mon Sep 17 00:00:00 2001
From: Xiongwei Song
Date: Thu, 10 Mar 2022 22:07:00 +0800
Subject: [PATCH 5/6] mm: slab: Delete unused SLAB_DEACTIVATED flag

Since commit 9855609bde03 ("mm: memcg/slab: use a single set of
kmem_caches for all accounted allocations") deleted all SLAB_DEACTIVATED
users, this flag is not needed any more; let's delete it.

Signed-off-by: Xiongwei Song
Acked-by: David Rientjes
Reviewed-by: Roman Gushchin
Acked-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka
Link: https://lore.kernel.org/r/20220310140701.87908-2-sxwjean@me.com
---
 include/linux/slab.h | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 37bde99b74af..b6b3eed6c7c4 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -117,9 +117,6 @@
 #define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
 
-/* Slab deactivation flag */
-#define SLAB_DEACTIVATED	((slab_flags_t __force)0x10000000U)
-
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
  *

From a485e1dacdb09802ad0ba1126bc4d029773ae1b2 Mon Sep 17 00:00:00 2001
From: Xiongwei Song
Date: Thu, 10 Mar 2022 22:07:01 +0800
Subject: [PATCH 6/6] mm: slub: Delete useless parameter of alloc_slab_page()

The parameter @s is useless for alloc_slab_page(). It was added in 2014
by commit 5dfb41750992 ("sl[au]b: charge slabs to kmemcg explicitly").
The need for it was removed in 2020 by commit 1f3147b49d75 ("mm: slub:
call account_slab_page() after slab page initialization"). Let's delete
it.

[willy@infradead.org: Added detailed history of @s]
Signed-off-by: Xiongwei Song
Reviewed-by: Matthew Wilcox (Oracle)
Acked-by: David Rientjes
Reviewed-by: Roman Gushchin
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka
Link: https://lore.kernel.org/r/20220310140701.87908-3-sxwjean@me.com
---
 mm/slub.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 498b56f66f5e..c2d3fc51377d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1788,8 +1788,8 @@ static void *setup_object(struct kmem_cache *s, struct slab *slab,
 /*
  * Slab allocation and freeing
  */
-static inline struct slab *alloc_slab_page(struct kmem_cache *s,
-		gfp_t flags, int node, struct kmem_cache_order_objects oo)
+static inline struct slab *alloc_slab_page(gfp_t flags, int node,
+		struct kmem_cache_order_objects oo)
 {
 	struct folio *folio;
 	struct slab *slab;
@@ -1941,7 +1941,7 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
 		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
 
-	slab = alloc_slab_page(s, alloc_gfp, node, oo);
+	slab = alloc_slab_page(alloc_gfp, node, oo);
 	if (unlikely(!slab)) {
 		oo = s->min;
 		alloc_gfp = flags;
@@ -1949,7 +1949,7 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 		/*
 		 * Allocation may have failed due to fragmentation.
 		 * Try a lower order alloc if possible
 		 */
-		slab = alloc_slab_page(s, alloc_gfp, node, oo);
+		slab = alloc_slab_page(alloc_gfp, node, oo);
 		if (unlikely(!slab))
 			goto out;
 		stat(s, ORDER_FALLBACK);
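A short aside on the is_power_of_2() change in patch 2/6: for any nonzero
size, the old open-coded bitmask test and !is_power_of_2() fire under
exactly the same conditions, so the BUILD_BUG_ON() is unchanged in
practice. The two only differ for a size of zero (the bitmask test stays
quiet, the helper-based test fires), and KMALLOC_MIN_SIZE is a nonzero
compile-time constant. Below is a minimal userspace sketch of that
equivalence; the is_power_of_2() here is a local stand-in that mirrors the
kernel helper's semantics, not the actual include/linux/log2.h code:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Local stand-in mirroring the kernel helper's semantics. */
static bool is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	unsigned long size;

	/* Compare the old open-coded test with the helper-based one. */
	for (size = 1; size <= 4096; size++) {
		bool old_fires = (size & (size - 1)) != 0; /* pre-patch condition */
		bool new_fires = !is_power_of_2(size);     /* post-patch condition */

		assert(old_fires == new_fires);
	}
	printf("old and new checks agree for all nonzero sizes tested\n");
	return 0;
}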