slab: Add SL_pfmemalloc flag
Give slab its own name for this flag. Move the implementation from
slab.h to slub.c since it's only used inside slub.c.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Harry Yoo <harry.yoo@oracle.com>
Link: https://patch.msgid.link/20250611155916.2579160-5-willy@infradead.org
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
commit 3df29914d9
parent c5c44900f4
committed by Vlastimil Babka
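For context, this patch only renames and relocates the helpers; their
callers in mm/slub.c are untouched. A minimal sketch of the call site
that sets the flag at allocation time, modelled on alloc_slab_page() in
mm/slub.c (the wrapper function here is hypothetical, not code from
this commit):

    /*
     * Sketch (hypothetical wrapper, not from this commit): record at
     * allocation time whether the backing folio came from pfmemalloc
     * reserves, cf. alloc_slab_page() in mm/slub.c.
     */
    static void example_record_pfmemalloc(struct slab *slab, struct folio *folio)
    {
            if (folio_is_pfmemalloc(folio))
                    slab_set_pfmemalloc(slab);      /* sets SL_pfmemalloc */
    }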
 mm/slab.h | 24 ------------------------

diff --git a/mm/slab.h b/mm/slab.h
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -167,30 +167,6 @@ static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)
  */
 #define slab_page(s) folio_page(slab_folio(s), 0)
 
-/*
- * If network-based swap is enabled, sl*b must keep track of whether pages
- * were allocated from pfmemalloc reserves.
- */
-static inline bool slab_test_pfmemalloc(const struct slab *slab)
-{
-	return folio_test_active(slab_folio(slab));
-}
-
-static inline void slab_set_pfmemalloc(struct slab *slab)
-{
-	folio_set_active(slab_folio(slab));
-}
-
-static inline void slab_clear_pfmemalloc(struct slab *slab)
-{
-	folio_clear_active(slab_folio(slab));
-}
-
-static inline void __slab_clear_pfmemalloc(struct slab *slab)
-{
-	__folio_clear_active(slab_folio(slab));
-}
-
 static inline void *slab_address(const struct slab *slab)
 {
 	return folio_address(slab_folio(slab));
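The removal above is behaviour-preserving because the replacement
helpers in mm/slub.c (below) operate on the same bit: SL_pfmemalloc is
defined as PG_active, which is exactly what folio_test_active() tested.
A hypothetical compile-time check, not part of this commit, makes that
explicit:

    /*
     * Hypothetical (not in this commit): the slub-private flag name
     * aliases the page flag the old slab.h helpers used, so
     * test_bit(SL_pfmemalloc, &slab->flags) reads the same bit that
     * folio_test_active(slab_folio(slab)) did.
     */
    static_assert(SL_pfmemalloc == PG_active);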
 mm/slub.c | 21 +++++++++++++++++++++

diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -187,6 +187,7 @@
  * enum slab_flags - How the slab flags bits are used.
  * @SL_locked: Is locked with slab_lock()
  * @SL_partial: On the per-node partial list
+ * @SL_pfmemalloc: Was allocated from PF_MEMALLOC reserves
  *
  * The slab flags share space with the page flags but some bits have
  * different interpretations. The high bits are used for information
@@ -195,6 +196,7 @@
 enum slab_flags {
 	SL_locked = PG_locked,
 	SL_partial = PG_workingset,	/* Historical reasons for this bit */
+	SL_pfmemalloc = PG_active,	/* Historical reasons for this bit */
 };
 
 /*
@@ -648,6 +650,25 @@ static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
 }
 #endif /* CONFIG_SLUB_CPU_PARTIAL */
 
+/*
+ * If network-based swap is enabled, slub must keep track of whether memory
+ * were allocated from pfmemalloc reserves.
+ */
+static inline bool slab_test_pfmemalloc(const struct slab *slab)
+{
+	return test_bit(SL_pfmemalloc, &slab->flags);
+}
+
+static inline void slab_set_pfmemalloc(struct slab *slab)
+{
+	set_bit(SL_pfmemalloc, &slab->flags);
+}
+
+static inline void __slab_clear_pfmemalloc(struct slab *slab)
+{
+	__clear_bit(SL_pfmemalloc, &slab->flags);
+}
+
 /*
  * Per slab locking using the pagelock
  */
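For a sense of how the test side is consumed, slub hands out objects
from a pfmemalloc slab only to requests that are themselves entitled to
the reserves; roughly (reconstructed from mm/slub.c's pfmemalloc_match(),
which this commit does not change):

    /*
     * Roughly pfmemalloc_match() in mm/slub.c (unchanged by this commit):
     * allow an allocation from a pfmemalloc slab only if the request may
     * itself dip into the reserves.
     */
    static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
    {
            if (unlikely(slab_test_pfmemalloc(slab)))
                    return gfp_pfmemalloc_allowed(gfpflags);

            return true;
    }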