Skip to content

Commit f3ec502

Browse files
surenbaghdasaryan authored and Vlastimil Babka (SUSE) committed
mm/slab: mark alloc tags empty for sheaves allocated with __GFP_NO_OBJ_EXT
alloc_empty_sheaf() allocates sheaves from SLAB_KMALLOC caches using
__GFP_NO_OBJ_EXT to avoid recursion, however it does not mark their
allocation tags empty before freeing, which results in a warning when
CONFIG_MEM_ALLOC_PROFILING_DEBUG is set. Fix this by marking allocation
tags for such sheaves as empty.

The problem was technically introduced in commit 4c0a17e but only
becomes possible to hit with commit 913ffd3.

Fixes: 4c0a17e ("slab: prevent recursive kmalloc() in alloc_empty_sheaf()")
Fixes: 913ffd3 ("slab: handle kmalloc sheaves bootstrap")
Reported-by: David Wang <00107082@163.com>
Closes: https://lore.kernel.org/all/20260223155128.3849-1-00107082@163.com/
Analyzed-by: Harry Yoo <harry.yoo@oracle.com>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Tested-by: Harry Yoo <harry.yoo@oracle.com>
Tested-by: David Wang <00107082@163.com>
Link: https://patch.msgid.link/20260225163407.2218712-1-surenb@google.com
Signed-off-by: Vlastimil Babka (SUSE) <vbabka@kernel.org>
1 parent 021ca6b commit f3ec502

File tree

3 files changed

+27
-12
lines changed

3 files changed

+27
-12
lines changed

include/linux/gfp_types.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -139,6 +139,8 @@ enum {
139139
* %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
140140
*
141141
* %__GFP_NO_OBJ_EXT causes slab allocation to have no object extension.
142+
* mark_obj_codetag_empty() should be called upon freeing for objects allocated
143+
* with this flag to indicate that their NULL tags are expected and normal.
142144
*/
143145
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
144146
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)

mm/slab.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -290,14 +290,14 @@ static inline void *nearest_obj(struct kmem_cache *cache,
290290

291291
/* Determine object index from a given position */
292292
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
293-
void *addr, void *obj)
293+
void *addr, const void *obj)
294294
{
295295
return reciprocal_divide(kasan_reset_tag(obj) - addr,
296296
cache->reciprocal_size);
297297
}
298298

299299
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
300-
const struct slab *slab, void *obj)
300+
const struct slab *slab, const void *obj)
301301
{
302302
if (is_kfence_address(obj))
303303
return 0;

mm/slub.c

Lines changed: 23 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -2041,18 +2041,18 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
20412041

20422042
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
20432043

2044-
static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
2044+
static inline void mark_obj_codetag_empty(const void *obj)
20452045
{
2046-
struct slab *obj_exts_slab;
2046+
struct slab *obj_slab;
20472047
unsigned long slab_exts;
20482048

2049-
obj_exts_slab = virt_to_slab(obj_exts);
2050-
slab_exts = slab_obj_exts(obj_exts_slab);
2049+
obj_slab = virt_to_slab(obj);
2050+
slab_exts = slab_obj_exts(obj_slab);
20512051
if (slab_exts) {
20522052
get_slab_obj_exts(slab_exts);
2053-
unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
2054-
obj_exts_slab, obj_exts);
2055-
struct slabobj_ext *ext = slab_obj_ext(obj_exts_slab,
2053+
unsigned int offs = obj_to_index(obj_slab->slab_cache,
2054+
obj_slab, obj);
2055+
struct slabobj_ext *ext = slab_obj_ext(obj_slab,
20562056
slab_exts, offs);
20572057

20582058
if (unlikely(is_codetag_empty(&ext->ref))) {
@@ -2090,7 +2090,7 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
20902090

20912091
#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
20922092

2093-
static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
2093+
static inline void mark_obj_codetag_empty(const void *obj) {}
20942094
static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; }
20952095
static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
20962096
struct slabobj_ext *vec, unsigned int objects) {}
@@ -2211,7 +2211,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
22112211
* assign slabobj_exts in parallel. In this case the existing
22122212
* objcg vector should be reused.
22132213
*/
2214-
mark_objexts_empty(vec);
2214+
mark_obj_codetag_empty(vec);
22152215
if (unlikely(!allow_spin))
22162216
kfree_nolock(vec);
22172217
else
@@ -2254,7 +2254,7 @@ static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
22542254
* NULL, therefore replace NULL with CODETAG_EMPTY to indicate that
22552255
* the extension for obj_exts is expected to be NULL.
22562256
*/
2257-
mark_objexts_empty(obj_exts);
2257+
mark_obj_codetag_empty(obj_exts);
22582258
if (allow_spin)
22592259
kfree(obj_exts);
22602260
else
@@ -2312,6 +2312,10 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
23122312

23132313
#else /* CONFIG_SLAB_OBJ_EXT */
23142314

2315+
static inline void mark_obj_codetag_empty(const void *obj)
2316+
{
2317+
}
2318+
23152319
static inline void init_slab_obj_exts(struct slab *slab)
23162320
{
23172321
}
@@ -2783,6 +2787,15 @@ static inline struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s,
27832787

27842788
static void free_empty_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf)
27852789
{
2790+
/*
2791+
* If the sheaf was created with __GFP_NO_OBJ_EXT flag then its
2792+
* corresponding extension is NULL and alloc_tag_sub() will throw a
2793+
* warning, therefore replace NULL with CODETAG_EMPTY to indicate
2794+
* that the extension for this sheaf is expected to be NULL.
2795+
*/
2796+
if (s->flags & SLAB_KMALLOC)
2797+
mark_obj_codetag_empty(sheaf);
2798+
27862799
kfree(sheaf);
27872800

27882801
stat(s, SHEAF_FREE);

0 commit comments

Comments (0)