Skip to content

Commit fab0694

Browse files
hygonitehcaster
authored and committed
mm/slab: move [__]ksize and slab_ksize() to mm/slub.c
To access SLUB's internal implementation details beyond cache flags in ksize(), move __ksize(), ksize(), and slab_ksize() to mm/slub.c. [vbabka@suse.cz: also make __ksize() static and move its kerneldoc to ksize() ] Signed-off-by: Harry Yoo <harry.yoo@oracle.com> Link: https://patch.msgid.link/20260113061845.159790-9-harry.yoo@oracle.com Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
1 parent 70089d0 commit fab0694

File tree

4 files changed

+86
-89
lines changed

4 files changed

+86
-89
lines changed

include/linux/slab.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -509,7 +509,6 @@ void * __must_check krealloc_node_align_noprof(const void *objp, size_t new_size
509509
void kfree(const void *objp);
510510
void kfree_nolock(const void *objp);
511511
void kfree_sensitive(const void *objp);
512-
size_t __ksize(const void *objp);
513512

514513
DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
515514
DEFINE_FREE(kfree_sensitive, void *, if (_T) kfree_sensitive(_T))

mm/slab.h

Lines changed: 0 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -661,33 +661,6 @@ void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
661661

662662
void kvfree_rcu_cb(struct rcu_head *head);
663663

664-
size_t __ksize(const void *objp);
665-
666-
static inline size_t slab_ksize(const struct kmem_cache *s)
667-
{
668-
#ifdef CONFIG_SLUB_DEBUG
669-
/*
670-
* Debugging requires use of the padding between object
671-
* and whatever may come after it.
672-
*/
673-
if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
674-
return s->object_size;
675-
#endif
676-
if (s->flags & SLAB_KASAN)
677-
return s->object_size;
678-
/*
679-
* If we have the need to store the freelist pointer
680-
* back there or track user information then we can
681-
* only use the space before that information.
682-
*/
683-
if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
684-
return s->inuse;
685-
/*
686-
* Else we can use all the padding etc for the allocation
687-
*/
688-
return s->size;
689-
}
690-
691664
static inline unsigned int large_kmalloc_order(const struct page *page)
692665
{
693666
return page[1].flags.f & 0xff;

mm/slab_common.c

Lines changed: 0 additions & 61 deletions
Original file line numberDiff line numberDiff line change
@@ -1021,43 +1021,6 @@ void __init create_kmalloc_caches(void)
10211021
0, SLAB_NO_MERGE, NULL);
10221022
}
10231023

1024-
/**
1025-
* __ksize -- Report full size of underlying allocation
1026-
* @object: pointer to the object
1027-
*
1028-
* This should only be used internally to query the true size of allocations.
1029-
* It is not meant to be a way to discover the usable size of an allocation
1030-
* after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
1031-
* the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
1032-
* and/or FORTIFY_SOURCE.
1033-
*
1034-
* Return: size of the actual memory used by @object in bytes
1035-
*/
1036-
size_t __ksize(const void *object)
1037-
{
1038-
const struct page *page;
1039-
const struct slab *slab;
1040-
1041-
if (unlikely(object == ZERO_SIZE_PTR))
1042-
return 0;
1043-
1044-
page = virt_to_page(object);
1045-
1046-
if (unlikely(PageLargeKmalloc(page)))
1047-
return large_kmalloc_size(page);
1048-
1049-
slab = page_slab(page);
1050-
/* Delete this after we're sure there are no users */
1051-
if (WARN_ON(!slab))
1052-
return page_size(page);
1053-
1054-
#ifdef CONFIG_SLUB_DEBUG
1055-
skip_orig_size_check(slab->slab_cache, object);
1056-
#endif
1057-
1058-
return slab_ksize(slab->slab_cache);
1059-
}
1060-
10611024
gfp_t kmalloc_fix_flags(gfp_t flags)
10621025
{
10631026
gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
@@ -1273,30 +1236,6 @@ void kfree_sensitive(const void *p)
12731236
}
12741237
EXPORT_SYMBOL(kfree_sensitive);
12751238

1276-
size_t ksize(const void *objp)
1277-
{
1278-
/*
1279-
* We need to first check that the pointer to the object is valid.
1280-
* The KASAN report printed from ksize() is more useful, then when
1281-
* it's printed later when the behaviour could be undefined due to
1282-
* a potential use-after-free or double-free.
1283-
*
1284-
* We use kasan_check_byte(), which is supported for the hardware
1285-
* tag-based KASAN mode, unlike kasan_check_read/write().
1286-
*
1287-
* If the pointed to memory is invalid, we return 0 to avoid users of
1288-
* ksize() writing to and potentially corrupting the memory region.
1289-
*
1290-
* We want to perform the check before __ksize(), to avoid potentially
1291-
* crashing in __ksize() due to accessing invalid metadata.
1292-
*/
1293-
if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
1294-
return 0;
1295-
1296-
return kfence_ksize(objp) ?: __ksize(objp);
1297-
}
1298-
EXPORT_SYMBOL(ksize);
1299-
13001239
#ifdef CONFIG_BPF_SYSCALL
13011240
#include <linux/btf.h>
13021241

mm/slub.c

Lines changed: 86 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7028,6 +7028,92 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
70287028
}
70297029
EXPORT_SYMBOL(kmem_cache_free);
70307030

7031+
static inline size_t slab_ksize(const struct kmem_cache *s)
7032+
{
7033+
#ifdef CONFIG_SLUB_DEBUG
7034+
/*
7035+
* Debugging requires use of the padding between object
7036+
* and whatever may come after it.
7037+
*/
7038+
if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
7039+
return s->object_size;
7040+
#endif
7041+
if (s->flags & SLAB_KASAN)
7042+
return s->object_size;
7043+
/*
7044+
* If we have the need to store the freelist pointer
7045+
* back there or track user information then we can
7046+
* only use the space before that information.
7047+
*/
7048+
if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
7049+
return s->inuse;
7050+
/*
7051+
* Else we can use all the padding etc for the allocation
7052+
*/
7053+
return s->size;
7054+
}
7055+
7056+
static size_t __ksize(const void *object)
7057+
{
7058+
const struct page *page;
7059+
const struct slab *slab;
7060+
7061+
if (unlikely(object == ZERO_SIZE_PTR))
7062+
return 0;
7063+
7064+
page = virt_to_page(object);
7065+
7066+
if (unlikely(PageLargeKmalloc(page)))
7067+
return large_kmalloc_size(page);
7068+
7069+
slab = page_slab(page);
7070+
/* Delete this after we're sure there are no users */
7071+
if (WARN_ON(!slab))
7072+
return page_size(page);
7073+
7074+
#ifdef CONFIG_SLUB_DEBUG
7075+
skip_orig_size_check(slab->slab_cache, object);
7076+
#endif
7077+
7078+
return slab_ksize(slab->slab_cache);
7079+
}
7080+
7081+
/**
7082+
* ksize -- Report full size of underlying allocation
7083+
* @objp: pointer to the object
7084+
*
7085+
* This should only be used internally to query the true size of allocations.
7086+
* It is not meant to be a way to discover the usable size of an allocation
7087+
* after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
7088+
* the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
7089+
* and/or FORTIFY_SOURCE.
7090+
*
7091+
* Return: size of the actual memory used by @objp in bytes
7092+
*/
7093+
size_t ksize(const void *objp)
7094+
{
7095+
/*
7096+
* We need to first check that the pointer to the object is valid.
7097+
* The KASAN report printed from ksize() is more useful, then when
7098+
* it's printed later when the behaviour could be undefined due to
7099+
* a potential use-after-free or double-free.
7100+
*
7101+
* We use kasan_check_byte(), which is supported for the hardware
7102+
* tag-based KASAN mode, unlike kasan_check_read/write().
7103+
*
7104+
* If the pointed to memory is invalid, we return 0 to avoid users of
7105+
* ksize() writing to and potentially corrupting the memory region.
7106+
*
7107+
* We want to perform the check before __ksize(), to avoid potentially
7108+
* crashing in __ksize() due to accessing invalid metadata.
7109+
*/
7110+
if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
7111+
return 0;
7112+
7113+
return kfence_ksize(objp) ?: __ksize(objp);
7114+
}
7115+
EXPORT_SYMBOL(ksize);
7116+
70317117
static void free_large_kmalloc(struct page *page, void *object)
70327118
{
70337119
unsigned int order = compound_order(page);

0 commit comments

Comments (0)