@@ -7028,6 +7028,92 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
70287028}
70297029EXPORT_SYMBOL (kmem_cache_free );
70307030
7031+ static inline size_t slab_ksize (const struct kmem_cache * s )
7032+ {
7033+ #ifdef CONFIG_SLUB_DEBUG
7034+ /*
7035+ * Debugging requires use of the padding between object
7036+ * and whatever may come after it.
7037+ */
7038+ if (s -> flags & (SLAB_RED_ZONE | SLAB_POISON ))
7039+ return s -> object_size ;
7040+ #endif
7041+ if (s -> flags & SLAB_KASAN )
7042+ return s -> object_size ;
7043+ /*
7044+ * If we have the need to store the freelist pointer
7045+ * back there or track user information then we can
7046+ * only use the space before that information.
7047+ */
7048+ if (s -> flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER ))
7049+ return s -> inuse ;
7050+ /*
7051+ * Else we can use all the padding etc for the allocation
7052+ */
7053+ return s -> size ;
7054+ }
7055+
/*
 * __ksize() -- Internal helper returning the full size of the
 * allocation backing @object.
 *
 * Returns 0 for ZERO_SIZE_PTR, the large-kmalloc size for page-backed
 * kmallocs, and the slab slot's usable size (slab_ksize()) otherwise.
 * Callers must have validated @object first (see ksize()).
 */
static size_t __ksize(const void *object)
{
	const struct page *page;
	const struct slab *slab;

	/* ZERO_SIZE_PTR is a valid zero-length "allocation". */
	if (unlikely(object == ZERO_SIZE_PTR))
		return 0;

	page = virt_to_page(object);

	/* Large kmallocs are backed by raw pages rather than slabs. */
	if (unlikely(PageLargeKmalloc(page)))
		return large_kmalloc_size(page);

	slab = page_slab(page);
	/* Delete this after we're sure there are no users */
	if (WARN_ON(!slab))
		return page_size(page);

#ifdef CONFIG_SLUB_DEBUG
	/*
	 * NOTE(review): presumably tells SLUB debugging not to flag
	 * reads beyond the originally requested kmalloc size for this
	 * object, since ksize() callers may use the whole slot —
	 * confirm against skip_orig_size_check()'s definition.
	 */
	skip_orig_size_check(slab->slab_cache, object);
#endif

	return slab_ksize(slab->slab_cache);
}
7080+
7081+ /**
7082+ * ksize -- Report full size of underlying allocation
7083+ * @objp: pointer to the object
7084+ *
7085+ * This should only be used internally to query the true size of allocations.
7086+ * It is not meant to be a way to discover the usable size of an allocation
7087+ * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
7088+ * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
7089+ * and/or FORTIFY_SOURCE.
7090+ *
7091+ * Return: size of the actual memory used by @objp in bytes
7092+ */
7093+ size_t ksize (const void * objp )
7094+ {
7095+ /*
7096+ * We need to first check that the pointer to the object is valid.
7097+ * The KASAN report printed from ksize() is more useful, then when
7098+ * it's printed later when the behaviour could be undefined due to
7099+ * a potential use-after-free or double-free.
7100+ *
7101+ * We use kasan_check_byte(), which is supported for the hardware
7102+ * tag-based KASAN mode, unlike kasan_check_read/write().
7103+ *
7104+ * If the pointed to memory is invalid, we return 0 to avoid users of
7105+ * ksize() writing to and potentially corrupting the memory region.
7106+ *
7107+ * We want to perform the check before __ksize(), to avoid potentially
7108+ * crashing in __ksize() due to accessing invalid metadata.
7109+ */
7110+ if (unlikely (ZERO_OR_NULL_PTR (objp )) || !kasan_check_byte (objp ))
7111+ return 0 ;
7112+
7113+ return kfence_ksize (objp ) ?: __ksize (objp );
7114+ }
7115+ EXPORT_SYMBOL (ksize );
7116+
70317117static void free_large_kmalloc (struct page * page , void * object )
70327118{
70337119 unsigned int order = compound_order (page );
0 commit comments