Skip to content
This repository was archived by the owner on Dec 20, 2023. It is now read-only.

Commit d56791b

Browse files
Roman Bobniev authored and Pekka Enberg committed
slub: proper kmemleak tracking if CONFIG_SLUB_DEBUG disabled
Move all kmemleak calls into hook functions, and make it so that all hooks (both inside and outside of #ifdef CONFIG_SLUB_DEBUG) call the appropriate kmemleak routines. This allows for kmemleak to be configured independently of slub debug features. It also fixes a bug where kmemleak was only partially enabled in some configurations. Acked-by: Catalin Marinas <[email protected]> Acked-by: Christoph Lameter <[email protected]> Signed-off-by: Roman Bobniev <[email protected]> Signed-off-by: Tim Bird <[email protected]> Signed-off-by: Pekka Enberg <[email protected]>
1 parent 6e46645 commit d56791b

File tree

1 file changed

+31
-4
lines changed

1 file changed

+31
-4
lines changed

mm/slub.c

Lines changed: 31 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -928,6 +928,16 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
928928
* Hooks for other subsystems that check memory allocations. In a typical
929929
* production configuration these hooks all should produce no code at all.
930930
*/
931+
static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
932+
{
933+
kmemleak_alloc(ptr, size, 1, flags);
934+
}
935+
936+
static inline void kfree_hook(const void *x)
937+
{
938+
kmemleak_free(x);
939+
}
940+
931941
static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
932942
{
933943
flags &= gfp_allowed_mask;
@@ -1253,13 +1263,30 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
12531263
static inline void dec_slabs_node(struct kmem_cache *s, int node,
12541264
int objects) {}
12551265

1266+
static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
1267+
{
1268+
kmemleak_alloc(ptr, size, 1, flags);
1269+
}
1270+
1271+
static inline void kfree_hook(const void *x)
1272+
{
1273+
kmemleak_free(x);
1274+
}
1275+
12561276
static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
12571277
{ return 0; }
12581278

12591279
static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
1260-
void *object) {}
1280+
void *object)
1281+
{
1282+
kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
1283+
flags & gfp_allowed_mask);
1284+
}
12611285

1262-
static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
1286+
static inline void slab_free_hook(struct kmem_cache *s, void *x)
1287+
{
1288+
kmemleak_free_recursive(x, s->flags);
1289+
}
12631290

12641291
#endif /* CONFIG_SLUB_DEBUG */
12651292

@@ -3265,7 +3292,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
32653292
if (page)
32663293
ptr = page_address(page);
32673294

3268-
kmemleak_alloc(ptr, size, 1, flags);
3295+
kmalloc_large_node_hook(ptr, size, flags);
32693296
return ptr;
32703297
}
32713298

@@ -3365,7 +3392,7 @@ void kfree(const void *x)
33653392
page = virt_to_head_page(x);
33663393
if (unlikely(!PageSlab(page))) {
33673394
BUG_ON(!PageCompound(page));
3368-
kmemleak_free(x);
3395+
kfree_hook(x);
33693396
__free_memcg_kmem_pages(page, compound_order(page));
33703397
return;
33713398
}

0 commit comments

Comments (0)