path: root/mm/slab_common.c
author     Hyeonggon Yoo <42.hyeyoo@gmail.com>   2022-08-17 19:18:24 +0900
committer  Vlastimil Babka <vbabka@suse.cz>      2022-09-01 11:44:26 +0200
commit     2c1d697fb8ba6d2d44f914d4268ae1ccdf025f1b
tree       a2e3b945b6a96e59c319083739023ba52e932fcb /mm/slab_common.c
parent     11e9734bcb6a7361943f993eba4e97f5812120d8
mm/slab_common: drop kmem_alloc & avoid dereferencing fields when not using
Drop the kmem_alloc event class and define kmalloc and kmem_cache_alloc
using the TRACE_EVENT() macro. With that in place, this patch also does
the following:

 - Do not pass a pointer to struct kmem_cache to trace_kmalloc; the gfp
   flags are enough to know whether the allocation is accounted or not.
 - Avoid dereferencing s->object_size and s->size when not using the
   kmem_cache_alloc event.
 - Avoid dereferencing s->name when not using the kmem_cache_free event.
 - Adjust s->size to SLOB_UNITS(s->size) * SLOB_UNIT in SLOB.

Cc: Vasily Averin <vasily.averin@linux.dev>
Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
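For context, here is a minimal sketch of what a standalone kmalloc tracepoint could look like once the struct kmem_cache argument is dropped, with the "accounted" output derived purely from the gfp flags. This follows the usual TRACE_EVENT() conventions of include/trace/events/kmem.h but is an illustration only; the exact field layout and format string are defined by the patch itself, not reproduced here.

/*
 * Sketch only: kmalloc as a standalone TRACE_EVENT() instead of the old
 * kmem_alloc event class.  No struct kmem_cache pointer is passed;
 * whether the allocation is memcg-accounted is derived from the gfp
 * flags alone at print time.
 */
TRACE_EVENT(kmalloc,

	TP_PROTO(unsigned long call_site, const void *ptr, size_t bytes_req,
		 size_t bytes_alloc, gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

	TP_STRUCT__entry(
		__field(unsigned long,	call_site)
		__field(const void *,	ptr)
		__field(size_t,		bytes_req)
		__field(size_t,		bytes_alloc)
		__field(unsigned long,	gfp_flags)
		__field(int,		node)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
		__entry->node		= node;
	),

	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
		  (void *)__entry->call_site, __entry->ptr,
		  __entry->bytes_req, __entry->bytes_alloc,
		  show_gfp_flags(__entry->gfp_flags), __entry->node,
		  (IS_ENABLED(CONFIG_MEMCG_KMEM) &&
		   (__entry->gfp_flags & (__force unsigned long)__GFP_ACCOUNT)) ?
			"true" : "false")
);

With a definition along these lines, the call sites in the diff below only need to pass the return address, result pointer, requested size, allocated size, gfp flags, and node, which is exactly what the hunks change.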
Diffstat (limited to 'mm/slab_common.c')
-rw-r--r--  mm/slab_common.c | 16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 3d7ad992ece1..ad4c36fb697c 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -907,7 +907,7 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller
if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
ret = __kmalloc_large_node(size, flags, node);
- trace_kmalloc(_RET_IP_, ret, NULL, size,
+ trace_kmalloc(_RET_IP_, ret, size,
PAGE_SIZE << get_order(size), flags, node);
return ret;
}
@@ -919,7 +919,7 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller
ret = __kmem_cache_alloc_node(s, flags, node, size, caller);
ret = kasan_kmalloc(s, ret, size, flags);
- trace_kmalloc(_RET_IP_, ret, s, size, s->size, flags, node);
+ trace_kmalloc(_RET_IP_, ret, size, s->size, flags, node);
return ret;
}
@@ -1005,7 +1005,7 @@ void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
void *ret = __kmem_cache_alloc_node(s, gfpflags, NUMA_NO_NODE,
size, _RET_IP_);
- trace_kmalloc(_RET_IP_, ret, s, size, s->size, gfpflags, NUMA_NO_NODE);
+ trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);
ret = kasan_kmalloc(s, ret, size, gfpflags);
return ret;
@@ -1017,7 +1017,7 @@ void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
{
void *ret = __kmem_cache_alloc_node(s, gfpflags, node, size, _RET_IP_);
- trace_kmalloc(_RET_IP_, ret, s, size, s->size, gfpflags, node);
+ trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);
ret = kasan_kmalloc(s, ret, size, gfpflags);
return ret;
@@ -1072,8 +1072,8 @@ void *kmalloc_large(size_t size, gfp_t flags)
{
void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);
- trace_kmalloc(_RET_IP_, ret, NULL, size,
- PAGE_SIZE << get_order(size), flags, NUMA_NO_NODE);
+ trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
+ flags, NUMA_NO_NODE);
return ret;
}
EXPORT_SYMBOL(kmalloc_large);
@@ -1082,8 +1082,8 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node)
{
void *ret = __kmalloc_large_node(size, flags, node);
- trace_kmalloc(_RET_IP_, ret, NULL, size,
- PAGE_SIZE << get_order(size), flags, node);
+ trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
+ flags, node);
return ret;
}
EXPORT_SYMBOL(kmalloc_large_node);
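The diffstat above is limited to mm/slab_common.c; the SLOB size adjustment mentioned in the commit message lands in mm/slob.c. A rough sketch of that idea, assuming the rounding happens at cache creation time in SLOB's __kmem_cache_create() so the size handed to the tracepoints matches what SLOB actually allocates:

/*
 * Sketch only (the real change lives in mm/slob.c, outside this diff):
 * round the object size up to whole SLOB units so the bytes_alloc value
 * reported by the allocation tracepoints reflects SLOB's granularity.
 */
int __kmem_cache_create(struct kmem_cache *c, slab_flags_t flags)
{
	if (flags & SLAB_TYPESAFE_BY_RCU) {
		/* leave room for rcu footer at the end of object */
		c->size += sizeof(struct slob_rcu);
	}

	c->size = SLOB_UNITS(c->size) * SLOB_UNIT;

	c->flags = flags;
	return 0;
}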