path: root/mm/slab.c
author    Vlastimil Babka <vbabka@suse.cz>    2023-07-18 10:16:05 +0200
committer Vlastimil Babka <vbabka@suse.cz>    2023-08-29 11:23:04 +0200
commit    3d053e8060430b86bad0854b7c7f03f15be3a7e5 (patch)
tree      83e0b8aad30b8090b2497991b221b6d69407b2b2 /mm/slab.c
parent    1662b6c2bb7e7502d6ae4b6aca4116e844a4277c (diff)
parent    3c6152940584290668b35fa0800026f6a1ae05fe (diff)
download  linux-starfive-3d053e8060430b86bad0854b7c7f03f15be3a7e5.tar.gz
          linux-starfive-3d053e8060430b86bad0854b7c7f03f15be3a7e5.tar.bz2
          linux-starfive-3d053e8060430b86bad0854b7c7f03f15be3a7e5.zip
Merge branch 'slab/for-6.6/random_kmalloc' into slab/for-next
Merge the new hardening feature to make heap spraying harder, by GONG, Ruiqi. It creates multiple (16) copies of the kmalloc caches, reducing the chance that an attacker-controllable allocation site lands in the same slab as, e.g., an allocation site with a use-after-free vulnerability. The copy is selected based on the allocation site's address, mixed with a per-boot random seed. In line with the SLAB deprecation, this is a SLUB-only feature, incompatible with SLUB_TINY due to the memory overhead of the extra cache copies.
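For context, SLUB derives the cache-copy index by hashing the allocation site's return address together with the per-boot seed. A minimal sketch of that selection, assuming the upstream random_kmalloc_seed / hash_64() naming; simplified for illustration, not the verbatim kernel code:

/*
 * Hedged sketch of the cache-copy selection (assumes the upstream
 * CONFIG_RANDOM_KMALLOC_CACHES naming; not the exact kernel code).
 */
#include <linux/hash.h>
#include <linux/log2.h>

#define RANDOM_KMALLOC_CACHES_NR 15	/* 15 extra copies + the original = 16 */

extern unsigned long random_kmalloc_seed;	/* filled with random bits at boot */

static inline unsigned int random_kmalloc_index(unsigned long caller)
{
	/*
	 * Mix the allocation site address with the per-boot seed, then
	 * fold the result down to ilog2(16) = 4 bits: an index in [0, 15].
	 */
	return hash_64(caller ^ random_kmalloc_seed,
		       ilog2(RANDOM_KMALLOC_CACHES_NR + 1));
}

Because the seed is regenerated every boot, the mapping from a given allocation site to one of the 16 copies cannot be predicted across reboots.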
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mm/slab.c b/mm/slab.c
index 88194391d553..9ad3d0f2d1a5 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1670,7 +1670,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 			if (freelist_size > KMALLOC_MAX_CACHE_SIZE) {
 				freelist_cache_size = PAGE_SIZE << get_order(freelist_size);
 			} else {
-				freelist_cache = kmalloc_slab(freelist_size, 0u);
+				freelist_cache = kmalloc_slab(freelist_size, 0u, _RET_IP_);
 				if (!freelist_cache)
 					continue;
 				freelist_cache_size = freelist_cache->size;
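SLAB itself does not use the randomization (the feature is SLUB-only), but the shared kmalloc_slab() lookup helper gained a caller parameter, so this SLAB-internal call site now forwards its return address. A sketch of the shape, with the three-argument prototype implied by the hunk above:

/* The extra caller argument lets SLUB pick a randomized cache copy;
 * SLAB callers simply forward their return address via _RET_IP_. */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags, unsigned long caller);

freelist_cache = kmalloc_slab(freelist_size, 0u, _RET_IP_);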