author     Linus Torvalds <torvalds@linux-foundation.org>  2022-05-25 10:24:04 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-05-25 10:24:04 -0700
commit     2e17ce1106e04a7f3a83796ec623881487f75dd3 (patch)
tree       77fc467d4ea8e17a4cb9d2b9dad6e6066ce6dea8 /lib
parent     caa28984163cb63ea0be4cb8dbf05defdc7303f9 (diff)
parent     e001897da62eb543ef79bf173f145c2c9677f66c (diff)
Merge tag 'slab-for-5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab updates from Vlastimil Babka:

 - Conversion of slub_debug stack traces to stackdepot, allowing more
   useful debugfs-based inspection for e.g. memory leak debugging.
   Allocation and free debugfs info now includes full traces and is
   sorted by the unique trace frequency.

   The stackdepot conversion was already attempted last year but
   reverted by ae14c63a9f20. The memory overhead (while not actually
   enabled on boot) has been meanwhile solved by making the large
   stackdepot allocation dynamic. The xfstest issues haven't been
   reproduced on current kernel locally nor in -next, so the slab cache
   layout changes that originally made that bug manifest were probably
   not the root cause.

 - Refactoring of dma-kmalloc caches creation.

 - Trivial cleanups such as removal of unused parameters, fixes and
   clarifications of comments.

 - Hyeonggon Yoo joins as a reviewer.

* tag 'slab-for-5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  MAINTAINERS: add myself as reviewer for slab
  mm/slub: remove unused kmem_cache_order_objects max
  mm: slab: fix comment for __assume_kmalloc_alignment
  mm: slab: fix comment for ARCH_KMALLOC_MINALIGN
  mm/slub: remove unneeded return value of slab_pad_check
  mm/slab_common: move dma-kmalloc caches creation into new_kmalloc_cache()
  mm/slub: remove meaningless node check in ___slab_alloc()
  mm/slub: remove duplicate flag in allocate_slab()
  mm/slub: remove unused parameter in setup_object*()
  mm/slab.c: fix comments
  slab, documentation: add description of debugfs files for SLUB caches
  mm/slub: sort debugfs output by frequency of stack traces
  mm/slub: distinguish and print stack traces in debugfs files
  mm/slub: use stackdepot to save stack trace in objects
  mm/slub: move struct track init out of set_track()
  lib/stackdepot: allow requesting early initialization dynamically
  mm/slub, kunit: Make slub_kunit unaffected by user specified flags
  mm/slab: remove some unused functions
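The "lib/stackdepot: allow requesting early initialization dynamically" patch in that shortlog is what the lib/stackdepot.c hunk below implements. As a hedged illustration only (not code from this merge), a debugging feature that is switched on by an early boot parameter could request the memblock-backed hash table roughly like this; the feature name and parameter are invented, while stack_depot_want_early_init() and the mm_init() ordering are taken from the diff (the matching <linux/stackdepot.h> declaration is outside the lib/-only diffstat shown here):

/*
 * Hypothetical example, not part of this merge: a boot-parameter-
 * controlled debug feature asks stackdepot to allocate its hash table
 * early, from memblock, instead of taking the lazy kvcalloc() path.
 */
#include <linux/init.h>
#include <linux/stackdepot.h>

static bool my_feature_enabled __initdata;	/* invented for illustration */

static int __init my_feature_setup(char *str)
{
	my_feature_enabled = true;
	/*
	 * Early params are parsed before mm_init(), which is where
	 * stack_depot_early_init() runs exactly once; requesting early
	 * init any later would trip the WARN_ON() in the hunk below.
	 */
	stack_depot_want_early_init();
	return 0;
}
early_param("my_feature_debug", my_feature_setup);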
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug   1
-rw-r--r--  lib/slub_kunit.c   10
-rw-r--r--  lib/stackdepot.c   67
3 files changed, 51 insertions, 27 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index a30d5279efda..388fb808ff18 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -710,6 +710,7 @@ config DEBUG_SLAB
config SLUB_DEBUG_ON
bool "SLUB debugging on by default"
depends on SLUB && SLUB_DEBUG
+ select STACKDEPOT_ALWAYS_INIT if STACKTRACE_SUPPORT
default n
help
Boot with debugging on by default. SLUB boots by default with
diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
index 8662dc6cb509..7a0564d7cb7a 100644
--- a/lib/slub_kunit.c
+++ b/lib/slub_kunit.c
@@ -12,7 +12,7 @@ static int slab_errors;
static void test_clobber_zone(struct kunit *test)
{
struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_alloc", 64, 0,
- SLAB_RED_ZONE, NULL);
+ SLAB_RED_ZONE|SLAB_NO_USER_FLAGS, NULL);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kasan_disable_current();
@@ -30,7 +30,7 @@ static void test_clobber_zone(struct kunit *test)
static void test_next_pointer(struct kunit *test)
{
struct kmem_cache *s = kmem_cache_create("TestSlub_next_ptr_free", 64, 0,
- SLAB_POISON, NULL);
+ SLAB_POISON|SLAB_NO_USER_FLAGS, NULL);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
unsigned long tmp;
unsigned long *ptr_addr;
@@ -75,7 +75,7 @@ static void test_next_pointer(struct kunit *test)
static void test_first_word(struct kunit *test)
{
struct kmem_cache *s = kmem_cache_create("TestSlub_1th_word_free", 64, 0,
- SLAB_POISON, NULL);
+ SLAB_POISON|SLAB_NO_USER_FLAGS, NULL);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kmem_cache_free(s, p);
@@ -90,7 +90,7 @@ static void test_first_word(struct kunit *test)
static void test_clobber_50th_byte(struct kunit *test)
{
struct kmem_cache *s = kmem_cache_create("TestSlub_50th_word_free", 64, 0,
- SLAB_POISON, NULL);
+ SLAB_POISON|SLAB_NO_USER_FLAGS, NULL);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kmem_cache_free(s, p);
@@ -106,7 +106,7 @@ static void test_clobber_50th_byte(struct kunit *test)
static void test_clobber_redzone_free(struct kunit *test)
{
struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_free", 64, 0,
- SLAB_RED_ZONE, NULL);
+ SLAB_RED_ZONE|SLAB_NO_USER_FLAGS, NULL);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kasan_disable_current();
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index bf5ba9af0500..5ca0d086ef4a 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -66,6 +66,9 @@ struct stack_record {
unsigned long entries[]; /* Variable-sized array of entries. */
};
+static bool __stack_depot_want_early_init __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
+static bool __stack_depot_early_init_passed __initdata;
+
static void *stack_slabs[STACK_ALLOC_MAX_SLABS];
static int depot_index;
@@ -162,38 +165,58 @@ static int __init is_stack_depot_disabled(char *str)
}
early_param("stack_depot_disable", is_stack_depot_disabled);
-/*
- * __ref because of memblock_alloc(), which will not be actually called after
- * the __init code is gone, because at that point slab_is_available() is true
- */
-__ref int stack_depot_init(void)
+void __init stack_depot_want_early_init(void)
+{
+ /* Too late to request early init now */
+ WARN_ON(__stack_depot_early_init_passed);
+
+ __stack_depot_want_early_init = true;
+}
+
+int __init stack_depot_early_init(void)
+{
+ size_t size;
+
+ /* This is supposed to be called only once, from mm_init() */
+ if (WARN_ON(__stack_depot_early_init_passed))
+ return 0;
+
+ __stack_depot_early_init_passed = true;
+
+ if (!__stack_depot_want_early_init || stack_depot_disable)
+ return 0;
+
+ size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
+ pr_info("Stack Depot early init allocating hash table with memblock_alloc, %zu bytes\n",
+ size);
+ stack_table = memblock_alloc(size, SMP_CACHE_BYTES);
+
+ if (!stack_table) {
+ pr_err("Stack Depot hash table allocation failed, disabling\n");
+ stack_depot_disable = true;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int stack_depot_init(void)
{
static DEFINE_MUTEX(stack_depot_init_mutex);
+ int ret = 0;
mutex_lock(&stack_depot_init_mutex);
if (!stack_depot_disable && !stack_table) {
- size_t size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
- int i;
-
- if (slab_is_available()) {
- pr_info("Stack Depot allocating hash table with kvmalloc\n");
- stack_table = kvmalloc(size, GFP_KERNEL);
- } else {
- pr_info("Stack Depot allocating hash table with memblock_alloc\n");
- stack_table = memblock_alloc(size, SMP_CACHE_BYTES);
- }
- if (stack_table) {
- for (i = 0; i < STACK_HASH_SIZE; i++)
- stack_table[i] = NULL;
- } else {
+ pr_info("Stack Depot allocating hash table with kvcalloc\n");
+ stack_table = kvcalloc(STACK_HASH_SIZE, sizeof(struct stack_record *), GFP_KERNEL);
+ if (!stack_table) {
pr_err("Stack Depot hash table allocation failed, disabling\n");
stack_depot_disable = true;
- mutex_unlock(&stack_depot_init_mutex);
- return -ENOMEM;
+ ret = -ENOMEM;
}
}
mutex_unlock(&stack_depot_init_mutex);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(stack_depot_init);
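For contrast with the early path, here is a rough sketch of the lazy path the rewritten stack_depot_init() keeps serving: a caller that only needs the depot once slab is available (this is the shape of the SLUB debugfs usage the rest of the series adds, per the shortlog) initializes the hash table once and then stores compact depot handles instead of raw trace arrays. The record_caller()/dump_caller() helpers and MY_STACK_DEPTH are invented for illustration; stack_depot_init(), stack_depot_save(), stack_depot_fetch(), stack_trace_save() and stack_trace_print() are existing kernel APIs.

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

#define MY_STACK_DEPTH 16	/* arbitrary capture depth for the example */

/* Call once from e.g. cache or feature setup, after slab is available. */
static int my_feature_init(void)
{
	return stack_depot_init();
}

/* Save the current call chain and get back a small reusable handle. */
static depot_stack_handle_t record_caller(gfp_t gfp)
{
	unsigned long entries[MY_STACK_DEPTH];
	unsigned int nr;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
	return stack_depot_save(entries, nr, gfp);
}

/* Resolve a handle back into a trace and print it. */
static void dump_caller(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr;

	nr = stack_depot_fetch(handle, &entries);
	stack_trace_print(entries, nr, 0);
}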