-rw-r--r-- | mm/kmemleak.c | 13 |
1 files changed, 9 insertions, 4 deletions
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 31f01c5011e5..474379ea3cff 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -218,7 +218,8 @@ static int kmemleak_stack_scan = 1;
 static DEFINE_MUTEX(scan_mutex);
 /* setting kmemleak=on, will set this var, skipping the disable */
 static int kmemleak_skip_disable;
-
+/* If there are leaks that can be reported */
+static bool kmemleak_found_leaks;
 
 /*
  * Early object allocation/freeing logging. Kmemleak is initialized after the
@@ -1382,9 +1383,12 @@ static void kmemleak_scan(void)
 	}
 	rcu_read_unlock();
 
-	if (new_leaks)
+	if (new_leaks) {
+		kmemleak_found_leaks = true;
+
 		pr_info("%d new suspected memory leaks (see "
 			"/sys/kernel/debug/kmemleak)\n", new_leaks);
+	}
 
 }
 
@@ -1592,6 +1596,8 @@ static void kmemleak_clear(void)
 		spin_unlock_irqrestore(&object->lock, flags);
 	}
 	rcu_read_unlock();
+
+	kmemleak_found_leaks = false;
 }
 
 /*
@@ -1685,12 +1691,11 @@ static const struct file_operations kmemleak_fops = {
 static void kmemleak_do_cleanup(struct work_struct *work)
 {
 	struct kmemleak_object *object;
-	bool cleanup = scan_thread == NULL;
 
 	mutex_lock(&scan_mutex);
 	stop_scan_thread();
 
-	if (cleanup) {
+	if (!kmemleak_found_leaks) {
 		rcu_read_lock();
 		list_for_each_entry_rcu(object, &object_list, object_list)
 			delete_object_full(object->pointer);