author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2023-02-06 08:35:30 +0100
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2023-02-06 08:35:30 +0100
commit     d38e781ea035456c742605fb21d0dd3c755b7b0b (patch)
tree       53bda2561a47f747ab4caf1119993cc181780db7 /lib
parent     f5b3c341a46ec55d93332ee5c254a278af902ffe (diff)
parent     4ec5183ec48656cec489c49f989c508b68b518e3 (diff)
download   linux-rpi-d38e781ea035456c742605fb21d0dd3c755b7b0b.tar.gz
           linux-rpi-d38e781ea035456c742605fb21d0dd3c755b7b0b.tar.bz2
           linux-rpi-d38e781ea035456c742605fb21d0dd3c755b7b0b.zip
Merge 6.2-rc7 into char-misc-next
We need the char-misc driver fixes in here as other patches depend on
them.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig.debug      14
-rw-r--r--   lib/Kconfig.kcsan       2
-rw-r--r--   lib/dec_and_lock.c     31
-rw-r--r--   lib/kunit/assert.c     40
-rw-r--r--   lib/kunit/test.c        1
-rw-r--r--   lib/maple_tree.c       22
-rw-r--r--   lib/memcpy_kunit.c      2
-rw-r--r--   lib/nlattr.c            3
-rw-r--r--   lib/test_maple_tree.c  89
9 files changed, 175 insertions, 29 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 881c3f84e88a..02ee440f7be3 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -754,6 +754,7 @@ config DEBUG_KMEMLEAK
 	select KALLSYMS
 	select CRC32
 	select STACKDEPOT
+	select STACKDEPOT_ALWAYS_INIT if !DEBUG_KMEMLEAK_DEFAULT_OFF
 	help
 	  Say Y here if you want to enable the memory leak
 	  detector. The memory allocation/freeing is traced in a way
@@ -1207,7 +1208,7 @@ config SCHED_DEBUG
 	depends on DEBUG_KERNEL && PROC_FS
 	default y
 	help
-	  If you say Y here, the /proc/sched_debug file will be provided
+	  If you say Y here, the /sys/kernel/debug/sched file will be provided
 	  that can help debug the scheduler. The runtime overhead of this
 	  option is minimal.
 
@@ -1917,7 +1918,7 @@ config FUNCTION_ERROR_INJECTION
 	help
 	  Add fault injections into various functions that are annotated with
 	  ALLOW_ERROR_INJECTION() in the kernel. BPF may also modify the return
-	  value of theses functions. This is useful to test error paths of code.
+	  value of these functions. This is useful to test error paths of code.
 
 	  If unsure, say N
 
@@ -2566,6 +2567,15 @@ config MEMCPY_KUNIT_TEST
 
 	  If unsure, say N.
 
+config MEMCPY_SLOW_KUNIT_TEST
+	bool "Include exhaustive memcpy tests"
+	depends on MEMCPY_KUNIT_TEST
+	default y
+	help
+	  Some memcpy tests are quite exhaustive in checking for overlaps
+	  and bit ranges. These can be very slow, so they are split out
+	  as a separate config, in case they need to be disabled.
+
 config IS_SIGNED_TYPE_KUNIT_TEST
 	tristate "Test is_signed_type() macro" if !KUNIT_ALL_TESTS
 	depends on KUNIT
diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
index 375575a5a0e3..4dedd61e5192 100644
--- a/lib/Kconfig.kcsan
+++ b/lib/Kconfig.kcsan
@@ -194,7 +194,7 @@ config KCSAN_WEAK_MEMORY
 	  Enable support for modeling a subset of weak memory, which allows
 	  detecting a subset of data races due to missing memory barriers.
 
-	  Depends on KCSAN_STRICT, because the options strenghtening certain
+	  Depends on KCSAN_STRICT, because the options strengthening certain
 	  plain accesses by default (depending on !KCSAN_STRICT) reduce the
 	  ability to detect any data races invoving reordered accesses, in
 	  particular reordered writes.
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index 9555b68bb774..1dcca8f2e194 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -49,3 +49,34 @@ int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
 	return 0;
 }
 EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
+
+int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock)
+{
+	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+	if (atomic_add_unless(atomic, -1, 1))
+		return 0;
+
+	/* Otherwise do it the slow way */
+	raw_spin_lock(lock);
+	if (atomic_dec_and_test(atomic))
+		return 1;
+	raw_spin_unlock(lock);
+	return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_raw_lock);
+
+int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
+				     unsigned long *flags)
+{
+	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+	if (atomic_add_unless(atomic, -1, 1))
+		return 0;
+
+	/* Otherwise do it the slow way */
+	raw_spin_lock_irqsave(lock, *flags);
+	if (atomic_dec_and_test(atomic))
+		return 1;
+	raw_spin_unlock_irqrestore(lock, *flags);
+	return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_raw_lock_irqsave);
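[Editor's note] The two new helpers follow the long-standing atomic_dec_and_lock() pattern, only with a raw_spinlock_t, presumably for callers whose lock must stay raw (e.g. under PREEMPT_RT, where spinlock_t can sleep). A minimal caller sketch; struct obj, obj_lock, and obj_put() are hypothetical names, not part of this diff:

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct obj {
	atomic_t refcnt;
	struct list_head node;
};

static DEFINE_RAW_SPINLOCK(obj_lock);

/* Drop a reference; on the 1 -> 0 transition the helper returns 1 with
 * the raw lock held and interrupts disabled, so teardown is serialized
 * against anyone who looks the object up under obj_lock. */
static void obj_put(struct obj *obj)
{
	unsigned long flags;

	if (_atomic_dec_and_raw_lock_irqsave(&obj->refcnt, &obj_lock, &flags)) {
		list_del(&obj->node);
		raw_spin_unlock_irqrestore(&obj_lock, flags);
		kfree(obj);
	}
}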
diff --git a/lib/kunit/assert.c b/lib/kunit/assert.c
index f5b50babe38d..05a09652f5a1 100644
--- a/lib/kunit/assert.c
+++ b/lib/kunit/assert.c
@@ -241,24 +241,34 @@ void kunit_mem_assert_format(const struct kunit_assert *assert,
 	mem_assert = container_of(assert, struct kunit_mem_assert,
 				  assert);
 
-	string_stream_add(stream,
-			  KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
-			  mem_assert->text->left_text,
-			  mem_assert->text->operation,
-			  mem_assert->text->right_text);
+	if (!mem_assert->left_value) {
+		string_stream_add(stream,
+				  KUNIT_SUBTEST_INDENT "Expected %s is not null, but is\n",
+				  mem_assert->text->left_text);
+	} else if (!mem_assert->right_value) {
+		string_stream_add(stream,
+				  KUNIT_SUBTEST_INDENT "Expected %s is not null, but is\n",
+				  mem_assert->text->right_text);
+	} else {
+		string_stream_add(stream,
+				  KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
+				  mem_assert->text->left_text,
+				  mem_assert->text->operation,
+				  mem_assert->text->right_text);
 
-	string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s ==\n",
-			  mem_assert->text->left_text);
-	kunit_assert_hexdump(stream, mem_assert->left_value,
-			     mem_assert->right_value, mem_assert->size);
+		string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s ==\n",
+				  mem_assert->text->left_text);
+		kunit_assert_hexdump(stream, mem_assert->left_value,
+				     mem_assert->right_value, mem_assert->size);
 
-	string_stream_add(stream, "\n");
+		string_stream_add(stream, "\n");
 
-	string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s ==\n",
-			  mem_assert->text->right_text);
-	kunit_assert_hexdump(stream, mem_assert->right_value,
-			     mem_assert->left_value, mem_assert->size);
+		string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s ==\n",
+				  mem_assert->text->right_text);
+		kunit_assert_hexdump(stream, mem_assert->right_value,
+				     mem_assert->left_value, mem_assert->size);
 
-	kunit_assert_print_msg(message, stream);
+		kunit_assert_print_msg(message, stream);
+	}
 }
 EXPORT_SYMBOL_GPL(kunit_mem_assert_format);
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index c9ebf975e56b..890ba5b3a981 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -21,6 +21,7 @@
 #include "try-catch-impl.h"
 
 DEFINE_STATIC_KEY_FALSE(kunit_running);
+EXPORT_SYMBOL_GPL(kunit_running);
 
 #if IS_BUILTIN(CONFIG_KUNIT)
 /*
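[Editor's note] Exporting kunit_running matters because kunit_get_current_test() is a static inline that modules compile into themselves, and it reads this static key before touching current. A simplified sketch of that consumer pattern, modelled on include/kunit/test-bug.h; treat the exact body as an assumption:

#include <linux/jump_label.h>
#include <linux/sched.h>

DECLARE_STATIC_KEY_FALSE(kunit_running);

/* Fast-path check: when no KUnit test is running, the static branch is
 * patched out and this returns NULL at near-zero cost. */
static inline struct kunit *kunit_get_current_test(void)
{
	if (!static_branch_unlikely(&kunit_running))
		return NULL;

	return current->kunit_test;
}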
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 26e2045d3cda..5a976393c9ae 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -670,12 +670,13 @@ static inline unsigned long mte_pivot(const struct maple_enode *mn,
 				      unsigned char piv)
 {
 	struct maple_node *node = mte_to_node(mn);
+	enum maple_type type = mte_node_type(mn);
 
-	if (piv >= mt_pivots[piv]) {
+	if (piv >= mt_pivots[type]) {
 		WARN_ON(1);
 		return 0;
 	}
-	switch (mte_node_type(mn)) {
+	switch (type) {
 	case maple_arange_64:
 		return node->ma64.pivot[piv];
 	case maple_range_64:
@@ -4887,7 +4888,7 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
 	unsigned long *pivots, *gaps;
 	void __rcu **slots;
 	unsigned long gap = 0;
-	unsigned long max, min, index;
+	unsigned long max, min;
 	unsigned char offset;
 
 	if (unlikely(mas_is_err(mas)))
@@ -4909,8 +4910,7 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
 
 	min = mas_safe_min(mas, pivots, --offset);
 	max = mas_safe_pivot(mas, pivots, offset, type);
-	index = mas->index;
-	while (index <= max) {
+	while (mas->index <= max) {
 		gap = 0;
 		if (gaps)
 			gap = gaps[offset];
@@ -4941,10 +4941,8 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
 		min = mas_safe_min(mas, pivots, offset);
 	}
 
-	if (unlikely(index > max)) {
-		mas_set_err(mas, -EBUSY);
-		return false;
-	}
+	if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
+		goto no_space;
 
 	if (unlikely(ma_is_leaf(type))) {
 		mas->offset = offset;
@@ -4961,9 +4959,11 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
 		return false;
 
 ascend:
-	if (mte_is_root(mas->node))
-		mas_set_err(mas, -EBUSY);
+	if (!mte_is_root(mas->node))
+		return false;
 
+no_space:
+	mas_set_err(mas, -EBUSY);
 	return false;
 }
 
diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c
index 89128551448d..887926f04731 100644
--- a/lib/memcpy_kunit.c
+++ b/lib/memcpy_kunit.c
@@ -309,6 +309,8 @@ static void set_random_nonzero(struct kunit *test, u8 *byte)
 
 static void init_large(struct kunit *test)
 {
+	if (!IS_ENABLED(CONFIG_MEMCPY_SLOW_KUNIT_TEST))
+		kunit_skip(test, "Slow test skipped. Enable with CONFIG_MEMCPY_SLOW_KUNIT_TEST=y");
 
 	/* Get many bit patterns. */
 	get_random_bytes(large_src, ARRAY_SIZE(large_src));
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 9055e8b4d144..489e15bde5c1 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/jiffies.h>
+#include <linux/nospec.h>
 #include <linux/skbuff.h>
 #include <linux/string.h>
 #include <linux/types.h>
@@ -381,6 +382,7 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
 	if (type <= 0 || type > maxtype)
 		return 0;
 
+	type = array_index_nospec(type, maxtype + 1);
 	pt = &policy[type];
 
 	BUG_ON(pt->type > NLA_TYPE_MAX);
@@ -596,6 +598,7 @@ static int __nla_validate_parse(const struct nlattr *head, int len,
 			}
 			continue;
 		}
+		type = array_index_nospec(type, maxtype + 1);
 		if (policy) {
 			int err = validate_nla(nla, maxtype, policy,
 					       validate, extack, depth);
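[Editor's note] The nlattr.c change applies the standard Spectre-v1 hardening pattern: a bounds check alone does not stop the CPU from speculatively indexing policy[] with an out-of-range type, so the index is clamped after the check. The generic shape of the pattern; read_checked() is an illustrative function, not from this diff:

#include <linux/errno.h>
#include <linux/nospec.h>

/* idx has passed the bounds check, but a mispredicted branch can still
 * execute arr[idx] speculatively; array_index_nospec() forces idx to 0
 * on that path, so no out-of-bounds data can reach the cache side channel. */
static int read_checked(const int *arr, unsigned int nr, unsigned int idx)
{
	if (idx >= nr)
		return -EINVAL;

	idx = array_index_nospec(idx, nr);
	return arr[idx];
}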
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 497fc93ccf9e..ec847bf4dcb4 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -2517,6 +2517,91 @@ static noinline void check_bnode_min_spanning(struct maple_tree *mt)
 	mt_set_non_kernel(0);
 }
 
+static noinline void check_empty_area_window(struct maple_tree *mt)
+{
+	unsigned long i, nr_entries = 20;
+	MA_STATE(mas, mt, 0, 0);
+
+	for (i = 1; i <= nr_entries; i++)
+		mtree_store_range(mt, i*10, i*10 + 9,
+				  xa_mk_value(i), GFP_KERNEL);
+
+	/* Create another hole besides the one at 0 */
+	mtree_store_range(mt, 160, 169, NULL, GFP_KERNEL);
+
+	/* Check lower bounds that don't fit */
+	rcu_read_lock();
+	MT_BUG_ON(mt, mas_empty_area_rev(&mas, 5, 90, 10) != -EBUSY);
+
+	mas_reset(&mas);
+	MT_BUG_ON(mt, mas_empty_area_rev(&mas, 6, 90, 5) != -EBUSY);
+
+	/* Check lower bound that does fit */
+	mas_reset(&mas);
+	MT_BUG_ON(mt, mas_empty_area_rev(&mas, 5, 90, 5) != 0);
+	MT_BUG_ON(mt, mas.index != 5);
+	MT_BUG_ON(mt, mas.last != 9);
+	rcu_read_unlock();
+
+	/* Check one gap that doesn't fit and one that does */
+	rcu_read_lock();
+	mas_reset(&mas);
+	MT_BUG_ON(mt, mas_empty_area_rev(&mas, 5, 217, 9) != 0);
+	MT_BUG_ON(mt, mas.index != 161);
+	MT_BUG_ON(mt, mas.last != 169);
+
+	/* Check one gap that does fit above the min */
+	mas_reset(&mas);
+	MT_BUG_ON(mt, mas_empty_area_rev(&mas, 100, 218, 3) != 0);
+	MT_BUG_ON(mt, mas.index != 216);
+	MT_BUG_ON(mt, mas.last != 218);
+
+	/* Check size that doesn't fit any gap */
+	mas_reset(&mas);
+	MT_BUG_ON(mt, mas_empty_area_rev(&mas, 100, 218, 16) != -EBUSY);
+
+	/*
+	 * Check size that doesn't fit the lower end of the window but
+	 * does fit the gap
+	 */
+	mas_reset(&mas);
+	MT_BUG_ON(mt, mas_empty_area_rev(&mas, 167, 200, 4) != -EBUSY);
+
+	/*
+	 * Check size that doesn't fit the upper end of the window but
+	 * does fit the gap
+	 */
+	mas_reset(&mas);
+	MT_BUG_ON(mt, mas_empty_area_rev(&mas, 100, 162, 4) != -EBUSY);
+
+	/* Check mas_empty_area forward */
+	mas_reset(&mas);
+	MT_BUG_ON(mt, mas_empty_area(&mas, 0, 100, 9) != 0);
+	MT_BUG_ON(mt, mas.index != 0);
+	MT_BUG_ON(mt, mas.last != 8);
+
+	mas_reset(&mas);
+	MT_BUG_ON(mt, mas_empty_area(&mas, 0, 100, 4) != 0);
+	MT_BUG_ON(mt, mas.index != 0);
+	MT_BUG_ON(mt, mas.last != 3);
+
+	mas_reset(&mas);
+	MT_BUG_ON(mt, mas_empty_area(&mas, 0, 100, 11) != -EBUSY);
+
+	mas_reset(&mas);
+	MT_BUG_ON(mt, mas_empty_area(&mas, 5, 100, 6) != -EBUSY);
+
+	mas_reset(&mas);
+	MT_BUG_ON(mt, mas_empty_area(&mas, 0, 8, 10) != -EBUSY);
+
+	mas_reset(&mas);
+	mas_empty_area(&mas, 100, 165, 3);
+
+	mas_reset(&mas);
+	MT_BUG_ON(mt, mas_empty_area(&mas, 100, 163, 6) != -EBUSY);
+	rcu_read_unlock();
+}
+
 static DEFINE_MTREE(tree);
 static int maple_tree_seed(void)
 {
@@ -2765,6 +2850,10 @@ static int maple_tree_seed(void)
 	check_bnode_min_spanning(&tree);
 	mtree_destroy(&tree);
 
+	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+	check_empty_area_window(&tree);
+	mtree_destroy(&tree);
+
 #if defined(BENCH)
 skip:
 #endif
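[Editor's note] As a reading aid for check_empty_area_window(): mas_empty_area_rev() walks the tree top-down looking for a gap of at least size slots that lies entirely within [min, max], and on success stores the chosen window in mas.index/mas.last. A sketch against the same layout the test builds, i.e. entries covering [10, 209] with holes at [0, 9] and [160, 169]; empty_area_rev_demo() is a hypothetical name:

#include <linux/maple_tree.h>
#include <linux/printk.h>

/* With min=100, max=218: the free space above 209 can hold 5 slots, but
 * no gap inside the window spans 16, so the second call must fail with
 * -EBUSY (the exact case the mas_rev_awalk() fix above repairs). */
static void empty_area_rev_demo(struct maple_tree *mt)
{
	MA_STATE(mas, mt, 0, 0);

	rcu_read_lock();
	if (mas_empty_area_rev(&mas, 100, 218, 5) == 0)
		pr_info("gap at [%lu, %lu]\n", mas.index, mas.last);

	mas_reset(&mas);
	if (mas_empty_area_rev(&mas, 100, 218, 16) == -EBUSY)
		pr_info("no 16-slot window, as expected\n");
	rcu_read_unlock();
}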