Diffstat (limited to 'lib')
 -rw-r--r--  lib/Kconfig                 7
 -rw-r--r--  lib/Kconfig.debug          31
 -rw-r--r--  lib/Kconfig.kasan           2
 -rw-r--r--  lib/Kconfig.ubsan          13
 -rw-r--r--  lib/Makefile                3
 -rw-r--r--  lib/find_bit.c             21
 -rw-r--r--  lib/find_bit_benchmark.c   21
 -rw-r--r--  lib/genalloc.c              2
 -rw-r--r--  lib/kstrtox.c              12
 -rw-r--r--  lib/list_debug.c            8
 -rw-r--r--  lib/lz4/lz4defs.h           2
 -rw-r--r--  lib/ref_tracker.c           5
 -rw-r--r--  lib/sbitmap.c              29
 -rw-r--r--  lib/stackdepot.c           46
 -rw-r--r--  lib/test_bitmap.c          37
 -rw-r--r--  lib/test_hash.c           259
 -rw-r--r--  lib/test_kasan.c            5
 -rw-r--r--  lib/test_meminit.c          1
 -rw-r--r--  lib/test_sysctl.c          22
 -rw-r--r--  lib/test_ubsan.c           22
 -rw-r--r--  lib/vsprintf.c             24
21 files changed, 336 insertions, 236 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index c20b68ad2bc3..c80fde816a7e 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -65,9 +65,6 @@ config GENERIC_STRNLEN_USER
 config GENERIC_NET_UTILS
 	bool
 
-config GENERIC_FIND_FIRST_BIT
-	bool
-
 source "lib/math/Kconfig"
 
 config NO_GENERIC_PCI_IOPORT_MAP
@@ -673,6 +670,10 @@ config STACKDEPOT
 	bool
 	select STACKTRACE
 
+config STACKDEPOT_ALWAYS_INIT
+	bool
+	select STACKDEPOT
+
 config STACK_HASH_ORDER
 	int "stack depot hash size (12 => 4KB, 20 => 1024KB)"
 	range 12 20
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index c77fe36bb3d8..14b89aa37c5c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1984,6 +1984,8 @@ config KCOV
 	bool "Code coverage for fuzzing"
 	depends on ARCH_HAS_KCOV
 	depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS
+	depends on !ARCH_WANTS_NO_INSTR || STACK_VALIDATION || \
+		   GCC_VERSION >= 120000 || CLANG_VERSION >= 130000
 	select DEBUG_FS
 	select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC
 	help
@@ -2222,12 +2224,11 @@ config TEST_RHASHTABLE
 
 	  If unsure, say N.
 
-config TEST_HASH
-	tristate "Perform selftest on hash functions"
+config TEST_SIPHASH
+	tristate "Perform selftest on siphash functions"
 	help
-	  Enable this option to test the kernel's integer (<linux/hash.h>),
-	  string (<linux/stringhash.h>), and siphash (<linux/siphash.h>)
-	  hash functions on boot (or module load).
+	  Enable this option to test the kernel's siphash (<linux/siphash.h>) hash
+	  functions on boot (or module load).
 
 	  This is intended to help people writing architecture-specific
 	  optimized versions.  If unsure, say N.
@@ -2371,6 +2372,25 @@ config BITFIELD_KUNIT
 
 	  If unsure, say N.
 
+config HASH_KUNIT_TEST
+	tristate "KUnit Test for integer hash functions" if !KUNIT_ALL_TESTS
+	depends on KUNIT
+	default KUNIT_ALL_TESTS
+	help
+	  Enable this option to test the kernel's string (<linux/stringhash.h>), and
+	  integer (<linux/hash.h>) hash functions on boot.
+
+	  KUnit tests run during boot and output the results to the debug log
+	  in TAP format (https://testanything.org/). Only useful for kernel devs
+	  running the KUnit test harness, and not intended for inclusion into a
+	  production build.
+
+	  For more information on KUnit and unit tests in general please refer
+	  to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+	  This is intended to help people writing architecture-specific
+	  optimized versions.  If unsure, say N.
+
 config RESOURCE_KUNIT_TEST
 	tristate "KUnit test for resource API"
 	depends on KUNIT
@@ -2502,6 +2522,7 @@ config TEST_KMOD
 	depends on m
 	depends on NETDEVICES && NET_CORE && INET # for TUN
 	depends on BLOCK
+	depends on PAGE_SIZE_LESS_THAN_256KB # for BTRFS
 	select TEST_LKM
 	select XFS_FS
 	select TUN
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index cdc842d090db..879757b6dd14 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -38,7 +38,7 @@ menuconfig KASAN
 		     CC_HAS_WORKING_NOSANITIZE_ADDRESS) || \
 		    HAVE_ARCH_KASAN_HW_TAGS
 	depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
-	select STACKDEPOT
+	select STACKDEPOT_ALWAYS_INIT
 	help
 	  Enables KASAN (KernelAddressSANitizer) - runtime memory debugger,
 	  designed to find out-of-bounds accesses and use-after-free bugs.
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index e5372a13511d..236c5cefc4cc 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -112,19 +112,6 @@ config UBSAN_UNREACHABLE
 	  This option enables -fsanitize=unreachable which checks for control
 	  flow reaching an expected-to-be-unreachable position.
 
-config UBSAN_OBJECT_SIZE
-	bool "Perform checking for accesses beyond the end of objects"
-	default UBSAN
-	# gcc hugely expands stack usage with -fsanitize=object-size
-	# https://lore.kernel.org/lkml/CAHk-=wjPasyJrDuwDnpHJS2TuQfExwe=px-SzLeN8GFMAQJPmQ@mail.gmail.com/
-	depends on !CC_IS_GCC
-	depends on $(cc-option,-fsanitize=object-size)
-	help
-	  This option enables -fsanitize=object-size which checks for accesses
-	  beyond the end of objects where the optimizer can determine both the
-	  object being operated on and its size, usually seen with bad downcasts,
-	  or access to struct members from NULL pointers.
-
 config UBSAN_BOOL
 	bool "Perform checking for non-boolean values used as boolean"
 	default UBSAN
diff --git a/lib/Makefile b/lib/Makefile
index b213a7bbf3fd..300f569c626b 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -61,7 +61,8 @@ obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
 obj-$(CONFIG_TEST_BITOPS) += test_bitops.o
 CFLAGS_test_bitops.o += -Werror
 obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
-obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
+obj-$(CONFIG_TEST_SIPHASH) += test_siphash.o
+obj-$(CONFIG_HASH_KUNIT_TEST) += test_hash.o
 obj-$(CONFIG_TEST_IDA) += test_ida.o
 obj-$(CONFIG_KASAN_KUNIT_TEST) += test_kasan.o
 CFLAGS_test_kasan.o += -fno-builtin
diff --git a/lib/find_bit.c b/lib/find_bit.c
index 0f8e2e369b1d..1b8e4b2a9cba 100644
--- a/lib/find_bit.c
+++ b/lib/find_bit.c
@@ -89,6 +89,27 @@ unsigned long _find_first_bit(const unsigned long *addr, unsigned long size)
 EXPORT_SYMBOL(_find_first_bit);
 #endif
 
+#ifndef find_first_and_bit
+/*
+ * Find the first set bit in two memory regions.
+ */
+unsigned long _find_first_and_bit(const unsigned long *addr1,
+				  const unsigned long *addr2,
+				  unsigned long size)
+{
+	unsigned long idx, val;
+
+	for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
+		val = addr1[idx] & addr2[idx];
+		if (val)
+			return min(idx * BITS_PER_LONG + __ffs(val), size);
+	}
+
+	return size;
+}
+EXPORT_SYMBOL(_find_first_and_bit);
+#endif
+
 #ifndef find_first_zero_bit
 /*
  * Find the first cleared bit in a memory region.
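The new find_first_and_bit() walks both bitmaps a word at a time, so callers no longer need to materialize a temporary bitmap_and() result first. A minimal usage sketch; the helper itself comes from this patch, but the wrapper function and mask names below are made up for illustration:

	#include <linux/bitops.h>

	/* Illustrative only: pick the lowest CPU that is both allowed and idle. */
	static unsigned int pick_first_allowed_idle(const unsigned long *allowed,
						    const unsigned long *idle,
						    unsigned int nbits)
	{
		/* Returns nbits when the intersection is empty. */
		return find_first_and_bit(allowed, idle, nbits);
	}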
diff --git a/lib/find_bit_benchmark.c b/lib/find_bit_benchmark.c
index 5637c5711db9..db904b57d4b8 100644
--- a/lib/find_bit_benchmark.c
+++ b/lib/find_bit_benchmark.c
@@ -49,6 +49,25 @@ static int __init test_find_first_bit(void *bitmap, unsigned long len)
 	return 0;
 }
 
+static int __init test_find_first_and_bit(void *bitmap, const void *bitmap2, unsigned long len)
+{
+	static DECLARE_BITMAP(cp, BITMAP_LEN) __initdata;
+	unsigned long i, cnt;
+	ktime_t time;
+
+	bitmap_copy(cp, bitmap, BITMAP_LEN);
+
+	time = ktime_get();
+	for (cnt = i = 0; i < len; cnt++) {
+		i = find_first_and_bit(cp, bitmap2, len);
+		__clear_bit(i, cp);
+	}
+	time = ktime_get() - time;
+	pr_err("find_first_and_bit: %18llu ns, %6ld iterations\n", time, cnt);
+
+	return 0;
+}
+
 static int __init test_find_next_bit(const void *bitmap, unsigned long len)
 {
 	unsigned long i, cnt;
@@ -129,6 +148,7 @@ static int __init find_bit_test(void)
 	 * traverse only part of bitmap to avoid soft lockup.
 	 */
 	test_find_first_bit(bitmap, BITMAP_LEN / 10);
+	test_find_first_and_bit(bitmap, bitmap2, BITMAP_LEN / 2);
 	test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);
 
 	pr_err("\nStart testing find_bit() with sparse bitmap\n");
@@ -145,6 +165,7 @@ static int __init find_bit_test(void)
 	test_find_next_zero_bit(bitmap, BITMAP_LEN);
 	test_find_last_bit(bitmap, BITMAP_LEN);
 	test_find_first_bit(bitmap, BITMAP_LEN);
+	test_find_first_and_bit(bitmap, bitmap2, BITMAP_LEN);
 	test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);
 
 	/*
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 9a57257988c7..00fc50d0a640 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -251,7 +251,7 @@ void gen_pool_destroy(struct gen_pool *pool)
 		list_del(&chunk->next_chunk);
 
 		end_bit = chunk_size(chunk) >> order;
-		bit = find_next_bit(chunk->bits, end_bit, 0);
+		bit = find_first_bit(chunk->bits, end_bit);
 		BUG_ON(bit < end_bit);
 
 		vfree(chunk);
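The genalloc change is behavior-preserving: find_first_bit(bits, size) returns the same result as find_next_bit(bits, size, 0), but goes through the dedicated first-bit implementation rather than the offset-based next-bit path. A sketch of the contract the BUG_ON above relies on (the helper name is made up):

	#include <linux/bitops.h>

	/* Both spellings return the index of the lowest set bit, or 'size'
	 * if no bit is set, so an empty chunk satisfies first >= size. */
	static bool bitmap_all_clear(const unsigned long *bits, unsigned long size)
	{
		return find_first_bit(bits, size) >= size;
	}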
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index 059b8b00dc53..886510d248e5 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -22,6 +22,7 @@
 
 #include "kstrtox.h"
 
+noinline
 const char *_parse_integer_fixup_radix(const char *s, unsigned int *base)
 {
 	if (*base == 0) {
@@ -47,6 +48,7 @@ const char *_parse_integer_fixup_radix(const char *s, unsigned int *base)
  *
  * Don't you dare use this function.
  */
+noinline
 unsigned int _parse_integer_limit(const char *s, unsigned int base, unsigned long long *p,
 				  size_t max_chars)
 {
@@ -85,6 +87,7 @@ unsigned int _parse_integer_limit(const char *s, unsigned int base, unsigned lon
 	return rv;
 }
 
+noinline
 unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *p)
 {
 	return _parse_integer_limit(s, base, p, INT_MAX);
@@ -125,6 +128,7 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
 * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
 * Preferred over simple_strtoull(). Return code must be checked.
 */
+noinline
 int kstrtoull(const char *s, unsigned int base, unsigned long long *res)
 {
 	if (s[0] == '+')
@@ -148,6 +152,7 @@ EXPORT_SYMBOL(kstrtoull);
 * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
 * Preferred over simple_strtoll(). Return code must be checked.
 */
+noinline
 int kstrtoll(const char *s, unsigned int base, long long *res)
 {
 	unsigned long long tmp;
@@ -219,6 +224,7 @@ EXPORT_SYMBOL(_kstrtol);
 * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
 * Preferred over simple_strtoul(). Return code must be checked.
 */
+noinline
 int kstrtouint(const char *s, unsigned int base, unsigned int *res)
 {
 	unsigned long long tmp;
@@ -249,6 +255,7 @@ EXPORT_SYMBOL(kstrtouint);
 * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
 * Preferred over simple_strtol(). Return code must be checked.
 */
+noinline
 int kstrtoint(const char *s, unsigned int base, int *res)
 {
 	long long tmp;
@@ -264,6 +271,7 @@ int kstrtoint(const char *s, unsigned int base, int *res)
 }
 EXPORT_SYMBOL(kstrtoint);
 
+noinline
 int kstrtou16(const char *s, unsigned int base, u16 *res)
 {
 	unsigned long long tmp;
@@ -279,6 +287,7 @@ int kstrtou16(const char *s, unsigned int base, u16 *res)
 }
 EXPORT_SYMBOL(kstrtou16);
 
+noinline
 int kstrtos16(const char *s, unsigned int base, s16 *res)
 {
 	long long tmp;
@@ -294,6 +303,7 @@ int kstrtos16(const char *s, unsigned int base, s16 *res)
 }
 EXPORT_SYMBOL(kstrtos16);
 
+noinline
 int kstrtou8(const char *s, unsigned int base, u8 *res)
 {
 	unsigned long long tmp;
@@ -309,6 +319,7 @@ int kstrtou8(const char *s, unsigned int base, u8 *res)
 }
 EXPORT_SYMBOL(kstrtou8);
 
+noinline
 int kstrtos8(const char *s, unsigned int base, s8 *res)
 {
 	long long tmp;
@@ -333,6 +344,7 @@ EXPORT_SYMBOL(kstrtos8);
 * [oO][NnFf] for "on" and "off". Otherwise it will return -EINVAL.  Value
 * pointed to by res is updated upon finding a match.
 */
+noinline
 int kstrtobool(const char *s, bool *res)
 {
 	if (!s)
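The kstrto*() contract annotated above, in miniature; the caller below is hypothetical, only the helpers themselves come from kstrtox.c:

	#include <linux/kernel.h>

	static int parse_port(const char *s, u16 *port)
	{
		/* base 0 auto-detects 0x/0 prefixes; returns -ERANGE on
		 * overflow or -EINVAL on bad input, and must be checked. */
		return kstrtou16(s, 0, port);
	}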
(next=%px)\n", + entry, next->prev, next)) return false; return true; diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h index 673bd206aa98..330aa539b46e 100644 --- a/lib/lz4/lz4defs.h +++ b/lib/lz4/lz4defs.h @@ -36,6 +36,8 @@ */ #include <asm/unaligned.h> + +#include <linux/bitops.h> #include <linux/string.h> /* memset, memcpy */ #define FORCE_INLINE __always_inline diff --git a/lib/ref_tracker.c b/lib/ref_tracker.c index 0ae2e66dcf0f..a6789c0c626b 100644 --- a/lib/ref_tracker.c +++ b/lib/ref_tracker.c @@ -69,9 +69,12 @@ int ref_tracker_alloc(struct ref_tracker_dir *dir, unsigned long entries[REF_TRACKER_STACK_ENTRIES]; struct ref_tracker *tracker; unsigned int nr_entries; + gfp_t gfp_mask = gfp; unsigned long flags; - *trackerp = tracker = kzalloc(sizeof(*tracker), gfp | __GFP_NOFAIL); + if (gfp & __GFP_DIRECT_RECLAIM) + gfp_mask |= __GFP_NOFAIL; + *trackerp = tracker = kzalloc(sizeof(*tracker), gfp_mask); if (unlikely(!tracker)) { pr_err_once("memory allocation failure, unreliable refcount tracker.\n"); refcount_inc(&dir->untracked); diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 2709ab825499..09d293c30fd2 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -457,10 +457,9 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, } EXPORT_SYMBOL_GPL(sbitmap_queue_init_node); -static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq, - unsigned int depth) +static inline void __sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq, + unsigned int wake_batch) { - unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth); int i; if (sbq->wake_batch != wake_batch) { @@ -476,6 +475,30 @@ static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq, } } +static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq, + unsigned int depth) +{ + unsigned int wake_batch; + + wake_batch = sbq_calc_wake_batch(sbq, depth); + __sbitmap_queue_update_wake_batch(sbq, wake_batch); +} + +void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq, + unsigned int users) +{ + unsigned int wake_batch; + unsigned int min_batch; + unsigned int depth = (sbq->sb.depth + users - 1) / users; + + min_batch = sbq->sb.depth >= (4 * SBQ_WAIT_QUEUES) ? 
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 2709ab825499..09d293c30fd2 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -457,10 +457,9 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
 
-static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
-					    unsigned int depth)
+static inline void __sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
+					    unsigned int wake_batch)
 {
-	unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
 	int i;
 
 	if (sbq->wake_batch != wake_batch) {
@@ -476,6 +475,30 @@ static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
 	}
 }
 
+static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
+					    unsigned int depth)
+{
+	unsigned int wake_batch;
+
+	wake_batch = sbq_calc_wake_batch(sbq, depth);
+	__sbitmap_queue_update_wake_batch(sbq, wake_batch);
+}
+
+void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
+					    unsigned int users)
+{
+	unsigned int wake_batch;
+	unsigned int min_batch;
+	unsigned int depth = (sbq->sb.depth + users - 1) / users;
+
+	min_batch = sbq->sb.depth >= (4 * SBQ_WAIT_QUEUES) ? 4 : 1;
+
+	wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES,
+			min_batch, SBQ_WAKE_BATCH);
+	__sbitmap_queue_update_wake_batch(sbq, wake_batch);
+}
+EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch);
+
 void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
 {
 	sbitmap_queue_update_wake_batch(sbq, depth);
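Worked numbers for the new sbitmap_queue_recalculate_wake_batch(); the inputs are made up, and SBQ_WAIT_QUEUES and SBQ_WAKE_BATCH are assumed to be 8 as in sbitmap.h at this point:

	/*
	 * sb.depth = 256 shared tags, users = 3 active queues:
	 *   depth      = (256 + 3 - 1) / 3     = 86  (per-user share, rounded up)
	 *   min_batch  = 256 >= 4 * 8 ? 4 : 1  = 4
	 *   wake_batch = clamp(86 / 8, 4, 8)   = 8
	 * Each wait queue therefore wakes waiters once 8 tags are freed.
	 */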
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index b437ae79aca1..bf5ba9af0500 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -23,6 +23,7 @@
 #include <linux/jhash.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/mutex.h>
 #include <linux/percpu.h>
 #include <linux/printk.h>
 #include <linux/slab.h>
@@ -161,18 +162,40 @@ static int __init is_stack_depot_disabled(char *str)
 }
 early_param("stack_depot_disable", is_stack_depot_disabled);
 
-int __init stack_depot_init(void)
+/*
+ * __ref because of memblock_alloc(), which will not be actually called after
+ * the __init code is gone, because at that point slab_is_available() is true
+ */
+__ref int stack_depot_init(void)
 {
-	if (!stack_depot_disable) {
+	static DEFINE_MUTEX(stack_depot_init_mutex);
+
+	mutex_lock(&stack_depot_init_mutex);
+	if (!stack_depot_disable && !stack_table) {
 		size_t size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
 		int i;
 
-		stack_table = memblock_alloc(size, size);
-		for (i = 0; i < STACK_HASH_SIZE; i++)
-			stack_table[i] = NULL;
+		if (slab_is_available()) {
+			pr_info("Stack Depot allocating hash table with kvmalloc\n");
+			stack_table = kvmalloc(size, GFP_KERNEL);
+		} else {
+			pr_info("Stack Depot allocating hash table with memblock_alloc\n");
+			stack_table = memblock_alloc(size, SMP_CACHE_BYTES);
+		}
+		if (stack_table) {
+			for (i = 0; i < STACK_HASH_SIZE; i++)
+				stack_table[i] = NULL;
+		} else {
+			pr_err("Stack Depot hash table allocation failed, disabling\n");
+			stack_depot_disable = true;
+			mutex_unlock(&stack_depot_init_mutex);
+			return -ENOMEM;
+		}
 	}
+	mutex_unlock(&stack_depot_init_mutex);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(stack_depot_init);
 
 /* Calculate hash for a stack */
 static inline u32 hash_stack(unsigned long *entries, unsigned int size)
@@ -305,6 +328,9 @@ EXPORT_SYMBOL_GPL(stack_depot_fetch);
 * (allocates using GFP flags of @alloc_flags). If @can_alloc is %false, avoids
 * any allocations and will fail if no space is left to store the stack trace.
 *
+ * If the stack trace in @entries is from an interrupt, only the portion up to
+ * interrupt entry is saved.
+ *
 * Context: Any context, but setting @can_alloc to %false is required if
 *          alloc_pages() cannot be used from the current context. Currently
 *          this is the case from contexts where neither %GFP_ATOMIC nor
@@ -323,6 +349,16 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
 	unsigned long flags;
 	u32 hash;
 
+	/*
+	 * If this stack trace is from an interrupt, including anything before
+	 * interrupt entry usually leads to unbounded stackdepot growth.
+	 *
+	 * Because use of filter_irq_stacks() is a requirement to ensure
+	 * stackdepot can efficiently deduplicate interrupt stacks, always
+	 * filter_irq_stacks() to simplify all callers' use of stackdepot.
+	 */
+	nr_entries = filter_irq_stacks(entries, nr_entries);
+
 	if (unlikely(nr_entries == 0) || stack_depot_disable)
 		goto fast_exit;
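With stack_depot_init() now callable after boot and the save path filtering IRQ frames itself, a caller round-trip looks roughly like this (a sketch with error handling elided; the function name is made up):

	#include <linux/stackdepot.h>
	#include <linux/stacktrace.h>

	static depot_stack_handle_t record_current_stack(gfp_t gfp)
	{
		unsigned long entries[16];
		unsigned int nr;

		nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
		return stack_depot_save(entries, nr, gfp);	/* 0 on failure */
	}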
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index d33fa5a61b95..0c82f07f74fc 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -446,6 +446,42 @@ static void __init test_bitmap_parselist(void)
 	}
 }
 
+static void __init test_bitmap_printlist(void)
+{
+	unsigned long *bmap = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	char expected[256];
+	int ret, slen;
+	ktime_t time;
+
+	if (!buf || !bmap)
+		goto out;
+
+	memset(bmap, -1, PAGE_SIZE);
+	slen = snprintf(expected, 256, "0-%ld", PAGE_SIZE * 8 - 1);
+	if (slen < 0)
+		goto out;
+
+	time = ktime_get();
+	ret = bitmap_print_to_pagebuf(true, buf, bmap, PAGE_SIZE * 8);
+	time = ktime_get() - time;
+
+	if (ret != slen + 1) {
+		pr_err("bitmap_print_to_pagebuf: result is %d, expected %d\n", ret, slen);
+		goto out;
+	}
+
+	if (strncmp(buf, expected, slen)) {
+		pr_err("bitmap_print_to_pagebuf: result is %s, expected %s\n", buf, expected);
+		goto out;
+	}
+
+	pr_err("bitmap_print_to_pagebuf: input is '%s', Time: %llu\n", buf, time);
+out:
+	kfree(buf);
+	kfree(bmap);
+}
+
 static const unsigned long parse_test[] __initconst = {
 	BITMAP_FROM_U64(0),
 	BITMAP_FROM_U64(1),
@@ -818,6 +854,7 @@ static void __init selftest(void)
 	test_bitmap_arr32();
 	test_bitmap_parse();
 	test_bitmap_parselist();
+	test_bitmap_printlist();
 	test_mem_optimisations();
 	test_for_each_set_clump8();
 	test_bitmap_cut();
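In miniature, the property the new selftest checks: a fully-set bitmap prints as a single range. The demo function below is illustrative, not from the patch:

	#include <linux/bitmap.h>

	static int demo(char *page)	/* page: a PAGE_SIZE buffer */
	{
		DECLARE_BITMAP(map, 8);

		bitmap_fill(map, 8);
		/* Writes "0-7\n"; returns 4, the length including the newline,
		 * which is why the selftest expects ret == slen + 1. */
		return bitmap_print_to_pagebuf(true, page, map, 8);
	}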
diff --git a/lib/test_hash.c b/lib/test_hash.c
index 0ee40b4a56dd..bb25fda34794 100644
--- a/lib/test_hash.c
+++ b/lib/test_hash.c
@@ -14,17 +14,15 @@
 * and hash_64().
 */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt "\n"
-
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <linux/module.h>
 #include <linux/hash.h>
 #include <linux/stringhash.h>
-#include <linux/printk.h>
+#include <kunit/test.h>
 
 /* 32-bit XORSHIFT generator.  Seed must not be zero. */
-static u32 __init __attribute_const__
+static u32 __attribute_const__
 xorshift(u32 seed)
 {
 	seed ^= seed << 13;
@@ -34,7 +32,7 @@ xorshift(u32 seed)
 }
 
 /* Given a non-zero x, returns a non-zero byte. */
-static u8 __init __attribute_const__
+static u8 __attribute_const__
 mod255(u32 x)
 {
 	x = (x & 0xffff) + (x >> 16);	/* 1 <= x <= 0x1fffe */
@@ -45,8 +43,7 @@ mod255(u32 x)
 }
 
 /* Fill the buffer with non-zero bytes. */
-static void __init
-fill_buf(char *buf, size_t len, u32 seed)
+static void fill_buf(char *buf, size_t len, u32 seed)
 {
 	size_t i;
 
@@ -56,6 +53,50 @@ fill_buf(char *buf, size_t len, u32 seed)
 	}
 }
 
+/* Holds most testing variables for the int test. */
+struct test_hash_params {
+	/* Pointer to integer to be hashed. */
+	unsigned long long *h64;
+	/* Low 32-bits of integer to be hashed. */
+	u32 h0;
+	/* Arch-specific hash result. */
+	u32 h1;
+	/* Generic hash result. */
+	u32 h2;
+	/* ORed hashes of given size (in bits). */
+	u32 (*hash_or)[33];
+};
+
+#ifdef HAVE_ARCH__HASH_32
+static void
+test_int__hash_32(struct kunit *test, struct test_hash_params *params)
+{
+	params->hash_or[1][0] |= params->h2 = __hash_32_generic(params->h0);
+#if HAVE_ARCH__HASH_32 == 1
+	KUNIT_EXPECT_EQ_MSG(test, params->h1, params->h2,
+			    "__hash_32(%#x) = %#x != __hash_32_generic() = %#x",
+			    params->h0, params->h1, params->h2);
+#endif
+}
+#endif
+
+#ifdef HAVE_ARCH_HASH_64
+static void
+test_int_hash_64(struct kunit *test, struct test_hash_params *params, u32 const *m, int *k)
+{
+	params->h2 = hash_64_generic(*params->h64, *k);
+#if HAVE_ARCH_HASH_64 == 1
+	KUNIT_EXPECT_EQ_MSG(test, params->h1, params->h2,
+			    "hash_64(%#llx, %d) = %#x != hash_64_generic() = %#x",
+			    *params->h64, *k, params->h1, params->h2);
+#else
+	KUNIT_EXPECT_LE_MSG(test, params->h1, params->h2,
+			    "hash_64_generic(%#llx, %d) = %#x > %#x",
+			    *params->h64, *k, params->h1, *m);
+#endif
+}
+#endif
+
 /*
 * Test the various integer hash functions.  h64 (or its low-order bits)
 * is the integer to hash.  hash_or accumulates the OR of the hash values,
@@ -65,23 +106,16 @@ fill_buf(char *buf, size_t len, u32 seed)
 * inline, the code being tested is actually in the module, and you can
 * recompile and re-test the module without rebooting.
 */
-static bool __init
-test_int_hash(unsigned long long h64, u32 hash_or[2][33])
+static void
+test_int_hash(struct kunit *test, unsigned long long h64, u32 hash_or[2][33])
 {
 	int k;
-	u32 h0 = (u32)h64, h1, h2;
+	struct test_hash_params params = { &h64, (u32)h64, 0, 0, hash_or };
 
 	/* Test __hash32 */
-	hash_or[0][0] |= h1 = __hash_32(h0);
+	hash_or[0][0] |= params.h1 = __hash_32(params.h0);
 #ifdef HAVE_ARCH__HASH_32
-	hash_or[1][0] |= h2 = __hash_32_generic(h0);
-#if HAVE_ARCH__HASH_32 == 1
-	if (h1 != h2) {
-		pr_err("__hash_32(%#x) = %#x != __hash_32_generic() = %#x",
-			h0, h1, h2);
-		return false;
-	}
-#endif
+	test_int__hash_32(test, &params);
 #endif
 
 	/* Test k = 1..32 bits */
@@ -89,63 +123,53 @@ test_int_hash(unsigned long long h64, u32 hash_or[2][33])
 		u32 const m = ((u32)2 << (k-1)) - 1;	/* Low k bits set */
 
 		/* Test hash_32 */
-		hash_or[0][k] |= h1 = hash_32(h0, k);
-		if (h1 > m) {
-			pr_err("hash_32(%#x, %d) = %#x > %#x", h0, k, h1, m);
-			return false;
-		}
-#ifdef HAVE_ARCH_HASH_32
-		h2 = hash_32_generic(h0, k);
-#if HAVE_ARCH_HASH_32 == 1
-		if (h1 != h2) {
-			pr_err("hash_32(%#x, %d) = %#x != hash_32_generic() "
-				" = %#x", h0, k, h1, h2);
-			return false;
-		}
-#else
-		if (h2 > m) {
-			pr_err("hash_32_generic(%#x, %d) = %#x > %#x",
-				h0, k, h1, m);
-			return false;
-		}
-#endif
-#endif
+		hash_or[0][k] |= params.h1 = hash_32(params.h0, k);
+		KUNIT_EXPECT_LE_MSG(test, params.h1, m,
+				    "hash_32(%#x, %d) = %#x > %#x",
+				    params.h0, k, params.h1, m);
+
 		/* Test hash_64 */
-		hash_or[1][k] |= h1 = hash_64(h64, k);
-		if (h1 > m) {
-			pr_err("hash_64(%#llx, %d) = %#x > %#x", h64, k, h1, m);
-			return false;
-		}
+		hash_or[1][k] |= params.h1 = hash_64(h64, k);
+		KUNIT_EXPECT_LE_MSG(test, params.h1, m,
+				    "hash_64(%#llx, %d) = %#x > %#x",
+				    h64, k, params.h1, m);
#ifdef HAVE_ARCH_HASH_64
-		h2 = hash_64_generic(h64, k);
-#if HAVE_ARCH_HASH_64 == 1
-		if (h1 != h2) {
-			pr_err("hash_64(%#llx, %d) = %#x != hash_64_generic() "
-				"= %#x", h64, k, h1, h2);
-			return false;
-		}
-#else
-		if (h2 > m) {
-			pr_err("hash_64_generic(%#llx, %d) = %#x > %#x",
-				h64, k, h1, m);
-			return false;
-		}
-#endif
+		test_int_hash_64(test, &params, &m, &k);
 #endif
 	}
-
-	(void)h2;	/* Suppress unused variable warning */
-	return true;
 }
 
 #define SIZE 256	/* Run time is cubic in SIZE */
 
-static int __init
-test_hash_init(void)
+static void test_string_or(struct kunit *test)
 {
 	char buf[SIZE+1];
-	u32 string_or = 0, hash_or[2][33] = { { 0, } };
-	unsigned tests = 0;
+	u32 string_or = 0;
+	int i, j;
+
+	fill_buf(buf, SIZE, 1);
+
+	/* Test every possible non-empty substring in the buffer. */
+	for (j = SIZE; j > 0; --j) {
+		buf[j] = '\0';
+
+		for (i = 0; i <= j; i++) {
+			u32 h0 = full_name_hash(buf+i, buf+i, j-i);
+
+			string_or |= h0;
+		} /* i */
+	} /* j */
+
+	/* The OR of all the hash values should cover all the bits */
+	KUNIT_EXPECT_EQ_MSG(test, string_or, -1u,
+			    "OR of all string hash results = %#x != %#x",
+			    string_or, -1u);
+}
+
+static void test_hash_or(struct kunit *test)
+{
+	char buf[SIZE+1];
+	u32 hash_or[2][33] = { { 0, } };
 	unsigned long long h64 = 0;
 	int i, j;
 
@@ -160,46 +184,27 @@ test_hash_init(void)
 			u32 h0 = full_name_hash(buf+i, buf+i, j-i);
 
 			/* Check that hashlen_string gets the length right */
-			if (hashlen_len(hashlen) != j-i) {
-				pr_err("hashlen_string(%d..%d) returned length"
-					" %u, expected %d",
-					i, j, hashlen_len(hashlen), j-i);
-				return -EINVAL;
-			}
+			KUNIT_EXPECT_EQ_MSG(test, hashlen_len(hashlen), j-i,
+					    "hashlen_string(%d..%d) returned length %u, expected %d",
+					    i, j, hashlen_len(hashlen), j-i);
 			/* Check that the hashes match */
-			if (hashlen_hash(hashlen) != h0) {
-				pr_err("hashlen_string(%d..%d) = %08x != "
-					"full_name_hash() = %08x",
-					i, j, hashlen_hash(hashlen), h0);
-				return -EINVAL;
-			}
+			KUNIT_EXPECT_EQ_MSG(test, hashlen_hash(hashlen), h0,
+					    "hashlen_string(%d..%d) = %08x != full_name_hash() = %08x",
+					    i, j, hashlen_hash(hashlen), h0);
 
-			string_or |= h0;
 			h64 = h64 << 32 | h0;	/* For use with hash_64 */
-			if (!test_int_hash(h64, hash_or))
-				return -EINVAL;
-			tests++;
+			test_int_hash(test, h64, hash_or);
 		} /* i */
 	} /* j */
 
-	/* The OR of all the hash values should cover all the bits */
-	if (~string_or) {
-		pr_err("OR of all string hash results = %#x != %#x",
-			string_or, -1u);
-		return -EINVAL;
-	}
-	if (~hash_or[0][0]) {
-		pr_err("OR of all __hash_32 results = %#x != %#x",
-			hash_or[0][0], -1u);
-		return -EINVAL;
-	}
+	KUNIT_EXPECT_EQ_MSG(test, hash_or[0][0], -1u,
+			    "OR of all __hash_32 results = %#x != %#x",
+			    hash_or[0][0], -1u);
 #ifdef HAVE_ARCH__HASH_32
 #if HAVE_ARCH__HASH_32 != 1	/* Test is pointless if results match */
-	if (~hash_or[1][0]) {
-		pr_err("OR of all __hash_32_generic results = %#x != %#x",
-			hash_or[1][0], -1u);
-		return -EINVAL;
-	}
+	KUNIT_EXPECT_EQ_MSG(test, hash_or[1][0], -1u,
+			    "OR of all __hash_32_generic results = %#x != %#x",
+			    hash_or[1][0], -1u);
 #endif
 #endif
 
@@ -207,51 +212,27 @@ test_hash_init(void)
 	for (i = 1; i <= 32; i++) {
 		u32 const m = ((u32)2 << (i-1)) - 1;	/* Low i bits set */
 
-		if (hash_or[0][i] != m) {
-			pr_err("OR of all hash_32(%d) results = %#x "
-				"(%#x expected)", i, hash_or[0][i], m);
-			return -EINVAL;
-		}
-		if (hash_or[1][i] != m) {
-			pr_err("OR of all hash_64(%d) results = %#x "
-				"(%#x expected)", i, hash_or[1][i], m);
-			return -EINVAL;
-		}
+		KUNIT_EXPECT_EQ_MSG(test, hash_or[0][i], m,
+				    "OR of all hash_32(%d) results = %#x (%#x expected)",
+				    i, hash_or[0][i], m);
+		KUNIT_EXPECT_EQ_MSG(test, hash_or[1][i], m,
+				    "OR of all hash_64(%d) results = %#x (%#x expected)",
+				    i, hash_or[1][i], m);
 	}
+}
 
-	/* Issue notices about skipped tests. */
-#ifdef HAVE_ARCH__HASH_32
-#if HAVE_ARCH__HASH_32 != 1
-	pr_info("__hash_32() is arch-specific; not compared to generic.");
-#endif
-#else
-	pr_info("__hash_32() has no arch implementation to test.");
-#endif
-#ifdef HAVE_ARCH_HASH_32
-#if HAVE_ARCH_HASH_32 != 1
-	pr_info("hash_32() is arch-specific; not compared to generic.");
-#endif
-#else
-	pr_info("hash_32() has no arch implementation to test.");
-#endif
-#ifdef HAVE_ARCH_HASH_64
-#if HAVE_ARCH_HASH_64 != 1
-	pr_info("hash_64() is arch-specific; not compared to generic.");
-#endif
-#else
-	pr_info("hash_64() has no arch implementation to test.");
-#endif
-
-	pr_notice("%u tests passed.", tests);
+static struct kunit_case hash_test_cases[] __refdata = {
+	KUNIT_CASE(test_string_or),
+	KUNIT_CASE(test_hash_or),
+	{}
+};
 
-	return 0;
-}
+static struct kunit_suite hash_test_suite = {
+	.name = "hash",
+	.test_cases = hash_test_cases,
+};
 
-static void __exit test_hash_exit(void)
-{
-}
-module_init(test_hash_init);	/* Does everything */
-module_exit(test_hash_exit);	/* Does nothing */
+kunit_test_suite(hash_test_suite);
 
 MODULE_LICENSE("GPL");
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 847cdbefab46..26a5c9007653 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -492,6 +492,7 @@ static void kmalloc_oob_in_memset(struct kunit *test)
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
+	OPTIMIZER_HIDE_VAR(ptr);
 	OPTIMIZER_HIDE_VAR(size);
 	KUNIT_EXPECT_KASAN_FAIL(test,
 				memset(ptr, 0, size + KASAN_GRANULE_SIZE));
@@ -515,6 +516,7 @@ static void kmalloc_memmove_negative_size(struct kunit *test)
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
 	memset((char *)ptr, 0, 64);
+	OPTIMIZER_HIDE_VAR(ptr);
 	OPTIMIZER_HIDE_VAR(invalid_size);
 	KUNIT_EXPECT_KASAN_FAIL(test,
 		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
@@ -531,6 +533,7 @@ static void kmalloc_memmove_invalid_size(struct kunit *test)
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
 	memset((char *)ptr, 0, 64);
+	OPTIMIZER_HIDE_VAR(ptr);
 	KUNIT_EXPECT_KASAN_FAIL(test,
 		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
 	kfree(ptr);
@@ -893,6 +896,7 @@ static void kasan_memchr(struct kunit *test)
 	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
+	OPTIMIZER_HIDE_VAR(ptr);
 	OPTIMIZER_HIDE_VAR(size);
 	KUNIT_EXPECT_KASAN_FAIL(test,
 		kasan_ptr_result = memchr(ptr, '1', size + 1));
@@ -919,6 +923,7 @@ static void kasan_memcmp(struct kunit *test)
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 	memset(arr, 0, sizeof(arr));
 
+	OPTIMIZER_HIDE_VAR(ptr);
 	OPTIMIZER_HIDE_VAR(size);
 	KUNIT_EXPECT_KASAN_FAIL(test,
 		kasan_int_result = memcmp(ptr, arr, size+1));
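OPTIMIZER_HIDE_VAR() is what keeps these deliberately out-of-bounds accesses from being folded away by the compiler. Its definition is essentially an empty asm that launders the value; the sketch below is simplified from include/linux/compiler.h, not taken from this patch:

	/* The "0" constraint ties the output to the input, so the compiler
	 * must assume 'var' may have changed and cannot constant-fold or
	 * elide the access that follows. */
	#define OPTIMIZER_HIDE_VAR(var)	__asm__ ("" : "=r" (var) : "0" (var))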
diff --git a/lib/test_meminit.c b/lib/test_meminit.c
index e4f706a404b3..3ca717f11397 100644
--- a/lib/test_meminit.c
+++ b/lib/test_meminit.c
@@ -337,6 +337,7 @@ static int __init do_kmem_cache_size_bulk(int size, int *total_failures)
 		if (num)
 			kmem_cache_free_bulk(c, num, objects);
 	}
+	kmem_cache_destroy(c);
 	*total_failures += fail;
 	return 1;
 }
diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c
index 3750323973f4..a5a3d6c27e1f 100644
--- a/lib/test_sysctl.c
+++ b/lib/test_sysctl.c
@@ -128,26 +128,6 @@ static struct ctl_table test_table[] = {
 	{ }
 };
 
-static struct ctl_table test_sysctl_table[] = {
-	{
-		.procname	= "test_sysctl",
-		.maxlen		= 0,
-		.mode		= 0555,
-		.child		= test_table,
-	},
-	{ }
-};
-
-static struct ctl_table test_sysctl_root_table[] = {
-	{
-		.procname	= "debug",
-		.maxlen		= 0,
-		.mode		= 0555,
-		.child		= test_sysctl_table,
-	},
-	{ }
-};
-
 static struct ctl_table_header *test_sysctl_header;
 
 static int __init test_sysctl_init(void)
@@ -155,7 +135,7 @@ static int __init test_sysctl_init(void)
 	test_data.bitmap_0001 = kzalloc(SYSCTL_TEST_BITMAP_SIZE/8, GFP_KERNEL);
 	if (!test_data.bitmap_0001)
 		return -ENOMEM;
-	test_sysctl_header = register_sysctl_table(test_sysctl_root_table);
+	test_sysctl_header = register_sysctl("debug/test_sysctl", test_table);
 	if (!test_sysctl_header) {
 		kfree(test_data.bitmap_0001);
 		return -ENOMEM;
	}
diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c
index 7e7bbd0f3fd2..2062be1f2e80 100644
--- a/lib/test_ubsan.c
+++ b/lib/test_ubsan.c
@@ -79,15 +79,6 @@ static void test_ubsan_load_invalid_value(void)
 	eval2 = eval;
 }
 
-static void test_ubsan_null_ptr_deref(void)
-{
-	volatile int *ptr = NULL;
-	int val;
-
-	UBSAN_TEST(CONFIG_UBSAN_OBJECT_SIZE);
-	val = *ptr;
-}
-
 static void test_ubsan_misaligned_access(void)
 {
 	volatile char arr[5] __aligned(4) = {1, 2, 3, 4, 5};
@@ -98,29 +89,16 @@ static void test_ubsan_misaligned_access(void)
 	*ptr = val;
 }
 
-static void test_ubsan_object_size_mismatch(void)
-{
-	/* "((aligned(8)))" helps this not into be misaligned for ptr-access. */
-	volatile int val __aligned(8) = 4;
-	volatile long long *ptr, val2;
-
-	UBSAN_TEST(CONFIG_UBSAN_OBJECT_SIZE);
-	ptr = (long long *)&val;
-	val2 = *ptr;
-}
-
 static const test_ubsan_fp test_ubsan_array[] = {
 	test_ubsan_shift_out_of_bounds,
 	test_ubsan_out_of_bounds,
 	test_ubsan_load_invalid_value,
 	test_ubsan_misaligned_access,
-	test_ubsan_object_size_mismatch,
 };
 
 /* Excluded because they Oops the module. */
 static const test_ubsan_fp skip_ubsan_array[] = {
 	test_ubsan_divrem_overflow,
-	test_ubsan_null_ptr_deref,
 };
 
 static int __init test_ubsan_init(void)
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 53d6081f9e8b..3b8129dd374c 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1241,20 +1241,13 @@ char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap,
 			 struct printf_spec spec, const char *fmt)
 {
 	int nr_bits = max_t(int, spec.field_width, 0);
-	/* current bit is 'cur', most recently seen range is [rbot, rtop] */
-	int cur, rbot, rtop;
 	bool first = true;
+	int rbot, rtop;
 
 	if (check_pointer(&buf, end, bitmap, spec))
 		return buf;
 
-	rbot = cur = find_first_bit(bitmap, nr_bits);
-	while (cur < nr_bits) {
-		rtop = cur;
-		cur = find_next_bit(bitmap, nr_bits, cur + 1);
-		if (cur < nr_bits && cur <= rtop + 1)
-			continue;
-
+	for_each_set_bitrange(rbot, rtop, bitmap, nr_bits) {
 		if (!first) {
 			if (buf < end)
 				*buf = ',';
@@ -1263,15 +1256,12 @@ char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap,
 		first = false;
 
 		buf = number(buf, end, rbot, default_dec_spec);
-		if (rbot < rtop) {
-			if (buf < end)
-				*buf = '-';
-			buf++;
-
-			buf = number(buf, end, rtop, default_dec_spec);
-		}
+		if (rtop == rbot + 1)
+			continue;
 
-		rbot = cur;
+		if (buf < end)
+			*buf = '-';
+		buf = number(++buf, end, rtop - 1, default_dec_spec);
 	}
 	return buf;
 }
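The rewritten bitmap_list_string() leans on for_each_set_bitrange(), which yields half-open [rbot, rtop) runs of consecutive set bits; that is why a run is printed as rtop - 1 and a single bit satisfies rtop == rbot + 1. An illustrative walk with made-up input:

	/*
	 * bitmap = 0b1011101 (bits 0, 2, 3, 4, 6 set), nr_bits = 7:
	 *   for_each_set_bitrange() yields [0,1), [2,5), [6,7)
	 *   printed output: "0,2-4,6"
	 */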
