Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                     | 118
-rw-r--r--  lib/Kconfig.debug               |  27
-rw-r--r--  lib/alloc_tag.c                 |   6
-rw-r--r--  lib/atomic64.c                  |  78
-rw-r--r--  lib/cpumask.c                   |   5
-rw-r--r--  lib/crc32.c                     | 225
-rw-r--r--  lib/crc32defs.h                 |  59
-rw-r--r--  lib/crypto/aesgcm.c             |   2
-rw-r--r--  lib/crypto/gf128mul.c           |  75
-rw-r--r--  lib/fault-inject.c              |  28
-rw-r--r--  lib/gen_crc32table.c            | 113
-rw-r--r--  lib/inflate.c                   |   2
-rw-r--r--  lib/kobject.c                   |  24
-rw-r--r--  lib/kunit_iov_iter.c            |   5
-rw-r--r--  lib/list_debug.c                |  22
-rw-r--r--  lib/list_sort.c                 |   7
-rw-r--r--  lib/lz4/lz4_compress.c          |   1
-rw-r--r--  lib/lz4/lz4_decompress.c        |   1
-rw-r--r--  lib/lz4/lz4defs.h               |   4
-rw-r--r--  lib/lz4/lz4hc_compress.c        |   1
-rw-r--r--  lib/maple_tree.c                |  73
-rw-r--r--  lib/math/Makefile               |   1
-rw-r--r--  lib/math/tests/Makefile         |   1
-rw-r--r--  lib/math/tests/int_sqrt_kunit.c |  66
-rw-r--r--  lib/rhashtable.c                |  14
-rw-r--r--  lib/sort.c                      |   7
-rw-r--r--  lib/stackinit_kunit.c           | 106
-rw-r--r--  lib/test_maple_tree.c           |  56
-rw-r--r--  lib/test_min_heap.c             |  30
-rw-r--r--  lib/test_sysctl.c               |   6
-rw-r--r--  lib/test_vmalloc.c              |   2
-rw-r--r--  lib/test_xarray.c               |  35
-rw-r--r--  lib/xarray.c                    |  78
33 files changed, 537 insertions(+), 741 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index a78d22c6507f..dccb61b7d698 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -164,34 +164,9 @@ config CRC_T10DIF
config ARCH_HAS_CRC_T10DIF
bool
-choice
- prompt "CRC-T10DIF implementation"
- depends on CRC_T10DIF
- default CRC_T10DIF_IMPL_ARCH if ARCH_HAS_CRC_T10DIF
- default CRC_T10DIF_IMPL_GENERIC if !ARCH_HAS_CRC_T10DIF
- help
- This option allows you to override the default choice of CRC-T10DIF
- implementation.
-
-config CRC_T10DIF_IMPL_ARCH
- bool "Architecture-optimized" if ARCH_HAS_CRC_T10DIF
- help
- Use the optimized implementation of CRC-T10DIF for the selected
- architecture. It is recommended to keep this enabled, as it can
- greatly improve CRC-T10DIF performance.
-
-config CRC_T10DIF_IMPL_GENERIC
- bool "Generic implementation"
- help
- Use the generic table-based implementation of CRC-T10DIF. Selecting
- this will reduce code size slightly but can greatly reduce CRC-T10DIF
- performance.
-
-endchoice
-
config CRC_T10DIF_ARCH
tristate
- default CRC_T10DIF if CRC_T10DIF_IMPL_ARCH
+ default CRC_T10DIF if ARCH_HAS_CRC_T10DIF && CRC_OPTIMIZATIONS
config CRC64_ROCKSOFT
tristate "CRC calculation for the Rocksoft model CRC64"
@@ -223,87 +198,9 @@ config CRC32
config ARCH_HAS_CRC32
bool
-choice
- prompt "CRC32 implementation"
- depends on CRC32
- default CRC32_IMPL_ARCH_PLUS_SLICEBY8 if ARCH_HAS_CRC32
- default CRC32_IMPL_SLICEBY8 if !ARCH_HAS_CRC32
- help
- This option allows you to override the default choice of CRC32
- implementation. Choose the default unless you know that you need one
- of the others.
-
-config CRC32_IMPL_ARCH_PLUS_SLICEBY8
- bool "Arch-optimized, with fallback to slice-by-8" if ARCH_HAS_CRC32
- help
- Use architecture-optimized implementation of CRC32. Fall back to
- slice-by-8 in cases where the arch-optimized implementation cannot be
- used, e.g. if the CPU lacks support for the needed instructions.
-
- This is the default when an arch-optimized implementation exists.
-
-config CRC32_IMPL_ARCH_PLUS_SLICEBY1
- bool "Arch-optimized, with fallback to slice-by-1" if ARCH_HAS_CRC32
- help
- Use architecture-optimized implementation of CRC32, but fall back to
- slice-by-1 instead of slice-by-8 in order to reduce the binary size.
-
-config CRC32_IMPL_SLICEBY8
- bool "Slice by 8 bytes"
- help
- Calculate checksum 8 bytes at a time with a clever slicing algorithm.
- This is much slower than the architecture-optimized implementation of
- CRC32 (if the selected arch has one), but it is portable and is the
- fastest implementation when no arch-optimized implementation is
- available. It uses an 8KiB lookup table. Most modern processors have
- enough cache to hold this table without thrashing the cache.
-
-config CRC32_IMPL_SLICEBY4
- bool "Slice by 4 bytes"
- help
- Calculate checksum 4 bytes at a time with a clever slicing algorithm.
- This is a bit slower than slice by 8, but has a smaller 4KiB lookup
- table.
-
- Only choose this option if you know what you are doing.
-
-config CRC32_IMPL_SLICEBY1
- bool "Slice by 1 byte (Sarwate's algorithm)"
- help
- Calculate checksum a byte at a time using Sarwate's algorithm. This
- is not particularly fast, but has a small 1KiB lookup table.
-
- Only choose this option if you know what you are doing.
-
-config CRC32_IMPL_BIT
- bool "Classic Algorithm (one bit at a time)"
- help
- Calculate checksum one bit at a time. This is VERY slow, but has
- no lookup table. This is provided as a debugging option.
-
- Only choose this option if you are debugging crc32.
-
-endchoice
-
config CRC32_ARCH
tristate
- default CRC32 if CRC32_IMPL_ARCH_PLUS_SLICEBY8 || CRC32_IMPL_ARCH_PLUS_SLICEBY1
-
-config CRC32_SLICEBY8
- bool
- default y if CRC32_IMPL_SLICEBY8 || CRC32_IMPL_ARCH_PLUS_SLICEBY8
-
-config CRC32_SLICEBY4
- bool
- default y if CRC32_IMPL_SLICEBY4
-
-config CRC32_SARWATE
- bool
- default y if CRC32_IMPL_SLICEBY1 || CRC32_IMPL_ARCH_PLUS_SLICEBY1
-
-config CRC32_BIT
- bool
- default y if CRC32_IMPL_BIT
+ default CRC32 if ARCH_HAS_CRC32 && CRC_OPTIMIZATIONS
config CRC64
tristate "CRC64 functions"
@@ -343,6 +240,17 @@ config CRC8
when they need to do cyclic redundancy check according CRC8
algorithm. Module will be called crc8.
+config CRC_OPTIMIZATIONS
+ bool "Enable optimized CRC implementations" if EXPERT
+ default y
+ help
+ Disabling this option reduces code size slightly by disabling the
+ architecture-optimized implementations of any CRC variants that are
+ enabled. CRC checksumming performance may get much slower.
+
+ Keep this enabled unless you're really trying to minimize the size of
+ the kernel.
+
config XXHASH
tristate
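With the implementation choice menus removed, picking the arch-optimized CRC code is no longer a per-algorithm user decision: CRC_T10DIF_ARCH and CRC32_ARCH now follow the ARCH_HAS_* capability symbols plus the single CRC_OPTIMIZATIONS knob above. A minimal sketch of the resulting call-site dispatch, assuming a crc32_le_arch() entry point on the header side (not part of this hunk):

	/* Sketch only: compile-time dispatch gated by CONFIG_CRC32_ARCH. */
	static inline u32 crc32_le(u32 crc, const u8 *p, size_t len)
	{
		if (IS_ENABLED(CONFIG_CRC32_ARCH))
			return crc32_le_arch(crc, p, len);
		return crc32_le_base(crc, p, len);
	}
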
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 5f1874622175..1af972a92d06 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2269,7 +2269,6 @@ config TEST_LIST_SORT
config TEST_MIN_HEAP
tristate "Min heap test"
depends on DEBUG_KERNEL || m
- select MIN_HEAP
help
Enable this to turn on min heap function tests. This test is
executed only once during system boot (so affects only boot time),
@@ -2479,6 +2478,17 @@ config TEST_RHASHTABLE
config TEST_IDA
tristate "Perform selftest on IDA functions"
+config TEST_MISC_MINOR
+ tristate "Basic misc minor Kunit test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+	  KUnit test for the misc minor code.
+	  It tests misc minor functions for dynamic and misc dynamic minors,
+	  including the misc_xxx functions.
+
+ If unsure, say N.
+
config TEST_PARMAN
tristate "Perform selftest on priority array manager"
depends on PARMAN
@@ -3172,6 +3182,21 @@ config INT_POW_TEST
If unsure, say N
+config INT_SQRT_KUNIT_TEST
+ tristate "Integer square root test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+	  This option enables the KUnit test suite for the int_sqrt()
+	  function, which computes the integer square root. The tests
+	  cover perfect squares, non-perfect squares, and edge cases to
+	  ensure correctness.
+
+ If unsure, say N
+
endif # RUNTIME_TESTING_MENU
config ARCH_USE_MEMTEST
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 65e706e1bc19..19b45617bdcf 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -29,6 +29,8 @@ EXPORT_SYMBOL(_shared_alloc_tag);
DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
mem_alloc_profiling_key);
+EXPORT_SYMBOL(mem_alloc_profiling_key);
+
DEFINE_STATIC_KEY_FALSE(mem_profiling_compressed);
struct alloc_tag_kernel_section kernel_tags = { NULL, 0 };
@@ -423,8 +425,8 @@ static int vm_module_tags_populate(void)
unsigned long nr;
more_pages = ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT;
- nr = alloc_pages_bulk_array_node(GFP_KERNEL | __GFP_NOWARN,
- NUMA_NO_NODE, more_pages, next_page);
+ nr = alloc_pages_bulk_node(GFP_KERNEL | __GFP_NOWARN,
+ NUMA_NO_NODE, more_pages, next_page);
if (nr < more_pages ||
vmap_pages_range(phys_end, phys_end + (nr << PAGE_SHIFT), PAGE_KERNEL,
next_page, PAGE_SHIFT) < 0) {
diff --git a/lib/atomic64.c b/lib/atomic64.c
index caf895789a1e..1a72bba36d24 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -25,15 +25,15 @@
* Ensure each lock is in a separate cacheline.
*/
static union {
- raw_spinlock_t lock;
+ arch_spinlock_t lock;
char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
[0 ... (NR_LOCKS - 1)] = {
- .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
+ .lock = __ARCH_SPIN_LOCK_UNLOCKED,
},
};
-static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
+static inline arch_spinlock_t *lock_addr(const atomic64_t *v)
{
unsigned long addr = (unsigned long) v;
@@ -45,12 +45,14 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
s64 generic_atomic64_read(const atomic64_t *v)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ arch_spinlock_t *lock = lock_addr(v);
s64 val;
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
val = v->counter;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
return val;
}
EXPORT_SYMBOL(generic_atomic64_read);
@@ -58,11 +60,13 @@ EXPORT_SYMBOL(generic_atomic64_read);
void generic_atomic64_set(atomic64_t *v, s64 i)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ arch_spinlock_t *lock = lock_addr(v);
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
v->counter = i;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
}
EXPORT_SYMBOL(generic_atomic64_set);
@@ -70,11 +74,13 @@ EXPORT_SYMBOL(generic_atomic64_set);
void generic_atomic64_##op(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
- raw_spinlock_t *lock = lock_addr(v); \
+ arch_spinlock_t *lock = lock_addr(v); \
\
- raw_spin_lock_irqsave(lock, flags); \
+ local_irq_save(flags); \
+ arch_spin_lock(lock); \
v->counter c_op a; \
- raw_spin_unlock_irqrestore(lock, flags); \
+ arch_spin_unlock(lock); \
+ local_irq_restore(flags); \
} \
EXPORT_SYMBOL(generic_atomic64_##op);
@@ -82,12 +88,14 @@ EXPORT_SYMBOL(generic_atomic64_##op);
s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
- raw_spinlock_t *lock = lock_addr(v); \
+ arch_spinlock_t *lock = lock_addr(v); \
s64 val; \
\
- raw_spin_lock_irqsave(lock, flags); \
+ local_irq_save(flags); \
+ arch_spin_lock(lock); \
val = (v->counter c_op a); \
- raw_spin_unlock_irqrestore(lock, flags); \
+ arch_spin_unlock(lock); \
+ local_irq_restore(flags); \
return val; \
} \
EXPORT_SYMBOL(generic_atomic64_##op##_return);
@@ -96,13 +104,15 @@ EXPORT_SYMBOL(generic_atomic64_##op##_return);
s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
- raw_spinlock_t *lock = lock_addr(v); \
+ arch_spinlock_t *lock = lock_addr(v); \
s64 val; \
\
- raw_spin_lock_irqsave(lock, flags); \
+ local_irq_save(flags); \
+ arch_spin_lock(lock); \
val = v->counter; \
v->counter c_op a; \
- raw_spin_unlock_irqrestore(lock, flags); \
+ arch_spin_unlock(lock); \
+ local_irq_restore(flags); \
return val; \
} \
EXPORT_SYMBOL(generic_atomic64_fetch_##op);
@@ -131,14 +141,16 @@ ATOMIC64_OPS(xor, ^=)
s64 generic_atomic64_dec_if_positive(atomic64_t *v)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ arch_spinlock_t *lock = lock_addr(v);
s64 val;
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
val = v->counter - 1;
if (val >= 0)
v->counter = val;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
return val;
}
EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
@@ -146,14 +158,16 @@ EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ arch_spinlock_t *lock = lock_addr(v);
s64 val;
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
val = v->counter;
if (val == o)
v->counter = n;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
return val;
}
EXPORT_SYMBOL(generic_atomic64_cmpxchg);
@@ -161,13 +175,15 @@ EXPORT_SYMBOL(generic_atomic64_cmpxchg);
s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ arch_spinlock_t *lock = lock_addr(v);
s64 val;
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
val = v->counter;
v->counter = new;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
return val;
}
EXPORT_SYMBOL(generic_atomic64_xchg);
@@ -175,14 +191,16 @@ EXPORT_SYMBOL(generic_atomic64_xchg);
s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ arch_spinlock_t *lock = lock_addr(v);
s64 val;
- raw_spin_lock_irqsave(lock, flags);
+ local_irq_save(flags);
+ arch_spin_lock(lock);
val = v->counter;
if (val != u)
v->counter += a;
- raw_spin_unlock_irqrestore(lock, flags);
+ arch_spin_unlock(lock);
+ local_irq_restore(flags);
return val;
}
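Unlike raw_spin_lock_irqsave(), arch_spin_lock() neither disables interrupts nor carries lockdep/debug state, hence the explicit local_irq_save()/local_irq_restore() pairing in every operation above. After expansion, each macro-generated op takes the shape below (generic_atomic64_add_return, i.e. ATOMIC64_OP_RETURN instantiated with c_op '+='):

	s64 generic_atomic64_add_return(s64 a, atomic64_t *v)
	{
		unsigned long flags;
		arch_spinlock_t *lock = lock_addr(v);
		s64 val;

		local_irq_save(flags);	/* arch_spin_lock() leaves IRQs alone */
		arch_spin_lock(lock);
		val = (v->counter += a);
		arch_spin_unlock(lock);
		local_irq_restore(flags);
		return val;
	}
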
diff --git a/lib/cpumask.c b/lib/cpumask.c
index e77ee9d46f71..57274ba8b6d9 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -83,10 +83,7 @@ EXPORT_SYMBOL(alloc_cpumask_var_node);
*/
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
- *mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
- if (!*mask)
- panic("%s: Failed to allocate %u bytes\n", __func__,
- cpumask_size());
+ *mask = memblock_alloc_or_panic(cpumask_size(), SMP_CACHE_BYTES);
}
/**
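memblock_alloc_or_panic() folds the removed allocate-or-panic sequence into a single helper. Roughly (a sketch of its semantics, not the exact in-tree definition, which also reports the calling function):

	void *memblock_alloc_or_panic(phys_addr_t size, phys_addr_t align)
	{
		void *ptr = memblock_alloc(size, align);

		if (!ptr)
			panic("Failed to allocate %llu bytes\n",
			      (unsigned long long)size);
		return ptr;
	}
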
diff --git a/lib/crc32.c b/lib/crc32.c
index 47151624332e..ede6131f66fc 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -30,20 +30,6 @@
#include <linux/crc32poly.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/sched.h>
-#include "crc32defs.h"
-
-#if CRC_LE_BITS > 8
-# define tole(x) ((__force u32) cpu_to_le32(x))
-#else
-# define tole(x) (x)
-#endif
-
-#if CRC_BE_BITS > 8
-# define tobe(x) ((__force u32) cpu_to_be32(x))
-#else
-# define tobe(x) (x)
-#endif
#include "crc32table.h"
@@ -51,157 +37,20 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
MODULE_DESCRIPTION("Various CRC32 calculations");
MODULE_LICENSE("GPL");
-#if CRC_LE_BITS > 8 || CRC_BE_BITS > 8
-
-/* implements slicing-by-4 or slicing-by-8 algorithm */
-static inline u32 __pure
-crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
-{
-# ifdef __LITTLE_ENDIAN
-# define DO_CRC(x) crc = t0[(crc ^ (x)) & 255] ^ (crc >> 8)
-# define DO_CRC4 (t3[(q) & 255] ^ t2[(q >> 8) & 255] ^ \
- t1[(q >> 16) & 255] ^ t0[(q >> 24) & 255])
-# define DO_CRC8 (t7[(q) & 255] ^ t6[(q >> 8) & 255] ^ \
- t5[(q >> 16) & 255] ^ t4[(q >> 24) & 255])
-# else
-# define DO_CRC(x) crc = t0[((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
-# define DO_CRC4 (t0[(q) & 255] ^ t1[(q >> 8) & 255] ^ \
- t2[(q >> 16) & 255] ^ t3[(q >> 24) & 255])
-# define DO_CRC8 (t4[(q) & 255] ^ t5[(q >> 8) & 255] ^ \
- t6[(q >> 16) & 255] ^ t7[(q >> 24) & 255])
-# endif
- const u32 *b;
- size_t rem_len;
-# ifdef CONFIG_X86
- size_t i;
-# endif
- const u32 *t0=tab[0], *t1=tab[1], *t2=tab[2], *t3=tab[3];
-# if CRC_LE_BITS != 32
- const u32 *t4 = tab[4], *t5 = tab[5], *t6 = tab[6], *t7 = tab[7];
-# endif
- u32 q;
-
- /* Align it */
- if (unlikely((long)buf & 3 && len)) {
- do {
- DO_CRC(*buf++);
- } while ((--len) && ((long)buf)&3);
- }
-
-# if CRC_LE_BITS == 32
- rem_len = len & 3;
- len = len >> 2;
-# else
- rem_len = len & 7;
- len = len >> 3;
-# endif
-
- b = (const u32 *)buf;
-# ifdef CONFIG_X86
- --b;
- for (i = 0; i < len; i++) {
-# else
- for (--b; len; --len) {
-# endif
- q = crc ^ *++b; /* use pre increment for speed */
-# if CRC_LE_BITS == 32
- crc = DO_CRC4;
-# else
- crc = DO_CRC8;
- q = *++b;
- crc ^= DO_CRC4;
-# endif
- }
- len = rem_len;
- /* And the last few bytes */
- if (len) {
- u8 *p = (u8 *)(b + 1) - 1;
-# ifdef CONFIG_X86
- for (i = 0; i < len; i++)
- DO_CRC(*++p); /* use pre increment for speed */
-# else
- do {
- DO_CRC(*++p); /* use pre increment for speed */
- } while (--len);
-# endif
- }
- return crc;
-#undef DO_CRC
-#undef DO_CRC4
-#undef DO_CRC8
-}
-#endif
-
-
-/**
- * crc32_le_generic() - Calculate bitwise little-endian Ethernet AUTODIN II
- * CRC32/CRC32C
- * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for other
- * uses, or the previous crc32/crc32c value if computing incrementally.
- * @p: pointer to buffer over which CRC32/CRC32C is run
- * @len: length of buffer @p
- * @tab: little-endian Ethernet table
- * @polynomial: CRC32/CRC32c LE polynomial
- */
-static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
- size_t len, const u32 (*tab)[256],
- u32 polynomial)
+u32 __pure crc32_le_base(u32 crc, const u8 *p, size_t len)
{
-#if CRC_LE_BITS == 1
- int i;
- while (len--) {
- crc ^= *p++;
- for (i = 0; i < 8; i++)
- crc = (crc >> 1) ^ ((crc & 1) ? polynomial : 0);
- }
-# elif CRC_LE_BITS == 2
- while (len--) {
- crc ^= *p++;
- crc = (crc >> 2) ^ tab[0][crc & 3];
- crc = (crc >> 2) ^ tab[0][crc & 3];
- crc = (crc >> 2) ^ tab[0][crc & 3];
- crc = (crc >> 2) ^ tab[0][crc & 3];
- }
-# elif CRC_LE_BITS == 4
- while (len--) {
- crc ^= *p++;
- crc = (crc >> 4) ^ tab[0][crc & 15];
- crc = (crc >> 4) ^ tab[0][crc & 15];
- }
-# elif CRC_LE_BITS == 8
- /* aka Sarwate algorithm */
- while (len--) {
- crc ^= *p++;
- crc = (crc >> 8) ^ tab[0][crc & 255];
- }
-# else
- crc = (__force u32) __cpu_to_le32(crc);
- crc = crc32_body(crc, p, len, tab);
- crc = __le32_to_cpu((__force __le32)crc);
-#endif
+ while (len--)
+ crc = (crc >> 8) ^ crc32table_le[(crc & 255) ^ *p++];
return crc;
}
+EXPORT_SYMBOL(crc32_le_base);
-#if CRC_LE_BITS == 1
-u32 __pure crc32_le_base(u32 crc, const u8 *p, size_t len)
-{
- return crc32_le_generic(crc, p, len, NULL, CRC32_POLY_LE);
-}
-u32 __pure crc32c_le_base(u32 crc, const u8 *p, size_t len)
-{
- return crc32_le_generic(crc, p, len, NULL, CRC32C_POLY_LE);
-}
-#else
-u32 __pure crc32_le_base(u32 crc, const u8 *p, size_t len)
-{
- return crc32_le_generic(crc, p, len, crc32table_le, CRC32_POLY_LE);
-}
u32 __pure crc32c_le_base(u32 crc, const u8 *p, size_t len)
{
- return crc32_le_generic(crc, p, len, crc32ctable_le, CRC32C_POLY_LE);
+ while (len--)
+ crc = (crc >> 8) ^ crc32ctable_le[(crc & 255) ^ *p++];
+ return crc;
}
-#endif
-EXPORT_SYMBOL(crc32_le_base);
EXPORT_SYMBOL(crc32c_le_base);
/*
@@ -277,64 +126,10 @@ u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len)
EXPORT_SYMBOL(crc32_le_shift);
EXPORT_SYMBOL(__crc32c_le_shift);
-/**
- * crc32_be_generic() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
- * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
- * other uses, or the previous crc32 value if computing incrementally.
- * @p: pointer to buffer over which CRC32 is run
- * @len: length of buffer @p
- * @tab: big-endian Ethernet table
- * @polynomial: CRC32 BE polynomial
- */
-static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
- size_t len, const u32 (*tab)[256],
- u32 polynomial)
-{
-#if CRC_BE_BITS == 1
- int i;
- while (len--) {
- crc ^= *p++ << 24;
- for (i = 0; i < 8; i++)
- crc =
- (crc << 1) ^ ((crc & 0x80000000) ? polynomial :
- 0);
- }
-# elif CRC_BE_BITS == 2
- while (len--) {
- crc ^= *p++ << 24;
- crc = (crc << 2) ^ tab[0][crc >> 30];
- crc = (crc << 2) ^ tab[0][crc >> 30];
- crc = (crc << 2) ^ tab[0][crc >> 30];
- crc = (crc << 2) ^ tab[0][crc >> 30];
- }
-# elif CRC_BE_BITS == 4
- while (len--) {
- crc ^= *p++ << 24;
- crc = (crc << 4) ^ tab[0][crc >> 28];
- crc = (crc << 4) ^ tab[0][crc >> 28];
- }
-# elif CRC_BE_BITS == 8
- while (len--) {
- crc ^= *p++ << 24;
- crc = (crc << 8) ^ tab[0][crc >> 24];
- }
-# else
- crc = (__force u32) __cpu_to_be32(crc);
- crc = crc32_body(crc, p, len, tab);
- crc = __be32_to_cpu((__force __be32)crc);
-# endif
- return crc;
-}
-
-#if CRC_BE_BITS == 1
-u32 __pure crc32_be_base(u32 crc, const u8 *p, size_t len)
-{
- return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
-}
-#else
u32 __pure crc32_be_base(u32 crc, const u8 *p, size_t len)
{
- return crc32_be_generic(crc, p, len, crc32table_be, CRC32_POLY_BE);
+ while (len--)
+ crc = (crc << 8) ^ crc32table_be[(crc >> 24) ^ *p++];
+ return crc;
}
-#endif
EXPORT_SYMBOL(crc32_be_base);
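All slice-by-N variants are gone from the generic code; what remains are the byte-at-a-time Sarwate loops above, used only when no arch-optimized implementation runs. A self-contained userspace sketch of the same technique, including the table construction that gen_crc32table.c performs at build time:

	#include <stdint.h>
	#include <stddef.h>

	#define CRC32_POLY_LE 0xedb88320u

	static uint32_t table[256];

	static void crc32_init_table(void)
	{
		for (int i = 0; i < 256; i++) {
			uint32_t crc = i;

			for (int j = 0; j < 8; j++)
				crc = (crc >> 1) ^ ((crc & 1) ? CRC32_POLY_LE : 0);
			table[i] = crc;
		}
	}

	/* One table lookup per input byte (Sarwate's algorithm). */
	static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
	{
		while (len--)
			crc = (crc >> 8) ^ table[(crc & 255) ^ *p++];
		return crc;
	}

The usual Ethernet check value is then crc32_le(~0u, buf, len) ^ ~0u; for the nine bytes "123456789" this yields 0xcbf43926.
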
diff --git a/lib/crc32defs.h b/lib/crc32defs.h
deleted file mode 100644
index 0c8fb5923e7e..000000000000
--- a/lib/crc32defs.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-/* Try to choose an implementation variant via Kconfig */
-#ifdef CONFIG_CRC32_SLICEBY8
-# define CRC_LE_BITS 64
-# define CRC_BE_BITS 64
-#endif
-#ifdef CONFIG_CRC32_SLICEBY4
-# define CRC_LE_BITS 32
-# define CRC_BE_BITS 32
-#endif
-#ifdef CONFIG_CRC32_SARWATE
-# define CRC_LE_BITS 8
-# define CRC_BE_BITS 8
-#endif
-#ifdef CONFIG_CRC32_BIT
-# define CRC_LE_BITS 1
-# define CRC_BE_BITS 1
-#endif
-
-/*
- * How many bits at a time to use. Valid values are 1, 2, 4, 8, 32 and 64.
- * For less performance-sensitive, use 4 or 8 to save table size.
- * For larger systems choose same as CPU architecture as default.
- * This works well on X86_64, SPARC64 systems. This may require some
- * elaboration after experiments with other architectures.
- */
-#ifndef CRC_LE_BITS
-# ifdef CONFIG_64BIT
-# define CRC_LE_BITS 64
-# else
-# define CRC_LE_BITS 32
-# endif
-#endif
-#ifndef CRC_BE_BITS
-# ifdef CONFIG_64BIT
-# define CRC_BE_BITS 64
-# else
-# define CRC_BE_BITS 32
-# endif
-#endif
-
-/*
- * Little-endian CRC computation. Used with serial bit streams sent
- * lsbit-first. Be sure to use cpu_to_le32() to append the computed CRC.
- */
-#if CRC_LE_BITS > 64 || CRC_LE_BITS < 1 || CRC_LE_BITS == 16 || \
- CRC_LE_BITS & CRC_LE_BITS-1
-# error "CRC_LE_BITS must be one of {1, 2, 4, 8, 32, 64}"
-#endif
-
-/*
- * Big-endian CRC computation. Used with serial bit streams sent
- * msbit-first. Be sure to use cpu_to_be32() to append the computed CRC.
- */
-#if CRC_BE_BITS > 64 || CRC_BE_BITS < 1 || CRC_BE_BITS == 16 || \
- CRC_BE_BITS & CRC_BE_BITS-1
-# error "CRC_BE_BITS must be one of {1, 2, 4, 8, 32, 64}"
-#endif
diff --git a/lib/crypto/aesgcm.c b/lib/crypto/aesgcm.c
index 6bba6473fdf3..902e49410aaf 100644
--- a/lib/crypto/aesgcm.c
+++ b/lib/crypto/aesgcm.c
@@ -697,7 +697,7 @@ static int __init libaesgcm_init(void)
u8 tagbuf[AES_BLOCK_SIZE];
int plen = aesgcm_tv[i].plen;
struct aesgcm_ctx ctx;
- u8 buf[sizeof(ptext12)];
+ static u8 buf[sizeof(ptext12)];
if (aesgcm_expandkey(&ctx, aesgcm_tv[i].key, aesgcm_tv[i].klen,
aesgcm_tv[i].clen - plen)) {
diff --git a/lib/crypto/gf128mul.c b/lib/crypto/gf128mul.c
index 8f8c45e0cdcf..fbe72cb3453a 100644
--- a/lib/crypto/gf128mul.c
+++ b/lib/crypto/gf128mul.c
@@ -225,44 +225,6 @@ void gf128mul_lle(be128 *r, const be128 *b)
}
EXPORT_SYMBOL(gf128mul_lle);
-void gf128mul_bbe(be128 *r, const be128 *b)
-{
- be128 p[8];
- int i;
-
- p[0] = *r;
- for (i = 0; i < 7; ++i)
- gf128mul_x_bbe(&p[i + 1], &p[i]);
-
- memset(r, 0, sizeof(*r));
- for (i = 0;;) {
- u8 ch = ((u8 *)b)[i];
-
- if (ch & 0x80)
- be128_xor(r, r, &p[7]);
- if (ch & 0x40)
- be128_xor(r, r, &p[6]);
- if (ch & 0x20)
- be128_xor(r, r, &p[5]);
- if (ch & 0x10)
- be128_xor(r, r, &p[4]);
- if (ch & 0x08)
- be128_xor(r, r, &p[3]);
- if (ch & 0x04)
- be128_xor(r, r, &p[2]);
- if (ch & 0x02)
- be128_xor(r, r, &p[1]);
- if (ch & 0x01)
- be128_xor(r, r, &p[0]);
-
- if (++i >= 16)
- break;
-
- gf128mul_x8_bbe(r);
- }
-}
-EXPORT_SYMBOL(gf128mul_bbe);
-
/* This version uses 64k bytes of table space.
A 16 byte buffer has to be multiplied by a 16 byte key
value in GF(2^128). If we consider a GF(2^128) value in
@@ -380,28 +342,6 @@ out:
}
EXPORT_SYMBOL(gf128mul_init_4k_lle);
-struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g)
-{
- struct gf128mul_4k *t;
- int j, k;
-
- t = kzalloc(sizeof(*t), GFP_KERNEL);
- if (!t)
- goto out;
-
- t->t[1] = *g;
- for (j = 1; j <= 64; j <<= 1)
- gf128mul_x_bbe(&t->t[j + j], &t->t[j]);
-
- for (j = 2; j < 256; j += j)
- for (k = 1; k < j; ++k)
- be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
-
-out:
- return t;
-}
-EXPORT_SYMBOL(gf128mul_init_4k_bbe);
-
void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t)
{
u8 *ap = (u8 *)a;
@@ -417,20 +357,5 @@ void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t)
}
EXPORT_SYMBOL(gf128mul_4k_lle);
-void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t)
-{
- u8 *ap = (u8 *)a;
- be128 r[1];
- int i = 0;
-
- *r = t->t[ap[0]];
- while (++i < 16) {
- gf128mul_x8_bbe(r);
- be128_xor(r, r, &t->t[ap[i]]);
- }
- *a = *r;
-}
-EXPORT_SYMBOL(gf128mul_4k_bbe);
-
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Functions for multiplying elements of GF(2^128)");
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 52eb6ba29698..999053fa133e 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/stat.h>
@@ -13,6 +13,24 @@
#include <linux/fault-inject.h>
/*
+ * The should_fail() functions use prandom instead of the normal Linux RNG
+ * since they don't need cryptographically secure random numbers.
+ */
+static DEFINE_PER_CPU(struct rnd_state, fault_rnd_state);
+
+static u32 fault_prandom_u32_below_100(void)
+{
+ struct rnd_state *state;
+ u32 res;
+
+ state = &get_cpu_var(fault_rnd_state);
+ res = prandom_u32_state(state);
+ put_cpu_var(fault_rnd_state);
+
+ return res % 100;
+}
+
+/*
* setup_fault_attr() is a helper function for various __setup handlers, so it
* returns 0 on error, because that is what __setup handlers do.
*/
@@ -31,6 +49,8 @@ int setup_fault_attr(struct fault_attr *attr, char *str)
return 0;
}
+ prandom_init_once(&fault_rnd_state);
+
attr->probability = probability;
attr->interval = interval;
atomic_set(&attr->times, times);
@@ -146,7 +166,7 @@ bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags)
return false;
}
- if (attr->probability <= get_random_u32_below(100))
+ if (attr->probability <= fault_prandom_u32_below_100())
return false;
fail:
@@ -219,6 +239,8 @@ struct dentry *fault_create_debugfs_attr(const char *name,
if (IS_ERR(dir))
return dir;
+ prandom_init_once(&fault_rnd_state);
+
debugfs_create_ul("probability", mode, dir, &attr->probability);
debugfs_create_ul("interval", mode, dir, &attr->interval);
debugfs_create_atomic_t("times", mode, dir, &attr->times);
@@ -431,6 +453,8 @@ static const struct config_item_type fault_config_type = {
void fault_config_init(struct fault_config *config, const char *name)
{
+ prandom_init_once(&fault_rnd_state);
+
config_group_init_type_name(&config->group, name, &fault_config_type);
}
EXPORT_SYMBOL_GPL(fault_config_init);
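For context, a minimal consumer of this API looks like the sketch below (my_fault and my_op are illustrative names, not from this patch). The string parsed by setup_fault_attr() is "<interval>,<probability>,<space>,<times>"; note also that fault_prandom_u32_below_100()'s "res % 100" carries a negligible modulo bias, which is acceptable for fault probabilities:

	static DECLARE_FAULT_ATTR(my_fault);

	static int __init my_fault_setup(void)
	{
		/* ~10% failure rate, no size filtering, unlimited times */
		setup_fault_attr(&my_fault, "1,10,0,-1");
		fault_create_debugfs_attr("my_fault", NULL, &my_fault);
		return 0;
	}

	static int my_op(void)
	{
		if (should_fail(&my_fault, 1))
			return -ENOMEM;	/* injected failure */
		/* ... real work ... */
		return 0;
	}
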
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
index f755b997b967..6d03425b849e 100644
--- a/lib/gen_crc32table.c
+++ b/lib/gen_crc32table.c
@@ -2,30 +2,11 @@
#include <stdio.h>
#include "../include/linux/crc32poly.h"
#include "../include/generated/autoconf.h"
-#include "crc32defs.h"
#include <inttypes.h>
-#define ENTRIES_PER_LINE 4
-
-#if CRC_LE_BITS > 8
-# define LE_TABLE_ROWS (CRC_LE_BITS/8)
-# define LE_TABLE_SIZE 256
-#else
-# define LE_TABLE_ROWS 1
-# define LE_TABLE_SIZE (1 << CRC_LE_BITS)
-#endif
-
-#if CRC_BE_BITS > 8
-# define BE_TABLE_ROWS (CRC_BE_BITS/8)
-# define BE_TABLE_SIZE 256
-#else
-# define BE_TABLE_ROWS 1
-# define BE_TABLE_SIZE (1 << CRC_BE_BITS)
-#endif
-
-static uint32_t crc32table_le[LE_TABLE_ROWS][256];
-static uint32_t crc32table_be[BE_TABLE_ROWS][256];
-static uint32_t crc32ctable_le[LE_TABLE_ROWS][256];
+static uint32_t crc32table_le[256];
+static uint32_t crc32table_be[256];
+static uint32_t crc32ctable_le[256];
/**
* crc32init_le() - allocate and initialize LE table data
@@ -34,25 +15,17 @@ static uint32_t crc32ctable_le[LE_TABLE_ROWS][256];
* fact that crctable[i^j] = crctable[i] ^ crctable[j].
*
*/
-static void crc32init_le_generic(const uint32_t polynomial,
- uint32_t (*tab)[256])
+static void crc32init_le_generic(const uint32_t polynomial, uint32_t tab[256])
{
unsigned i, j;
uint32_t crc = 1;
- tab[0][0] = 0;
+ tab[0] = 0;
- for (i = LE_TABLE_SIZE >> 1; i; i >>= 1) {
+ for (i = 128; i; i >>= 1) {
crc = (crc >> 1) ^ ((crc & 1) ? polynomial : 0);
- for (j = 0; j < LE_TABLE_SIZE; j += 2 * i)
- tab[0][i + j] = crc ^ tab[0][j];
- }
- for (i = 0; i < LE_TABLE_SIZE; i++) {
- crc = tab[0][i];
- for (j = 1; j < LE_TABLE_ROWS; j++) {
- crc = tab[0][crc & 0xff] ^ (crc >> 8);
- tab[j][i] = crc;
- }
+ for (j = 0; j < 256; j += 2 * i)
+ tab[i + j] = crc ^ tab[j];
}
}
@@ -74,34 +47,22 @@ static void crc32init_be(void)
unsigned i, j;
uint32_t crc = 0x80000000;
- crc32table_be[0][0] = 0;
+ crc32table_be[0] = 0;
- for (i = 1; i < BE_TABLE_SIZE; i <<= 1) {
+ for (i = 1; i < 256; i <<= 1) {
crc = (crc << 1) ^ ((crc & 0x80000000) ? CRC32_POLY_BE : 0);
for (j = 0; j < i; j++)
- crc32table_be[0][i + j] = crc ^ crc32table_be[0][j];
- }
- for (i = 0; i < BE_TABLE_SIZE; i++) {
- crc = crc32table_be[0][i];
- for (j = 1; j < BE_TABLE_ROWS; j++) {
- crc = crc32table_be[0][(crc >> 24) & 0xff] ^ (crc << 8);
- crc32table_be[j][i] = crc;
- }
+ crc32table_be[i + j] = crc ^ crc32table_be[j];
}
}
-static void output_table(uint32_t (*table)[256], int rows, int len, char *trans)
+static void output_table(const uint32_t table[256])
{
- int i, j;
-
- for (j = 0 ; j < rows; j++) {
- printf("{");
- for (i = 0; i < len - 1; i++) {
- if (i % ENTRIES_PER_LINE == 0)
- printf("\n");
- printf("%s(0x%8.8xL), ", trans, table[j][i]);
- }
- printf("%s(0x%8.8xL)},\n", trans, table[j][len - 1]);
+ int i;
+
+ for (i = 0; i < 256; i += 4) {
+ printf("\t0x%08x, 0x%08x, 0x%08x, 0x%08x,\n",
+ table[i], table[i + 1], table[i + 2], table[i + 3]);
}
}
@@ -109,34 +70,20 @@ int main(int argc, char** argv)
{
printf("/* this file is generated - do not edit */\n\n");
- if (CRC_LE_BITS > 1) {
- crc32init_le();
- printf("static const u32 ____cacheline_aligned "
- "crc32table_le[%d][%d] = {",
- LE_TABLE_ROWS, LE_TABLE_SIZE);
- output_table(crc32table_le, LE_TABLE_ROWS,
- LE_TABLE_SIZE, "tole");
- printf("};\n");
- }
+ crc32init_le();
+ printf("static const u32 ____cacheline_aligned crc32table_le[256] = {\n");
+ output_table(crc32table_le);
+ printf("};\n");
- if (CRC_BE_BITS > 1) {
- crc32init_be();
- printf("static const u32 ____cacheline_aligned "
- "crc32table_be[%d][%d] = {",
- BE_TABLE_ROWS, BE_TABLE_SIZE);
- output_table(crc32table_be, LE_TABLE_ROWS,
- BE_TABLE_SIZE, "tobe");
- printf("};\n");
- }
- if (CRC_LE_BITS > 1) {
- crc32cinit_le();
- printf("static const u32 ____cacheline_aligned "
- "crc32ctable_le[%d][%d] = {",
- LE_TABLE_ROWS, LE_TABLE_SIZE);
- output_table(crc32ctable_le, LE_TABLE_ROWS,
- LE_TABLE_SIZE, "tole");
- printf("};\n");
- }
+ crc32init_be();
+ printf("static const u32 ____cacheline_aligned crc32table_be[256] = {\n");
+ output_table(crc32table_be);
+ printf("};\n");
+
+ crc32cinit_le();
+ printf("static const u32 ____cacheline_aligned crc32ctable_le[256] = {\n");
+ output_table(crc32ctable_le);
+ printf("};\n");
return 0;
}
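With a single 256-entry row per table and the tole()/tobe() translation dropped, the generator emits plain values. For the little-endian polynomial 0xedb88320, the first emitted line of crc32table_le should read:

	0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
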
diff --git a/lib/inflate.c b/lib/inflate.c
index fbaf03c1748d..eab886baa1b4 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -1257,8 +1257,6 @@ static int INIT gunzip(void)
/* Decompress */
if ((res = inflate())) {
switch (res) {
- case 0:
- break;
case 1:
error("invalid compressed format (err=1)");
break;
diff --git a/lib/kobject.c b/lib/kobject.c
index 72fa20f405f1..abe5f5b856ce 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -1096,30 +1096,6 @@ void *kobj_ns_grab_current(enum kobj_ns_type type)
}
EXPORT_SYMBOL_GPL(kobj_ns_grab_current);
-const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk)
-{
- const void *ns = NULL;
-
- spin_lock(&kobj_ns_type_lock);
- if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type])
- ns = kobj_ns_ops_tbl[type]->netlink_ns(sk);
- spin_unlock(&kobj_ns_type_lock);
-
- return ns;
-}
-
-const void *kobj_ns_initial(enum kobj_ns_type type)
-{
- const void *ns = NULL;
-
- spin_lock(&kobj_ns_type_lock);
- if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type])
- ns = kobj_ns_ops_tbl[type]->initial_ns();
- spin_unlock(&kobj_ns_type_lock);
-
- return ns;
-}
-
void kobj_ns_drop(enum kobj_ns_type type, void *ns)
{
spin_lock(&kobj_ns_type_lock);
diff --git a/lib/kunit_iov_iter.c b/lib/kunit_iov_iter.c
index 10a560feb66e..48342736d016 100644
--- a/lib/kunit_iov_iter.c
+++ b/lib/kunit_iov_iter.c
@@ -57,15 +57,12 @@ static void *__init iov_kunit_create_buffer(struct kunit *test,
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages);
*ppages = pages;
- got = alloc_pages_bulk_array(GFP_KERNEL, npages, pages);
+ got = alloc_pages_bulk(GFP_KERNEL, npages, pages);
if (got != npages) {
release_pages(pages, got);
KUNIT_ASSERT_EQ(test, got, npages);
}
- for (int i = 0; i < npages; i++)
- pages[i]->index = i;
-
buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buffer);
diff --git a/lib/list_debug.c b/lib/list_debug.c
index db602417febf..ee7eeeb8f92c 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -22,17 +22,17 @@ __list_valid_slowpath
bool __list_add_valid_or_report(struct list_head *new, struct list_head *prev,
struct list_head *next)
{
- if (CHECK_DATA_CORRUPTION(prev == NULL,
+ if (CHECK_DATA_CORRUPTION(prev == NULL, NULL,
"list_add corruption. prev is NULL.\n") ||
- CHECK_DATA_CORRUPTION(next == NULL,
+ CHECK_DATA_CORRUPTION(next == NULL, NULL,
"list_add corruption. next is NULL.\n") ||
- CHECK_DATA_CORRUPTION(next->prev != prev,
+ CHECK_DATA_CORRUPTION(next->prev != prev, next,
"list_add corruption. next->prev should be prev (%px), but was %px. (next=%px).\n",
prev, next->prev, next) ||
- CHECK_DATA_CORRUPTION(prev->next != next,
+ CHECK_DATA_CORRUPTION(prev->next != next, prev,
"list_add corruption. prev->next should be next (%px), but was %px. (prev=%px).\n",
next, prev->next, prev) ||
- CHECK_DATA_CORRUPTION(new == prev || new == next,
+ CHECK_DATA_CORRUPTION(new == prev || new == next, NULL,
"list_add double add: new=%px, prev=%px, next=%px.\n",
new, prev, next))
return false;
@@ -49,20 +49,20 @@ bool __list_del_entry_valid_or_report(struct list_head *entry)
prev = entry->prev;
next = entry->next;
- if (CHECK_DATA_CORRUPTION(next == NULL,
+ if (CHECK_DATA_CORRUPTION(next == NULL, NULL,
"list_del corruption, %px->next is NULL\n", entry) ||
- CHECK_DATA_CORRUPTION(prev == NULL,
+ CHECK_DATA_CORRUPTION(prev == NULL, NULL,
"list_del corruption, %px->prev is NULL\n", entry) ||
- CHECK_DATA_CORRUPTION(next == LIST_POISON1,
+ CHECK_DATA_CORRUPTION(next == LIST_POISON1, next,
"list_del corruption, %px->next is LIST_POISON1 (%px)\n",
entry, LIST_POISON1) ||
- CHECK_DATA_CORRUPTION(prev == LIST_POISON2,
+ CHECK_DATA_CORRUPTION(prev == LIST_POISON2, prev,
"list_del corruption, %px->prev is LIST_POISON2 (%px)\n",
entry, LIST_POISON2) ||
- CHECK_DATA_CORRUPTION(prev->next != entry,
+ CHECK_DATA_CORRUPTION(prev->next != entry, prev,
"list_del corruption. prev->next should be %px, but was %px. (prev=%px)\n",
entry, prev->next, prev) ||
- CHECK_DATA_CORRUPTION(next->prev != entry,
+ CHECK_DATA_CORRUPTION(next->prev != entry, next,
"list_del corruption. next->prev should be %px, but was %px. (next=%px)\n",
entry, next->prev, next))
return false;
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 8d3f623536fe..a310ecb7ccc0 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -108,6 +108,13 @@ static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head,
* and list_sort is a stable sort, so it is not necessary to distinguish
* the @a < @b and @a == @b cases.
*
+ * The comparison function must adhere to specific mathematical properties
+ * to ensure correct and stable sorting:
+ * - Antisymmetry: cmp(@a, @b) must return the opposite sign of
+ * cmp(@b, @a).
+ * - Transitivity: if cmp(@a, @b) <= 0 and cmp(@b, @c) <= 0, then
+ * cmp(@a, @c) <= 0.
+ *
* This is compatible with two styles of @cmp function:
* - The traditional style which returns <0 / =0 / >0, or
* - Returning a boolean 0/1.
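As an illustration of a @cmp that satisfies both properties (struct item here is hypothetical, not part of this patch): the popular "return a->key - b->key" shortcut can overflow and thereby break antisymmetry, so a three-way comparison is safer:

	struct item {
		struct list_head node;
		int key;
	};

	static int item_cmp(void *priv, const struct list_head *a,
			    const struct list_head *b)
	{
		const struct item *ia = list_entry(a, struct item, node);
		const struct item *ib = list_entry(b, struct item, node);

		/* Antisymmetric and transitive; no signed overflow. */
		return (ia->key > ib->key) - (ia->key < ib->key);
	}
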
diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c
index b0bbeeb74b9e..2a397bb2c661 100644
--- a/lib/lz4/lz4_compress.c
+++ b/lib/lz4/lz4_compress.c
@@ -33,7 +33,6 @@
/*-************************************
* Dependencies
**************************************/
-#include <linux/lz4.h>
#include "lz4defs.h"
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 0e31e6da5ce7..3a2cd9acada4 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -33,7 +33,6 @@
/*-************************************
* Dependencies
**************************************/
-#include <linux/lz4.h>
#include "lz4defs.h"
#include <linux/init.h>
#include <linux/module.h>
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
index cb358d6bde5a..17277ec16919 100644
--- a/lib/lz4/lz4defs.h
+++ b/lib/lz4/lz4defs.h
@@ -39,6 +39,7 @@
#include <linux/bitops.h>
#include <linux/string.h> /* memset, memcpy */
+#include <linux/lz4.h>
#define FORCE_INLINE __always_inline
@@ -92,8 +93,7 @@ typedef uintptr_t uptrval;
#define MB (1 << 20)
#define GB (1U << 30)
-#define MAXD_LOG 16
-#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
+#define MAX_DISTANCE LZ4_DISTANCE_MAX
#define STEPSIZE sizeof(size_t)
#define ML_BITS 4
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c
index bc45594ad2a8..91936dc3d14b 100644
--- a/lib/lz4/lz4hc_compress.c
+++ b/lib/lz4/lz4hc_compress.c
@@ -34,7 +34,6 @@
/*-************************************
* Dependencies
**************************************/
-#include <linux/lz4.h>
#include "lz4defs.h"
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 047397136f15..f7153ade1be5 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -1863,11 +1863,11 @@ static inline int mab_no_null_split(struct maple_big_node *b_node,
* Return: The first split location. The middle split is set in @mid_split.
*/
static inline int mab_calc_split(struct ma_state *mas,
- struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
+ struct maple_big_node *bn, unsigned char *mid_split)
{
unsigned char b_end = bn->b_end;
int split = b_end / 2; /* Assume equal split. */
- unsigned char slot_min, slot_count = mt_slots[bn->type];
+ unsigned char slot_count = mt_slots[bn->type];
/*
* To support gap tracking, all NULL entries are kept together and a node cannot
@@ -1900,18 +1900,7 @@ static inline int mab_calc_split(struct ma_state *mas,
split = b_end / 3;
*mid_split = split * 2;
} else {
- slot_min = mt_min_slots[bn->type];
-
*mid_split = 0;
- /*
- * Avoid having a range less than the slot count unless it
- * causes one node to be deficient.
- * NOTE: mt_min_slots is 1 based, b_end and split are zero.
- */
- while ((split < slot_count - 1) &&
- ((bn->pivot[split] - min) < slot_count - 1) &&
- (b_end - split > slot_min))
- split++;
}
/* Avoid ending a node on a NULL entry */
@@ -2377,7 +2366,7 @@ static inline struct maple_enode
static inline unsigned char mas_mab_to_node(struct ma_state *mas,
struct maple_big_node *b_node, struct maple_enode **left,
struct maple_enode **right, struct maple_enode **middle,
- unsigned char *mid_split, unsigned long min)
+ unsigned char *mid_split)
{
unsigned char split = 0;
unsigned char slot_count = mt_slots[b_node->type];
@@ -2390,7 +2379,7 @@ static inline unsigned char mas_mab_to_node(struct ma_state *mas,
if (b_node->b_end < slot_count) {
split = b_node->b_end;
} else {
- split = mab_calc_split(mas, b_node, mid_split, min);
+ split = mab_calc_split(mas, b_node, mid_split);
*right = mas_new_ma_node(mas, b_node);
}
@@ -2877,7 +2866,7 @@ static void mas_spanning_rebalance(struct ma_state *mas,
mast->bn->b_end--;
mast->bn->type = mte_node_type(mast->orig_l->node);
split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
- &mid_split, mast->orig_l->min);
+ &mid_split);
mast_set_split_parents(mast, left, middle, right, split,
mid_split);
mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
@@ -3365,7 +3354,7 @@ static void mas_split(struct ma_state *mas, struct maple_big_node *b_node)
if (mas_push_data(mas, height, &mast, false))
break;
- split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
+ split = mab_calc_split(mas, b_node, &mid_split);
mast_split_data(&mast, mas, split);
/*
* Usually correct, mab_mas_cp in the above call overwrites
@@ -4746,29 +4735,6 @@ again:
}
/*
- * mas_next_entry() - Internal function to get the next entry.
- * @mas: The maple state
- * @limit: The maximum range start.
- *
- * Set the @mas->node to the next entry and the range_start to
- * the beginning value for the entry. Does not check beyond @limit.
- * Sets @mas->index and @mas->last to the range, Does not update @mas->index and
- * @mas->last on overflow.
- * Restarts on dead nodes.
- *
- * Return: the next entry or %NULL.
- */
-static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
-{
- if (mas->last >= limit) {
- mas->status = ma_overflow;
- return NULL;
- }
-
- return mas_next_slot(mas, limit, false);
-}
-
-/*
* mas_rev_awalk() - Internal function. Reverse allocation walk. Find the
* highest gap address of a given size in a given node and descend.
* @mas: The maple state
@@ -4903,15 +4869,14 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
if (gap >= size) {
if (ma_is_leaf(type)) {
found = true;
- goto done;
- }
- if (mas->index <= pivot) {
- mas->node = mas_slot(mas, slots, offset);
- mas->min = min;
- mas->max = pivot;
- offset = 0;
break;
}
+
+ mas->node = mas_slot(mas, slots, offset);
+ mas->min = min;
+ mas->max = pivot;
+ offset = 0;
+ break;
}
next_slot:
min = pivot + 1;
@@ -4921,9 +4886,6 @@ next_slot:
}
}
- if (mte_is_root(mas->node))
- found = true;
-done:
mas->offset = offset;
return found;
}
@@ -5027,8 +4989,8 @@ static inline void mas_awalk(struct ma_state *mas, unsigned long size)
* There are 4 options:
* go to child (descend)
* go back to parent (ascend)
- * no gap found. (return, slot == MAPLE_NODE_SLOTS)
- * found the gap. (return, slot != MAPLE_NODE_SLOTS)
+ * no gap found. (return, error == -EBUSY)
+ * found the gap. (return)
*/
while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
if (last == mas->node)
@@ -5113,9 +5075,6 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
return xa_err(mas->node);
offset = mas->offset;
- if (unlikely(offset == MAPLE_NODE_SLOTS))
- return -EBUSY;
-
node = mas_mn(mas);
mt = mte_node_type(mas->node);
pivots = ma_pivots(node, mt);
@@ -6938,7 +6897,7 @@ retry:
goto unlock;
while (mas_is_active(&mas) && (mas.last < max)) {
- entry = mas_next_entry(&mas, max);
+ entry = mas_next_slot(&mas, max, false);
if (likely(entry && !xa_is_zero(entry)))
break;
}
@@ -7597,7 +7556,7 @@ void mt_validate(struct maple_tree *mt)
MAS_WARN_ON(&mas, mte_dead_node(mas.node));
end = mas_data_end(&mas);
if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) &&
- (mas.max != ULONG_MAX))) {
+ (!mte_is_root(mas.node)))) {
pr_err("Invalid size %u of " PTR_FMT "\n",
end, mas_mn(&mas));
}
diff --git a/lib/math/Makefile b/lib/math/Makefile
index 3ef11305f8d2..853f023ae537 100644
--- a/lib/math/Makefile
+++ b/lib/math/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_INT_POW_TEST) += tests/int_pow_kunit.o
obj-$(CONFIG_TEST_DIV64) += test_div64.o
obj-$(CONFIG_TEST_MULDIV64) += test_mul_u64_u64_div_u64.o
obj-$(CONFIG_RATIONAL_KUNIT_TEST) += rational-test.o
+obj-$(CONFIG_INT_SQRT_KUNIT_TEST) += tests/int_sqrt_kunit.o
\ No newline at end of file
diff --git a/lib/math/tests/Makefile b/lib/math/tests/Makefile
index 6a169123320a..e1a79f093b2d 100644
--- a/lib/math/tests/Makefile
+++ b/lib/math/tests/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_INT_POW_TEST) += int_pow_kunit.o
+obj-$(CONFIG_INT_SQRT_KUNIT_TEST) += int_sqrt_kunit.o
diff --git a/lib/math/tests/int_sqrt_kunit.c b/lib/math/tests/int_sqrt_kunit.c
new file mode 100644
index 000000000000..1798e1312eb7
--- /dev/null
+++ b/lib/math/tests/int_sqrt_kunit.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <kunit/test.h>
+#include <linux/limits.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+struct test_case_params {
+ unsigned long x;
+ unsigned long expected_result;
+ const char *name;
+};
+
+static const struct test_case_params params[] = {
+ { 0, 0, "edge case: square root of 0" },
+ { 1, 1, "perfect square: square root of 1" },
+ { 2, 1, "non-perfect square: square root of 2" },
+ { 3, 1, "non-perfect square: square root of 3" },
+ { 4, 2, "perfect square: square root of 4" },
+ { 5, 2, "non-perfect square: square root of 5" },
+ { 6, 2, "non-perfect square: square root of 6" },
+ { 7, 2, "non-perfect square: square root of 7" },
+ { 8, 2, "non-perfect square: square root of 8" },
+ { 9, 3, "perfect square: square root of 9" },
+ { 15, 3, "non-perfect square: square root of 15 (N-1 from 16)" },
+ { 16, 4, "perfect square: square root of 16" },
+ { 17, 4, "non-perfect square: square root of 17 (N+1 from 16)" },
+ { 80, 8, "non-perfect square: square root of 80 (N-1 from 81)" },
+ { 81, 9, "perfect square: square root of 81" },
+ { 82, 9, "non-perfect square: square root of 82 (N+1 from 81)" },
+ { 255, 15, "non-perfect square: square root of 255 (N-1 from 256)" },
+ { 256, 16, "perfect square: square root of 256" },
+ { 257, 16, "non-perfect square: square root of 257 (N+1 from 256)" },
+ { 2147483648, 46340, "large input: square root of 2147483648" },
+ { 4294967295, 65535, "edge case: ULONG_MAX for 32-bit" },
+};
+
+static void get_desc(const struct test_case_params *tc, char *desc)
+{
+ strscpy(desc, tc->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(int_sqrt, params, get_desc);
+
+static void int_sqrt_test(struct kunit *test)
+{
+ const struct test_case_params *tc = (const struct test_case_params *)test->param_value;
+
+ KUNIT_EXPECT_EQ(test, tc->expected_result, int_sqrt(tc->x));
+}
+
+static struct kunit_case math_int_sqrt_test_cases[] = {
+ KUNIT_CASE_PARAM(int_sqrt_test, int_sqrt_gen_params),
+ {}
+};
+
+static struct kunit_suite int_sqrt_test_suite = {
+ .name = "math-int_sqrt",
+ .test_cases = math_int_sqrt_test_cases,
+};
+
+kunit_test_suites(&int_sqrt_test_suite);
+
+MODULE_DESCRIPTION("math.int_sqrt KUnit test suite");
+MODULE_LICENSE("GPL");
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 6c902639728b..3e555d012ed6 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -584,10 +584,6 @@ static struct bucket_table *rhashtable_insert_one(
*/
rht_assign_locked(bkt, obj);
- atomic_inc(&ht->nelems);
- if (rht_grow_above_75(ht, tbl))
- schedule_work(&ht->run_work);
-
return NULL;
}
@@ -615,15 +611,23 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
data = ERR_PTR(-EAGAIN);
} else {
+ bool inserted;
+
flags = rht_lock(tbl, bkt);
data = rhashtable_lookup_one(ht, bkt, tbl,
hash, key, obj);
new_tbl = rhashtable_insert_one(ht, bkt, tbl,
hash, obj, data);
+ inserted = data && !new_tbl;
+ if (inserted)
+ atomic_inc(&ht->nelems);
if (PTR_ERR(new_tbl) != -EEXIST)
data = ERR_CAST(new_tbl);
rht_unlock(tbl, bkt, flags);
+
+ if (inserted && rht_grow_above_75(ht, tbl))
+ schedule_work(&ht->run_work);
}
} while (!IS_ERR_OR_NULL(new_tbl));
@@ -665,7 +669,7 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
* structure outside the hash table.
*
* This function may be called from any process context, including
- * non-preemptable context, but cannot be called from softirq or
+ * non-preemptible context, but cannot be called from softirq or
* hardirq context.
*
* You must call rhashtable_walk_exit after this function returns.
diff --git a/lib/sort.c b/lib/sort.c
index 048b7a6ef967..8e73dc55476b 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -200,6 +200,13 @@ static size_t parent(size_t i, unsigned int lsbit, size_t size)
* copy (e.g. fix up pointers or auxiliary data), but the built-in swap
* avoids a slow retpoline and so is significantly faster.
*
+ * The comparison function must adhere to specific mathematical
+ * properties to ensure correct and stable sorting:
+ * - Antisymmetry: cmp_func(a, b) must return the opposite sign of
+ * cmp_func(b, a).
+ * - Transitivity: if cmp_func(a, b) <= 0 and cmp_func(b, c) <= 0, then
+ * cmp_func(a, c) <= 0.
+ *
* Sorting time is O(n log n) both on average and worst-case. While
* quicksort is slightly faster on average, it suffers from exploitable
* O(n*n) worst-case behavior and extra memory requirements that make
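The same contract holds for sort()'s flat cmp_func_t. A conforming comparator (sketch):

	static int u32_cmp(const void *a, const void *b)
	{
		u32 x = *(const u32 *)a;
		u32 y = *(const u32 *)b;

		return (x > y) - (x < y);	/* three-way, overflow-free */
	}

	/* usage: sort(values, n, sizeof(*values), u32_cmp, NULL); */
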
diff --git a/lib/stackinit_kunit.c b/lib/stackinit_kunit.c
index c40818ec9c18..fbe910c9c825 100644
--- a/lib/stackinit_kunit.c
+++ b/lib/stackinit_kunit.c
@@ -47,10 +47,12 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
#define DO_NOTHING_TYPE_SCALAR(var_type) var_type
#define DO_NOTHING_TYPE_STRING(var_type) void
#define DO_NOTHING_TYPE_STRUCT(var_type) void
+#define DO_NOTHING_TYPE_UNION(var_type) void
#define DO_NOTHING_RETURN_SCALAR(ptr) *(ptr)
#define DO_NOTHING_RETURN_STRING(ptr) /**/
#define DO_NOTHING_RETURN_STRUCT(ptr) /**/
+#define DO_NOTHING_RETURN_UNION(ptr) /**/
#define DO_NOTHING_CALL_SCALAR(var, name) \
(var) = do_nothing_ ## name(&(var))
@@ -58,10 +60,13 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
do_nothing_ ## name(var)
#define DO_NOTHING_CALL_STRUCT(var, name) \
do_nothing_ ## name(&(var))
+#define DO_NOTHING_CALL_UNION(var, name) \
+ do_nothing_ ## name(&(var))
#define FETCH_ARG_SCALAR(var) &var
#define FETCH_ARG_STRING(var) var
#define FETCH_ARG_STRUCT(var) &var
+#define FETCH_ARG_UNION(var) &var
/*
* On m68k, if the leaf function test variable is longer than 8 bytes,
@@ -77,6 +82,7 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
#define INIT_CLONE_SCALAR /**/
#define INIT_CLONE_STRING [FILL_SIZE_STRING]
#define INIT_CLONE_STRUCT /**/
+#define INIT_CLONE_UNION /**/
#define ZERO_CLONE_SCALAR(zero) memset(&(zero), 0x00, sizeof(zero))
#define ZERO_CLONE_STRING(zero) memset(&(zero), 0x00, sizeof(zero))
@@ -92,6 +98,7 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
zero.three = 0; \
zero.four = 0; \
} while (0)
+#define ZERO_CLONE_UNION(zero) ZERO_CLONE_STRUCT(zero)
#define INIT_SCALAR_none(var_type) /**/
#define INIT_SCALAR_zero(var_type) = 0
@@ -101,6 +108,7 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
#define INIT_STRUCT_none(var_type) /**/
#define INIT_STRUCT_zero(var_type) = { }
+#define INIT_STRUCT_old_zero(var_type) = { 0 }
#define __static_partial { .two = 0, }
@@ -146,6 +154,34 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
#define INIT_STRUCT_assigned_copy(var_type) \
; var = *(arg)
+/* Union initialization is the same as structs. */
+#define INIT_UNION_none(var_type) INIT_STRUCT_none(var_type)
+#define INIT_UNION_zero(var_type) INIT_STRUCT_zero(var_type)
+#define INIT_UNION_old_zero(var_type) INIT_STRUCT_old_zero(var_type)
+
+#define INIT_UNION_static_partial(var_type) \
+ INIT_STRUCT_static_partial(var_type)
+#define INIT_UNION_static_all(var_type) \
+ INIT_STRUCT_static_all(var_type)
+#define INIT_UNION_dynamic_partial(var_type) \
+ INIT_STRUCT_dynamic_partial(var_type)
+#define INIT_UNION_dynamic_all(var_type) \
+ INIT_STRUCT_dynamic_all(var_type)
+#define INIT_UNION_runtime_partial(var_type) \
+ INIT_STRUCT_runtime_partial(var_type)
+#define INIT_UNION_runtime_all(var_type) \
+ INIT_STRUCT_runtime_all(var_type)
+#define INIT_UNION_assigned_static_partial(var_type) \
+ INIT_STRUCT_assigned_static_partial(var_type)
+#define INIT_UNION_assigned_static_all(var_type) \
+ INIT_STRUCT_assigned_static_all(var_type)
+#define INIT_UNION_assigned_dynamic_partial(var_type) \
+ INIT_STRUCT_assigned_dynamic_partial(var_type)
+#define INIT_UNION_assigned_dynamic_all(var_type) \
+ INIT_STRUCT_assigned_dynamic_all(var_type)
+#define INIT_UNION_assigned_copy(var_type) \
+ INIT_STRUCT_assigned_copy(var_type)
+
/*
* @name: unique string name for the test
* @var_type: type to be tested for zeroing initialization
@@ -294,6 +330,33 @@ struct test_user {
unsigned long four;
};
+/* No padding: all members are the same size. */
+union test_same_sizes {
+ unsigned long one;
+ unsigned long two;
+ unsigned long three;
+ unsigned long four;
+};
+
+/* Mismatched sizes, with one and two being small */
+union test_small_start {
+ char one:1;
+ char two;
+ short three;
+ unsigned long four;
+ struct big_struct {
+ unsigned long array[8];
+ } big;
+};
+
+/* Mismatched sizes, with three and four being small */
+union test_small_end {
+ short one;
+ unsigned long two;
+ char three:1;
+ char four;
+};
+
#define ALWAYS_PASS WANT_SUCCESS
#define ALWAYS_FAIL XFAIL
@@ -332,6 +395,11 @@ struct test_user {
struct test_ ## name, STRUCT, init, \
xfail)
+#define DEFINE_UNION_TEST(name, init, xfail) \
+ DEFINE_TEST(name ## _ ## init, \
+ union test_ ## name, STRUCT, init, \
+ xfail)
+
#define DEFINE_STRUCT_TESTS(init, xfail) \
DEFINE_STRUCT_TEST(small_hole, init, xfail); \
DEFINE_STRUCT_TEST(big_hole, init, xfail); \
@@ -343,9 +411,22 @@ struct test_user {
xfail); \
DEFINE_STRUCT_TESTS(base ## _ ## all, xfail)
+#define DEFINE_UNION_INITIALIZER_TESTS(base, xfail) \
+ DEFINE_UNION_TESTS(base ## _ ## partial, \
+ xfail); \
+ DEFINE_UNION_TESTS(base ## _ ## all, xfail)
+
+#define DEFINE_UNION_TESTS(init, xfail) \
+ DEFINE_UNION_TEST(same_sizes, init, xfail); \
+ DEFINE_UNION_TEST(small_start, init, xfail); \
+ DEFINE_UNION_TEST(small_end, init, xfail);
+
/* These should be fully initialized all the time! */
DEFINE_SCALAR_TESTS(zero, ALWAYS_PASS);
DEFINE_STRUCT_TESTS(zero, ALWAYS_PASS);
+DEFINE_STRUCT_TESTS(old_zero, ALWAYS_PASS);
+DEFINE_UNION_TESTS(zero, ALWAYS_PASS);
+DEFINE_UNION_TESTS(old_zero, ALWAYS_PASS);
/* Struct initializers: padding may be left uninitialized. */
DEFINE_STRUCT_INITIALIZER_TESTS(static, STRONG_PASS);
DEFINE_STRUCT_INITIALIZER_TESTS(dynamic, STRONG_PASS);
@@ -353,6 +434,12 @@ DEFINE_STRUCT_INITIALIZER_TESTS(runtime, STRONG_PASS);
DEFINE_STRUCT_INITIALIZER_TESTS(assigned_static, STRONG_PASS);
DEFINE_STRUCT_INITIALIZER_TESTS(assigned_dynamic, STRONG_PASS);
DEFINE_STRUCT_TESTS(assigned_copy, ALWAYS_FAIL);
+DEFINE_UNION_INITIALIZER_TESTS(static, STRONG_PASS);
+DEFINE_UNION_INITIALIZER_TESTS(dynamic, STRONG_PASS);
+DEFINE_UNION_INITIALIZER_TESTS(runtime, STRONG_PASS);
+DEFINE_UNION_INITIALIZER_TESTS(assigned_static, STRONG_PASS);
+DEFINE_UNION_INITIALIZER_TESTS(assigned_dynamic, STRONG_PASS);
+DEFINE_UNION_TESTS(assigned_copy, ALWAYS_FAIL);
/* No initialization without compiler instrumentation. */
DEFINE_SCALAR_TESTS(none, STRONG_PASS);
DEFINE_STRUCT_TESTS(none, BYREF_PASS);
@@ -436,13 +523,23 @@ DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, ALWAYS_FAIL);
KUNIT_CASE(test_trailing_hole_ ## init),\
KUNIT_CASE(test_packed_ ## init) \
+#define KUNIT_test_unions(init) \
+ KUNIT_CASE(test_same_sizes_ ## init), \
+ KUNIT_CASE(test_small_start_ ## init), \
+ KUNIT_CASE(test_small_end_ ## init) \
+
static struct kunit_case stackinit_test_cases[] = {
/* These are explicitly initialized and should always pass. */
KUNIT_test_scalars(zero),
KUNIT_test_structs(zero),
+ KUNIT_test_structs(old_zero),
+ KUNIT_test_unions(zero),
+ KUNIT_test_unions(old_zero),
/* Padding here appears to be accidentally always initialized? */
KUNIT_test_structs(dynamic_partial),
KUNIT_test_structs(assigned_dynamic_partial),
+ KUNIT_test_unions(dynamic_partial),
+ KUNIT_test_unions(assigned_dynamic_partial),
/* Padding initialization depends on compiler behaviors. */
KUNIT_test_structs(static_partial),
KUNIT_test_structs(static_all),
@@ -452,8 +549,17 @@ static struct kunit_case stackinit_test_cases[] = {
KUNIT_test_structs(assigned_static_partial),
KUNIT_test_structs(assigned_static_all),
KUNIT_test_structs(assigned_dynamic_all),
+ KUNIT_test_unions(static_partial),
+ KUNIT_test_unions(static_all),
+ KUNIT_test_unions(dynamic_all),
+ KUNIT_test_unions(runtime_partial),
+ KUNIT_test_unions(runtime_all),
+ KUNIT_test_unions(assigned_static_partial),
+ KUNIT_test_unions(assigned_static_all),
+ KUNIT_test_unions(assigned_dynamic_all),
/* Everything fails this since it effectively performs a memcpy(). */
KUNIT_test_structs(assigned_copy),
+ KUNIT_test_unions(assigned_copy),
/* STRUCTLEAK_BYREF_ALL should cover everything from here down. */
KUNIT_test_scalars(none),
KUNIT_CASE(test_switch_1_none),
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 704cb1093ae8..13e2a10d7554 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -1563,6 +1563,30 @@ static noinline void __init check_root_expand(struct maple_tree *mt)
mas_unlock(&mas);
}
+static noinline void __init check_deficient_node(struct maple_tree *mt)
+{
+ MA_STATE(mas, mt, 0, 0);
+ int count;
+
+ mas_lock(&mas);
+ for (count = 0; count < 10; count++) {
+ mas_set(&mas, count);
+ mas_store_gfp(&mas, xa_mk_value(count), GFP_KERNEL);
+ }
+
+ for (count = 20; count < 39; count++) {
+ mas_set(&mas, count);
+ mas_store_gfp(&mas, xa_mk_value(count), GFP_KERNEL);
+ }
+
+ for (count = 10; count < 12; count++) {
+ mas_set(&mas, count);
+ mas_store_gfp(&mas, xa_mk_value(count), GFP_KERNEL);
+ }
+ mas_unlock(&mas);
+ mt_validate(mt);
+}
+
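
check_deficient_node() stores ranges out of order so that the final stores hit a node the name suggests has gone deficient (below minimum occupancy), exercising the rebalance path. The advanced-API pattern it uses, reduced to a single store (index and value illustrative):

static void sketch_mas_store(struct maple_tree *mt)
{
        MA_STATE(mas, mt, 0, 0);

        mas_lock(&mas);
        mas_set(&mas, 42);      /* position the state on index 42 */
        mas_store_gfp(&mas, xa_mk_value(42), GFP_KERNEL);
        mas_unlock(&mas);
        mt_validate(mt);        /* assert the tree invariants still hold */
}
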
static noinline void __init check_gap_combining(struct maple_tree *mt)
{
struct maple_enode *mn1, *mn2;
@@ -3714,6 +3738,34 @@ static noinline void __init alloc_cyclic_testing(struct maple_tree *mt)
}
mtree_destroy(mt);
+
+ /*
+ * An issue with the reverse search was discovered:
+ * https://lore.kernel.org/all/20241216060600.287B4C4CED0@smtp.kernel.org/
+ * Exhausting the allocation area and forcing the search to wrap
+ * requires a mas_reset() in mas_alloc_cyclic().
+ */
+ next = 0;
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (int i = 0; i < 1023; i++) {
+ mtree_alloc_cyclic(mt, &location, mt, 2, 1024, &next, GFP_KERNEL);
+ MT_BUG_ON(mt, i != location - 2);
+ MT_BUG_ON(mt, i != next - 3);
+ MT_BUG_ON(mt, mtree_load(mt, location) != mt);
+ }
+ mtree_erase(mt, 123);
+ MT_BUG_ON(mt, mtree_load(mt, 123) != NULL);
+ mtree_alloc_cyclic(mt, &location, mt, 2, 1024, &next, GFP_KERNEL);
+ MT_BUG_ON(mt, 123 != location);
+ MT_BUG_ON(mt, 124 != next);
+ MT_BUG_ON(mt, mtree_load(mt, location) != mt);
+ mtree_erase(mt, 100);
+ mtree_alloc_cyclic(mt, &location, mt, 2, 1024, &next, GFP_KERNEL);
+ MT_BUG_ON(mt, 100 != location);
+ MT_BUG_ON(mt, 101 != next);
+ MT_BUG_ON(mt, mtree_load(mt, location) != mt);
+ mtree_destroy(mt);
+
/* Overflow test */
next = ULONG_MAX - 1;
ret = mtree_alloc_cyclic(mt, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL);
@@ -3797,6 +3849,10 @@ static int __init maple_tree_seed(void)
#endif
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_deficient_node(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_store_null(&tree);
mtree_destroy(&tree);
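
For context on the wrap test above: mtree_alloc_cyclic() hands out the lowest free index at or after *next within the requested range, wrapping back to the lower bound once the hint passes the end. A sketch of the call convention as exercised above (entry pointer and bounds illustrative):

unsigned long id, next = 0;
int ret;

ret = mtree_alloc_cyclic(mt, &id, entry, 2, 1024, &next, GFP_KERNEL);
/*
 * ret == 0: allocated without wrapping (id written, *next advanced);
 * ret == 1: allocated, but only after wrapping past the upper bound;
 * ret  < 0: error, e.g. -EBUSY when the whole range is in use.
 */
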
diff --git a/lib/test_min_heap.c b/lib/test_min_heap.c
index e6fbb798558b..a9c4a74d3898 100644
--- a/lib/test_min_heap.c
+++ b/lib/test_min_heap.c
@@ -32,7 +32,7 @@ static __init int pop_verify_heap(bool min_heap,
int last;
last = values[0];
- min_heap_pop(heap, funcs, NULL);
+ min_heap_pop_inline(heap, funcs, NULL);
while (heap->nr > 0) {
if (min_heap) {
if (last > values[0]) {
@@ -48,7 +48,7 @@ static __init int pop_verify_heap(bool min_heap,
}
}
last = values[0];
- min_heap_pop(heap, funcs, NULL);
+ min_heap_pop_inline(heap, funcs, NULL);
}
return err;
}
@@ -69,7 +69,7 @@ static __init int test_heapify_all(bool min_heap)
int i, err;
/* Test with known set of values. */
- min_heapify_all(&heap, &funcs, NULL);
+ min_heapify_all_inline(&heap, &funcs, NULL);
err = pop_verify_heap(min_heap, &heap, &funcs);
@@ -78,7 +78,7 @@ static __init int test_heapify_all(bool min_heap)
for (i = 0; i < heap.nr; i++)
values[i] = get_random_u32();
- min_heapify_all(&heap, &funcs, NULL);
+ min_heapify_all_inline(&heap, &funcs, NULL);
err += pop_verify_heap(min_heap, &heap, &funcs);
return err;
@@ -102,14 +102,14 @@ static __init int test_heap_push(bool min_heap)
/* Test with known set of values copied from data. */
for (i = 0; i < ARRAY_SIZE(data); i++)
- min_heap_push(&heap, &data[i], &funcs, NULL);
+ min_heap_push_inline(&heap, &data[i], &funcs, NULL);
err = pop_verify_heap(min_heap, &heap, &funcs);
/* Test with randomly generated values. */
while (heap.nr < heap.size) {
temp = get_random_u32();
- min_heap_push(&heap, &temp, &funcs, NULL);
+ min_heap_push_inline(&heap, &temp, &funcs, NULL);
}
err += pop_verify_heap(min_heap, &heap, &funcs);
@@ -135,22 +135,22 @@ static __init int test_heap_pop_push(bool min_heap)
/* Fill values with data to pop and replace. */
temp = min_heap ? 0x80000000 : 0x7FFFFFFF;
for (i = 0; i < ARRAY_SIZE(data); i++)
- min_heap_push(&heap, &temp, &funcs, NULL);
+ min_heap_push_inline(&heap, &temp, &funcs, NULL);
/* Test with known set of values copied from data. */
for (i = 0; i < ARRAY_SIZE(data); i++)
- min_heap_pop_push(&heap, &data[i], &funcs, NULL);
+ min_heap_pop_push_inline(&heap, &data[i], &funcs, NULL);
err = pop_verify_heap(min_heap, &heap, &funcs);
heap.nr = 0;
for (i = 0; i < ARRAY_SIZE(data); i++)
- min_heap_push(&heap, &temp, &funcs, NULL);
+ min_heap_push_inline(&heap, &temp, &funcs, NULL);
/* Test with randomly generated values. */
for (i = 0; i < ARRAY_SIZE(data); i++) {
temp = get_random_u32();
- min_heap_pop_push(&heap, &temp, &funcs, NULL);
+ min_heap_pop_push_inline(&heap, &temp, &funcs, NULL);
}
err += pop_verify_heap(min_heap, &heap, &funcs);
@@ -163,7 +163,7 @@ static __init int test_heap_del(bool min_heap)
-3, -1, -2, -4, 0x8000000, 0x7FFFFFF };
struct min_heap_test heap;
- min_heap_init(&heap, values, ARRAY_SIZE(values));
+ min_heap_init_inline(&heap, values, ARRAY_SIZE(values));
heap.nr = ARRAY_SIZE(values);
struct min_heap_callbacks funcs = {
.less = min_heap ? less_than : greater_than,
@@ -172,9 +172,9 @@ static __init int test_heap_del(bool min_heap)
int i, err;
/* Test with known set of values. */
- min_heapify_all(&heap, &funcs, NULL);
+ min_heapify_all_inline(&heap, &funcs, NULL);
for (i = 0; i < ARRAY_SIZE(values) / 2; i++)
- min_heap_del(&heap, get_random_u32() % heap.nr, &funcs, NULL);
+ min_heap_del_inline(&heap, get_random_u32() % heap.nr, &funcs, NULL);
err = pop_verify_heap(min_heap, &heap, &funcs);
@@ -182,10 +182,10 @@ static __init int test_heap_del(bool min_heap)
heap.nr = ARRAY_SIZE(values);
for (i = 0; i < heap.nr; i++)
values[i] = get_random_u32();
- min_heapify_all(&heap, &funcs, NULL);
+ min_heapify_all_inline(&heap, &funcs, NULL);
for (i = 0; i < ARRAY_SIZE(values) / 2; i++)
- min_heap_del(&heap, get_random_u32() % heap.nr, &funcs, NULL);
+ min_heap_del_inline(&heap, get_random_u32() % heap.nr, &funcs, NULL);
err += pop_verify_heap(min_heap, &heap, &funcs);
return err;
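
All of these conversions swap the out-of-line min-heap helpers for their _inline counterparts, which expand the sift loops at the call site while still taking the comparator through min_heap_callbacks. A minimal sketch of the API shape (heap type and comparator are illustrative):

#include <linux/min_heap.h>

DEFINE_MIN_HEAP(int, sketch_heap);      /* defines struct sketch_heap */

static bool sketch_less(const void *lhs, const void *rhs, void *args)
{
        return *(const int *)lhs < *(const int *)rhs;
}

static void sketch_min_heap_use(void)
{
        int backing[4];
        int v = 7;
        struct sketch_heap heap;
        struct min_heap_callbacks funcs = {
                .less = sketch_less,
                .swp = NULL,    /* NULL selects the built-in swap */
        };

        min_heap_init_inline(&heap, backing, ARRAY_SIZE(backing));
        min_heap_push_inline(&heap, &v, &funcs, NULL);
        min_heap_pop_inline(&heap, &funcs, NULL);
}
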
diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c
index b6696fa1d426..4249e0cc8aaf 100644
--- a/lib/test_sysctl.c
+++ b/lib/test_sysctl.c
@@ -71,7 +71,7 @@ static struct test_sysctl_data test_data = {
};
/* These are all under /proc/sys/debug/test_sysctl/ */
-static struct ctl_table test_table[] = {
+static const struct ctl_table test_table[] = {
{
.procname = "int_0001",
.data = &test_data.int_0001,
@@ -177,7 +177,7 @@ static int test_sysctl_setup_node_tests(void)
}
/* Used to test that unregister actually removes the directory */
-static struct ctl_table test_table_unregister[] = {
+static const struct ctl_table test_table_unregister[] = {
{
.procname = "unregister_error",
.data = &test_data.int_0001,
@@ -220,7 +220,7 @@ static int test_sysctl_run_register_mount_point(void)
return 0;
}
-static struct ctl_table test_table_empty[] = { };
+static const struct ctl_table test_table_empty[] = { };
static int test_sysctl_run_register_empty(void)
{
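
The constification is possible because the sysctl core now accepts const table pointers, so a table like the ones above can live in read-only data. A sketch (path, procname, and backing variable are illustrative):

static int sketch_value;

static const struct ctl_table sketch_table[] = {
        {
                .procname       = "sketch_value",
                .data           = &sketch_value,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
};

/* register_sysctl("debug/sketch", sketch_table); */
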
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index 4ddf769861ff..f585949ff696 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -373,7 +373,7 @@ vm_map_ram_test(void)
if (!pages)
return -1;
- nr_allocated = alloc_pages_bulk_array(GFP_KERNEL, map_nr_pages, pages);
+ nr_allocated = alloc_pages_bulk(GFP_KERNEL, map_nr_pages, pages);
if (nr_allocated != map_nr_pages)
goto cleanup;
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index d5c5cbba33ed..6932a26f4927 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -1448,6 +1448,41 @@ static noinline void check_pause(struct xarray *xa)
XA_BUG_ON(xa, count != order_limit);
xa_destroy(xa);
+
+ index = 0;
+ for (order = XA_CHUNK_SHIFT; order > 0; order--) {
+ XA_BUG_ON(xa, xa_store_order(xa, index, order,
+ xa_mk_index(index), GFP_KERNEL));
+ index += 1UL << order;
+ }
+
+ index = 0;
+ count = 0;
+ xas_set(&xas, 0);
+ rcu_read_lock();
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ XA_BUG_ON(xa, entry != xa_mk_index(index));
+ index += 1UL << (XA_CHUNK_SHIFT - count);
+ count++;
+ }
+ rcu_read_unlock();
+ XA_BUG_ON(xa, count != XA_CHUNK_SHIFT);
+
+ index = 0;
+ count = 0;
+ xas_set(&xas, XA_CHUNK_SIZE / 2 + 1);
+ rcu_read_lock();
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ XA_BUG_ON(xa, entry != xa_mk_index(index));
+ index += 1UL << (XA_CHUNK_SHIFT - count);
+ count++;
+ xas_pause(&xas);
+ }
+ rcu_read_unlock();
+ XA_BUG_ON(xa, count != XA_CHUNK_SHIFT);
+
+ xa_destroy(xa);
+
}
static noinline void check_move_tiny(struct xarray *xa)
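
The new check_pause() cases store multi-order entries and pause after every element; the pattern being validated is the standard pause/resume iteration (the processing step is hypothetical):

XA_STATE(xas, xa, 0);
void *entry;

rcu_read_lock();
xas_for_each(&xas, entry, ULONG_MAX) {
        /* ... use entry ... */
        xas_pause(&xas);
        /*
         * A safe restart point: the RCU lock may be dropped and
         * re-taken here, and the next iteration must continue after
         * the paused entry without skipping or repeating anything,
         * which is exactly what the multi-order cases above check.
         */
}
rcu_read_unlock();
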
diff --git a/lib/xarray.c b/lib/xarray.c
index 32d4bac8c94c..116e9286c64e 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -125,19 +125,20 @@ static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
*/
static void xas_squash_marks(const struct xa_state *xas)
{
- unsigned int mark = 0;
+ xa_mark_t mark = 0;
unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;
- if (!xas->xa_sibs)
- return;
+ for (;;) {
+ unsigned long *marks = node_marks(xas->xa_node, mark);
- do {
- unsigned long *marks = xas->xa_node->marks[mark];
- if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
- continue;
- __set_bit(xas->xa_offset, marks);
- bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
- } while (mark++ != (__force unsigned)XA_MARK_MAX);
+ if (find_next_bit(marks, limit, xas->xa_offset + 1) != limit) {
+ __set_bit(xas->xa_offset, marks);
+ bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
+ }
+ if (mark == XA_MARK_MAX)
+ break;
+ mark_inc(mark);
+ }
}
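
xas_squash_marks() folds the marks of the sibling slots into the head slot when a multi-order entry replaces them. The marks it maintains are the per-index flags visible through the public API, e.g. (index and mark illustrative):

xa_store(xa, 7, xa_mk_value(7), GFP_KERNEL);
xa_set_mark(xa, 7, XA_MARK_0);
if (xa_get_mark(xa, 7, XA_MARK_0))
        pr_info("index 7 carries XA_MARK_0\n");
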
/* extracts the offset within this node from the index */
@@ -435,6 +436,11 @@ static unsigned long max_index(void *entry)
return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
}
+static inline void *xa_zero_to_null(void *entry)
+{
+ return xa_is_zero(entry) ? NULL : entry;
+}
+
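
The helper names a rule applied in several places below: XA_ZERO_ENTRY marks a reserved slot internally, but the public API must report it as empty. Illustrative sketch (index and pointer hypothetical):

xa_reserve(xa, 12, GFP_KERNEL); /* slot 12 now holds XA_ZERO_ENTRY */
entry = xa_load(xa, 12);        /* reads back as NULL */
ret = xa_insert(xa, 12, p, GFP_KERNEL); /* -EBUSY: slot is reserved */
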
static void xas_shrink(struct xa_state *xas)
{
struct xarray *xa = xas->xa;
@@ -451,8 +457,8 @@ static void xas_shrink(struct xa_state *xas)
break;
if (!xa_is_node(entry) && node->shift)
break;
- if (xa_is_zero(entry) && xa_zero_busy(xa))
- entry = NULL;
+ if (xa_zero_busy(xa))
+ entry = xa_zero_to_null(entry);
xas->xa_node = XAS_BOUNDS;
RCU_INIT_POINTER(xa->xa_head, entry);
@@ -1022,7 +1028,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
unsigned int mask = xas->xa_sibs;
/* XXX: no support for splitting really large entries yet */
- if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
+ if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT <= order))
goto nomem;
if (xas->xa_shift + XA_CHUNK_SHIFT > order)
return;
@@ -1147,6 +1153,7 @@ void xas_pause(struct xa_state *xas)
if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
break;
}
+ xas->xa_index &= ~0UL << node->shift;
xas->xa_index += (offset - xas->xa_offset) << node->shift;
if (xas->xa_index == 0)
xas->xa_node = XAS_BOUNDS;
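
The added masking matters when pausing inside a multi-order entry: xa_index may point into the middle of the entry, so it is rounded down to the entry's start before stepping over it. With illustrative numbers for a node of shift 6 (variable names hypothetical):

/* paused at xa_index == 33, inside the order-6 entry at slot 0 */
index &= ~0UL << 6;             /* 33 & ~63 == 0: back to the entry start */
index += (next_off - off) << 6; /* 0 + (1 << 6) == 64 */
/* resume at 64; without the mask it would resume at 97, skipping 64..96 */
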
@@ -1382,6 +1389,8 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
continue;
+ if (xa_is_sibling(entry))
+ continue;
if (!xa_is_node(entry))
return entry;
xas->xa_node = xa_to_node(entry);
@@ -1474,9 +1483,7 @@ void *xa_load(struct xarray *xa, unsigned long index)
rcu_read_lock();
do {
- entry = xas_load(&xas);
- if (xa_is_zero(entry))
- entry = NULL;
+ entry = xa_zero_to_null(xas_load(&xas));
} while (xas_retry(&xas, entry));
rcu_read_unlock();
@@ -1486,8 +1493,6 @@ EXPORT_SYMBOL(xa_load);
static void *xas_result(struct xa_state *xas, void *curr)
{
- if (xa_is_zero(curr))
- return NULL;
if (xas_error(xas))
curr = xas->xa_node;
return curr;
@@ -1508,7 +1513,7 @@ static void *xas_result(struct xa_state *xas, void *curr)
void *__xa_erase(struct xarray *xa, unsigned long index)
{
XA_STATE(xas, xa, index);
- return xas_result(&xas, xas_store(&xas, NULL));
+ return xas_result(&xas, xa_zero_to_null(xas_store(&xas, NULL)));
}
EXPORT_SYMBOL(__xa_erase);
@@ -1567,7 +1572,7 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
xas_clear_mark(&xas, XA_FREE_MARK);
} while (__xas_nomem(&xas, gfp));
- return xas_result(&xas, curr);
+ return xas_result(&xas, xa_zero_to_null(curr));
}
EXPORT_SYMBOL(__xa_store);
@@ -1600,6 +1605,9 @@ void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
}
EXPORT_SYMBOL(xa_store);
+static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
+ void *old, void *entry, gfp_t gfp);
+
/**
* __xa_cmpxchg() - Store this entry in the XArray.
* @xa: XArray.
@@ -1619,6 +1627,13 @@ EXPORT_SYMBOL(xa_store);
void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
void *old, void *entry, gfp_t gfp)
{
+ return xa_zero_to_null(__xa_cmpxchg_raw(xa, index, old, entry, gfp));
+}
+EXPORT_SYMBOL(__xa_cmpxchg);
+
+static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
+ void *old, void *entry, gfp_t gfp)
+{
XA_STATE(xas, xa, index);
void *curr;
@@ -1636,7 +1651,6 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
return xas_result(&xas, curr);
}
-EXPORT_SYMBOL(__xa_cmpxchg);
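
Splitting out __xa_cmpxchg_raw() keeps the compare-and-store core reusable while the exported wrapper still hides zero entries from callers; the user-visible contract is unchanged (index and value illustrative):

curr = xa_cmpxchg(xa, 3, NULL, xa_mk_value(3), GFP_KERNEL);
if (curr == NULL) {
        /* the slot was empty; xa_mk_value(3) is now installed */
} else {
        /* some entry was already present; nothing was stored */
}
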
/**
* __xa_insert() - Store this entry in the XArray if no entry is present.
@@ -1656,26 +1670,16 @@ EXPORT_SYMBOL(__xa_cmpxchg);
*/
int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
- XA_STATE(xas, xa, index);
void *curr;
+ int errno;
- if (WARN_ON_ONCE(xa_is_advanced(entry)))
- return -EINVAL;
if (!entry)
entry = XA_ZERO_ENTRY;
-
- do {
- curr = xas_load(&xas);
- if (!curr) {
- xas_store(&xas, entry);
- if (xa_track_free(xa))
- xas_clear_mark(&xas, XA_FREE_MARK);
- } else {
- xas_set_err(&xas, -EBUSY);
- }
- } while (__xas_nomem(&xas, gfp));
-
- return xas_error(&xas);
+ curr = __xa_cmpxchg_raw(xa, index, NULL, entry, gfp);
+ errno = xa_err(curr);
+ if (errno)
+ return errno;
+ return (curr != NULL) ? -EBUSY : 0;
}
EXPORT_SYMBOL(__xa_insert);
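
Rebuilding __xa_insert() on top of the raw cmpxchg preserves its contract: store only into an empty slot, and report anything already present, a reservation included, as busy. Sketch of the resulting semantics (indices and values illustrative):

ret = xa_insert(xa, 5, xa_mk_value(5), GFP_KERNEL);     /* 0: installed */
ret = xa_insert(xa, 5, xa_mk_value(9), GFP_KERNEL);     /* -EBUSY: occupied */
ret = xa_insert(xa, 6, NULL, GFP_KERNEL);               /* NULL reserves index 6 */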