summaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
authorPetr Mladek <pmladek@suse.com>2026-04-20 13:41:28 +0200
committerPetr Mladek <pmladek@suse.com>2026-04-20 13:41:28 +0200
commit3e9e952bb3139ad1e08f3e1960239c2988ab90c9 (patch)
treeee27c846e06879bc062388cf948086caeda676cf /lib
parentaea645c02f1acc36088618667e086b62d8f83e92 (diff)
parent8901ac9d2c7eb8ed7ae5e749bf13ecb3b6062488 (diff)
Merge branch 'for-7.1-printf-kunit-build' into for-linus
Diffstat (limited to 'lib')
-rw-r--r--lib/Kconfig13
-rw-r--r--lib/Kconfig.debug130
-rw-r--r--lib/Makefile3
-rw-r--r--lib/alloc_tag.c32
-rw-r--r--lib/assoc_array.c31
-rw-r--r--lib/bch.c2
-rw-r--r--lib/bootconfig.c27
-rw-r--r--lib/bucket_locks.c2
-rwxr-xr-xlib/build_OID_registry26
-rw-r--r--lib/codetag.c4
-rw-r--r--lib/cpu_rmap.c2
-rw-r--r--lib/crypto/gf128mul.c6
-rw-r--r--lib/crypto/mpi/mpih-mul.c2
-rw-r--r--lib/crypto/mpi/mpiutil.c6
-rw-r--r--lib/crypto/powerpc/aes.h12
-rw-r--r--lib/crypto/sha1.c63
-rw-r--r--lib/decompress_unxz.c4
-rw-r--r--lib/dhry_1.c4
-rw-r--r--lib/dim/net_dim.c2
-rw-r--r--lib/dynamic_debug.c2
-rw-r--r--lib/error-inject.c2
-rw-r--r--lib/globtest.c167
-rw-r--r--lib/group_cpus.c281
-rw-r--r--lib/hexdump.c1
-rw-r--r--lib/idr.c6
-rw-r--r--lib/interval_tree_test.c3
-rw-r--r--lib/iov_iter.c4
-rw-r--r--lib/kfifo.c2
-rw-r--r--lib/kobject.c4
-rw-r--r--lib/kobject_uevent.c6
-rw-r--r--lib/kstrtox.c4
-rw-r--r--lib/kunit/attributes.c2
-rw-r--r--lib/kunit/device.c2
-rw-r--r--lib/kunit/executor.c6
-rw-r--r--lib/kunit/executor_test.c2
-rw-r--r--lib/kunit/kunit-example-test.c2
-rw-r--r--lib/kunit/kunit-test.c3
-rw-r--r--lib/kunit/resource.c2
-rw-r--r--lib/kunit/static_stub.c2
-rw-r--r--lib/kunit/string-stream.c4
-rw-r--r--lib/logic_iomem.c2
-rw-r--r--lib/lru_cache.c6
-rw-r--r--lib/lwq.c2
-rw-r--r--lib/objagg.c17
-rw-r--r--lib/objpool.c2
-rw-r--r--lib/once.c4
-rw-r--r--lib/parman.c2
-rw-r--r--lib/percpu-refcount.c2
-rw-r--r--lib/pldmfw/pldmfw.c8
-rw-r--r--lib/rbtree_test.c2
-rw-r--r--lib/reed_solomon/reed_solomon.c2
-rw-r--r--lib/reed_solomon/test_rslib.c4
-rw-r--r--lib/ref_tracker.c5
-rw-r--r--lib/scatterlist.c32
-rw-r--r--lib/sg_split.c7
-rw-r--r--lib/stackdepot.c2
-rw-r--r--lib/string_helpers.c3
-rw-r--r--lib/test_bpf.c50
-rw-r--r--lib/test_debug_virtual.c2
-rw-r--r--lib/test_firmware.c4
-rw-r--r--lib/test_hmm.c4
-rw-r--r--lib/test_kho.c9
-rw-r--r--lib/test_memcat_p.c8
-rw-r--r--lib/test_objagg.c4
-rw-r--r--lib/test_parman.c2
-rw-r--r--lib/test_rhashtable.c2
-rw-r--r--lib/test_uuid.c134
-rw-r--r--lib/test_vmalloc.c15
-rw-r--r--lib/tests/Makefile7
-rw-r--r--lib/tests/glob_kunit.c125
-rw-r--r--lib/tests/kunit_iov_iter.c6
-rw-r--r--lib/tests/list-private-test.c76
-rw-r--r--lib/tests/list-test.c8
-rw-r--r--lib/tests/liveupdate.c158
-rw-r--r--lib/tests/min_heap_kunit.c (renamed from lib/test_min_heap.c)147
-rw-r--r--lib/tests/test_ratelimit.c2
-rw-r--r--lib/tests/uuid_kunit.c106
-rw-r--r--lib/uuid.c1
-rw-r--r--lib/vsprintf.c1
-rw-r--r--lib/xz/xz_dec_bcj.c2
-rw-r--r--lib/xz/xz_dec_lzma2.c4
-rw-r--r--lib/xz/xz_dec_stream.c2
-rw-r--r--lib/zlib_inflate/infutil.c2
83 files changed, 1131 insertions, 730 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 2923924bea78..0f2fb9610647 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -430,19 +430,6 @@ config GLOB
are compiling an out-of tree driver which tells you that it
depends on this.
-config GLOB_SELFTEST
- tristate "glob self-test on init"
- depends on GLOB
- help
- This option enables a simple self-test of the glob_match
- function on startup. It is primarily useful for people
- working on the code to ensure they haven't introduced any
- regressions.
-
- It only adds a little bit of code and slows kernel boot (or
- module load) by a small amount, so you're welcome to play with
- it, but you probably don't need it.
-
#
# Netlink attribute parsing support is select'ed if needed
#
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e03992d8fad4..4e2dfbbd3d78 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -35,6 +35,18 @@ config PRINTK_CALLER
no option to enable/disable at the kernel command line parameter or
sysfs interface.
+config PRINTK_EXECUTION_CTX
+ bool
+ depends on PRINTK
+ help
+ This option extends struct printk_info to include extra execution
+ context in printk, such as task name and CPU number from where the
+ message originated. This is useful for correlating printk messages
+ with specific execution contexts.
+
+ This is automatically enabled when a console driver that supports
+ execution context is selected.
+
config STACKTRACE_BUILD_ID
bool "Show build ID information in stacktraces"
depends on PRINTK
@@ -1135,13 +1147,14 @@ config SOFTLOCKUP_DETECTOR_INTR_STORM
the CPU stats and the interrupt counts during the "soft lockups".
config BOOTPARAM_SOFTLOCKUP_PANIC
- bool "Panic (Reboot) On Soft Lockups"
+ int "Panic (Reboot) On Soft Lockups"
depends on SOFTLOCKUP_DETECTOR
+ default 0
help
- Say Y here to enable the kernel to panic on "soft lockups",
- which are bugs that cause the kernel to loop in kernel
- mode for more than 20 seconds (configurable using the watchdog_thresh
- sysctl), without giving other tasks a chance to run.
+ Set to a non-zero value N to enable the kernel to panic on "soft
+ lockups", which are bugs that cause the kernel to loop in kernel
+ mode for more than (N * 20 seconds) (configurable using the
+ watchdog_thresh sysctl), without giving other tasks a chance to run.
The panic can be used in combination with panic_timeout,
to cause the system to reboot automatically after a
@@ -1149,7 +1162,7 @@ config BOOTPARAM_SOFTLOCKUP_PANIC
high-availability systems that have uptime guarantees and
where a lockup must be resolved ASAP.
- Say N if unsure.
+ Say 0 if unsure.
config HAVE_HARDLOCKUP_DETECTOR_BUDDY
bool
@@ -1298,7 +1311,7 @@ config BOOTPARAM_HUNG_TASK_PANIC
high-availability systems that have uptime guarantees and
where a hung tasks must be resolved ASAP.
- Say N if unsure.
+ Say 0 if unsure.
config DETECT_HUNG_TASK_BLOCKER
bool "Dump Hung Tasks Blocker"
@@ -1407,6 +1420,24 @@ config DEBUG_PREEMPT
depending on workload as it triggers debugging routines for each
this_cpu operation. It should only be used for debugging purposes.
+config DEBUG_ATOMIC
+ bool "Debug atomic variables"
+ depends on DEBUG_KERNEL
+ help
+ If you say Y here then the kernel will add a runtime alignment check
+ to atomic accesses. Useful for architectures that do not have trap on
+ mis-aligned access.
+
+ This option has potentially significant overhead.
+
+config DEBUG_ATOMIC_LARGEST_ALIGN
+ bool "Check alignment only up to __aligned_largest"
+ depends on DEBUG_ATOMIC
+ help
+ If you say Y here then the check for natural alignment of
+ atomic accesses will be constrained to the compiler's largest
+ alignment for scalar types.
+
menu "Lock Debugging (spinlocks, mutexes, etc...)"
config LOCK_DEBUGGING_SUPPORT
@@ -2325,16 +2356,6 @@ config TEST_LIST_SORT
If unsure, say N.
-config TEST_MIN_HEAP
- tristate "Min heap test"
- depends on DEBUG_KERNEL || m
- help
- Enable this to turn on min heap function tests. This test is
- executed only once during system boot (so affects only boot time),
- or at module load time.
-
- If unsure, say N.
-
config TEST_SORT
tristate "Array-based sort test" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -2547,9 +2568,6 @@ config TEST_BITMAP
If unsure, say N.
-config TEST_UUID
- tristate "Test functions located in the uuid module at runtime"
-
config TEST_XARRAY
tristate "Test the XArray code at runtime"
@@ -2833,6 +2851,20 @@ config LIST_KUNIT_TEST
If unsure, say N.
+config LIST_PRIVATE_KUNIT_TEST
+ tristate "KUnit Test for Kernel Private Linked-list structures" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the KUnit test for the private linked-list primitives
+ defined in include/linux/list_private.h.
+
+ These primitives allow manipulation of list_head members that are
+ marked as private and require special accessors (ACCESS_PRIVATE)
+ to strip qualifiers or handle encapsulation.
+
+ If unsure, say N.
+
config HASHTABLE_KUNIT_TEST
tristate "KUnit Test for Kernel Hashtable structures" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -2872,6 +2904,29 @@ config CONTEXT_ANALYSIS_TEST
If unsure, say N.
+config LIVEUPDATE_TEST
+ bool "Live Update Kernel Test"
+ default n
+ depends on LIVEUPDATE
+ help
+ Enable a built-in kernel test module for the Live Update
+ Orchestrator.
+
+ This module validates the File-Lifecycle-Bound subsystem by
+ registering a set of mock FLB objects with any real file handlers
+ that support live update (such as the memfd handler).
+
+ When live update operations are performed, this test module will
+ output messages to the kernel log (dmesg), confirming that its
+ registration and various callback functions (preserve, retrieve,
+ finish, etc.) are being invoked correctly.
+
+ This is a debugging and regression testing tool for developers
+ working on the Live Update subsystem. It should not be enabled in
+ production kernels.
+
+ If unsure, say N.
+
config CMDLINE_KUNIT_TEST
tristate "KUnit test for cmdline API" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -2947,6 +3002,17 @@ config MEMCPY_KUNIT_TEST
If unsure, say N.
+config MIN_HEAP_KUNIT_TEST
+ tristate "Min heap test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This option enables the KUnit test suite for the min heap library
+ which provides functions for creating and managing min heaps.
+ The test suite checks the functionality of the min heap library.
+
+ If unsure, say N.
+
config IS_SIGNED_TYPE_KUNIT_TEST
tristate "Test is_signed_type() macro" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -3352,6 +3418,17 @@ config RATELIMIT_KUNIT_TEST
If unsure, say N.
+config UUID_KUNIT_TEST
+ tristate "KUnit test for UUID" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This option enables the KUnit test suite for the uuid library,
+ which provides functions for generating and parsing UUID and GUID.
+ The test suite checks parsing of UUID and GUID strings.
+
+ If unsure, say N.
+
config INT_POW_KUNIT_TEST
tristate "Integer exponentiation (int_pow) test" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -3421,6 +3498,19 @@ config PRIME_NUMBERS_KUNIT_TEST
If unsure, say N
+config GLOB_KUNIT_TEST
+ tristate "Glob matching test" if !KUNIT_ALL_TESTS
+ depends on GLOB
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to test the glob functions at runtime.
+
+ This test suite verifies the correctness of glob_match() across various
+ scenarios, including edge cases.
+
+ If unsure, say N.
+
endif # RUNTIME_TESTING_MENU
config ARCH_USE_MEMTEST
diff --git a/lib/Makefile b/lib/Makefile
index 22d8742bba57..1b9ee167517f 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -77,7 +77,6 @@ obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o
CFLAGS_test_ubsan.o += $(call cc-disable-warning, unused-but-set-variable)
UBSAN_SANITIZE_test_ubsan.o := y
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
-obj-$(CONFIG_TEST_MIN_HEAP) += test_min_heap.o
obj-$(CONFIG_TEST_LKM) += test_module.o
obj-$(CONFIG_TEST_VMALLOC) += test_vmalloc.o
obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
@@ -91,7 +90,6 @@ ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_KASAN),yy)
GCOV_PROFILE_test_bitmap.o := n
endif
-obj-$(CONFIG_TEST_UUID) += test_uuid.o
obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
obj-$(CONFIG_TEST_MAPLE_TREE) += test_maple_tree.o
obj-$(CONFIG_TEST_PARMAN) += test_parman.o
@@ -228,7 +226,6 @@ obj-$(CONFIG_CLOSURES) += closure.o
obj-$(CONFIG_DQL) += dynamic_queue_limits.o
obj-$(CONFIG_GLOB) += glob.o
-obj-$(CONFIG_GLOB_SELFTEST) += globtest.o
obj-$(CONFIG_DIMLIB) += dim/
obj-$(CONFIG_SIGNATURE) += digsig.o
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 27fee57a5c91..58991ab09d84 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -669,8 +669,9 @@ static int __init alloc_mod_tags_mem(void)
return -ENOMEM;
}
- vm_module_tags->pages = kmalloc_array(get_vm_area_size(vm_module_tags) >> PAGE_SHIFT,
- sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
+ vm_module_tags->pages = kmalloc_objs(struct page *,
+ get_vm_area_size(vm_module_tags) >> PAGE_SHIFT,
+ GFP_KERNEL | __GFP_ZERO);
if (!vm_module_tags->pages) {
free_vm_area(vm_module_tags);
return -ENOMEM;
@@ -776,31 +777,38 @@ EXPORT_SYMBOL(page_alloc_tagging_ops);
static int proc_mem_profiling_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
- if (!mem_profiling_support && write)
- return -EINVAL;
+ if (write) {
+ /*
+ * Call from do_sysctl_args() which is a no-op since the same
+ * value was already set by setup_early_mem_profiling.
+ * Return success to avoid warnings from do_sysctl_args().
+ */
+ if (!current->mm)
+ return 0;
+
+#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
+ /* User can't toggle profiling while debugging */
+ return -EACCES;
+#endif
+ if (!mem_profiling_support)
+ return -EINVAL;
+ }
return proc_do_static_key(table, write, buffer, lenp, ppos);
}
-static struct ctl_table memory_allocation_profiling_sysctls[] = {
+static const struct ctl_table memory_allocation_profiling_sysctls[] = {
{
.procname = "mem_profiling",
.data = &mem_alloc_profiling_key,
-#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
- .mode = 0444,
-#else
.mode = 0644,
-#endif
.proc_handler = proc_mem_profiling_handler,
},
};
static void __init sysctl_init(void)
{
- if (!mem_profiling_support)
- memory_allocation_profiling_sysctls[0].mode = 0444;
-
register_sysctl_init("vm", memory_allocation_profiling_sysctls);
}
#else /* CONFIG_SYSCTL */
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 388e656ac974..bcc6e0a013eb 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -454,7 +454,7 @@ static bool assoc_array_insert_in_empty_tree(struct assoc_array_edit *edit)
pr_devel("-->%s()\n", __func__);
- new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
+ new_n0 = kzalloc_obj(struct assoc_array_node);
if (!new_n0)
return false;
@@ -536,11 +536,11 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
* those now. We may also need a new shortcut, but we deal with that
* when we need it.
*/
- new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
+ new_n0 = kzalloc_obj(struct assoc_array_node);
if (!new_n0)
return false;
edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
- new_n1 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
+ new_n1 = kzalloc_obj(struct assoc_array_node);
if (!new_n1)
return false;
edit->new_meta[1] = assoc_array_node_to_ptr(new_n1);
@@ -741,7 +741,7 @@ all_leaves_cluster_together:
keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
- new_s0 = kzalloc(struct_size(new_s0, index_key, keylen), GFP_KERNEL);
+ new_s0 = kzalloc_flex(*new_s0, index_key, keylen);
if (!new_s0)
return false;
edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s0);
@@ -832,7 +832,7 @@ static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit,
edit->excised_meta[0] = assoc_array_shortcut_to_ptr(shortcut);
/* Create a new node now since we're going to need it anyway */
- new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
+ new_n0 = kzalloc_obj(struct assoc_array_node);
if (!new_n0)
return false;
edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
@@ -848,8 +848,7 @@ static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit,
keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
- new_s0 = kzalloc(struct_size(new_s0, index_key, keylen),
- GFP_KERNEL);
+ new_s0 = kzalloc_flex(*new_s0, index_key, keylen);
if (!new_s0)
return false;
edit->new_meta[1] = assoc_array_shortcut_to_ptr(new_s0);
@@ -898,8 +897,7 @@ static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit,
keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
- new_s1 = kzalloc(struct_size(new_s1, index_key, keylen),
- GFP_KERNEL);
+ new_s1 = kzalloc_flex(*new_s1, index_key, keylen);
if (!new_s1)
return false;
edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s1);
@@ -977,7 +975,7 @@ struct assoc_array_edit *assoc_array_insert(struct assoc_array *array,
*/
BUG_ON(assoc_array_ptr_is_meta(object));
- edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
+ edit = kzalloc_obj(struct assoc_array_edit);
if (!edit)
return ERR_PTR(-ENOMEM);
edit->array = array;
@@ -1089,7 +1087,7 @@ struct assoc_array_edit *assoc_array_delete(struct assoc_array *array,
pr_devel("-->%s()\n", __func__);
- edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
+ edit = kzalloc_obj(struct assoc_array_edit);
if (!edit)
return ERR_PTR(-ENOMEM);
edit->array = array;
@@ -1206,7 +1204,7 @@ found_leaf:
node = parent;
/* Create a new node to collapse into */
- new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
+ new_n0 = kzalloc_obj(struct assoc_array_node);
if (!new_n0)
goto enomem;
edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
@@ -1281,7 +1279,7 @@ struct assoc_array_edit *assoc_array_clear(struct assoc_array *array,
if (!array->root)
return NULL;
- edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
+ edit = kzalloc_obj(struct assoc_array_edit);
if (!edit)
return ERR_PTR(-ENOMEM);
edit->array = array;
@@ -1469,7 +1467,7 @@ int assoc_array_gc(struct assoc_array *array,
if (!array->root)
return 0;
- edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
+ edit = kzalloc_obj(struct assoc_array_edit);
if (!edit)
return -ENOMEM;
edit->array = array;
@@ -1490,8 +1488,7 @@ descend:
shortcut = assoc_array_ptr_to_shortcut(cursor);
keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
- new_s = kmalloc(struct_size(new_s, index_key, keylen),
- GFP_KERNEL);
+ new_s = kmalloc_flex(*new_s, index_key, keylen);
if (!new_s)
goto enomem;
pr_devel("dup shortcut %p -> %p\n", shortcut, new_s);
@@ -1505,7 +1502,7 @@ descend:
/* Duplicate the node at this position */
node = assoc_array_ptr_to_node(cursor);
- new_n = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
+ new_n = kzalloc_obj(struct assoc_array_node);
if (!new_n)
goto enomem;
pr_devel("dup node %p -> %p\n", node, new_n);
diff --git a/lib/bch.c b/lib/bch.c
index 1c0cb07cdfeb..9561c0828802 100644
--- a/lib/bch.c
+++ b/lib/bch.c
@@ -1320,7 +1320,7 @@ struct bch_control *bch_init(int m, int t, unsigned int prim_poly,
if (prim_poly == 0)
prim_poly = prim_poly_tab[m-min_m];
- bch = kzalloc(sizeof(*bch), GFP_KERNEL);
+ bch = kzalloc_obj(*bch);
if (bch == NULL)
goto fail;
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
index 81f29c29f47b..449369a60846 100644
--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c
@@ -557,17 +557,13 @@ static int __init __xbc_close_brace(char *p)
/*
* Return delimiter or error, no node added. As same as lib/cmdline.c,
* you can use " around spaces, but can't escape " for value.
* *@__v must point to the real value string. (not including spaces before the value.)
*/
static int __init __xbc_parse_value(char **__v, char **__n)
{
char *p, *v = *__v;
int c, quotes = 0;
- v = skip_spaces(v);
- while (*v == '#') {
- v = skip_comment(v);
- v = skip_spaces(v);
- }
if (*v == '"' || *v == '\'') {
quotes = *v;
v++;
@@ -617,6 +613,13 @@ static int __init xbc_parse_array(char **__v)
last_parent = xbc_node_get_child(last_parent);
do {
+ /* Search the next array value beyond comments and empty lines */
+ next = skip_spaces(*__v);
+ while (*next == '#') {
+ next = skip_comment(next);
+ next = skip_spaces(next);
+ }
+ *__v = next;
c = __xbc_parse_value(__v, &next);
if (c < 0)
return c;
@@ -701,9 +704,17 @@ static int __init xbc_parse_kv(char **k, char *v, int op)
if (ret)
return ret;
- c = __xbc_parse_value(&v, &next);
- if (c < 0)
- return c;
+ v = skip_spaces_until_newline(v);
+ /* If there is a comment, this has an empty value. */
+ if (*v == '#') {
+ next = skip_comment(v);
+ *v = '\0';
+ c = '\n';
+ } else {
+ c = __xbc_parse_value(&v, &next);
+ if (c < 0)
+ return c;
+ }
child = xbc_node_get_child(last_parent);
if (child && xbc_node_is_value(child)) {
diff --git a/lib/bucket_locks.c b/lib/bucket_locks.c
index 64b92e1dbace..d29516ca0554 100644
--- a/lib/bucket_locks.c
+++ b/lib/bucket_locks.c
@@ -31,7 +31,7 @@ int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
}
if (sizeof(spinlock_t) != 0) {
- tlocks = kvmalloc_array(size, sizeof(spinlock_t), gfp);
+ tlocks = kvmalloc_objs(spinlock_t, size, gfp);
if (!tlocks)
return -ENOMEM;
for (i = 0; i < size; i++) {
diff --git a/lib/build_OID_registry b/lib/build_OID_registry
index 8267e8d71338..30493ac190c0 100755
--- a/lib/build_OID_registry
+++ b/lib/build_OID_registry
@@ -60,10 +60,12 @@ for (my $i = 0; $i <= $#names; $i++) {
# Determine the encoded length of this OID
my $size = $#components;
for (my $loop = 2; $loop <= $#components; $loop++) {
- my $c = $components[$loop];
+ $ENV{'BC_LINE_LENGTH'} = "0";
+ my $c = `echo "ibase=10; obase=2; $components[$loop]" | bc`;
+ chomp($c);
# We will base128 encode the number
- my $tmp = ($c == 0) ? 0 : int(log($c)/log(2));
+ my $tmp = length($c) - 1;
$tmp = int($tmp / 7);
$size += $tmp;
}
@@ -100,16 +102,24 @@ for (my $i = 0; $i <= $#names; $i++) {
push @octets, $components[0] * 40 + $components[1];
for (my $loop = 2; $loop <= $#components; $loop++) {
- my $c = $components[$loop];
+ # get the base 2 representation of the component
+ $ENV{'BC_LINE_LENGTH'} = "0";
+ my $c = `echo "ibase=10; obase=2; $components[$loop]" | bc`;
+ chomp($c);
- # Base128 encode the number
- my $tmp = ($c == 0) ? 0 : int(log($c)/log(2));
+ my $tmp = length($c) - 1;
$tmp = int($tmp / 7);
- for (; $tmp > 0; $tmp--) {
- push @octets, (($c >> $tmp * 7) & 0x7f) | 0x80;
+ # zero pad up to a length that is a multiple of 7
+ $c = substr("0000000", 0, ($tmp + 1) * 7 - length($c)).$c;
+
+ # Base128 encode the number
+ for (my $j = 0; $j < $tmp; $j++) {
+ my $b = oct("0b".substr($c, $j * 7, 7));
+
+ push @octets, $b | 0x80;
}
- push @octets, $c & 0x7f;
+ push @octets, oct("0b".substr($c, $tmp * 7, 7));
}
push @encoded_oids, \@octets;
diff --git a/lib/codetag.c b/lib/codetag.c
index 545911cebd25..304667897ad4 100644
--- a/lib/codetag.c
+++ b/lib/codetag.c
@@ -193,7 +193,7 @@ static int codetag_module_init(struct codetag_type *cttype, struct module *mod)
BUG_ON(range.start > range.stop);
- cmod = kmalloc(sizeof(*cmod), GFP_KERNEL);
+ cmod = kmalloc_obj(*cmod);
if (unlikely(!cmod))
return -ENOMEM;
@@ -383,7 +383,7 @@ codetag_register_type(const struct codetag_type_desc *desc)
BUG_ON(desc->tag_size <= 0);
- cttype = kzalloc(sizeof(*cttype), GFP_KERNEL);
+ cttype = kzalloc_obj(*cttype);
if (unlikely(!cttype))
return ERR_PTR(-ENOMEM);
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
index f03d9be3f06b..c86ab6e55d17 100644
--- a/lib/cpu_rmap.c
+++ b/lib/cpu_rmap.c
@@ -309,7 +309,7 @@ EXPORT_SYMBOL(irq_cpu_rmap_remove);
*/
int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
{
- struct irq_glue *glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+ struct irq_glue *glue = kzalloc_obj(*glue);
int rc;
if (!glue)
diff --git a/lib/crypto/gf128mul.c b/lib/crypto/gf128mul.c
index 2a34590fe3f1..e5a727b15f07 100644
--- a/lib/crypto/gf128mul.c
+++ b/lib/crypto/gf128mul.c
@@ -245,12 +245,12 @@ struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g)
struct gf128mul_64k *t;
int i, j, k;
- t = kzalloc(sizeof(*t), GFP_KERNEL);
+ t = kzalloc_obj(*t);
if (!t)
goto out;
for (i = 0; i < 16; i++) {
- t->t[i] = kzalloc(sizeof(*t->t[i]), GFP_KERNEL);
+ t->t[i] = kzalloc_obj(*t->t[i]);
if (!t->t[i]) {
gf128mul_free_64k(t);
t = NULL;
@@ -326,7 +326,7 @@ struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g)
struct gf128mul_4k *t;
int j, k;
- t = kzalloc(sizeof(*t), GFP_KERNEL);
+ t = kzalloc_obj(*t);
if (!t)
goto out;
diff --git a/lib/crypto/mpi/mpih-mul.c b/lib/crypto/mpi/mpih-mul.c
index a93647564054..29dd80609c47 100644
--- a/lib/crypto/mpi/mpih-mul.c
+++ b/lib/crypto/mpi/mpih-mul.c
@@ -372,7 +372,7 @@ mpihelp_mul_karatsuba_case(mpi_ptr_t prodp,
return -ENOMEM;
} else {
if (!ctx->next) {
- ctx->next = kzalloc(sizeof *ctx, GFP_KERNEL);
+ ctx->next = kzalloc_obj(*ctx);
if (!ctx->next)
return -ENOMEM;
}
diff --git a/lib/crypto/mpi/mpiutil.c b/lib/crypto/mpi/mpiutil.c
index 7f2db830f404..f4faf7c903f9 100644
--- a/lib/crypto/mpi/mpiutil.c
+++ b/lib/crypto/mpi/mpiutil.c
@@ -33,7 +33,7 @@ MPI mpi_alloc(unsigned nlimbs)
{
MPI a;
- a = kmalloc(sizeof *a, GFP_KERNEL);
+ a = kmalloc_obj(*a);
if (!a)
return a;
@@ -93,14 +93,14 @@ int mpi_resize(MPI a, unsigned nlimbs)
return 0; /* no need to do it */
if (a->d) {
- p = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
+ p = kzalloc_objs(mpi_limb_t, nlimbs);
if (!p)
return -ENOMEM;
memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t));
kfree_sensitive(a->d);
a->d = p;
} else {
- a->d = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
+ a->d = kzalloc_objs(mpi_limb_t, nlimbs);
if (!a->d)
return -ENOMEM;
}
diff --git a/lib/crypto/powerpc/aes.h b/lib/crypto/powerpc/aes.h
index 42e0a993c619..5a36b637e6b9 100644
--- a/lib/crypto/powerpc/aes.h
+++ b/lib/crypto/powerpc/aes.h
@@ -95,7 +95,8 @@ static inline bool is_vsx_format(const struct p8_aes_key *key)
}
/*
- * Convert a round key from VSX to generic format by reflecting the 16 bytes,
+ * Convert a round key from VSX to generic format by reflecting all 16 bytes (if
+ * little endian) or reflecting the bytes in each 4-byte word (if big endian),
* and (if apply_inv_mix=true) applying InvMixColumn to each column.
*
* It would be nice if the VSX and generic key formats would be compatible. But
@@ -107,6 +108,7 @@ static inline bool is_vsx_format(const struct p8_aes_key *key)
*/
static void rndkey_from_vsx(u32 out[4], const u32 in[4], bool apply_inv_mix)
{
+ const bool be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
u32 k0 = swab32(in[0]);
u32 k1 = swab32(in[1]);
u32 k2 = swab32(in[2]);
@@ -118,10 +120,10 @@ static void rndkey_from_vsx(u32 out[4], const u32 in[4], bool apply_inv_mix)
k2 = inv_mix_columns(k2);
k3 = inv_mix_columns(k3);
}
- out[0] = k3;
- out[1] = k2;
- out[2] = k1;
- out[3] = k0;
+ out[0] = be ? k0 : k3;
+ out[1] = be ? k1 : k2;
+ out[2] = be ? k2 : k1;
+ out[3] = be ? k3 : k0;
}
static void aes_preparekey_arch(union aes_enckey_arch *k,
diff --git a/lib/crypto/sha1.c b/lib/crypto/sha1.c
index 52788278cd17..daf18c862fdf 100644
--- a/lib/crypto/sha1.c
+++ b/lib/crypto/sha1.c
@@ -49,7 +49,7 @@ static const struct sha1_block_state sha1_iv = {
#endif
/* This "rolls" over the 512-bit array */
-#define W(x) (array[(x)&15])
+#define W(x) (workspace[(x)&15])
/*
* Where do we get the source from? The first 16 iterations get it from
@@ -70,34 +70,20 @@ static const struct sha1_block_state sha1_iv = {
#define T_40_59(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, ((B&C)+(D&(B^C))) , 0x8f1bbcdc, A, B, C, D, E )
#define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0xca62c1d6, A, B, C, D, E )
-/**
- * sha1_transform - single block SHA1 transform (deprecated)
- *
- * @digest: 160 bit digest to update
- * @data: 512 bits of data to hash
- * @array: 16 words of workspace (see note)
- *
- * This function executes SHA-1's internal compression function. It updates the
- * 160-bit internal state (@digest) with a single 512-bit data block (@data).
- *
- * Don't use this function. SHA-1 is no longer considered secure. And even if
- * you do have to use SHA-1, this isn't the correct way to hash something with
- * SHA-1 as this doesn't handle padding and finalization.
- *
- * Note: If the hash is security sensitive, the caller should be sure
- * to clear the workspace. This is left to the caller to avoid
- * unnecessary clears between chained hashing operations.
- */
-void sha1_transform(__u32 *digest, const char *data, __u32 *array)
+#define SHA1_WORKSPACE_WORDS 16
+
+static void sha1_block_generic(struct sha1_block_state *state,
+ const u8 data[SHA1_BLOCK_SIZE],
+ u32 workspace[SHA1_WORKSPACE_WORDS])
{
__u32 A, B, C, D, E;
unsigned int i = 0;
- A = digest[0];
- B = digest[1];
- C = digest[2];
- D = digest[3];
- E = digest[4];
+ A = state->h[0];
+ B = state->h[1];
+ C = state->h[2];
+ D = state->h[3];
+ E = state->h[4];
/* Round 1 - iterations 0-16 take their input from 'data' */
for (; i < 16; ++i)
@@ -119,27 +105,12 @@ void sha1_transform(__u32 *digest, const char *data, __u32 *array)
for (; i < 80; ++i)
T_60_79(i, A, B, C, D, E);
- digest[0] += A;
- digest[1] += B;
- digest[2] += C;
- digest[3] += D;
- digest[4] += E;
-}
-EXPORT_SYMBOL(sha1_transform);
-
-/**
- * sha1_init_raw - initialize the vectors for a SHA1 digest
- * @buf: vector to initialize
- */
-void sha1_init_raw(__u32 *buf)
-{
- buf[0] = 0x67452301;
- buf[1] = 0xefcdab89;
- buf[2] = 0x98badcfe;
- buf[3] = 0x10325476;
- buf[4] = 0xc3d2e1f0;
+ state->h[0] += A;
+ state->h[1] += B;
+ state->h[2] += C;
+ state->h[3] += D;
+ state->h[4] += E;
}
-EXPORT_SYMBOL(sha1_init_raw);
static void __maybe_unused sha1_blocks_generic(struct sha1_block_state *state,
const u8 *data, size_t nblocks)
@@ -147,7 +118,7 @@ static void __maybe_unused sha1_blocks_generic(struct sha1_block_state *state,
u32 workspace[SHA1_WORKSPACE_WORDS];
do {
- sha1_transform(state->h, data, workspace);
+ sha1_block_generic(state, data, workspace);
data += SHA1_BLOCK_SIZE;
} while (--nblocks);
diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c
index 32138bb8ef77..05d5cb490a44 100644
--- a/lib/decompress_unxz.c
+++ b/lib/decompress_unxz.c
@@ -157,11 +157,11 @@
* when XZ_DYNALLOC is used, but the pre-boot free() doesn't support it.
* Workaround it here because the other decompressors don't need it.
*/
-#undef kmalloc
+#undef kmalloc_obj
#undef kfree
#undef vmalloc
#undef vfree
-#define kmalloc(size, flags) malloc(size)
+#define kmalloc_obj(type) malloc(sizeof(type))
#define kfree(ptr) free(ptr)
#define vmalloc(size) malloc(size)
#define vfree(ptr) do { if (ptr != NULL) free(ptr); } while (0)
diff --git a/lib/dhry_1.c b/lib/dhry_1.c
index ca6c87232c58..134cc1c746c2 100644
--- a/lib/dhry_1.c
+++ b/lib/dhry_1.c
@@ -139,11 +139,11 @@ int dhry(int n)
/* Initializations */
- Next_Ptr_Glob = (Rec_Pointer)kzalloc(sizeof(Rec_Type), GFP_ATOMIC);
+ Next_Ptr_Glob = (Rec_Pointer) kzalloc_obj(Rec_Type, GFP_ATOMIC);
if (!Next_Ptr_Glob)
return -ENOMEM;
- Ptr_Glob = (Rec_Pointer)kzalloc(sizeof(Rec_Type), GFP_ATOMIC);
+ Ptr_Glob = (Rec_Pointer) kzalloc_obj(Rec_Type, GFP_ATOMIC);
if (!Ptr_Glob) {
kfree(Next_Ptr_Glob);
return -ENOMEM;
diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c
index d6aa09a979b3..d8d4f6553559 100644
--- a/lib/dim/net_dim.c
+++ b/lib/dim/net_dim.c
@@ -105,7 +105,7 @@ int net_dim_init_irq_moder(struct net_device *dev, u8 profile_flags,
struct dim_irq_moder *moder;
int len;
- dev->irq_moder = kzalloc(sizeof(*dev->irq_moder), GFP_KERNEL);
+ dev->irq_moder = kzalloc_obj(*dev->irq_moder);
if (!dev->irq_moder)
return -ENOMEM;
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 7d7892e57a01..18a71a9108d3 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -1241,7 +1241,7 @@ static int ddebug_add_module(struct _ddebug_info *di, const char *modname)
return 0;
}
- dt = kzalloc(sizeof(*dt), GFP_KERNEL);
+ dt = kzalloc_obj(*dt);
if (dt == NULL) {
pr_err("error adding module: %s\n", modname);
return -ENOMEM;
diff --git a/lib/error-inject.c b/lib/error-inject.c
index 887acd9a6ea6..f3d1b70be605 100644
--- a/lib/error-inject.c
+++ b/lib/error-inject.c
@@ -80,7 +80,7 @@ static void populate_error_injection_list(struct error_injection_entry *start,
continue;
}
- ent = kmalloc(sizeof(*ent), GFP_KERNEL);
+ ent = kmalloc_obj(*ent);
if (!ent)
break;
ent->start_addr = entry;
diff --git a/lib/globtest.c b/lib/globtest.c
deleted file mode 100644
index d8e97d43b905..000000000000
--- a/lib/globtest.c
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Extracted fronm glob.c
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/glob.h>
-#include <linux/printk.h>
-
-/* Boot with "glob.verbose=1" to show successful tests, too */
-static bool verbose = false;
-module_param(verbose, bool, 0);
-
-struct glob_test {
- char const *pat, *str;
- bool expected;
-};
-
-static bool __pure __init test(char const *pat, char const *str, bool expected)
-{
- bool match = glob_match(pat, str);
- bool success = match == expected;
-
- /* Can't get string literals into a particular section, so... */
- static char const msg_error[] __initconst =
- KERN_ERR "glob: \"%s\" vs. \"%s\": %s *** ERROR ***\n";
- static char const msg_ok[] __initconst =
- KERN_DEBUG "glob: \"%s\" vs. \"%s\": %s OK\n";
- static char const mismatch[] __initconst = "mismatch";
- char const *message;
-
- if (!success)
- message = msg_error;
- else if (verbose)
- message = msg_ok;
- else
- return success;
-
- printk(message, pat, str, mismatch + 3*match);
- return success;
-}
-
-/*
- * The tests are all jammed together in one array to make it simpler
- * to place that array in the .init.rodata section. The obvious
- * "array of structures containing char *" has no way to force the
- * pointed-to strings to be in a particular section.
- *
- * Anyway, a test consists of:
- * 1. Expected glob_match result: '1' or '0'.
- * 2. Pattern to match: null-terminated string
- * 3. String to match against: null-terminated string
- *
- * The list of tests is terminated with a final '\0' instead of
- * a glob_match result character.
- */
-static char const glob_tests[] __initconst =
- /* Some basic tests */
- "1" "a\0" "a\0"
- "0" "a\0" "b\0"
- "0" "a\0" "aa\0"
- "0" "a\0" "\0"
- "1" "\0" "\0"
- "0" "\0" "a\0"
- /* Simple character class tests */
- "1" "[a]\0" "a\0"
- "0" "[a]\0" "b\0"
- "0" "[!a]\0" "a\0"
- "1" "[!a]\0" "b\0"
- "1" "[ab]\0" "a\0"
- "1" "[ab]\0" "b\0"
- "0" "[ab]\0" "c\0"
- "1" "[!ab]\0" "c\0"
- "1" "[a-c]\0" "b\0"
- "0" "[a-c]\0" "d\0"
- /* Corner cases in character class parsing */
- "1" "[a-c-e-g]\0" "-\0"
- "0" "[a-c-e-g]\0" "d\0"
- "1" "[a-c-e-g]\0" "f\0"
- "1" "[]a-ceg-ik[]\0" "a\0"
- "1" "[]a-ceg-ik[]\0" "]\0"
- "1" "[]a-ceg-ik[]\0" "[\0"
- "1" "[]a-ceg-ik[]\0" "h\0"
- "0" "[]a-ceg-ik[]\0" "f\0"
- "0" "[!]a-ceg-ik[]\0" "h\0"
- "0" "[!]a-ceg-ik[]\0" "]\0"
- "1" "[!]a-ceg-ik[]\0" "f\0"
- /* Simple wild cards */
- "1" "?\0" "a\0"
- "0" "?\0" "aa\0"
- "0" "??\0" "a\0"
- "1" "?x?\0" "axb\0"
- "0" "?x?\0" "abx\0"
- "0" "?x?\0" "xab\0"
- /* Asterisk wild cards (backtracking) */
- "0" "*??\0" "a\0"
- "1" "*??\0" "ab\0"
- "1" "*??\0" "abc\0"
- "1" "*??\0" "abcd\0"
- "0" "??*\0" "a\0"
- "1" "??*\0" "ab\0"
- "1" "??*\0" "abc\0"
- "1" "??*\0" "abcd\0"
- "0" "?*?\0" "a\0"
- "1" "?*?\0" "ab\0"
- "1" "?*?\0" "abc\0"
- "1" "?*?\0" "abcd\0"
- "1" "*b\0" "b\0"
- "1" "*b\0" "ab\0"
- "0" "*b\0" "ba\0"
- "1" "*b\0" "bb\0"
- "1" "*b\0" "abb\0"
- "1" "*b\0" "bab\0"
- "1" "*bc\0" "abbc\0"
- "1" "*bc\0" "bc\0"
- "1" "*bc\0" "bbc\0"
- "1" "*bc\0" "bcbc\0"
- /* Multiple asterisks (complex backtracking) */
- "1" "*ac*\0" "abacadaeafag\0"
- "1" "*ac*ae*ag*\0" "abacadaeafag\0"
- "1" "*a*b*[bc]*[ef]*g*\0" "abacadaeafag\0"
- "0" "*a*b*[ef]*[cd]*g*\0" "abacadaeafag\0"
- "1" "*abcd*\0" "abcabcabcabcdefg\0"
- "1" "*ab*cd*\0" "abcabcabcabcdefg\0"
- "1" "*abcd*abcdef*\0" "abcabcdabcdeabcdefg\0"
- "0" "*abcd*\0" "abcabcabcabcefg\0"
- "0" "*ab*cd*\0" "abcabcabcabcefg\0";
-
-static int __init glob_init(void)
-{
- unsigned successes = 0;
- unsigned n = 0;
- char const *p = glob_tests;
- static char const message[] __initconst =
- KERN_INFO "glob: %u self-tests passed, %u failed\n";
-
- /*
- * Tests are jammed together in a string. The first byte is '1'
- * or '0' to indicate the expected outcome, or '\0' to indicate the
- * end of the tests. Then come two null-terminated strings: the
- * pattern and the string to match it against.
- */
- while (*p) {
- bool expected = *p++ & 1;
- char const *pat = p;
-
- p += strlen(p) + 1;
- successes += test(pat, p, expected);
- p += strlen(p) + 1;
- n++;
- }
-
- n -= successes;
- printk(message, successes, n);
-
- /* What's the errno for "kernel bug detected"? Guess... */
- return n ? -ECANCELED : 0;
-}
-
-/* We need a dummy exit function to allow unload */
-static void __exit glob_fini(void) { }
-
-module_init(glob_init);
-module_exit(glob_fini);
-
-MODULE_DESCRIPTION("glob(7) matching tests");
-MODULE_LICENSE("Dual MIT/GPL");
diff --git a/lib/group_cpus.c b/lib/group_cpus.c
index 6d08ac05f371..e6e18d7a49bb 100644
--- a/lib/group_cpus.c
+++ b/lib/group_cpus.c
@@ -47,7 +47,7 @@ static cpumask_var_t *alloc_node_to_cpumask(void)
cpumask_var_t *masks;
int node;
- masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL);
+ masks = kzalloc_objs(cpumask_var_t, nr_node_ids);
if (!masks)
return NULL;
@@ -114,48 +114,15 @@ static int ncpus_cmp_func(const void *l, const void *r)
return ln->ncpus - rn->ncpus;
}
-/*
- * Allocate group number for each node, so that for each node:
- *
- * 1) the allocated number is >= 1
- *
- * 2) the allocated number is <= active CPU number of this node
- *
- * The actual allocated total groups may be less than @numgrps when
- * active total CPU number is less than @numgrps.
- *
- * Active CPUs means the CPUs in '@cpu_mask AND @node_to_cpumask[]'
- * for each node.
- */
-static void alloc_nodes_groups(unsigned int numgrps,
- cpumask_var_t *node_to_cpumask,
- const struct cpumask *cpu_mask,
- const nodemask_t nodemsk,
- struct cpumask *nmsk,
- struct node_groups *node_groups)
+static void alloc_groups_to_nodes(unsigned int numgrps,
+ unsigned int numcpus,
+ struct node_groups *node_groups,
+ unsigned int num_nodes)
{
- unsigned n, remaining_ncpus = 0;
-
- for (n = 0; n < nr_node_ids; n++) {
- node_groups[n].id = n;
- node_groups[n].ncpus = UINT_MAX;
- }
-
- for_each_node_mask(n, nodemsk) {
- unsigned ncpus;
-
- cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
- ncpus = cpumask_weight(nmsk);
-
- if (!ncpus)
- continue;
- remaining_ncpus += ncpus;
- node_groups[n].ncpus = ncpus;
- }
+ unsigned int n, remaining_ncpus = numcpus;
+ unsigned int ngroups, ncpus;
- numgrps = min_t(unsigned, remaining_ncpus, numgrps);
-
- sort(node_groups, nr_node_ids, sizeof(node_groups[0]),
+ sort(node_groups, num_nodes, sizeof(node_groups[0]),
ncpus_cmp_func, NULL);
/*
@@ -226,9 +193,8 @@ static void alloc_nodes_groups(unsigned int numgrps,
* finally for each node X: grps(X) <= ncpu(X).
*
*/
- for (n = 0; n < nr_node_ids; n++) {
- unsigned ngroups, ncpus;
+ for (n = 0; n < num_nodes; n++) {
if (node_groups[n].ncpus == UINT_MAX)
continue;
@@ -246,12 +212,201 @@ static void alloc_nodes_groups(unsigned int numgrps,
}
}
+/*
+ * Allocate group number for each node, so that for each node:
+ *
+ * 1) the allocated number is >= 1
+ *
+ * 2) the allocated number is <= active CPU number of this node
+ *
+ * The actual allocated total groups may be less than @numgrps when
+ * active total CPU number is less than @numgrps.
+ *
+ * Active CPUs means the CPUs in '@cpu_mask AND @node_to_cpumask[]'
+ * for each node.
+ */
+static void alloc_nodes_groups(unsigned int numgrps,
+ cpumask_var_t *node_to_cpumask,
+ const struct cpumask *cpu_mask,
+ const nodemask_t nodemsk,
+ struct cpumask *nmsk,
+ struct node_groups *node_groups)
+{
+ unsigned int n, numcpus = 0;
+
+ for (n = 0; n < nr_node_ids; n++) {
+ node_groups[n].id = n;
+ node_groups[n].ncpus = UINT_MAX;
+ }
+
+ for_each_node_mask(n, nodemsk) {
+ unsigned int ncpus;
+
+ cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
+ ncpus = cpumask_weight(nmsk);
+
+ if (!ncpus)
+ continue;
+ numcpus += ncpus;
+ node_groups[n].ncpus = ncpus;
+ }
+
+ numgrps = min_t(unsigned int, numcpus, numgrps);
+ alloc_groups_to_nodes(numgrps, numcpus, node_groups, nr_node_ids);
+}
+
+static void assign_cpus_to_groups(unsigned int ncpus,
+ struct cpumask *nmsk,
+ struct node_groups *nv,
+ struct cpumask *masks,
+ unsigned int *curgrp,
+ unsigned int last_grp)
+{
+ unsigned int v, cpus_per_grp, extra_grps;
+ /* Account for rounding errors */
+ extra_grps = ncpus - nv->ngroups * (ncpus / nv->ngroups);
+
+ /* Spread allocated groups on CPUs of the current node */
+ for (v = 0; v < nv->ngroups; v++, *curgrp += 1) {
+ cpus_per_grp = ncpus / nv->ngroups;
+
+ /* Account for extra groups to compensate rounding errors */
+ if (extra_grps) {
+ cpus_per_grp++;
+ --extra_grps;
+ }
+
+ /*
+ * wrapping has to be considered given 'startgrp'
+ * may start anywhere
+ */
+ if (*curgrp >= last_grp)
+ *curgrp = 0;
+ grp_spread_init_one(&masks[*curgrp], nmsk, cpus_per_grp);
+ }
+}
+
+static int alloc_cluster_groups(unsigned int ncpus,
+ unsigned int ngroups,
+ struct cpumask *node_cpumask,
+ cpumask_var_t msk,
+ const struct cpumask ***clusters_ptr,
+ struct node_groups **cluster_groups_ptr)
+{
+ unsigned int ncluster = 0;
+ unsigned int cpu, nc, n;
+ const struct cpumask *cluster_mask;
+ const struct cpumask **clusters;
+ struct node_groups *cluster_groups;
+
+ cpumask_copy(msk, node_cpumask);
+
+ /* Probe how many clusters in this node. */
+ while (1) {
+ cpu = cpumask_first(msk);
+ if (cpu >= nr_cpu_ids)
+ break;
+
+ cluster_mask = topology_cluster_cpumask(cpu);
+ if (!cpumask_weight(cluster_mask))
+ goto no_cluster;
+ /* Clean out CPUs on the same cluster. */
+ cpumask_andnot(msk, msk, cluster_mask);
+ ncluster++;
+ }
+
+ /* If ngroups < ncluster, cross cluster is inevitable, skip. */
+ if (ncluster == 0 || ncluster > ngroups)
+ goto no_cluster;
+
+ /* Allocate memory based on cluster number. */
+ clusters = kzalloc_objs(*clusters, ncluster);
+ if (!clusters)
+ goto no_cluster;
+ cluster_groups = kzalloc_objs(struct node_groups, ncluster);
+ if (!cluster_groups)
+ goto fail_cluster_groups;
+
+ /* Filling cluster info for later process. */
+ cpumask_copy(msk, node_cpumask);
+ for (n = 0; n < ncluster; n++) {
+ cpu = cpumask_first(msk);
+ cluster_mask = topology_cluster_cpumask(cpu);
+ nc = cpumask_weight_and(cluster_mask, node_cpumask);
+ clusters[n] = cluster_mask;
+ cluster_groups[n].id = n;
+ cluster_groups[n].ncpus = nc;
+ cpumask_andnot(msk, msk, cluster_mask);
+ }
+
+ alloc_groups_to_nodes(ngroups, ncpus, cluster_groups, ncluster);
+
+ *clusters_ptr = clusters;
+ *cluster_groups_ptr = cluster_groups;
+ return ncluster;
+
+ fail_cluster_groups:
+ kfree(clusters);
+ no_cluster:
+ return 0;
+}
+
+/*
+ * Try group CPUs evenly for cluster locality within a NUMA node.
+ *
+ * Return: true if success, false otherwise.
+ */
+static bool __try_group_cluster_cpus(unsigned int ncpus,
+ unsigned int ngroups,
+ struct cpumask *node_cpumask,
+ struct cpumask *masks,
+ unsigned int *curgrp,
+ unsigned int last_grp)
+{
+ struct node_groups *cluster_groups;
+ const struct cpumask **clusters;
+ unsigned int ncluster;
+ bool ret = false;
+ cpumask_var_t nmsk;
+ unsigned int i, nc;
+
+ if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
+ goto fail_nmsk_alloc;
+
+ ncluster = alloc_cluster_groups(ncpus, ngroups, node_cpumask, nmsk,
+ &clusters, &cluster_groups);
+
+ if (ncluster == 0)
+ goto fail_no_clusters;
+
+ for (i = 0; i < ncluster; i++) {
+ struct node_groups *nv = &cluster_groups[i];
+
+ /* Get the cpus on this cluster. */
+ cpumask_and(nmsk, node_cpumask, clusters[nv->id]);
+ nc = cpumask_weight(nmsk);
+ if (!nc)
+ continue;
+ WARN_ON_ONCE(nv->ngroups > nc);
+
+ assign_cpus_to_groups(nc, nmsk, nv, masks, curgrp, last_grp);
+ }
+
+ ret = true;
+ kfree(cluster_groups);
+ kfree(clusters);
+ fail_no_clusters:
+ free_cpumask_var(nmsk);
+ fail_nmsk_alloc:
+ return ret;
+}
+
static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps,
cpumask_var_t *node_to_cpumask,
const struct cpumask *cpu_mask,
struct cpumask *nmsk, struct cpumask *masks)
{
- unsigned int i, n, nodes, cpus_per_grp, extra_grps, done = 0;
+ unsigned int i, n, nodes, done = 0;
unsigned int last_grp = numgrps;
unsigned int curgrp = startgrp;
nodemask_t nodemsk = NODE_MASK_NONE;
@@ -277,9 +432,7 @@ static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps,
return numgrps;
}
- node_groups = kcalloc(nr_node_ids,
- sizeof(struct node_groups),
- GFP_KERNEL);
+ node_groups = kzalloc_objs(struct node_groups, nr_node_ids);
if (!node_groups)
return -ENOMEM;
@@ -287,7 +440,7 @@ static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps,
alloc_nodes_groups(numgrps, node_to_cpumask, cpu_mask,
nodemsk, nmsk, node_groups);
for (i = 0; i < nr_node_ids; i++) {
- unsigned int ncpus, v;
+ unsigned int ncpus;
struct node_groups *nv = &node_groups[i];
if (nv->ngroups == UINT_MAX)
@@ -301,28 +454,14 @@ static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps,
WARN_ON_ONCE(nv->ngroups > ncpus);
- /* Account for rounding errors */
- extra_grps = ncpus - nv->ngroups * (ncpus / nv->ngroups);
-
- /* Spread allocated groups on CPUs of the current node */
- for (v = 0; v < nv->ngroups; v++, curgrp++) {
- cpus_per_grp = ncpus / nv->ngroups;
-
- /* Account for extra groups to compensate rounding errors */
- if (extra_grps) {
- cpus_per_grp++;
- --extra_grps;
- }
-
- /*
- * wrapping has to be considered given 'startgrp'
- * may start anywhere
- */
- if (curgrp >= last_grp)
- curgrp = 0;
- grp_spread_init_one(&masks[curgrp], nmsk,
- cpus_per_grp);
+ if (__try_group_cluster_cpus(ncpus, nv->ngroups, nmsk,
+ masks, &curgrp, last_grp)) {
+ done += nv->ngroups;
+ continue;
}
+
+ assign_cpus_to_groups(ncpus, nmsk, nv, masks, &curgrp,
+ last_grp);
done += nv->ngroups;
}
kfree(node_groups);
@@ -367,7 +506,7 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks)
if (!node_to_cpumask)
goto fail_npresmsk;
- masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
+ masks = kzalloc_objs(*masks, numgrps);
if (!masks)
goto fail_node_to_cpumask;
@@ -433,7 +572,7 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks)
if (numgrps == 0)
return NULL;
- masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
+ masks = kzalloc_objs(*masks, numgrps);
if (!masks)
return NULL;
diff --git a/lib/hexdump.c b/lib/hexdump.c
index c3db7c3a7643..2e5cd8c24769 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/errno.h>
+#include <linux/hex.h>
#include <linux/kernel.h>
#include <linux/minmax.h>
#include <linux/export.h>
diff --git a/lib/idr.c b/lib/idr.c
index 457430cff8c5..69bee5369670 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -417,7 +417,7 @@ next:
}
bitmap = alloc;
if (!bitmap)
- bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
+ bitmap = kzalloc_obj(*bitmap, GFP_NOWAIT);
if (!bitmap)
goto alloc;
bitmap->bitmap[0] = tmp;
@@ -444,7 +444,7 @@ next:
} else {
bitmap = alloc;
if (!bitmap)
- bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
+ bitmap = kzalloc_obj(*bitmap, GFP_NOWAIT);
if (!bitmap)
goto alloc;
__set_bit(bit, bitmap->bitmap);
@@ -465,7 +465,7 @@ out:
return xas.xa_index * IDA_BITMAP_BITS + bit;
alloc:
xas_unlock_irqrestore(&xas, flags);
- alloc = kzalloc(sizeof(*bitmap), gfp);
+ alloc = kzalloc_obj(*bitmap, gfp);
if (!alloc)
return -ENOMEM;
xas_set(&xas, min / IDA_BITMAP_BITS);
diff --git a/lib/interval_tree_test.c b/lib/interval_tree_test.c
index 5fd62656f42e..16200feacbf3 100644
--- a/lib/interval_tree_test.c
+++ b/lib/interval_tree_test.c
@@ -311,8 +311,7 @@ static inline int span_iteration_check(void) {return 0; }
static int interval_tree_test_init(void)
{
- nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node),
- GFP_KERNEL);
+ nodes = kmalloc_objs(struct interval_tree_node, nnodes);
if (!nodes)
return -ENOMEM;
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 545250507f08..0a63c7fba313 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -903,7 +903,7 @@ static int want_pages_array(struct page ***res, size_t size,
count = maxpages;
WARN_ON(!count); // caller should've prevented that
if (!*res) {
- *res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
+ *res = kvmalloc_objs(struct page *, count);
if (!*res)
return 0;
}
@@ -1318,7 +1318,7 @@ struct iovec *iovec_from_user(const struct iovec __user *uvec,
if (nr_segs > UIO_MAXIOV)
return ERR_PTR(-EINVAL);
if (nr_segs > fast_segs) {
- iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
+ iov = kmalloc_objs(struct iovec, nr_segs);
if (!iov)
return ERR_PTR(-ENOMEM);
}
diff --git a/lib/kfifo.c b/lib/kfifo.c
index 525e66f8294c..2633f9cc336c 100644
--- a/lib/kfifo.c
+++ b/lib/kfifo.c
@@ -41,7 +41,7 @@ int __kfifo_alloc_node(struct __kfifo *fifo, unsigned int size,
return -EINVAL;
}
- fifo->data = kmalloc_array_node(esize, size, gfp_mask, node);
+ fifo->data = kmalloc_array_node(size, esize, gfp_mask, node);
if (!fifo->data) {
fifo->mask = 0;
diff --git a/lib/kobject.c b/lib/kobject.c
index abe5f5b856ce..cfdb2c3f20a2 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -765,7 +765,7 @@ static struct kobject *kobject_create(void)
{
struct kobject *kobj;
- kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
+ kobj = kzalloc_obj(*kobj);
if (!kobj)
return NULL;
@@ -962,7 +962,7 @@ static struct kset *kset_create(const char *name,
struct kset *kset;
int retval;
- kset = kzalloc(sizeof(*kset), GFP_KERNEL);
+ kset = kzalloc_obj(*kset);
if (!kset)
return NULL;
retval = kobject_set_name(&kset->kobj, "%s", name);
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 78e16b95d210..871941c9830c 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -124,7 +124,7 @@ static int kobject_action_args(const char *buf, size_t count,
if (!count)
return -EINVAL;
- env = kzalloc(sizeof(*env), GFP_KERNEL);
+ env = kzalloc_obj(*env);
if (!env)
return -ENOMEM;
@@ -537,7 +537,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
}
/* environment buffer */
- env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
+ env = kzalloc_obj(struct kobj_uevent_env);
if (!env)
return -ENOMEM;
@@ -776,7 +776,7 @@ static int uevent_net_init(struct net *net)
.flags = NL_CFG_F_NONROOT_RECV
};
- ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL);
+ ue_sk = kzalloc_obj(*ue_sk);
if (!ue_sk)
return -ENOMEM;
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index bdde40cd69d7..97be2a39f537 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -340,8 +340,8 @@ EXPORT_SYMBOL(kstrtos8);
* @s: input string
* @res: result
*
- * This routine returns 0 iff the first character is one of 'YyTt1NnFf0', or
- * [oO][NnFf] for "on" and "off". Otherwise it will return -EINVAL. Value
+ * This routine returns 0 iff the first character is one of 'EeYyTt1DdNnFf0',
+ * or [oO][NnFf] for "on" and "off". Otherwise it will return -EINVAL. Value
* pointed to by res is updated upon finding a match.
*/
noinline
diff --git a/lib/kunit/attributes.c b/lib/kunit/attributes.c
index 2cf04cc09372..6d7a53af94a9 100644
--- a/lib/kunit/attributes.c
+++ b/lib/kunit/attributes.c
@@ -410,7 +410,7 @@ struct kunit_suite *kunit_filter_attr_tests(const struct kunit_suite *const suit
kunit_suite_for_each_test_case(suite, test_case) { n++; }
- filtered = kcalloc(n + 1, sizeof(*filtered), GFP_KERNEL);
+ filtered = kzalloc_objs(*filtered, n + 1);
if (!filtered) {
kfree(copy);
return ERR_PTR(-ENOMEM);
diff --git a/lib/kunit/device.c b/lib/kunit/device.c
index f201aaacd4cf..85d57ad34045 100644
--- a/lib/kunit/device.c
+++ b/lib/kunit/device.c
@@ -111,7 +111,7 @@ static struct kunit_device *kunit_device_register_internal(struct kunit *test,
struct kunit_device *kunit_dev;
int err = -ENOMEM;
- kunit_dev = kzalloc(sizeof(*kunit_dev), GFP_KERNEL);
+ kunit_dev = kzalloc_obj(*kunit_dev);
if (!kunit_dev)
return ERR_PTR(err);
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index 02ff380ab793..1fef217de11d 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -131,7 +131,7 @@ kunit_filter_glob_tests(const struct kunit_suite *const suite, const char *test_
if (!copy)
return ERR_PTR(-ENOMEM);
- filtered = kcalloc(n + 1, sizeof(*filtered), GFP_KERNEL);
+ filtered = kzalloc_objs(*filtered, n + 1);
if (!filtered) {
kfree(copy);
return ERR_PTR(-ENOMEM);
@@ -179,7 +179,7 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set,
const size_t max = suite_set->end - suite_set->start;
- copy = kcalloc(max, sizeof(*copy), GFP_KERNEL);
+ copy = kzalloc_objs(*copy, max);
if (!copy) { /* won't be able to run anything, return an empty set */
return filtered;
}
@@ -194,7 +194,7 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set,
/* Parse attribute filters */
if (filters) {
filter_count = kunit_get_filter_count(filters);
- parsed_filters = kcalloc(filter_count, sizeof(*parsed_filters), GFP_KERNEL);
+ parsed_filters = kzalloc_objs(*parsed_filters, filter_count);
if (!parsed_filters) {
*err = -ENOMEM;
goto free_parsed_glob;
diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c
index f0090c2729cd..4cb119ad8f64 100644
--- a/lib/kunit/executor_test.c
+++ b/lib/kunit/executor_test.c
@@ -272,7 +272,7 @@ static void free_suite_set_at_end(struct kunit *test, const void *to_free)
if (!((struct kunit_suite_set *)to_free)->start)
return;
- free = kzalloc(sizeof(struct kunit_suite_set), GFP_KERNEL);
+ free = kzalloc_obj(struct kunit_suite_set);
*free = *(struct kunit_suite_set *)to_free;
kunit_add_action(test, free_suite_set, (void *)free);
diff --git a/lib/kunit/kunit-example-test.c b/lib/kunit/kunit-example-test.c
index 9452b163956f..0bae7b7ca0b0 100644
--- a/lib/kunit/kunit-example-test.c
+++ b/lib/kunit/kunit-example-test.c
@@ -283,7 +283,7 @@ static void example_slow_test(struct kunit *test)
*/
static int example_resource_init(struct kunit_resource *res, void *context)
{
- int *info = kmalloc(sizeof(*info), GFP_KERNEL);
+ int *info = kmalloc_obj(*info);
if (!info)
return -ENOMEM;
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index 63130a48e237..126e30879dad 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -538,8 +538,7 @@ static void kunit_resource_test_action_ordering(struct kunit *test)
static int kunit_resource_test_init(struct kunit *test)
{
- struct kunit_test_resource_context *ctx =
- kzalloc(sizeof(*ctx), GFP_KERNEL);
+ struct kunit_test_resource_context *ctx = kzalloc_obj(*ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
diff --git a/lib/kunit/resource.c b/lib/kunit/resource.c
index f0209252b179..45e55238ccf6 100644
--- a/lib/kunit/resource.c
+++ b/lib/kunit/resource.c
@@ -98,7 +98,7 @@ int kunit_add_action(struct kunit *test, void (*action)(void *), void *ctx)
KUNIT_ASSERT_NOT_NULL_MSG(test, action, "Tried to action a NULL function!");
- action_ctx = kzalloc(sizeof(*action_ctx), GFP_KERNEL);
+ action_ctx = kzalloc_obj(*action_ctx);
if (!action_ctx)
return -ENOMEM;
diff --git a/lib/kunit/static_stub.c b/lib/kunit/static_stub.c
index 484fd85251b4..d9dd6377aa38 100644
--- a/lib/kunit/static_stub.c
+++ b/lib/kunit/static_stub.c
@@ -111,7 +111,7 @@ void __kunit_activate_static_stub(struct kunit *test,
/* We got an extra reference from find_resource(), so put it. */
kunit_put_resource(res);
} else {
- ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = kmalloc_obj(*ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
ctx->real_fn_addr = real_fn_addr;
ctx->replacement_addr = replacement_addr;
diff --git a/lib/kunit/string-stream.c b/lib/kunit/string-stream.c
index 54f4fdcbfac8..0d8f1b30559b 100644
--- a/lib/kunit/string-stream.c
+++ b/lib/kunit/string-stream.c
@@ -18,7 +18,7 @@ static struct string_stream_fragment *alloc_string_stream_fragment(int len, gfp_
{
struct string_stream_fragment *frag;
- frag = kzalloc(sizeof(*frag), gfp);
+ frag = kzalloc_obj(*frag, gfp);
if (!frag)
return ERR_PTR(-ENOMEM);
@@ -158,7 +158,7 @@ struct string_stream *alloc_string_stream(gfp_t gfp)
{
struct string_stream *stream;
- stream = kzalloc(sizeof(*stream), gfp);
+ stream = kzalloc_obj(*stream, gfp);
if (!stream)
return ERR_PTR(-ENOMEM);
diff --git a/lib/logic_iomem.c b/lib/logic_iomem.c
index b247d412ddef..42a571d05670 100644
--- a/lib/logic_iomem.c
+++ b/lib/logic_iomem.c
@@ -48,7 +48,7 @@ int logic_iomem_add_region(struct resource *resource,
if (WARN_ON((resource->flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM))
return -EINVAL;
- rreg = kzalloc(sizeof(*rreg), GFP_KERNEL);
+ rreg = kzalloc_obj(*rreg);
if (!rreg)
return -ENOMEM;
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index 9e0d469c7658..82f775044056 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
@@ -94,14 +94,14 @@ struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
if (e_count > LC_MAX_ACTIVE)
return NULL;
- slot = kcalloc(e_count, sizeof(struct hlist_head), GFP_KERNEL);
+ slot = kzalloc_objs(struct hlist_head, e_count);
if (!slot)
goto out_fail;
- element = kcalloc(e_count, sizeof(struct lc_element *), GFP_KERNEL);
+ element = kzalloc_objs(struct lc_element *, e_count);
if (!element)
goto out_fail;
- lc = kzalloc(sizeof(*lc), GFP_KERNEL);
+ lc = kzalloc_obj(*lc);
if (!lc)
goto out_fail;
diff --git a/lib/lwq.c b/lib/lwq.c
index 57d080a4d53d..c1e11ba6f254 100644
--- a/lib/lwq.c
+++ b/lib/lwq.c
@@ -110,7 +110,7 @@ static int lwq_test(void)
for (i = 0; i < ARRAY_SIZE(threads); i++)
threads[i] = kthread_run(lwq_exercise, &q, "lwq-test-%d", i);
for (i = 0; i < 100; i++) {
- t = kmalloc(sizeof(*t), GFP_KERNEL);
+ t = kmalloc_obj(*t);
if (!t)
break;
t->i = i;
diff --git a/lib/objagg.c b/lib/objagg.c
index 363e43e849ac..23c7105a3f9f 100644
--- a/lib/objagg.c
+++ b/lib/objagg.c
@@ -525,7 +525,7 @@ struct objagg *objagg_create(const struct objagg_ops *ops,
!ops->delta_destroy))
return ERR_PTR(-EINVAL);
- objagg = kzalloc(sizeof(*objagg), GFP_KERNEL);
+ objagg = kzalloc_obj(*objagg);
if (!objagg)
return ERR_PTR(-ENOMEM);
objagg->ops = ops;
@@ -610,8 +610,8 @@ const struct objagg_stats *objagg_stats_get(struct objagg *objagg)
struct objagg_obj *objagg_obj;
int i;
- objagg_stats = kzalloc(struct_size(objagg_stats, stats_info,
- objagg->obj_count), GFP_KERNEL);
+ objagg_stats = kzalloc_flex(*objagg_stats, stats_info,
+ objagg->obj_count);
if (!objagg_stats)
return ERR_PTR(-ENOMEM);
@@ -786,11 +786,11 @@ static struct objagg_tmp_graph *objagg_tmp_graph_create(struct objagg *objagg)
struct objagg_obj *objagg_obj;
int i, j;
- graph = kzalloc(sizeof(*graph), GFP_KERNEL);
+ graph = kzalloc_obj(*graph);
if (!graph)
return NULL;
- graph->nodes = kcalloc(nodes_count, sizeof(*graph->nodes), GFP_KERNEL);
+ graph->nodes = kzalloc_objs(*graph->nodes, nodes_count);
if (!graph->nodes)
goto err_nodes_alloc;
graph->nodes_count = nodes_count;
@@ -930,7 +930,7 @@ struct objagg_hints *objagg_hints_get(struct objagg *objagg,
struct objagg_hints *objagg_hints;
int err;
- objagg_hints = kzalloc(sizeof(*objagg_hints), GFP_KERNEL);
+ objagg_hints = kzalloc_obj(*objagg_hints);
if (!objagg_hints)
return ERR_PTR(-ENOMEM);
@@ -1010,9 +1010,8 @@ objagg_hints_stats_get(struct objagg_hints *objagg_hints)
struct objagg_hints_node *hnode;
int i;
- objagg_stats = kzalloc(struct_size(objagg_stats, stats_info,
- objagg_hints->node_count),
- GFP_KERNEL);
+ objagg_stats = kzalloc_flex(*objagg_stats, stats_info,
+ objagg_hints->node_count);
if (!objagg_stats)
return ERR_PTR(-ENOMEM);
diff --git a/lib/objpool.c b/lib/objpool.c
index b998b720c732..d98fadf1de16 100644
--- a/lib/objpool.c
+++ b/lib/objpool.c
@@ -142,7 +142,7 @@ int objpool_init(struct objpool_head *pool, int nr_objs, int object_size,
pool->gfp = gfp & ~__GFP_ZERO;
pool->context = context;
pool->release = release;
- slot_size = nr_cpu_ids * sizeof(struct objpool_slot);
+ slot_size = nr_cpu_ids * sizeof(struct objpool_slot *);
pool->cpu_slots = kzalloc(slot_size, pool->gfp);
if (!pool->cpu_slots)
return -ENOMEM;
diff --git a/lib/once.c b/lib/once.c
index 2c306f0e891e..d801bfa945e6 100644
--- a/lib/once.c
+++ b/lib/once.c
@@ -26,7 +26,7 @@ static void once_disable_jump(struct static_key_true *key, struct module *mod)
{
struct once_work *w;
- w = kmalloc(sizeof(*w), GFP_ATOMIC);
+ w = kmalloc_obj(*w, GFP_ATOMIC);
if (!w)
return;
@@ -93,6 +93,6 @@ void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
{
*done = true;
mutex_unlock(&once_mutex);
- once_disable_jump(once_key, mod);
+ static_branch_disable(once_key);
}
EXPORT_SYMBOL(__do_once_sleepable_done);
diff --git a/lib/parman.c b/lib/parman.c
index 3f8f8d422e62..0de691c202ab 100644
--- a/lib/parman.c
+++ b/lib/parman.c
@@ -268,7 +268,7 @@ struct parman *parman_create(const struct parman_ops *ops, void *priv)
{
struct parman *parman;
- parman = kzalloc(sizeof(*parman), GFP_KERNEL);
+ parman = kzalloc_obj(*parman);
if (!parman)
return NULL;
INIT_LIST_HEAD(&parman->prio_list);
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 668f6aa6a75d..97772e42b9b2 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -73,7 +73,7 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
if (!ref->percpu_count_ptr)
return -ENOMEM;
- data = kzalloc(sizeof(*ref->data), gfp);
+ data = kzalloc_obj(*ref->data, gfp);
if (!data) {
free_percpu((void __percpu *)ref->percpu_count_ptr);
ref->percpu_count_ptr = 0;
diff --git a/lib/pldmfw/pldmfw.c b/lib/pldmfw/pldmfw.c
index b45ceb725780..e4612ea147bb 100644
--- a/lib/pldmfw/pldmfw.c
+++ b/lib/pldmfw/pldmfw.c
@@ -287,7 +287,7 @@ pldm_parse_desc_tlvs(struct pldmfw_priv *data, struct pldmfw_record *record, u8
if (err)
return err;
- desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ desc = kzalloc_obj(*desc);
if (!desc)
return -ENOMEM;
@@ -328,7 +328,7 @@ pldm_parse_one_record(struct pldmfw_priv *data,
int i;
/* Make a copy and insert it into the record list */
- record = kzalloc(sizeof(*record), GFP_KERNEL);
+ record = kzalloc_obj(*record);
if (!record)
return -ENOMEM;
@@ -465,7 +465,7 @@ static int pldm_parse_components(struct pldmfw_priv *data)
if (err)
return err;
- component = kzalloc(sizeof(*component), GFP_KERNEL);
+ component = kzalloc_obj(*component);
if (!component)
return -ENOMEM;
@@ -848,7 +848,7 @@ int pldmfw_flash_image(struct pldmfw *context, const struct firmware *fw)
struct pldmfw_priv *data;
int err;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = kzalloc_obj(*data);
if (!data)
return -ENOMEM;
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index 690cede46ac2..768c5e6453f3 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
@@ -399,7 +399,7 @@ static int augmented_check(void)
static int __init rbtree_test_init(void)
{
- nodes = kmalloc_array(nnodes, sizeof(*nodes), GFP_KERNEL);
+ nodes = kmalloc_objs(*nodes, nnodes);
if (!nodes)
return -ENOMEM;
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c
index a9e2dcb6f2a7..864484c01827 100644
--- a/lib/reed_solomon/reed_solomon.c
+++ b/lib/reed_solomon/reed_solomon.c
@@ -73,7 +73,7 @@ static struct rs_codec *codec_init(int symsize, int gfpoly, int (*gffunc)(int),
int i, j, sr, root, iprim;
struct rs_codec *rs;
- rs = kzalloc(sizeof(*rs), gfp);
+ rs = kzalloc_obj(*rs, gfp);
if (!rs)
return NULL;
diff --git a/lib/reed_solomon/test_rslib.c b/lib/reed_solomon/test_rslib.c
index 75cb1adac884..42b856e6c8a8 100644
--- a/lib/reed_solomon/test_rslib.c
+++ b/lib/reed_solomon/test_rslib.c
@@ -111,7 +111,7 @@ static struct wspace *alloc_ws(struct rs_codec *rs)
struct wspace *ws;
int nn = rs->nn;
- ws = kzalloc(sizeof(*ws), GFP_KERNEL);
+ ws = kzalloc_obj(*ws);
if (!ws)
return NULL;
@@ -124,7 +124,7 @@ static struct wspace *alloc_ws(struct rs_codec *rs)
ws->s = ws->r + nn;
ws->corr = ws->s + nroots;
- ws->errlocs = kmalloc_array(nn + nroots, sizeof(int), GFP_KERNEL);
+ ws->errlocs = kmalloc_objs(int, nn + nroots);
if (!ws->errlocs)
goto err;
diff --git a/lib/ref_tracker.c b/lib/ref_tracker.c
index 258fb0e7abdf..30c999d57b10 100644
--- a/lib/ref_tracker.c
+++ b/lib/ref_tracker.c
@@ -74,8 +74,7 @@ ref_tracker_get_stats(struct ref_tracker_dir *dir, unsigned int limit)
struct ref_tracker_dir_stats *stats;
struct ref_tracker *tracker;
- stats = kmalloc(struct_size(stats, stacks, limit),
- GFP_NOWAIT);
+ stats = kmalloc_flex(*stats, stacks, limit, GFP_NOWAIT);
if (!stats)
return ERR_PTR(-ENOMEM);
stats->total = 0;
@@ -268,7 +267,7 @@ int ref_tracker_alloc(struct ref_tracker_dir *dir,
}
if (gfp & __GFP_DIRECT_RECLAIM)
gfp_mask |= __GFP_NOFAIL;
- *trackerp = tracker = kzalloc(sizeof(*tracker), gfp_mask);
+ *trackerp = tracker = kzalloc_obj(*tracker, gfp_mask);
if (unlikely(!tracker)) {
pr_err_once("memory allocation failure, unreliable refcount tracker.\n");
refcount_inc(&dir->untracked);
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 4af1c8b0775a..d773720d11bf 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -65,6 +65,32 @@ int sg_nents_for_len(struct scatterlist *sg, u64 len)
EXPORT_SYMBOL(sg_nents_for_len);
/**
+ * sg_nents_for_dma - return the count of DMA-capable entries in scatterlist
+ * @sgl: The scatterlist
+ * @sglen: The current number of entries
+ * @len: The maximum length of DMA-capable block
+ *
+ * Description:
+ * Determines the number of entries in @sgl which would be permitted in
+ * DMA-capable transfer if list had been split accordingly, taking into
+ * account chaining as well.
+ *
+ * Returns:
+ * the number of sgl entries needed
+ *
+ **/
+int sg_nents_for_dma(struct scatterlist *sgl, unsigned int sglen, size_t len)
+{
+ struct scatterlist *sg;
+ int i, nents = 0;
+
+ for_each_sg(sgl, sg, sglen, i)
+ nents += DIV_ROUND_UP(sg_dma_len(sg), len);
+ return nents;
+}
+EXPORT_SYMBOL(sg_nents_for_dma);
+
+/**
* sg_last - return the last scatterlist entry in a list
* @sgl: First entry in the scatterlist
* @nents: Number of entries in the scatterlist
@@ -142,8 +168,7 @@ static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
return ptr;
} else
- return kmalloc_array(nents, sizeof(struct scatterlist),
- gfp_mask);
+ return kmalloc_objs(struct scatterlist, nents, gfp_mask);
}
static void sg_kfree(struct scatterlist *sg, unsigned int nents)
@@ -606,8 +631,7 @@ struct scatterlist *sgl_alloc_order(unsigned long long length,
return NULL;
nalloc++;
}
- sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
- gfp & ~GFP_DMA);
+ sgl = kmalloc_objs(struct scatterlist, nalloc, gfp & ~GFP_DMA);
if (!sgl)
return NULL;
diff --git a/lib/sg_split.c b/lib/sg_split.c
index 0f89aab5c671..24e8f5e48e63 100644
--- a/lib/sg_split.c
+++ b/lib/sg_split.c
@@ -152,7 +152,7 @@ int sg_split(struct scatterlist *in, const int in_mapped_nents,
int i, ret;
struct sg_splitter *splitters;
- splitters = kcalloc(nb_splits, sizeof(*splitters), gfp_mask);
+ splitters = kzalloc_objs(*splitters, nb_splits, gfp_mask);
if (!splitters)
return -ENOMEM;
@@ -163,9 +163,8 @@ int sg_split(struct scatterlist *in, const int in_mapped_nents,
ret = -ENOMEM;
for (i = 0; i < nb_splits; i++) {
- splitters[i].out_sg = kmalloc_array(splitters[i].nents,
- sizeof(struct scatterlist),
- gfp_mask);
+ splitters[i].out_sg = kmalloc_objs(struct scatterlist,
+ splitters[i].nents, gfp_mask);
if (!splitters[i].out_sg)
goto err;
}
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 166f50ad8391..dd2717ff94bf 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -260,7 +260,7 @@ int stack_depot_init(void)
entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX;
pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
- stack_table = kvcalloc(entries, sizeof(struct list_head), GFP_KERNEL);
+ stack_table = kvzalloc_objs(struct list_head, entries);
if (!stack_table) {
pr_err("hash table allocation failed, disabling\n");
stack_depot_disabled = true;
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index ffb8ead6d4cd..169eaf583494 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
+#include <linux/hex.h>
#include <linux/limits.h>
#include <linux/mm.h>
#include <linux/slab.h>
@@ -146,7 +147,7 @@ int parse_int_array(const char *buf, size_t count, int **array)
if (!nints)
return -ENOENT;
- ints = kcalloc(nints + 1, sizeof(*ints), GFP_KERNEL);
+ ints = kzalloc_objs(*ints, nints + 1);
if (!ints)
return -ENOMEM;
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index af0041df2b72..5892c0f17ddc 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -94,7 +94,7 @@ static int bpf_fill_maxinsns1(struct bpf_test *self)
__u32 k = ~0;
int i;
- insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ insn = kmalloc_objs(*insn, len);
if (!insn)
return -ENOMEM;
@@ -113,7 +113,7 @@ static int bpf_fill_maxinsns2(struct bpf_test *self)
struct sock_filter *insn;
int i;
- insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ insn = kmalloc_objs(*insn, len);
if (!insn)
return -ENOMEM;
@@ -133,7 +133,7 @@ static int bpf_fill_maxinsns3(struct bpf_test *self)
struct rnd_state rnd;
int i;
- insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ insn = kmalloc_objs(*insn, len);
if (!insn)
return -ENOMEM;
@@ -159,7 +159,7 @@ static int bpf_fill_maxinsns4(struct bpf_test *self)
struct sock_filter *insn;
int i;
- insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ insn = kmalloc_objs(*insn, len);
if (!insn)
return -ENOMEM;
@@ -178,7 +178,7 @@ static int bpf_fill_maxinsns5(struct bpf_test *self)
struct sock_filter *insn;
int i;
- insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ insn = kmalloc_objs(*insn, len);
if (!insn)
return -ENOMEM;
@@ -201,7 +201,7 @@ static int bpf_fill_maxinsns6(struct bpf_test *self)
struct sock_filter *insn;
int i;
- insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ insn = kmalloc_objs(*insn, len);
if (!insn)
return -ENOMEM;
@@ -223,7 +223,7 @@ static int bpf_fill_maxinsns7(struct bpf_test *self)
struct sock_filter *insn;
int i;
- insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ insn = kmalloc_objs(*insn, len);
if (!insn)
return -ENOMEM;
@@ -249,7 +249,7 @@ static int bpf_fill_maxinsns8(struct bpf_test *self)
struct sock_filter *insn;
int i, jmp_off = len - 3;
- insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ insn = kmalloc_objs(*insn, len);
if (!insn)
return -ENOMEM;
@@ -272,7 +272,7 @@ static int bpf_fill_maxinsns9(struct bpf_test *self)
struct bpf_insn *insn;
int i;
- insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ insn = kmalloc_objs(*insn, len);
if (!insn)
return -ENOMEM;
@@ -298,7 +298,7 @@ static int bpf_fill_maxinsns10(struct bpf_test *self)
struct bpf_insn *insn;
int i;
- insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ insn = kmalloc_objs(*insn, len);
if (!insn)
return -ENOMEM;
@@ -324,7 +324,7 @@ static int __bpf_fill_ja(struct bpf_test *self, unsigned int len,
unsigned int rlen;
int i, j;
- insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ insn = kmalloc_objs(*insn, len);
if (!insn)
return -ENOMEM;
@@ -358,7 +358,7 @@ static int bpf_fill_maxinsns12(struct bpf_test *self)
struct sock_filter *insn;
int i = 0;
- insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ insn = kmalloc_objs(*insn, len);
if (!insn)
return -ENOMEM;
@@ -381,7 +381,7 @@ static int bpf_fill_maxinsns13(struct bpf_test *self)
struct sock_filter *insn;
int i = 0;
- insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ insn = kmalloc_objs(*insn, len);
if (!insn)
return -ENOMEM;
@@ -410,7 +410,7 @@ static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self)
struct sock_filter *insn;
int i;
- insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ insn = kmalloc_objs(*insn, len);
if (!insn)
return -ENOMEM;
@@ -434,7 +434,7 @@ static int __bpf_fill_stxdw(struct bpf_test *self, int size)
struct bpf_insn *insn;
int i;
- insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ insn = kmalloc_objs(*insn, len);
if (!insn)
return -ENOMEM;
@@ -484,7 +484,7 @@ static int __bpf_fill_max_jmp(struct bpf_test *self, int jmp, int imm, bool alu3
int len = S16_MAX + 5;
int i;
- insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+ insns = kmalloc_objs(*insns, len);
if (!insns)
return -ENOMEM;
@@ -626,7 +626,7 @@ static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
int imm, k;
int i = 0;
- insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ insn = kmalloc_objs(*insn, len);
if (!insn)
return -ENOMEM;
@@ -759,7 +759,7 @@ static int __bpf_fill_alu_shift_same_reg(struct bpf_test *self, u8 op,
int i = 0;
u64 val;
- insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ insn = kmalloc_objs(*insn, len);
if (!insn)
return -ENOMEM;
@@ -1244,7 +1244,7 @@ static int __bpf_fill_alu_imm_regs(struct bpf_test *self, u8 op, bool alu32)
u32 imm;
int rd;
- insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+ insns = kmalloc_objs(*insns, len);
if (!insns)
return -ENOMEM;
@@ -1426,7 +1426,7 @@ static int __bpf_fill_alu_reg_pairs(struct bpf_test *self, u8 op, bool alu32)
int rd, rs;
int i = 0;
- insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+ insns = kmalloc_objs(*insns, len);
if (!insns)
return -ENOMEM;
@@ -1917,7 +1917,7 @@ static int __bpf_fill_atomic_reg_pairs(struct bpf_test *self, u8 width, u8 op)
u64 mem, upd, res;
int rd, rs, i = 0;
- insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ insn = kmalloc_objs(*insn, len);
if (!insn)
return -ENOMEM;
@@ -2163,7 +2163,7 @@ static int bpf_fill_ld_imm64_magn(struct bpf_test *self)
int bit, adj, sign;
int i = 0;
- insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ insn = kmalloc_objs(*insn, len);
if (!insn)
return -ENOMEM;
@@ -2217,7 +2217,7 @@ static int __bpf_fill_ld_imm64_bytes(struct bpf_test *self,
u32 rand = 1;
int i = 0;
- insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ insn = kmalloc_objs(*insn, len);
if (!insn)
return -ENOMEM;
@@ -2724,7 +2724,7 @@ static int __bpf_fill_staggered_jumps(struct bpf_test *self,
struct bpf_insn *insns;
int off, ind;
- insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+ insns = kmalloc_objs(*insns, len);
if (!insns)
return -ENOMEM;
@@ -15461,7 +15461,7 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
int which, err;
/* Allocate the table of programs to be used for tail calls */
- progs = kzalloc(struct_size(progs, ptrs, ntests + 1), GFP_KERNEL);
+ progs = kzalloc_flex(*progs, ptrs, ntests + 1);
if (!progs)
goto out_nomem;
diff --git a/lib/test_debug_virtual.c b/lib/test_debug_virtual.c
index b7cc0aaee173..518ee8d213cd 100644
--- a/lib/test_debug_virtual.c
+++ b/lib/test_debug_virtual.c
@@ -29,7 +29,7 @@ static int __init test_debug_virtual_init(void)
pr_info("PA: %pa for VA: 0x%lx\n", &pa, (unsigned long)va);
- foo = kzalloc(sizeof(*foo), GFP_KERNEL);
+ foo = kzalloc_obj(*foo);
if (!foo)
return -ENOMEM;
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index be4f93124901..b471d720879a 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -1309,7 +1309,7 @@ static ssize_t upload_register_store(struct device *dev,
goto free_name;
}
- tst = kzalloc(sizeof(*tst), GFP_KERNEL);
+ tst = kzalloc_obj(*tst);
if (!tst) {
ret = -ENOMEM;
goto free_name;
@@ -1526,7 +1526,7 @@ static int __init test_firmware_init(void)
{
int rc;
- test_fw_config = kzalloc(sizeof(struct test_config), GFP_KERNEL);
+ test_fw_config = kzalloc_obj(struct test_config);
if (!test_fw_config)
return -ENOMEM;
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 455a6862ae50..0964d53365e6 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -166,7 +166,7 @@ static int dmirror_fops_open(struct inode *inode, struct file *filp)
int ret;
/* Mirror this process address space */
- dmirror = kzalloc(sizeof(*dmirror), GFP_KERNEL);
+ dmirror = kzalloc_obj(*dmirror);
if (dmirror == NULL)
return -ENOMEM;
@@ -504,7 +504,7 @@ static int dmirror_allocate_chunk(struct dmirror_device *mdevice,
void *ptr;
int ret = -ENOMEM;
- devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
+ devmem = kzalloc_obj(*devmem);
if (!devmem)
return ret;
diff --git a/lib/test_kho.c b/lib/test_kho.c
index 47de56280795..7ef9e4061869 100644
--- a/lib/test_kho.c
+++ b/lib/test_kho.c
@@ -19,6 +19,7 @@
#include <linux/printk.h>
#include <linux/vmalloc.h>
#include <linux/kexec_handover.h>
+#include <linux/kho/abi/kexec_handover.h>
#include <net/checksum.h>
@@ -210,7 +211,7 @@ static int kho_test_save(void)
max_mem = PAGE_ALIGN(max_mem);
max_nr = max_mem >> PAGE_SHIFT;
- folios = kvmalloc_array(max_nr, sizeof(*state->folios), GFP_KERNEL);
+ folios = kvmalloc_objs(*state->folios, max_nr);
if (!folios)
return -ENOMEM;
state->folios = folios;
@@ -339,11 +340,15 @@ module_init(kho_test_init);
static void kho_test_cleanup(void)
{
+ /* unpreserve and free the data stored in folios */
+ kho_test_unpreserve_data(&kho_test_state);
for (int i = 0; i < kho_test_state.nr_folios; i++)
folio_put(kho_test_state.folios[i]);
kvfree(kho_test_state.folios);
- vfree(kho_test_state.folios_info);
+
+ /* Unpreserve and release the FDT folio */
+ kho_unpreserve_folio(kho_test_state.fdt);
folio_put(kho_test_state.fdt);
}
diff --git a/lib/test_memcat_p.c b/lib/test_memcat_p.c
index 7e0797a6bebf..62f1633b30f8 100644
--- a/lib/test_memcat_p.c
+++ b/lib/test_memcat_p.c
@@ -24,20 +24,20 @@ static int __init test_memcat_p_init(void)
struct test_struct **in0, **in1, **out, **p;
int err = -ENOMEM, i, r, total = 0;
- in0 = kcalloc(INPUT_MAX, sizeof(*in0), GFP_KERNEL);
+ in0 = kzalloc_objs(*in0, INPUT_MAX);
if (!in0)
return err;
- in1 = kcalloc(INPUT_MAX, sizeof(*in1), GFP_KERNEL);
+ in1 = kzalloc_objs(*in1, INPUT_MAX);
if (!in1)
goto err_free_in0;
for (i = 0, r = 1; i < INPUT_MAX - 1; i++) {
- in0[i] = kmalloc(sizeof(**in0), GFP_KERNEL);
+ in0[i] = kmalloc_obj(**in0);
if (!in0[i])
goto err_free_elements;
- in1[i] = kmalloc(sizeof(**in1), GFP_KERNEL);
+ in1[i] = kmalloc_obj(**in1);
if (!in1[i]) {
kfree(in0[i]);
goto err_free_elements;
diff --git a/lib/test_objagg.c b/lib/test_objagg.c
index ce5c4c36a084..f21e3ae01395 100644
--- a/lib/test_objagg.c
+++ b/lib/test_objagg.c
@@ -107,7 +107,7 @@ static void *delta_create(void *priv, void *parent_obj, void *obj)
if (!delta_check(priv, parent_obj, obj))
return ERR_PTR(-EINVAL);
- delta = kzalloc(sizeof(*delta), GFP_KERNEL);
+ delta = kzalloc_obj(*delta);
if (!delta)
return ERR_PTR(-ENOMEM);
delta->key_id_diff = diff;
@@ -130,7 +130,7 @@ static void *root_create(void *priv, void *obj, unsigned int id)
struct tokey *key = obj;
struct root *root;
- root = kzalloc(sizeof(*root), GFP_KERNEL);
+ root = kzalloc_obj(*root);
if (!root)
return ERR_PTR(-ENOMEM);
memcpy(&root->key, key, sizeof(root->key));
diff --git a/lib/test_parman.c b/lib/test_parman.c
index f9b97426a337..28f0951189ab 100644
--- a/lib/test_parman.c
+++ b/lib/test_parman.c
@@ -219,7 +219,7 @@ static struct test_parman *test_parman_create(const struct parman_ops *ops)
struct test_parman *test_parman;
int err;
- test_parman = kzalloc(sizeof(*test_parman), GFP_KERNEL);
+ test_parman = kzalloc_obj(*test_parman);
if (!test_parman)
return ERR_PTR(-ENOMEM);
err = test_parman_resize(test_parman, TEST_PARMAN_BASE_COUNT);
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index c63db03ebb9d..0b33559a910b 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -524,7 +524,7 @@ static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects,
const char *key;
int err = 0;
- rhlt = kmalloc(sizeof(*rhlt), GFP_KERNEL);
+ rhlt = kmalloc_obj(*rhlt);
if (WARN_ON(!rhlt))
return -EINVAL;
diff --git a/lib/test_uuid.c b/lib/test_uuid.c
deleted file mode 100644
index 0124fad5d72c..000000000000
--- a/lib/test_uuid.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Test cases for lib/uuid.c module.
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/uuid.h>
-
-struct test_uuid_data {
- const char *uuid;
- guid_t le;
- uuid_t be;
-};
-
-static const struct test_uuid_data test_uuid_test_data[] = {
- {
- .uuid = "c33f4995-3701-450e-9fbf-206a2e98e576",
- .le = GUID_INIT(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
- .be = UUID_INIT(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
- },
- {
- .uuid = "64b4371c-77c1-48f9-8221-29f054fc023b",
- .le = GUID_INIT(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
- .be = UUID_INIT(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
- },
- {
- .uuid = "0cb4ddff-a545-4401-9d06-688af53e7f84",
- .le = GUID_INIT(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
- .be = UUID_INIT(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
- },
-};
-
-static const char * const test_uuid_wrong_data[] = {
- "c33f4995-3701-450e-9fbf206a2e98e576 ", /* no hyphen(s) */
- "64b4371c-77c1-48f9-8221-29f054XX023b", /* invalid character(s) */
- "0cb4ddff-a545-4401-9d06-688af53e", /* not enough data */
-};
-
-static unsigned total_tests __initdata;
-static unsigned failed_tests __initdata;
-
-static void __init test_uuid_failed(const char *prefix, bool wrong, bool be,
- const char *data, const char *actual)
-{
- pr_err("%s test #%u %s %s data: '%s'\n",
- prefix,
- total_tests,
- wrong ? "passed on wrong" : "failed on",
- be ? "BE" : "LE",
- data);
- if (actual && *actual)
- pr_err("%s test #%u actual data: '%s'\n",
- prefix,
- total_tests,
- actual);
- failed_tests++;
-}
-
-static void __init test_uuid_test(const struct test_uuid_data *data)
-{
- guid_t le;
- uuid_t be;
- char buf[48];
-
- /* LE */
- total_tests++;
- if (guid_parse(data->uuid, &le))
- test_uuid_failed("conversion", false, false, data->uuid, NULL);
-
- total_tests++;
- if (!guid_equal(&data->le, &le)) {
- sprintf(buf, "%pUl", &le);
- test_uuid_failed("cmp", false, false, data->uuid, buf);
- }
-
- /* BE */
- total_tests++;
- if (uuid_parse(data->uuid, &be))
- test_uuid_failed("conversion", false, true, data->uuid, NULL);
-
- total_tests++;
- if (!uuid_equal(&data->be, &be)) {
- sprintf(buf, "%pUb", &be);
- test_uuid_failed("cmp", false, true, data->uuid, buf);
- }
-}
-
-static void __init test_uuid_wrong(const char *data)
-{
- guid_t le;
- uuid_t be;
-
- /* LE */
- total_tests++;
- if (!guid_parse(data, &le))
- test_uuid_failed("negative", true, false, data, NULL);
-
- /* BE */
- total_tests++;
- if (!uuid_parse(data, &be))
- test_uuid_failed("negative", true, true, data, NULL);
-}
-
-static int __init test_uuid_init(void)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(test_uuid_test_data); i++)
- test_uuid_test(&test_uuid_test_data[i]);
-
- for (i = 0; i < ARRAY_SIZE(test_uuid_wrong_data); i++)
- test_uuid_wrong(test_uuid_wrong_data[i]);
-
- if (failed_tests == 0)
- pr_info("all %u tests passed\n", total_tests);
- else
- pr_err("failed %u out of %u tests\n", failed_tests, total_tests);
-
- return failed_tests ? -EINVAL : 0;
-}
-module_init(test_uuid_init);
-
-static void __exit test_uuid_exit(void)
-{
- /* do nothing */
-}
-module_exit(test_uuid_exit);
-
-MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
-MODULE_DESCRIPTION("Test cases for lib/uuid.c module");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index 6521c05c7816..876c72c18a0c 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -58,6 +58,9 @@ __param(int, run_test_mask, 7,
/* Add a new test case description here. */
);
+__param(int, nr_pcpu_objects, 35000,
+ "Number of pcpu objects to allocate for pcpu_alloc_test");
+
/*
* This is for synchronization of setup phase.
*/
@@ -317,24 +320,24 @@ pcpu_alloc_test(void)
size_t size, align;
int i;
- pcpu = vmalloc(sizeof(void __percpu *) * 35000);
+ pcpu = vmalloc(sizeof(void __percpu *) * nr_pcpu_objects);
if (!pcpu)
return -1;
- for (i = 0; i < 35000; i++) {
+ for (i = 0; i < nr_pcpu_objects; i++) {
size = get_random_u32_inclusive(1, PAGE_SIZE / 4);
/*
* Maximum PAGE_SIZE
*/
- align = 1 << get_random_u32_inclusive(1, 11);
+ align = 1 << get_random_u32_inclusive(1, PAGE_SHIFT - 1);
pcpu[i] = __alloc_percpu(size, align);
if (!pcpu[i])
rv = -1;
}
- for (i = 0; i < 35000; i++)
+ for (i = 0; i < nr_pcpu_objects; i++)
free_percpu(pcpu[i]);
vfree(pcpu);
@@ -393,7 +396,7 @@ vm_map_ram_test(void)
int i;
map_nr_pages = nr_pages > 0 ? nr_pages:1;
- pages = kcalloc(map_nr_pages, sizeof(struct page *), GFP_KERNEL);
+ pages = kzalloc_objs(struct page *, map_nr_pages);
if (!pages)
return -1;
@@ -539,7 +542,7 @@ init_test_configuration(void)
nr_threads = clamp(nr_threads, 1, (int) USHRT_MAX);
/* Allocate the space for test instances. */
- tdriver = kvcalloc(nr_threads, sizeof(*tdriver), GFP_KERNEL);
+ tdriver = kvzalloc_objs(*tdriver, nr_threads);
if (tdriver == NULL)
return -1;
diff --git a/lib/tests/Makefile b/lib/tests/Makefile
index 0f24048f3684..7e9c2fa52e35 100644
--- a/lib/tests/Makefile
+++ b/lib/tests/Makefile
@@ -20,22 +20,28 @@ CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN)
obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o
CFLAGS_test_fprobe.o += $(CC_FLAGS_FTRACE)
obj-$(CONFIG_FPROBE_SANITY_TEST) += test_fprobe.o
+obj-$(CONFIG_GLOB_KUNIT_TEST) += glob_kunit.o
obj-$(CONFIG_HASHTABLE_KUNIT_TEST) += hashtable_test.o
obj-$(CONFIG_HASH_KUNIT_TEST) += test_hash.o
obj-$(CONFIG_TEST_IOV_ITER) += kunit_iov_iter.o
obj-$(CONFIG_IS_SIGNED_TYPE_KUNIT_TEST) += is_signed_type_kunit.o
obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
+obj-$(CONFIG_LIST_PRIVATE_KUNIT_TEST) += list-private-test.o
obj-$(CONFIG_KFIFO_KUNIT_TEST) += kfifo_kunit.o
obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
+obj-$(CONFIG_LIVEUPDATE_TEST) += liveupdate.o
CFLAGS_longest_symbol_kunit.o += $(call cc-disable-warning, missing-prototypes)
obj-$(CONFIG_LONGEST_SYM_KUNIT_TEST) += longest_symbol_kunit.o
obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o
+obj-$(CONFIG_MIN_HEAP_KUNIT_TEST) += min_heap_kunit.o
CFLAGS_overflow_kunit.o = $(call cc-disable-warning, tautological-constant-out-of-range-compare)
obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o
+# GCC < 12.1 can miscompile errptr() test when branch profiling is enabled.
+CFLAGS_printf_kunit.o += -DDISABLE_BRANCH_PROFILING
obj-$(CONFIG_PRINTF_KUNIT_TEST) += printf_kunit.o
obj-$(CONFIG_RANDSTRUCT_KUNIT_TEST) += randstruct_kunit.o
obj-$(CONFIG_SCANF_KUNIT_TEST) += scanf_kunit.o
@@ -50,5 +56,6 @@ obj-$(CONFIG_STRING_HELPERS_KUNIT_TEST) += string_helpers_kunit.o
obj-$(CONFIG_USERCOPY_KUNIT_TEST) += usercopy_kunit.o
obj-$(CONFIG_UTIL_MACROS_KUNIT) += util_macros_kunit.o
obj-$(CONFIG_RATELIMIT_KUNIT_TEST) += test_ratelimit.o
+obj-$(CONFIG_UUID_KUNIT_TEST) += uuid_kunit.o
obj-$(CONFIG_TEST_RUNTIME_MODULE) += module/
diff --git a/lib/tests/glob_kunit.c b/lib/tests/glob_kunit.c
new file mode 100644
index 000000000000..362b1eda8e5b
--- /dev/null
+++ b/lib/tests/glob_kunit.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: MIT OR GPL-2.0
+/*
+ * Test cases for glob functions.
+ */
+
+#include <kunit/test.h>
+#include <linux/glob.h>
+#include <linux/module.h>
+
+/**
+ * struct glob_test_case - Test case for glob matching.
+ * @pat: Pattern to match.
+ * @str: String to match against.
+ * @expected: Expected glob_match result, true if matched.
+ */
+struct glob_test_case {
+ const char *pat;
+ const char *str;
+ bool expected;
+};
+
+static const struct glob_test_case glob_test_cases[] = {
+ /* Some basic tests */
+ { .pat = "a", .str = "a", .expected = true },
+ { .pat = "a", .str = "b", .expected = false },
+ { .pat = "a", .str = "aa", .expected = false },
+ { .pat = "a", .str = "", .expected = false },
+ { .pat = "", .str = "", .expected = true },
+ { .pat = "", .str = "a", .expected = false },
+ /* Simple character class tests */
+ { .pat = "[a]", .str = "a", .expected = true },
+ { .pat = "[a]", .str = "b", .expected = false },
+ { .pat = "[!a]", .str = "a", .expected = false },
+ { .pat = "[!a]", .str = "b", .expected = true },
+ { .pat = "[ab]", .str = "a", .expected = true },
+ { .pat = "[ab]", .str = "b", .expected = true },
+ { .pat = "[ab]", .str = "c", .expected = false },
+ { .pat = "[!ab]", .str = "c", .expected = true },
+ { .pat = "[a-c]", .str = "b", .expected = true },
+ { .pat = "[a-c]", .str = "d", .expected = false },
+ /* Corner cases in character class parsing */
+ { .pat = "[a-c-e-g]", .str = "-", .expected = true },
+ { .pat = "[a-c-e-g]", .str = "d", .expected = false },
+ { .pat = "[a-c-e-g]", .str = "f", .expected = true },
+ { .pat = "[]a-ceg-ik[]", .str = "a", .expected = true },
+ { .pat = "[]a-ceg-ik[]", .str = "]", .expected = true },
+ { .pat = "[]a-ceg-ik[]", .str = "[", .expected = true },
+ { .pat = "[]a-ceg-ik[]", .str = "h", .expected = true },
+ { .pat = "[]a-ceg-ik[]", .str = "f", .expected = false },
+ { .pat = "[!]a-ceg-ik[]", .str = "h", .expected = false },
+ { .pat = "[!]a-ceg-ik[]", .str = "]", .expected = false },
+ { .pat = "[!]a-ceg-ik[]", .str = "f", .expected = true },
+ /* Simple wild cards */
+ { .pat = "?", .str = "a", .expected = true },
+ { .pat = "?", .str = "aa", .expected = false },
+ { .pat = "??", .str = "a", .expected = false },
+ { .pat = "?x?", .str = "axb", .expected = true },
+ { .pat = "?x?", .str = "abx", .expected = false },
+ { .pat = "?x?", .str = "xab", .expected = false },
+ /* Asterisk wild cards (backtracking) */
+ { .pat = "*??", .str = "a", .expected = false },
+ { .pat = "*??", .str = "ab", .expected = true },
+ { .pat = "*??", .str = "abc", .expected = true },
+ { .pat = "*??", .str = "abcd", .expected = true },
+ { .pat = "??*", .str = "a", .expected = false },
+ { .pat = "??*", .str = "ab", .expected = true },
+ { .pat = "??*", .str = "abc", .expected = true },
+ { .pat = "??*", .str = "abcd", .expected = true },
+ { .pat = "?*?", .str = "a", .expected = false },
+ { .pat = "?*?", .str = "ab", .expected = true },
+ { .pat = "?*?", .str = "abc", .expected = true },
+ { .pat = "?*?", .str = "abcd", .expected = true },
+ { .pat = "*b", .str = "b", .expected = true },
+ { .pat = "*b", .str = "ab", .expected = true },
+ { .pat = "*b", .str = "ba", .expected = false },
+ { .pat = "*b", .str = "bb", .expected = true },
+ { .pat = "*b", .str = "abb", .expected = true },
+ { .pat = "*b", .str = "bab", .expected = true },
+ { .pat = "*bc", .str = "abbc", .expected = true },
+ { .pat = "*bc", .str = "bc", .expected = true },
+ { .pat = "*bc", .str = "bbc", .expected = true },
+ { .pat = "*bc", .str = "bcbc", .expected = true },
+ /* Multiple asterisks (complex backtracking) */
+ { .pat = "*ac*", .str = "abacadaeafag", .expected = true },
+ { .pat = "*ac*ae*ag*", .str = "abacadaeafag", .expected = true },
+ { .pat = "*a*b*[bc]*[ef]*g*", .str = "abacadaeafag", .expected = true },
+ { .pat = "*a*b*[ef]*[cd]*g*", .str = "abacadaeafag", .expected = false },
+ { .pat = "*abcd*", .str = "abcabcabcabcdefg", .expected = true },
+ { .pat = "*ab*cd*", .str = "abcabcabcabcdefg", .expected = true },
+ { .pat = "*abcd*abcdef*", .str = "abcabcdabcdeabcdefg", .expected = true },
+ { .pat = "*abcd*", .str = "abcabcabcabcefg", .expected = false },
+ { .pat = "*ab*cd*", .str = "abcabcabcabcefg", .expected = false },
+};
+
+static void glob_case_to_desc(const struct glob_test_case *t, char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "pat:\"%s\" str:\"%s\"", t->pat, t->str);
+}
+
+KUNIT_ARRAY_PARAM(glob, glob_test_cases, glob_case_to_desc);
+
+static void glob_test_match(struct kunit *test)
+{
+ const struct glob_test_case *params = test->param_value;
+
+ KUNIT_EXPECT_EQ_MSG(test,
+ glob_match(params->pat, params->str),
+ params->expected,
+ "Pattern: \"%s\", String: \"%s\", Expected: %d",
+ params->pat, params->str, params->expected);
+}
+
+static struct kunit_case glob_kunit_test_cases[] = {
+ KUNIT_CASE_PARAM(glob_test_match, glob_gen_params),
+ {}
+};
+
+static struct kunit_suite glob_test_suite = {
+ .name = "glob",
+ .test_cases = glob_kunit_test_cases,
+};
+
+kunit_test_suite(glob_test_suite);
+MODULE_DESCRIPTION("Test cases for glob functions");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/lib/tests/kunit_iov_iter.c b/lib/tests/kunit_iov_iter.c
index 48342736d016..bb847e5010eb 100644
--- a/lib/tests/kunit_iov_iter.c
+++ b/lib/tests/kunit_iov_iter.c
@@ -387,7 +387,7 @@ static void __init iov_kunit_load_folioq(struct kunit *test,
for (i = 0; i < npages; i++) {
if (folioq_full(p)) {
- p->next = kzalloc(sizeof(struct folio_queue), GFP_KERNEL);
+ p->next = kzalloc_obj(struct folio_queue);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p->next);
folioq_init(p->next, 0);
p->next->prev = p;
@@ -403,7 +403,7 @@ static struct folio_queue *iov_kunit_create_folioq(struct kunit *test)
{
struct folio_queue *folioq;
- folioq = kzalloc(sizeof(struct folio_queue), GFP_KERNEL);
+ folioq = kzalloc_obj(struct folio_queue);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, folioq);
kunit_add_action_or_reset(test, iov_kunit_destroy_folioq, folioq);
folioq_init(folioq, 0);
@@ -565,7 +565,7 @@ static struct xarray *iov_kunit_create_xarray(struct kunit *test)
{
struct xarray *xarray;
- xarray = kzalloc(sizeof(struct xarray), GFP_KERNEL);
+ xarray = kzalloc_obj(struct xarray);
xa_init(xarray);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xarray);
kunit_add_action_or_reset(test, iov_kunit_destroy_xarray, xarray);
diff --git a/lib/tests/list-private-test.c b/lib/tests/list-private-test.c
new file mode 100644
index 000000000000..3bd62939ae67
--- /dev/null
+++ b/lib/tests/list-private-test.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit compilation/smoke test for Private list primitives.
+ *
+ * Copyright (c) 2025, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ */
+#include <linux/list_private.h>
+#include <kunit/test.h>
+
+/*
+ * This forces compiler to warn if you access it directly, because list
+ * primitives expect (struct list_head *), not (volatile struct list_head *).
+ */
+#undef __private
+#define __private volatile
+
+/* Redefine ACCESS_PRIVATE for this test. */
+#undef ACCESS_PRIVATE
+#define ACCESS_PRIVATE(p, member) \
+ (*((struct list_head *)((unsigned long)&((p)->member))))
+
+struct list_test_struct {
+ int data;
+ struct list_head __private list;
+};
+
+static void list_private_compile_test(struct kunit *test)
+{
+ struct list_test_struct entry;
+ struct list_test_struct *pos, *n;
+ LIST_HEAD(head);
+
+ INIT_LIST_HEAD(&ACCESS_PRIVATE(&entry, list));
+ list_add(&ACCESS_PRIVATE(&entry, list), &head);
+ pos = &entry;
+
+ pos = list_private_entry(&ACCESS_PRIVATE(&entry, list), struct list_test_struct, list);
+ pos = list_private_first_entry(&head, struct list_test_struct, list);
+ pos = list_private_last_entry(&head, struct list_test_struct, list);
+ pos = list_private_next_entry(pos, list);
+ pos = list_private_prev_entry(pos, list);
+ pos = list_private_next_entry_circular(pos, &head, list);
+ pos = list_private_prev_entry_circular(pos, &head, list);
+
+ if (list_private_entry_is_head(pos, &head, list))
+ return;
+
+ list_private_for_each_entry(pos, &head, list) { }
+ list_private_for_each_entry_reverse(pos, &head, list) { }
+ list_private_for_each_entry_continue(pos, &head, list) { }
+ list_private_for_each_entry_continue_reverse(pos, &head, list) { }
+ list_private_for_each_entry_from(pos, &head, list) { }
+ list_private_for_each_entry_from_reverse(pos, &head, list) { }
+
+ list_private_for_each_entry_safe(pos, n, &head, list)
+ list_private_safe_reset_next(pos, n, list);
+ list_private_for_each_entry_safe_continue(pos, n, &head, list) { }
+ list_private_for_each_entry_safe_from(pos, n, &head, list) { }
+ list_private_for_each_entry_safe_reverse(pos, n, &head, list) { }
+}
+
+static struct kunit_case list_private_test_cases[] = {
+ KUNIT_CASE(list_private_compile_test),
+ {},
+};
+
+static struct kunit_suite list_private_test_module = {
+ .name = "list-private-kunit-test",
+ .test_cases = list_private_test_cases,
+};
+
+kunit_test_suite(list_private_test_module);
+
+MODULE_DESCRIPTION("KUnit compilation test for private list primitives");
+MODULE_LICENSE("GPL");
diff --git a/lib/tests/list-test.c b/lib/tests/list-test.c
index 9135cdc1bb39..6d9227a2b204 100644
--- a/lib/tests/list-test.c
+++ b/lib/tests/list-test.c
@@ -26,10 +26,10 @@ static void list_test_list_init(struct kunit *test)
INIT_LIST_HEAD(&list2);
- list4 = kzalloc(sizeof(*list4), GFP_KERNEL | __GFP_NOFAIL);
+ list4 = kzalloc_obj(*list4, GFP_KERNEL | __GFP_NOFAIL);
INIT_LIST_HEAD(list4);
- list5 = kmalloc(sizeof(*list5), GFP_KERNEL | __GFP_NOFAIL);
+ list5 = kmalloc_obj(*list5, GFP_KERNEL | __GFP_NOFAIL);
memset(list5, 0xFF, sizeof(*list5));
INIT_LIST_HEAD(list5);
@@ -829,10 +829,10 @@ static void hlist_test_init(struct kunit *test)
INIT_HLIST_HEAD(&list2);
- list4 = kzalloc(sizeof(*list4), GFP_KERNEL | __GFP_NOFAIL);
+ list4 = kzalloc_obj(*list4, GFP_KERNEL | __GFP_NOFAIL);
INIT_HLIST_HEAD(list4);
- list5 = kmalloc(sizeof(*list5), GFP_KERNEL | __GFP_NOFAIL);
+ list5 = kmalloc_obj(*list5, GFP_KERNEL | __GFP_NOFAIL);
memset(list5, 0xFF, sizeof(*list5));
INIT_HLIST_HEAD(list5);
diff --git a/lib/tests/liveupdate.c b/lib/tests/liveupdate.c
new file mode 100644
index 000000000000..496d6ef91a30
--- /dev/null
+++ b/lib/tests/liveupdate.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (c) 2025, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME " test: " fmt
+
+#include <linux/cleanup.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/liveupdate.h>
+#include <linux/module.h>
+#include "../../kernel/liveupdate/luo_internal.h"
+
+static const struct liveupdate_flb_ops test_flb_ops;
+#define DEFINE_TEST_FLB(i) { \
+ .ops = &test_flb_ops, \
+ .compatible = LIVEUPDATE_TEST_FLB_COMPATIBLE(i), \
+}
+
+/* Number of Test FLBs to register with every file handler */
+#define TEST_NFLBS 3
+static struct liveupdate_flb test_flbs[TEST_NFLBS] = {
+ DEFINE_TEST_FLB(0),
+ DEFINE_TEST_FLB(1),
+ DEFINE_TEST_FLB(2),
+};
+
+#define TEST_FLB_MAGIC_BASE 0xFEEDF00DCAFEBEE0ULL
+
+static int test_flb_preserve(struct liveupdate_flb_op_args *argp)
+{
+ ptrdiff_t index = argp->flb - test_flbs;
+
+ pr_info("%s: preserve was triggered\n", argp->flb->compatible);
+ argp->data = TEST_FLB_MAGIC_BASE + index;
+
+ return 0;
+}
+
+static void test_flb_unpreserve(struct liveupdate_flb_op_args *argp)
+{
+ pr_info("%s: unpreserve was triggered\n", argp->flb->compatible);
+}
+
+static int test_flb_retrieve(struct liveupdate_flb_op_args *argp)
+{
+ ptrdiff_t index = argp->flb - test_flbs;
+ u64 expected_data = TEST_FLB_MAGIC_BASE + index;
+
+ if (argp->data == expected_data) {
+ pr_info("%s: found flb data from the previous boot\n",
+ argp->flb->compatible);
+ argp->obj = (void *)argp->data;
+ } else {
+ pr_err("%s: ERROR - incorrect data handle: %llx, expected %llx\n",
+ argp->flb->compatible, argp->data, expected_data);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void test_flb_finish(struct liveupdate_flb_op_args *argp)
+{
+ ptrdiff_t index = argp->flb - test_flbs;
+ void *expected_obj = (void *)(TEST_FLB_MAGIC_BASE + index);
+
+ if (argp->obj == expected_obj) {
+ pr_info("%s: finish was triggered\n", argp->flb->compatible);
+ } else {
+ pr_err("%s: ERROR - finish called with invalid object\n",
+ argp->flb->compatible);
+ }
+}
+
+static const struct liveupdate_flb_ops test_flb_ops = {
+ .preserve = test_flb_preserve,
+ .unpreserve = test_flb_unpreserve,
+ .retrieve = test_flb_retrieve,
+ .finish = test_flb_finish,
+ .owner = THIS_MODULE,
+};
+
+static void liveupdate_test_init(void)
+{
+ static DEFINE_MUTEX(init_lock);
+ static bool initialized;
+ int i;
+
+ guard(mutex)(&init_lock);
+
+ if (initialized)
+ return;
+
+ for (i = 0; i < TEST_NFLBS; i++) {
+ struct liveupdate_flb *flb = &test_flbs[i];
+ void *obj;
+ int err;
+
+ err = liveupdate_flb_get_incoming(flb, &obj);
+ if (err && err != -ENODATA && err != -ENOENT) {
+ pr_err("liveupdate_flb_get_incoming for %s failed: %pe\n",
+ flb->compatible, ERR_PTR(err));
+ }
+ }
+ initialized = true;
+}
+
+void liveupdate_test_register(struct liveupdate_file_handler *fh)
+{
+ int err, i;
+
+ liveupdate_test_init();
+
+ for (i = 0; i < TEST_NFLBS; i++) {
+ struct liveupdate_flb *flb = &test_flbs[i];
+
+ err = liveupdate_register_flb(fh, flb);
+ if (err) {
+ pr_err("Failed to register %s %pe\n",
+ flb->compatible, ERR_PTR(err));
+ }
+ }
+
+ err = liveupdate_register_flb(fh, &test_flbs[0]);
+ if (!err || err != -EEXIST) {
+ pr_err("Failed: %s should be already registered, but got err: %pe\n",
+ test_flbs[0].compatible, ERR_PTR(err));
+ }
+
+ pr_info("Registered %d FLBs with file handler: [%s]\n",
+ TEST_NFLBS, fh->compatible);
+}
+
+void liveupdate_test_unregister(struct liveupdate_file_handler *fh)
+{
+ int err, i;
+
+ for (i = 0; i < TEST_NFLBS; i++) {
+ struct liveupdate_flb *flb = &test_flbs[i];
+
+ err = liveupdate_unregister_flb(fh, flb);
+ if (err) {
+ pr_err("Failed to unregister %s %pe\n",
+ flb->compatible, ERR_PTR(err));
+ }
+ }
+
+ pr_info("Unregistered %d FLBs from file handler: [%s]\n",
+ TEST_NFLBS, fh->compatible);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pasha Tatashin <pasha.tatashin@soleen.com>");
+MODULE_DESCRIPTION("In-kernel test for LUO mechanism");
diff --git a/lib/test_min_heap.c b/lib/tests/min_heap_kunit.c
index a9c4a74d3898..9c1122661698 100644
--- a/lib/test_min_heap.c
+++ b/lib/tests/min_heap_kunit.c
@@ -1,60 +1,66 @@
// SPDX-License-Identifier: GPL-2.0-only
-#define pr_fmt(fmt) "min_heap_test: " fmt
-
/*
* Test cases for the min max heap.
*/
-#include <linux/log2.h>
+#include <kunit/test.h>
#include <linux/min_heap.h>
#include <linux/module.h>
-#include <linux/printk.h>
#include <linux/random.h>
+struct min_heap_test_case {
+ const char *str;
+ bool min_heap;
+};
+
+static struct min_heap_test_case min_heap_cases[] = {
+ {
+ .str = "min",
+ .min_heap = true,
+ },
+ {
+ .str = "max",
+ .min_heap = false,
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(min_heap, min_heap_cases, str);
+
DEFINE_MIN_HEAP(int, min_heap_test);
-static __init bool less_than(const void *lhs, const void *rhs, void __always_unused *args)
+static bool less_than(const void *lhs, const void *rhs, void __always_unused *args)
{
return *(int *)lhs < *(int *)rhs;
}
-static __init bool greater_than(const void *lhs, const void *rhs, void __always_unused *args)
+static bool greater_than(const void *lhs, const void *rhs, void __always_unused *args)
{
return *(int *)lhs > *(int *)rhs;
}
-static __init int pop_verify_heap(bool min_heap,
- struct min_heap_test *heap,
- const struct min_heap_callbacks *funcs)
+static void pop_verify_heap(struct kunit *test,
+ bool min_heap,
+ struct min_heap_test *heap,
+ const struct min_heap_callbacks *funcs)
{
int *values = heap->data;
- int err = 0;
int last;
last = values[0];
min_heap_pop_inline(heap, funcs, NULL);
while (heap->nr > 0) {
- if (min_heap) {
- if (last > values[0]) {
- pr_err("error: expected %d <= %d\n", last,
- values[0]);
- err++;
- }
- } else {
- if (last < values[0]) {
- pr_err("error: expected %d >= %d\n", last,
- values[0]);
- err++;
- }
- }
+ if (min_heap)
+ KUNIT_EXPECT_LE(test, last, values[0]);
+ else
+ KUNIT_EXPECT_GE(test, last, values[0]);
last = values[0];
min_heap_pop_inline(heap, funcs, NULL);
}
- return err;
}
-static __init int test_heapify_all(bool min_heap)
+static void test_heapify_all(struct kunit *test)
{
+ const struct min_heap_test_case *params = test->param_value;
int values[] = { 3, 1, 2, 4, 0x8000000, 0x7FFFFFF, 0,
-3, -1, -2, -4, 0x8000000, 0x7FFFFFF };
struct min_heap_test heap = {
@@ -63,15 +69,14 @@ static __init int test_heapify_all(bool min_heap)
.size = ARRAY_SIZE(values),
};
struct min_heap_callbacks funcs = {
- .less = min_heap ? less_than : greater_than,
+ .less = params->min_heap ? less_than : greater_than,
.swp = NULL,
};
- int i, err;
+ int i;
/* Test with known set of values. */
min_heapify_all_inline(&heap, &funcs, NULL);
- err = pop_verify_heap(min_heap, &heap, &funcs);
-
+ pop_verify_heap(test, params->min_heap, &heap, &funcs);
/* Test with randomly generated values. */
heap.nr = ARRAY_SIZE(values);
@@ -79,13 +84,12 @@ static __init int test_heapify_all(bool min_heap)
values[i] = get_random_u32();
min_heapify_all_inline(&heap, &funcs, NULL);
- err += pop_verify_heap(min_heap, &heap, &funcs);
-
- return err;
+ pop_verify_heap(test, params->min_heap, &heap, &funcs);
}
-static __init int test_heap_push(bool min_heap)
+static void test_heap_push(struct kunit *test)
{
+ const struct min_heap_test_case *params = test->param_value;
const int data[] = { 3, 1, 2, 4, 0x80000000, 0x7FFFFFFF, 0,
-3, -1, -2, -4, 0x80000000, 0x7FFFFFFF };
int values[ARRAY_SIZE(data)];
@@ -95,29 +99,28 @@ static __init int test_heap_push(bool min_heap)
.size = ARRAY_SIZE(values),
};
struct min_heap_callbacks funcs = {
- .less = min_heap ? less_than : greater_than,
+ .less = params->min_heap ? less_than : greater_than,
.swp = NULL,
};
- int i, temp, err;
+ int i, temp;
/* Test with known set of values copied from data. */
for (i = 0; i < ARRAY_SIZE(data); i++)
min_heap_push_inline(&heap, &data[i], &funcs, NULL);
- err = pop_verify_heap(min_heap, &heap, &funcs);
+ pop_verify_heap(test, params->min_heap, &heap, &funcs);
/* Test with randomly generated values. */
while (heap.nr < heap.size) {
temp = get_random_u32();
min_heap_push_inline(&heap, &temp, &funcs, NULL);
}
- err += pop_verify_heap(min_heap, &heap, &funcs);
-
- return err;
+ pop_verify_heap(test, params->min_heap, &heap, &funcs);
}
-static __init int test_heap_pop_push(bool min_heap)
+static void test_heap_pop_push(struct kunit *test)
{
+ const struct min_heap_test_case *params = test->param_value;
const int data[] = { 3, 1, 2, 4, 0x80000000, 0x7FFFFFFF, 0,
-3, -1, -2, -4, 0x80000000, 0x7FFFFFFF };
int values[ARRAY_SIZE(data)];
@@ -127,13 +130,13 @@ static __init int test_heap_pop_push(bool min_heap)
.size = ARRAY_SIZE(values),
};
struct min_heap_callbacks funcs = {
- .less = min_heap ? less_than : greater_than,
+ .less = params->min_heap ? less_than : greater_than,
.swp = NULL,
};
- int i, temp, err;
+ int i, temp;
/* Fill values with data to pop and replace. */
- temp = min_heap ? 0x80000000 : 0x7FFFFFFF;
+ temp = params->min_heap ? 0x80000000 : 0x7FFFFFFF;
for (i = 0; i < ARRAY_SIZE(data); i++)
min_heap_push_inline(&heap, &temp, &funcs, NULL);
@@ -141,7 +144,7 @@ static __init int test_heap_pop_push(bool min_heap)
for (i = 0; i < ARRAY_SIZE(data); i++)
min_heap_pop_push_inline(&heap, &data[i], &funcs, NULL);
- err = pop_verify_heap(min_heap, &heap, &funcs);
+ pop_verify_heap(test, params->min_heap, &heap, &funcs);
heap.nr = 0;
for (i = 0; i < ARRAY_SIZE(data); i++)
@@ -152,13 +155,12 @@ static __init int test_heap_pop_push(bool min_heap)
temp = get_random_u32();
min_heap_pop_push_inline(&heap, &temp, &funcs, NULL);
}
- err += pop_verify_heap(min_heap, &heap, &funcs);
-
- return err;
+ pop_verify_heap(test, params->min_heap, &heap, &funcs);
}
-static __init int test_heap_del(bool min_heap)
+static void test_heap_del(struct kunit *test)
{
+ const struct min_heap_test_case *params = test->param_value;
int values[] = { 3, 1, 2, 4, 0x8000000, 0x7FFFFFF, 0,
-3, -1, -2, -4, 0x8000000, 0x7FFFFFF };
struct min_heap_test heap;
@@ -166,17 +168,16 @@ static __init int test_heap_del(bool min_heap)
min_heap_init_inline(&heap, values, ARRAY_SIZE(values));
heap.nr = ARRAY_SIZE(values);
struct min_heap_callbacks funcs = {
- .less = min_heap ? less_than : greater_than,
+ .less = params->min_heap ? less_than : greater_than,
.swp = NULL,
};
- int i, err;
+ int i;
/* Test with known set of values. */
min_heapify_all_inline(&heap, &funcs, NULL);
for (i = 0; i < ARRAY_SIZE(values) / 2; i++)
min_heap_del_inline(&heap, get_random_u32() % heap.nr, &funcs, NULL);
- err = pop_verify_heap(min_heap, &heap, &funcs);
-
+ pop_verify_heap(test, params->min_heap, &heap, &funcs);
/* Test with randomly generated values. */
heap.nr = ARRAY_SIZE(values);
@@ -186,37 +187,23 @@ static __init int test_heap_del(bool min_heap)
for (i = 0; i < ARRAY_SIZE(values) / 2; i++)
min_heap_del_inline(&heap, get_random_u32() % heap.nr, &funcs, NULL);
- err += pop_verify_heap(min_heap, &heap, &funcs);
-
- return err;
+ pop_verify_heap(test, params->min_heap, &heap, &funcs);
}
-static int __init test_min_heap_init(void)
-{
- int err = 0;
-
- err += test_heapify_all(true);
- err += test_heapify_all(false);
- err += test_heap_push(true);
- err += test_heap_push(false);
- err += test_heap_pop_push(true);
- err += test_heap_pop_push(false);
- err += test_heap_del(true);
- err += test_heap_del(false);
- if (err) {
- pr_err("test failed with %d errors\n", err);
- return -EINVAL;
- }
- pr_info("test passed\n");
- return 0;
-}
-module_init(test_min_heap_init);
+static struct kunit_case min_heap_test_cases[] = {
+ KUNIT_CASE_PARAM(test_heapify_all, min_heap_gen_params),
+ KUNIT_CASE_PARAM(test_heap_push, min_heap_gen_params),
+ KUNIT_CASE_PARAM(test_heap_pop_push, min_heap_gen_params),
+ KUNIT_CASE_PARAM(test_heap_del, min_heap_gen_params),
+ {},
+};
-static void __exit test_min_heap_exit(void)
-{
- /* do nothing */
-}
-module_exit(test_min_heap_exit);
+static struct kunit_suite min_heap_test_suite = {
+ .name = "min_heap",
+ .test_cases = min_heap_test_cases,
+};
+
+kunit_test_suite(min_heap_test_suite);
MODULE_DESCRIPTION("Test cases for the min max heap");
MODULE_LICENSE("GPL");
diff --git a/lib/tests/test_ratelimit.c b/lib/tests/test_ratelimit.c
index bfaeca49304a..33cea5f3d28b 100644
--- a/lib/tests/test_ratelimit.c
+++ b/lib/tests/test_ratelimit.c
@@ -104,7 +104,7 @@ static void test_ratelimit_stress(struct kunit *test)
int i;
const int n_stress_kthread = cpumask_weight(cpu_online_mask);
struct stress_kthread skt = { 0 };
- struct stress_kthread *sktp = kcalloc(n_stress_kthread, sizeof(*sktp), GFP_KERNEL);
+ struct stress_kthread *sktp = kzalloc_objs(*sktp, n_stress_kthread);
KUNIT_EXPECT_NOT_NULL_MSG(test, sktp, "Memory allocation failure");
for (i = 0; i < n_stress_kthread; i++) {
diff --git a/lib/tests/uuid_kunit.c b/lib/tests/uuid_kunit.c
new file mode 100644
index 000000000000..de71b2649dac
--- /dev/null
+++ b/lib/tests/uuid_kunit.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/*
+ * Test cases for lib/uuid.c module.
+ */
+
+#include <kunit/test.h>
+#include <linux/uuid.h>
+
+struct test_uuid_data {
+ const char *uuid;
+ guid_t le;
+ uuid_t be;
+};
+
+static const struct test_uuid_data test_uuid_test_data[] = {
+ {
+ .uuid = "c33f4995-3701-450e-9fbf-206a2e98e576",
+ .le = GUID_INIT(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
+ .be = UUID_INIT(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
+ },
+ {
+ .uuid = "64b4371c-77c1-48f9-8221-29f054fc023b",
+ .le = GUID_INIT(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
+ .be = UUID_INIT(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
+ },
+ {
+ .uuid = "0cb4ddff-a545-4401-9d06-688af53e7f84",
+ .le = GUID_INIT(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
+ .be = UUID_INIT(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
+ },
+};
+
+static const char * const test_uuid_wrong_data[] = {
+ "c33f4995-3701-450e-9fbf206a2e98e576 ", /* no hyphen(s) */
+ "64b4371c-77c1-48f9-8221-29f054XX023b", /* invalid character(s) */
+ "0cb4ddff-a545-4401-9d06-688af53e", /* not enough data */
+};
+
+static void uuid_test_guid_valid(struct kunit *test)
+{
+ unsigned int i;
+ const struct test_uuid_data *data;
+ guid_t le;
+
+ for (i = 0; i < ARRAY_SIZE(test_uuid_test_data); i++) {
+ data = &test_uuid_test_data[i];
+ KUNIT_EXPECT_EQ(test, guid_parse(data->uuid, &le), 0);
+ KUNIT_EXPECT_TRUE(test, guid_equal(&data->le, &le));
+ }
+}
+
+static void uuid_test_uuid_valid(struct kunit *test)
+{
+ unsigned int i;
+ const struct test_uuid_data *data;
+ uuid_t be;
+
+ for (i = 0; i < ARRAY_SIZE(test_uuid_test_data); i++) {
+ data = &test_uuid_test_data[i];
+ KUNIT_EXPECT_EQ(test, uuid_parse(data->uuid, &be), 0);
+ KUNIT_EXPECT_TRUE(test, uuid_equal(&data->be, &be));
+ }
+}
+
+static void uuid_test_guid_invalid(struct kunit *test)
+{
+ unsigned int i;
+ const char *uuid;
+ guid_t le;
+
+ for (i = 0; i < ARRAY_SIZE(test_uuid_wrong_data); i++) {
+ uuid = test_uuid_wrong_data[i];
+ KUNIT_EXPECT_EQ(test, guid_parse(uuid, &le), -EINVAL);
+ }
+}
+
+static void uuid_test_uuid_invalid(struct kunit *test)
+{
+ unsigned int i;
+ const char *uuid;
+ uuid_t be;
+
+ for (i = 0; i < ARRAY_SIZE(test_uuid_wrong_data); i++) {
+ uuid = test_uuid_wrong_data[i];
+ KUNIT_EXPECT_EQ(test, uuid_parse(uuid, &be), -EINVAL);
+ }
+}
+
+static struct kunit_case uuid_test_cases[] = {
+ KUNIT_CASE(uuid_test_guid_valid),
+ KUNIT_CASE(uuid_test_uuid_valid),
+ KUNIT_CASE(uuid_test_guid_invalid),
+ KUNIT_CASE(uuid_test_uuid_invalid),
+ {},
+};
+
+static struct kunit_suite uuid_test_suite = {
+ .name = "uuid",
+ .test_cases = uuid_test_cases,
+};
+
+kunit_test_suite(uuid_test_suite);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("Test cases for lib/uuid.c module");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/uuid.c b/lib/uuid.c
index e309b4c5be3d..e8543c668dc7 100644
--- a/lib/uuid.c
+++ b/lib/uuid.c
@@ -10,6 +10,7 @@
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/export.h>
+#include <linux/hex.h>
#include <linux/uuid.h>
#include <linux/random.h>
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 2871ffd28103..9f359b31c8d1 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -26,6 +26,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
+#include <linux/hex.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/math64.h>
diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c
index 610d58d947ab..cc49a300a5b2 100644
--- a/lib/xz/xz_dec_bcj.c
+++ b/lib/xz/xz_dec_bcj.c
@@ -591,7 +591,7 @@ enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s, struct xz_dec_lzma2 *lzma2,
struct xz_dec_bcj *xz_dec_bcj_create(bool single_call)
{
- struct xz_dec_bcj *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ struct xz_dec_bcj *s = kmalloc_obj(*s);
if (s != NULL)
s->single_call = single_call;
diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c
index 83bb66b6016d..4b783ac94e71 100644
--- a/lib/xz/xz_dec_lzma2.c
+++ b/lib/xz/xz_dec_lzma2.c
@@ -1138,7 +1138,7 @@ enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s, struct xz_buf *b)
struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode, uint32_t dict_max)
{
- struct xz_dec_lzma2 *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ struct xz_dec_lzma2 *s = kmalloc_obj(*s);
if (s == NULL)
return NULL;
@@ -1296,7 +1296,7 @@ struct xz_dec_microlzma *xz_dec_microlzma_alloc(enum xz_mode mode,
if (dict_size < 4096 || dict_size > (3U << 30))
return NULL;
- s = kmalloc(sizeof(*s), GFP_KERNEL);
+ s = kmalloc_obj(*s);
if (s == NULL)
return NULL;
diff --git a/lib/xz/xz_dec_stream.c b/lib/xz/xz_dec_stream.c
index f9d003684d56..59bfd54ffee7 100644
--- a/lib/xz/xz_dec_stream.c
+++ b/lib/xz/xz_dec_stream.c
@@ -784,7 +784,7 @@ enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b)
struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max)
{
- struct xz_dec *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ struct xz_dec *s = kmalloc_obj(*s);
if (s == NULL)
return NULL;
diff --git a/lib/zlib_inflate/infutil.c b/lib/zlib_inflate/infutil.c
index 4824c2cc7a09..12169aacd3f1 100644
--- a/lib/zlib_inflate/infutil.c
+++ b/lib/zlib_inflate/infutil.c
@@ -14,7 +14,7 @@ int zlib_inflate_blob(void *gunzip_buf, unsigned int sz,
int rc;
rc = -ENOMEM;
- strm = kmalloc(sizeof(*strm), GFP_KERNEL);
+ strm = kmalloc_obj(*strm);
if (strm == NULL)
goto gunzip_nomem1;
strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);