summaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
authorVlastimil Babka (SUSE) <vbabka@kernel.org>2026-04-07 14:39:34 +0200
committerVlastimil Babka (SUSE) <vbabka@kernel.org>2026-04-13 13:23:36 +0200
commit44e0ebe4accd67c67134cf3b805917153041a300 (patch)
tree92783b4ef7f67b71ef2d38742547bf257758510a /lib
parent51274836193a661a3f39e7f10629d5978a61bbfb (diff)
parent92af129b4085cd561b59bfa1596653844cb82e4c (diff)
Merge branch 'slab/for-7.1/misc' into slab/for-next
Merge misc slab changes that are not related to sheaves. Various improvements for sysfs, debugging and testing.
Diffstat (limited to 'lib')
-rw-r--r--lib/tests/slub_kunit.c92
1 file changed, 92 insertions, 0 deletions
diff --git a/lib/tests/slub_kunit.c b/lib/tests/slub_kunit.c
index 848b682a2d70..fa6d31dbca16 100644
--- a/lib/tests/slub_kunit.c
+++ b/lib/tests/slub_kunit.c
@@ -7,6 +7,7 @@
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
+#include <linux/perf_event.h>
#include "../mm/slab.h"
static struct kunit_resource resource;
@@ -291,6 +292,94 @@ static void test_krealloc_redzone_zeroing(struct kunit *test)
kmem_cache_destroy(s);
}
+#ifdef CONFIG_PERF_EVENTS
+/* Stress-test sizing: NR_ITERATIONS alloc/free rounds over NR_OBJECTS objects */
+#define NR_ITERATIONS 1000
+#define NR_OBJECTS 1000
+static void *objects[NR_OBJECTS];
+
+/*
+ * Per-test state handed to the perf overflow handler via
+ * event->overflow_handler_context.
+ */
+struct test_nolock_context {
+ struct kunit *test; /* owning kunit test (stored, not read by the handler) */
+ int callback_count; /* total overflow callbacks fired */
+ int alloc_ok; /* kmalloc_nolock() calls that returned an object */
+ int alloc_fail; /* kmalloc_nolock() calls that returned NULL */
+ struct perf_event *event;
+};
+
+/*
+ * Pinned CPU-cycles event sampling at ~100 kHz. Its overflow handler is the
+ * vehicle for calling kmalloc_nolock()/kfree_nolock() from interrupt context
+ * (presumably NMI for hardware events — contexts where the regular slab
+ * locking paths cannot be used). Created disabled; enabled by the test.
+ */
+static struct perf_event_attr hw_attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ .size = sizeof(struct perf_event_attr),
+ .pinned = 1,
+ .disabled = 1,
+ .freq = 1, /* frequency mode: kernel adjusts period to hit sample_freq */
+ .sample_freq = 100000,
+};
+
+/*
+ * Perf overflow callback: exercises kmalloc_nolock()/kfree_nolock() from a
+ * context that may interrupt the regular slab fast paths being driven by the
+ * test thread. Success/failure tallies go into the shared context so the
+ * test can report them afterwards.
+ */
+static void overflow_handler_test_kmalloc_kfree_nolock(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ void *objp;
+ gfp_t gfp;
+ struct test_nolock_context *ctx = event->overflow_handler_context;
+
+ /* __GFP_ACCOUNT to test kmalloc_nolock() in alloc_slab_obj_exts() */
+ gfp = (ctx->callback_count % 2) ? 0 : __GFP_ACCOUNT;
+ objp = kmalloc_nolock(64, gfp, NUMA_NO_NODE);
+
+ /* NULL here is expected: the nolock path may bail out rather than block */
+ if (objp)
+ ctx->alloc_ok++;
+ else
+ ctx->alloc_fail++;
+
+ /* assumes kfree_nolock(NULL) is a no-op like kfree() — TODO confirm */
+ kfree_nolock(objp);
+ ctx->callback_count++;
+}
+
+/*
+ * Stress kmalloc_nolock()/kfree_nolock(): attach a high-frequency pinned
+ * hardware perf event to the current task so its overflow handler performs
+ * nolock allocations while this thread hammers the same-size (64-byte) slab
+ * with regular kmalloc()/kfree(), interleaving GFP_KERNEL and
+ * GFP_KERNEL_ACCOUNT rounds. Any corruption shows up in slab_errors.
+ */
+static void test_kmalloc_kfree_nolock(struct kunit *test)
+{
+ int i, j;
+ struct test_nolock_context ctx = { .test = test };
+ struct perf_event *event;
+ bool alloc_fail = false;
+
+ /* cpu == -1, task == current: event counts only while this task runs */
+ event = perf_event_create_kernel_counter(&hw_attr, -1, current,
+ overflow_handler_test_kmalloc_kfree_nolock,
+ &ctx);
+ if (IS_ERR(event))
+ kunit_skip(test, "Failed to create perf event");
+ ctx.event = event;
+ perf_event_enable(ctx.event);
+ for (i = 0; i < NR_ITERATIONS; i++) {
+ for (j = 0; j < NR_OBJECTS; j++) {
+ gfp_t gfp = (i % 2) ? GFP_KERNEL : GFP_KERNEL_ACCOUNT;
+
+ objects[j] = kmalloc(64, gfp);
+ if (!objects[j]) {
+ /* unwind the objects allocated so far this round */
+ j--;
+ while (j >= 0)
+ kfree(objects[j--]);
+ alloc_fail = true;
+ goto cleanup;
+ }
+ }
+ for (j = 0; j < NR_OBJECTS; j++)
+ kfree(objects[j]);
+ }
+
+cleanup:
+ /* disable before release so the handler stops touching ctx (on stack) */
+ perf_event_disable(ctx.event);
+ perf_event_release_kernel(ctx.event);
+
+ kunit_info(test, "callback_count: %d, alloc_ok: %d, alloc_fail: %d\n",
+ ctx.callback_count, ctx.alloc_ok, ctx.alloc_fail);
+
+ /* OOM in the driver loop means the stress run is inconclusive, not a bug */
+ if (alloc_fail)
+ kunit_skip(test, "Allocation failed");
+ KUNIT_EXPECT_EQ(test, 0, slab_errors);
+}
+#endif
+
static int test_init(struct kunit *test)
{
slab_errors = 0;
@@ -315,6 +404,9 @@ static struct kunit_case test_cases[] = {
KUNIT_CASE(test_kfree_rcu_wq_destroy),
KUNIT_CASE(test_leak_destroy),
KUNIT_CASE(test_krealloc_redzone_zeroing),
+#ifdef CONFIG_PERF_EVENTS
+ KUNIT_CASE_SLOW(test_kmalloc_kfree_nolock),
+#endif
{}
};