Diffstat (limited to 'kernel/locking')
-rw-r--r--	kernel/locking/test-ww_mutex.c	165
1 file changed, 122 insertions(+), 43 deletions(-)
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
index bcb1b9fea588..79b5e45f8d4c 100644
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -13,7 +13,8 @@
 #include <linux/slab.h>
 #include <linux/ww_mutex.h>
 
-static DEFINE_WD_CLASS(ww_class);
+static DEFINE_WD_CLASS(wd_class);
+static DEFINE_WW_CLASS(ww_class);
 struct workqueue_struct *wq;
 
 #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
@@ -54,16 +55,16 @@ static void test_mutex_work(struct work_struct *work)
 	ww_mutex_unlock(&mtx->mutex);
 }
 
-static int __test_mutex(unsigned int flags)
+static int __test_mutex(struct ww_class *class, unsigned int flags)
 {
 #define TIMEOUT (HZ / 16)
 	struct test_mutex mtx;
 	struct ww_acquire_ctx ctx;
 	int ret;
 
-	ww_mutex_init(&mtx.mutex, &ww_class);
+	ww_mutex_init(&mtx.mutex, class);
 	if (flags & TEST_MTX_CTX)
-		ww_acquire_init(&ctx, &ww_class);
+		ww_acquire_init(&ctx, class);
 
 	INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
 	init_completion(&mtx.ready);
@@ -71,7 +72,7 @@ static int __test_mutex(unsigned int flags)
 	init_completion(&mtx.done);
 	mtx.flags = flags;
 
-	schedule_work(&mtx.work);
+	queue_work(wq, &mtx.work);
 
 	wait_for_completion(&mtx.ready);
 	ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
@@ -106,13 +107,13 @@ static int __test_mutex(unsigned int flags)
 #undef TIMEOUT
 }
 
-static int test_mutex(void)
+static int test_mutex(struct ww_class *class)
 {
 	int ret;
 	int i;
 
 	for (i = 0; i < __TEST_MTX_LAST; i++) {
-		ret = __test_mutex(i);
+		ret = __test_mutex(class, i);
 		if (ret)
 			return ret;
 	}
@@ -120,15 +121,15 @@ static int test_mutex(void)
 	return 0;
 }
 
-static int test_aa(bool trylock)
+static int test_aa(struct ww_class *class, bool trylock)
 {
 	struct ww_mutex mutex;
 	struct ww_acquire_ctx ctx;
 	int ret;
 	const char *from = trylock ? "trylock" : "lock";
 
-	ww_mutex_init(&mutex, &ww_class);
-	ww_acquire_init(&ctx, &ww_class);
+	ww_mutex_init(&mutex, class);
+	ww_acquire_init(&ctx, class);
 
 	if (!trylock) {
 		ret = ww_mutex_lock(&mutex, &ctx);
@@ -177,6 +178,7 @@ out:
 
 struct test_abba {
 	struct work_struct work;
+	struct ww_class *class;
 	struct ww_mutex a_mutex;
 	struct ww_mutex b_mutex;
 	struct completion a_ready;
@@ -191,7 +193,7 @@ static void test_abba_work(struct work_struct *work)
 	struct ww_acquire_ctx ctx;
 	int err;
 
-	ww_acquire_init_noinject(&ctx, &ww_class);
+	ww_acquire_init_noinject(&ctx, abba->class);
 	if (!abba->trylock)
 		ww_mutex_lock(&abba->b_mutex, &ctx);
 	else
@@ -217,23 +219,24 @@ static void test_abba_work(struct work_struct *work)
 	abba->result = err;
 }
 
-static int test_abba(bool trylock, bool resolve)
+static int test_abba(struct ww_class *class, bool trylock, bool resolve)
 {
 	struct test_abba abba;
 	struct ww_acquire_ctx ctx;
 	int err, ret;
 
-	ww_mutex_init(&abba.a_mutex, &ww_class);
-	ww_mutex_init(&abba.b_mutex, &ww_class);
+	ww_mutex_init(&abba.a_mutex, class);
+	ww_mutex_init(&abba.b_mutex, class);
 	INIT_WORK_ONSTACK(&abba.work, test_abba_work);
 	init_completion(&abba.a_ready);
 	init_completion(&abba.b_ready);
+	abba.class = class;
 	abba.trylock = trylock;
 	abba.resolve = resolve;
 
-	schedule_work(&abba.work);
+	queue_work(wq, &abba.work);
 
-	ww_acquire_init_noinject(&ctx, &ww_class);
+	ww_acquire_init_noinject(&ctx, class);
 	if (!trylock)
 		ww_mutex_lock(&abba.a_mutex, &ctx);
 	else
@@ -278,6 +281,7 @@ static int test_abba(bool trylock, bool resolve)
 
 struct test_cycle {
 	struct work_struct work;
+	struct ww_class *class;
 	struct ww_mutex a_mutex;
 	struct ww_mutex *b_mutex;
 	struct completion *a_signal;
@@ -291,7 +295,7 @@ static void test_cycle_work(struct work_struct *work)
 	struct ww_acquire_ctx ctx;
 	int err, erra = 0;
 
-	ww_acquire_init_noinject(&ctx, &ww_class);
+	ww_acquire_init_noinject(&ctx, cycle->class);
 	ww_mutex_lock(&cycle->a_mutex, &ctx);
 
 	complete(cycle->a_signal);
@@ -314,7 +318,7 @@ static void test_cycle_work(struct work_struct *work)
 	cycle->result = err ?: erra;
 }
 
-static int __test_cycle(unsigned int nthreads)
+static int __test_cycle(struct ww_class *class, unsigned int nthreads)
 {
 	struct test_cycle *cycles;
 	unsigned int n, last = nthreads - 1;
@@ -327,7 +331,8 @@ static int __test_cycle(unsigned int nthreads)
 	for (n = 0; n < nthreads; n++) {
 		struct test_cycle *cycle = &cycles[n];
 
-		ww_mutex_init(&cycle->a_mutex, &ww_class);
+		cycle->class = class;
+		ww_mutex_init(&cycle->a_mutex, class);
 		if (n == last)
 			cycle->b_mutex = &cycles[0].a_mutex;
 		else
@@ -367,13 +372,13 @@ static int __test_cycle(unsigned int nthreads)
 	return ret;
 }
 
-static int test_cycle(unsigned int ncpus)
+static int test_cycle(struct ww_class *class, unsigned int ncpus)
 {
 	unsigned int n;
 	int ret;
 
 	for (n = 2; n <= ncpus + 1; n++) {
-		ret = __test_cycle(n);
+		ret = __test_cycle(class, n);
 		if (ret)
 			return ret;
 	}
@@ -384,6 +389,7 @@ static int test_cycle(unsigned int ncpus)
 struct stress {
 	struct work_struct work;
 	struct ww_mutex *locks;
+	struct ww_class *class;
 	unsigned long timeout;
 	int nlocks;
 };
@@ -443,7 +449,7 @@ static void stress_inorder_work(struct work_struct *work)
 	int contended = -1;
 	int n, err;
 
-	ww_acquire_init(&ctx, &ww_class);
+	ww_acquire_init(&ctx, stress->class);
 retry:
 	err = 0;
 	for (n = 0; n < nlocks; n++) {
@@ -511,7 +517,7 @@ static void stress_reorder_work(struct work_struct *work)
 	order = NULL;
 
 	do {
-		ww_acquire_init(&ctx, &ww_class);
+		ww_acquire_init(&ctx, stress->class);
 
 		list_for_each_entry(ll, &locks, link) {
 			err = ww_mutex_lock(ll->lock, &ctx);
@@ -570,7 +576,7 @@ static void stress_one_work(struct work_struct *work)
 #define STRESS_ONE BIT(2)
 #define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)
 
-static int stress(int nlocks, int nthreads, unsigned int flags)
+static int stress(struct ww_class *class, int nlocks, int nthreads, unsigned int flags)
 {
 	struct ww_mutex *locks;
 	struct stress *stress_array;
@@ -588,7 +594,7 @@ static int stress(int nlocks, int nthreads, unsigned int flags)
 	}
 
 	for (n = 0; n < nlocks; n++)
-		ww_mutex_init(&locks[n], &ww_class);
+		ww_mutex_init(&locks[n], class);
 
 	count = 0;
 	for (n = 0; nthreads; n++) {
@@ -617,6 +623,7 @@ static int stress(int nlocks, int nthreads, unsigned int flags)
 
 		stress = &stress_array[count++];
 		INIT_WORK(&stress->work, fn);
+		stress->class = class;
 		stress->locks = locks;
 		stress->nlocks = nlocks;
 		stress->timeout = jiffies + 2*HZ;
@@ -635,59 +642,131 @@ static int stress(int nlocks, int nthreads, unsigned int flags)
 	return 0;
 }
 
-static int __init test_ww_mutex_init(void)
+static int run_tests(struct ww_class *class)
 {
 	int ncpus = num_online_cpus();
 	int ret, i;
 
-	printk(KERN_INFO "Beginning ww mutex selftests\n");
-
-	prandom_seed_state(&rng, get_random_u64());
-
-	wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
-	if (!wq)
-		return -ENOMEM;
-
-	ret = test_mutex();
+	ret = test_mutex(class);
 	if (ret)
 		return ret;
 
-	ret = test_aa(false);
+	ret = test_aa(class, false);
 	if (ret)
 		return ret;
 
-	ret = test_aa(true);
+	ret = test_aa(class, true);
 	if (ret)
 		return ret;
 
 	for (i = 0; i < 4; i++) {
-		ret = test_abba(i & 1, i & 2);
+		ret = test_abba(class, i & 1, i & 2);
 		if (ret)
 			return ret;
 	}
 
-	ret = test_cycle(ncpus);
+	ret = test_cycle(class, ncpus);
+	if (ret)
+		return ret;
+
+	ret = stress(class, 16, 2 * ncpus, STRESS_INORDER);
+	if (ret)
+		return ret;
+
+	ret = stress(class, 16, 2 * ncpus, STRESS_REORDER);
 	if (ret)
 		return ret;
 
-	ret = stress(16, 2*ncpus, STRESS_INORDER);
+	ret = stress(class, 2046, hweight32(STRESS_ALL) * ncpus, STRESS_ALL);
 	if (ret)
 		return ret;
 
-	ret = stress(16, 2*ncpus, STRESS_REORDER);
+	return 0;
+}
+
+static int run_test_classes(void)
+{
+	int ret;
+
+	pr_info("Beginning ww (wound) mutex selftests\n");
+
+	ret = run_tests(&ww_class);
 	if (ret)
 		return ret;
 
-	ret = stress(2046, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
+	pr_info("Beginning ww (die) mutex selftests\n");
+	ret = run_tests(&wd_class);
 	if (ret)
 		return ret;
 
-	printk(KERN_INFO "All ww mutex selftests passed\n");
+	pr_info("All ww mutex selftests passed\n");
 
 	return 0;
 }
 
+static DEFINE_MUTEX(run_lock);
+
+static ssize_t run_tests_store(struct kobject *kobj, struct kobj_attribute *attr,
+			       const char *buf, size_t count)
+{
+	if (!mutex_trylock(&run_lock)) {
+		pr_err("Test already running\n");
+		return count;
+	}
+
+	run_test_classes();
+	mutex_unlock(&run_lock);
+
+	return count;
+}
+
+static struct kobj_attribute run_tests_attribute =
+	__ATTR(run_tests, 0664, NULL, run_tests_store);
+
+static struct attribute *attrs[] = {
+	&run_tests_attribute.attr,
+	NULL, /* need to NULL terminate the list of attributes */
+};
+
+static struct attribute_group attr_group = {
+	.attrs = attrs,
+};
+
+static struct kobject *test_ww_mutex_kobj;
+
+static int __init test_ww_mutex_init(void)
+{
+	int ret;
+
+	prandom_seed_state(&rng, get_random_u64());
+
+	wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
+	if (!wq)
+		return -ENOMEM;
+
+	test_ww_mutex_kobj = kobject_create_and_add("test_ww_mutex", kernel_kobj);
+	if (!test_ww_mutex_kobj) {
+		destroy_workqueue(wq);
+		return -ENOMEM;
+	}
+
+	/* Create the files associated with this kobject */
+	ret = sysfs_create_group(test_ww_mutex_kobj, &attr_group);
+	if (ret) {
+		kobject_put(test_ww_mutex_kobj);
+		destroy_workqueue(wq);
+		return ret;
+	}
+
+	mutex_lock(&run_lock);
+	ret = run_test_classes();
+	mutex_unlock(&run_lock);
+
+	return ret;
+}
+
 static void __exit test_ww_mutex_exit(void)
 {
+	kobject_put(test_ww_mutex_kobj);
 	destroy_workqueue(wq);
 }
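With this patch applied, the full battery (both the wound-wait ww_class and the wait-die wd_class) can be re-run at runtime without reloading the module. A minimal usage sketch, assuming the module is loaded: the path follows from the kobject_create_and_add("test_ww_mutex", kernel_kobj) call and the run_tests attribute above, and any write triggers run_tests_store().

	# Re-run both the wound-wait and wait-die selftests on demand
	echo 1 > /sys/kernel/test_ww_mutex/run_tests
	# Results are reported to the kernel log via pr_info()/pr_err()
	dmesg | tail

Concurrent writers are serialized by run_lock: a second write arriving while a run is in flight logs "Test already running" and returns without starting another run.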
