From cb4081cd4e0c9fd88d186553edcc78e1bbb96ce6 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Fri, 1 Dec 2017 15:28:11 -0800
Subject: srcu: Abstract function name

This commit moves to __func__ for function names in the name of better
resilience to change.

Signed-off-by: Paul E. McKenney
---
 kernel/rcu/srcutree.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel/rcu')

diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index d5cea81378cc..01f4a8bf0e03 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -386,7 +386,7 @@ void cleanup_srcu_struct(struct srcu_struct *sp)
 		flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);
 	if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
 	    WARN_ON(srcu_readers_active(sp))) {
-		pr_info("cleanup_srcu_struct: Active srcu_struct %p state: %d\n", sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
+		pr_info("%s: Active srcu_struct %p state: %d\n", __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
 		return; /* Caller forgot to stop doing call_srcu()? */
 	}
 	free_percpu(sp->sda);
--
cgit v1.2.3
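The value of the change above is that __func__ is a compile-time constant that always names the enclosing function, so the diagnostic cannot silently go stale if cleanup_srcu_struct() is ever renamed. A minimal, hypothetical userspace sketch of the same idiom (plain C, not kernel code; the function and message are invented for illustration):

    #include <stdio.h>

    static void cleanup_widget(void *wp)
    {
            /* __func__ expands to "cleanup_widget"; renaming the function
             * automatically updates the log prefix as well. */
            printf("%s: releasing widget %p\n", __func__, wp);
    }

    int main(void)
    {
            int widget;

            cleanup_widget(&widget);
            return 0;
    }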
From a35d13ec3686612b17771bf1abcad75d2ebd42ef Mon Sep 17 00:00:00 2001
From: Ildar Ismagilov
Date: Wed, 31 Jan 2018 22:39:53 +0300
Subject: srcu: Prevent sdp->srcu_gp_seq_needed_exp counter wrap

SRCU checks each srcu_data structure's grace-period number for counter
wrap four times per cycle by default.  This frequency guarantees that
normal comparisons will detect potential wrap.  However, the expedited
grace-period number is not checked.  The consequences are not too
horrible (a failure to expedite a grace period when requested), but it
would be good to avoid such things.  This commit therefore adds this
check to the expedited grace-period number.

Signed-off-by: Ildar Ismagilov
Signed-off-by: Paul E. McKenney
---
 kernel/rcu/srcutree.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'kernel/rcu')

diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 01f4a8bf0e03..bb9607baad9b 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -579,6 +579,9 @@ static void srcu_gp_end(struct srcu_struct *sp)
 			if (ULONG_CMP_GE(gpseq,
 					 sdp->srcu_gp_seq_needed + 100))
 				sdp->srcu_gp_seq_needed = gpseq;
+			if (ULONG_CMP_GE(gpseq,
+					 sdp->srcu_gp_seq_needed_exp + 100))
+				sdp->srcu_gp_seq_needed_exp = gpseq;
 			spin_unlock_irqrestore_rcu_node(sdp, flags);
 		}
 	}
--
cgit v1.2.3

From 8ddbd8832d25463a996a1075c35f312fb43c86b1 Mon Sep 17 00:00:00 2001
From: Ildar Ismagilov
Date: Wed, 31 Jan 2018 22:42:21 +0300
Subject: srcu: Reduce scans of srcu_data in counter wrap check

Currently, given a multi-level srcu_node tree, SRCU can scan the full
set of srcu_data structures at each level when cleaning up after a
grace period.  This, though harmless otherwise, represents pointless
overhead.  This commit therefore eliminates this overhead by scanning
the srcu_data structures only when traversing the leaf srcu_node
structures.

Signed-off-by: Ildar Ismagilov
Signed-off-by: Paul E. McKenney
---
 kernel/rcu/srcutree.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'kernel/rcu')

diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index bb9607baad9b..d582a2352cdb 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -527,6 +527,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
 {
 	unsigned long cbdelay;
 	bool cbs;
+	bool last_lvl;
 	int cpu;
 	unsigned long flags;
 	unsigned long gpseq;
@@ -559,7 +560,8 @@ static void srcu_gp_end(struct srcu_struct *sp)
 	rcu_for_each_node_breadth_first(sp, snp) {
 		spin_lock_irq_rcu_node(snp);
 		cbs = false;
-		if (snp >= sp->level[rcu_num_lvls - 1])
+		last_lvl = snp >= sp->level[rcu_num_lvls - 1];
+		if (last_lvl)
 			cbs = snp->srcu_have_cbs[idx] == gpseq;
 		snp->srcu_have_cbs[idx] = gpseq;
 		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
@@ -572,7 +574,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
 			srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);

 		/* Occasionally prevent srcu_data counter wrap. */
-		if (!(gpseq & counter_wrap_check))
+		if (!(gpseq & counter_wrap_check) && last_lvl)
 			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
 				sdp = per_cpu_ptr(sp->sda, cpu);
 				spin_lock_irqsave_rcu_node(sdp, flags);
--
cgit v1.2.3
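Both counter-wrap commits above lean on modular comparison of free-running unsigned sequence numbers: as long as the per-CPU ->srcu_gp_seq_needed and ->srcu_gp_seq_needed_exp values are periodically pulled close to the current grace-period number, comparisons such as ULONG_CMP_GE() keep giving the right answer even after the counter wraps. A standalone C sketch of that comparison style (the macro below is an illustrative stand-in and may not match the kernel's exact definition):

    #include <limits.h>
    #include <stdio.h>

    /* Wrap-safe "a is at or after b" for free-running unsigned counters. */
    #define WRAP_CMP_GE(a, b)  (ULONG_MAX / 2 >= (unsigned long)((a) - (b)))

    int main(void)
    {
            unsigned long old = ULONG_MAX - 5;  /* counter just before wrapping */
            unsigned long cur = old + 10;       /* wraps around to a small value */

            /* The naive comparison inverts after the wrap... */
            printf("naive:     cur >= old           -> %d\n", cur >= old);
            /* ...the modular comparison still sees cur as the later value. */
            printf("wrap-safe: WRAP_CMP_GE(cur,old) -> %d\n", WRAP_CMP_GE(cur, old));
            return 0;
    }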
From a72da917f1861ed06f8824328b1b3ecd003a5b5b Mon Sep 17 00:00:00 2001
From: Byungchul Park
Date: Wed, 14 Feb 2018 18:05:24 +0900
Subject: srcu: Remove dead code in srcu_gp_end()

Of course, compilers will optimize out dead code.  Even so, remove the
dead code for better readability.

Signed-off-by: Byungchul Park
Reviewed-by: Steven Rostedt (VMware)
Signed-off-by: Paul E. McKenney
---
 kernel/rcu/srcutree.c | 2 --
 1 file changed, 2 deletions(-)

(limited to 'kernel/rcu')

diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index d582a2352cdb..f962f4428c39 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -532,7 +532,6 @@ static void srcu_gp_end(struct srcu_struct *sp)
 	unsigned long flags;
 	unsigned long gpseq;
 	int idx;
-	int idxnext;
 	unsigned long mask;
 	struct srcu_data *sdp;
 	struct srcu_node *snp;
@@ -556,7 +555,6 @@ static void srcu_gp_end(struct srcu_struct *sp)

 	/* Initiate callback invocation as needed. */
 	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
-	idxnext = (idx + 1) % ARRAY_SIZE(snp->srcu_have_cbs);
 	rcu_for_each_node_breadth_first(sp, snp) {
 		spin_lock_irq_rcu_node(snp);
 		cbs = false;
--
cgit v1.2.3

From 6308f3477510e15391ac82deaee66f8425339014 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Wed, 14 Feb 2018 14:20:45 -0800
Subject: rcu: Remove SRCU throttling

The code in srcu_gp_end() inserts a delay every 0x3ff grace periods in
order to prevent SRCU grace-period work from consuming an entire CPU
when there is a long sequence of expedited SRCU grace-period requests.
However, all of SRCU's grace-period work is carried out in workqueues,
which are in turn within kthreads, which are automatically throttled as
needed by the scheduler.  In particular, if there is plenty of idle
time, there is no point in throttling.

This commit therefore removes the expedited SRCU grace-period
throttling.

Signed-off-by: Paul E. McKenney
---
 kernel/rcu/srcutree.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

(limited to 'kernel/rcu')

diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index f962f4428c39..e2796f79a597 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -596,9 +596,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
 	    ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
 		srcu_gp_start(sp);
 		spin_unlock_irq_rcu_node(sp);
-		/* Throttle expedited grace periods: Should be rare! */
-		srcu_reschedule(sp, rcu_seq_ctr(gpseq) & 0x3ff
-				    ? 0 : SRCU_INTERVAL);
+		srcu_reschedule(sp, 0);
 	} else {
 		spin_unlock_irq_rcu_node(sp);
 	}
--
cgit v1.2.3

From 68a675d433fbaa8a39aa9a63695da68192b70e3c Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Fri, 1 Dec 2017 14:26:56 -0800
Subject: rcutorture: Replace multi-instance kzalloc() with kcalloc()

This commit replaces array-allocation calls to kzalloc() with
equivalent calls to kcalloc().

Signed-off-by: Paul E. McKenney
---
 kernel/rcu/rcutorture.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

(limited to 'kernel/rcu')

diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 308e6fdbced8..b4a201dcbc55 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -1557,11 +1557,10 @@ static int rcu_torture_barrier_init(void)
 	atomic_set(&barrier_cbs_count, 0);
 	atomic_set(&barrier_cbs_invoked, 0);
 	barrier_cbs_tasks =
-		kzalloc(n_barrier_cbs * sizeof(barrier_cbs_tasks[0]),
+		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
 			GFP_KERNEL);
 	barrier_cbs_wq =
-		kzalloc(n_barrier_cbs * sizeof(barrier_cbs_wq[0]),
-			GFP_KERNEL);
+		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
 	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
 		return -ENOMEM;
 	for (i = 0; i < n_barrier_cbs; i++) {
@@ -1799,7 +1798,7 @@ rcu_torture_init(void)
 	if (firsterr)
 		goto unwind;
 	if (nfakewriters > 0) {
-		fakewriter_tasks = kzalloc(nfakewriters *
+		fakewriter_tasks = kcalloc(nfakewriters,
 					   sizeof(fakewriter_tasks[0]),
 					   GFP_KERNEL);
 		if (fakewriter_tasks == NULL) {
@@ -1814,7 +1813,7 @@ rcu_torture_init(void)
 		if (firsterr)
 			goto unwind;
 	}
-	reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
+	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
 			       GFP_KERNEL);
 	if (reader_tasks == NULL) {
 		VERBOSE_TOROUT_ERRSTRING("out of memory");
--
cgit v1.2.3
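The reason kcalloc() is preferred for the allocations above is that kzalloc(n * size, ...) performs the multiplication unchecked: if it overflows, the caller quietly receives a buffer smaller than intended. kcalloc(n, size, ...) checks the multiplication and fails the allocation instead. A hypothetical userspace analogue of that checked-and-zeroed array allocation:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    /* Fail cleanly if n * size would overflow rather than allocating a
     * too-small buffer; zero the result in the style of kzalloc()/kcalloc(). */
    static void *zalloc_array(size_t n, size_t size)
    {
            void *p;

            if (size != 0 && n > SIZE_MAX / size)
                    return NULL;                    /* would overflow */
            p = malloc(n * size);
            if (p)
                    memset(p, 0, n * size);
            return p;
    }

    int main(void)
    {
            double *v = zalloc_array(16, sizeof(*v));       /* 16 zeroed doubles */

            free(v);
            return 0;
    }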
From e0d31a34c6db6381bfc630a9ee93f05b9447dcc3 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Fri, 1 Dec 2017 15:22:38 -0800
Subject: rcutorture: Abstract function and module names

This commit moves to __func__ for function names and to KBUILD_MODNAME
for module names, all in the name of better resilience to change.

Signed-off-by: Paul E. McKenney
---
 kernel/rcu/rcutorture.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'kernel/rcu')

diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index b4a201dcbc55..0f94025c672a 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -924,19 +924,19 @@ rcu_torture_writer(void *arg)
 	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync)
 		synctype[nsynctypes++] = RTWS_COND_GET;
 	else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync))
-		pr_alert("rcu_torture_writer: gp_cond without primitives.\n");
+		pr_alert("%s: gp_cond without primitives.\n", __func__);
 	if (gp_exp1 && cur_ops->exp_sync)
 		synctype[nsynctypes++] = RTWS_EXP_SYNC;
 	else if (gp_exp && !cur_ops->exp_sync)
-		pr_alert("rcu_torture_writer: gp_exp without primitives.\n");
+		pr_alert("%s: gp_exp without primitives.\n", __func__);
 	if (gp_normal1 && cur_ops->deferred_free)
 		synctype[nsynctypes++] = RTWS_DEF_FREE;
 	else if (gp_normal && !cur_ops->deferred_free)
-		pr_alert("rcu_torture_writer: gp_normal without primitives.\n");
+		pr_alert("%s: gp_normal without primitives.\n", __func__);
 	if (gp_sync1 && cur_ops->sync)
 		synctype[nsynctypes++] = RTWS_SYNC;
 	else if (gp_sync && !cur_ops->sync)
-		pr_alert("rcu_torture_writer: gp_sync without primitives.\n");
+		pr_alert("%s: gp_sync without primitives.\n", __func__);
 	if (WARN_ONCE(nsynctypes == 0,
 		      "rcu_torture_writer: No update-side primitives.\n")) {
 		/*
@@ -1673,7 +1673,7 @@ static void rcu_torture_err_cb(struct rcu_head *rhp)
 	 * next grace period.  Unlikely, but can happen.  If it
 	 * does happen, the debug-objects subsystem won't have splatted.
 	 */
-	pr_alert("rcutorture: duplicated callback was invoked.\n");
+	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
 }
 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

@@ -1690,7 +1690,7 @@ static void rcu_test_debug_objects(void)

 	init_rcu_head_on_stack(&rh1);
 	init_rcu_head_on_stack(&rh2);
-	pr_alert("rcutorture: WARN: Duplicate call_rcu() test starting.\n");
+	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);

 	/* Try to queue the rh2 pair of callbacks for the same grace period. */
 	preempt_disable(); /* Prevent preemption from interrupting test. */
@@ -1705,11 +1705,11 @@ static void rcu_test_debug_objects(void)

 	/* Wait for them all to get done so we can safely return. */
 	rcu_barrier();
-	pr_alert("rcutorture: WARN: Duplicate call_rcu() test complete.\n");
+	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
 	destroy_rcu_head_on_stack(&rh1);
 	destroy_rcu_head_on_stack(&rh2);
 #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
-	pr_alert("rcutorture: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n");
+	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
 #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
 }
--
cgit v1.2.3
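As in the earlier srcutree.c change, the goal is to keep log prefixes from drifting out of sync with the code: __func__ tracks function renames, and KBUILD_MODNAME, which the kernel build system defines per object file, tracks the module name. A hedged userspace sketch of the same "define the name once, in the build" pattern, with MODNAME standing in for KBUILD_MODNAME (e.g. built with cc -DMODNAME='"rcutorture"'):

    #include <stdio.h>

    #ifndef MODNAME
    #define MODNAME "example"       /* fallback when the build does not define it */
    #endif

    /* Every message is prefixed with the build-supplied name. */
    #define log_alert(...)  printf(MODNAME ": " __VA_ARGS__)

    int main(void)
    {
            log_alert("duplicate call_rcu() test %s.\n", "starting");
            return 0;
    }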
From eb0339934f1d468ff09d9be1c608c89cb1da850b Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Fri, 8 Dec 2017 10:48:41 -0800
Subject: rcutorture: Avoid fake-writer use of undefined primitives

Currently the rcu_torture_fakewriter() function invokes cur_ops->sync()
and cur_ops->exp_sync() without first checking to see if they are in
fact non-NULL.  This results in kernel NULL pointer dereferences when
testing RCU implementations that choose not to provide the full set of
primitives.  Given that it is perfectly reasonable to have specialized
RCU implementations that provide only a subset of the RCU API, this is
a bug in rcutorture.

This commit therefore makes rcu_torture_fakewriter() check function
pointers before invoking them, thus allowing it to test subsetted RCU
implementations.

Reported-by: Lihao Liang
Signed-off-by: Paul E. McKenney
---
 kernel/rcu/rcutorture.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'kernel/rcu')

diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 0f94025c672a..6c46cd1d8fd7 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -1045,13 +1045,13 @@ rcu_torture_fakewriter(void *arg)
 		    torture_random(&rand) % (nfakewriters * 8) == 0) {
 			cur_ops->cb_barrier();
 		} else if (gp_normal == gp_exp) {
-			if (torture_random(&rand) & 0x80)
+			if (cur_ops->sync && torture_random(&rand) & 0x80)
 				cur_ops->sync();
-			else
+			else if (cur_ops->exp_sync)
 				cur_ops->exp_sync();
-		} else if (gp_normal) {
+		} else if (gp_normal && cur_ops->sync) {
 			cur_ops->sync();
-		} else {
+		} else if (cur_ops->exp_sync) {
 			cur_ops->exp_sync();
 		}
 		stutter_wait("rcu_torture_fakewriter");
--
cgit v1.2.3
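The structural point of the fix above is that cur_ops points to a table of optional callbacks: an RCU flavor under test fills in only the primitives it implements, so every caller must test a pointer before jumping through it and fall back to whatever is available. A small hypothetical sketch of that guard-then-fall-back shape (all names below are invented for illustration):

    #include <stdio.h>

    /* Ops table in the spirit of rcu_torture_ops: entries may be NULL. */
    struct demo_ops {
            void (*sync)(void);             /* normal grace-period wait, optional */
            void (*exp_sync)(void);         /* expedited wait, optional */
    };

    static void demo_exp_sync(void)
    {
            printf("expedited synchronization\n");
    }

    /* This implementation provides only the expedited primitive. */
    static const struct demo_ops exp_only_ops = {
            .exp_sync = demo_exp_sync,
    };

    static void demo_quiesce(const struct demo_ops *ops, int prefer_normal)
    {
            /* Check each optional callback before invoking it. */
            if (prefer_normal && ops->sync)
                    ops->sync();
            else if (ops->exp_sync)
                    ops->exp_sync();
    }

    int main(void)
    {
            demo_quiesce(&exp_only_ops, 1); /* no NULL dereference despite missing ->sync */
            return 0;
    }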
From f7c0e6ad4bb1ab1cf358b8815f7d0ba7ff94d786 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Fri, 8 Dec 2017 11:37:24 -0800
Subject: rcutorture: Re-enable testing of dynamic expediting

During boot, normal grace periods are processed as expedited.  When
rcutorture is built into the kernel, it starts during boot and thus
detects that normal grace periods are unconditionally expedited.
Therefore, rcutorture concludes that there is no point in trying to
dynamically enable expediting, so it disables this aspect of testing,
which is a bit of an overreaction to the temporary boot-time expediting.

This commit therefore rechecks forced expediting throughout the test,
enabling dynamic expediting if normal grace periods are processed
normally at any point.

Signed-off-by: Paul E. McKenney
---
 kernel/rcu/rcutorture.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

(limited to 'kernel/rcu')

diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 6c46cd1d8fd7..2964b9236ddc 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -909,14 +909,10 @@ rcu_torture_writer(void *arg)
 	int nsynctypes = 0;

 	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
-	if (!can_expedite) {
+	if (!can_expedite)
 		pr_alert("%s" TORTURE_FLAG
-			 " GP expediting controlled from boot/sysfs for %s,\n",
+			 " GP expediting controlled from boot/sysfs for %s.\n",
 			 torture_type, cur_ops->name);
-		pr_alert("%s" TORTURE_FLAG
-			 " Disabled dynamic grace-period expediting.\n",
-			 torture_type);
-	}

 	/* Initialize synctype[] array.  If none set, take default. */
 	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
@@ -1011,6 +1007,9 @@ rcu_torture_writer(void *arg)
 				rcu_unexpedite_gp();
 			if (++expediting > 3)
 				expediting = -expediting;
+		} else if (!can_expedite) { /* Disabled during boot, recheck. */
+			can_expedite = !rcu_gp_is_expedited() &&
+				       !rcu_gp_is_normal();
 		}
 		rcu_torture_writer_state = RTWS_STUTTER;
 		stutter_wait("rcu_torture_writer");
@@ -1021,6 +1020,10 @@ rcu_torture_writer(void *arg)
 	while (can_expedite && expediting++ < 0)
 		rcu_unexpedite_gp();
 	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
+	if (!can_expedite)
+		pr_alert("%s" TORTURE_FLAG
+			 " Dynamic grace-period expediting was disabled.\n",
+			 torture_type);
 	rcu_torture_writer_state = RTWS_STOPPING;
 	torture_kthread_stopping("rcu_torture_writer");
 	return 0;
--
cgit v1.2.3

From db0c1a8aba31edd1432f3a5925a32107065b5568 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Fri, 8 Dec 2017 12:23:10 -0800
Subject: rcutorture: Record which grace-period primitives are tested

The rcu_torture_writer() function adapts to requested testing from
module parameters as well as the function pointers in the structure
referenced by cur_ops.  However, as long as the module parameters do
not conflict with the function pointers, this adaptation is silent.
This silence can result in confusion as to exactly what was tested,
which could in turn result in untested RCU code making its way into
mainline.

This commit therefore makes rcu_torture_writer() announce exactly which
portions of RCU's API it ends up testing.

Signed-off-by: Paul E. McKenney
---
 kernel/rcu/rcutorture.c | 24 ++++++++++++++++--------
 1 file changed, 16 insertions(+), 8 deletions(-)

(limited to 'kernel/rcu')

diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 2964b9236ddc..680c96d8c00f 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -917,22 +917,30 @@ rcu_torture_writer(void *arg)
 	/* Initialize synctype[] array.  If none set, take default. */
 	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
 		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
-	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync)
+	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
 		synctype[nsynctypes++] = RTWS_COND_GET;
-	else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync))
+		pr_info("%s: Testing conditional GPs.\n", __func__);
+	} else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
 		pr_alert("%s: gp_cond without primitives.\n", __func__);
-	if (gp_exp1 && cur_ops->exp_sync)
+	}
+	if (gp_exp1 && cur_ops->exp_sync) {
 		synctype[nsynctypes++] = RTWS_EXP_SYNC;
-	else if (gp_exp && !cur_ops->exp_sync)
+		pr_info("%s: Testing expedited GPs.\n", __func__);
+	} else if (gp_exp && !cur_ops->exp_sync) {
 		pr_alert("%s: gp_exp without primitives.\n", __func__);
-	if (gp_normal1 && cur_ops->deferred_free)
+	}
+	if (gp_normal1 && cur_ops->deferred_free) {
 		synctype[nsynctypes++] = RTWS_DEF_FREE;
-	else if (gp_normal && !cur_ops->deferred_free)
+		pr_info("%s: Testing asynchronous GPs.\n", __func__);
+	} else if (gp_normal && !cur_ops->deferred_free) {
 		pr_alert("%s: gp_normal without primitives.\n", __func__);
-	if (gp_sync1 && cur_ops->sync)
+	}
+	if (gp_sync1 && cur_ops->sync) {
 		synctype[nsynctypes++] = RTWS_SYNC;
-	else if (gp_sync && !cur_ops->sync)
+		pr_info("%s: Testing normal GPs.\n", __func__);
+	} else if (gp_sync && !cur_ops->sync) {
 		pr_alert("%s: gp_sync without primitives.\n", __func__);
+	}
 	if (WARN_ONCE(nsynctypes == 0,
 		      "rcu_torture_writer: No update-side primitives.\n")) {
 		/*
--
cgit v1.2.3
From 85ba6bfe8bb2a4d907f7380a8f37b31616ad694e Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Thu, 1 Feb 2018 19:19:04 -0800
Subject: torture: Provide more sensible nreader/nwriter defaults for rcuperf

The default values for the nreaders and nwriters module parameters are
apparently not all that user-friendly, resulting in people running
scalability tests in which every run was at large scale.  This commit
therefore makes both the nreaders and nwriters module parameters
default to the number of CPUs, and adds a comment to rcuperf.c stating
that the number of CPUs should be specified using the nr_cpus kernel
boot parameter.  This commit also eliminates the redundant rcuperf
scripting specification of default values for these parameters.

Signed-off-by: Paul E. McKenney
---
 kernel/rcu/rcuperf.c | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

(limited to 'kernel/rcu')

diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
index d1ebdf9868bb..777e7a6a0292 100644
--- a/kernel/rcu/rcuperf.c
+++ b/kernel/rcu/rcuperf.c
@@ -61,11 +61,30 @@ MODULE_AUTHOR("Paul E. McKenney ");
 #define VERBOSE_PERFOUT_ERRSTRING(s) \
 	do { if (verbose) pr_alert("%s" PERF_FLAG "!!! %s\n", perf_type, s); } while (0)

+/*
+ * The intended use cases for the nreaders and nwriters module parameters
+ * are as follows:
+ *
+ * 1.	Specify only the nr_cpus kernel boot parameter.  This will
+ *	set both nreaders and nwriters to the value specified by
+ *	nr_cpus for a mixed reader/writer test.
+ *
+ * 2.	Specify the nr_cpus kernel boot parameter, but set
+ *	rcuperf.nreaders to zero.  This will set nwriters to the
+ *	value specified by nr_cpus for an update-only test.
+ *
+ * 3.	Specify the nr_cpus kernel boot parameter, but set
+ *	rcuperf.nwriters to zero.  This will set nreaders to the
+ *	value specified by nr_cpus for a read-only test.
+ *
+ * Various other use cases may of course be specified.
+ */
+
 torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
 torture_param(int, gp_async_max, 1000, "Max # outstanding waits per reader");
 torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
 torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
-torture_param(int, nreaders, 0, "Number of RCU reader threads");
+torture_param(int, nreaders, -1, "Number of RCU reader threads");
 torture_param(int, nwriters, -1, "Number of RCU updater threads");
 torture_param(bool, shutdown, !IS_ENABLED(MODULE),
 	      "Shutdown at end of performance tests.");
--
cgit v1.2.3
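With both module parameters defaulting to -1, rcuperf can treat "negative" as "scale with this machine", that is, use the CPU count established by the nr_cpus kernel boot parameter. A hedged sketch of that resolution step (the helper and variable names below are illustrative, not necessarily what rcuperf uses internally):

    #include <stdio.h>

    static int nreaders = -1;       /* -1: default to one reader per CPU */
    static int nwriters = -1;       /* -1: default to one writer per CPU */

    /* Resolve "-1 means per-CPU default" once the CPU count is known. */
    static void resolve_defaults(int nr_cpus)
    {
            if (nreaders < 0)
                    nreaders = nr_cpus;
            if (nwriters < 0)
                    nwriters = nr_cpus;
    }

    int main(void)
    {
            resolve_defaults(8);    /* pretend the system booted with nr_cpus=8 */
            printf("nreaders=%d nwriters=%d\n", nreaders, nwriters);
            return 0;
    }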