author     Paul E. McKenney <paulmck@kernel.org>  2025-02-10 09:29:35 -0800
committer  Neeraj Upadhyay (AMD) <neeraj.upadhyay@kernel.org>  2025-06-25 08:39:01 +0530
commit     93856948be8f5d784a28cfb2eb16884a5522faaa (patch)
tree       e8dc382f98fd24232070182016c6e8b5e5a48b1d
parent     62d92c9b87db43cc255411fe2827b9de42fa0523 (diff)
rcutorture: Check for ->up_read() without matching ->down_read()
This commit creates counters in the rcu_torture_one_read_state_updown structure that check for a call to ->up_read() that lacks a matching call to ->down_read().

While in the area, add end-of-run cleanup code that prevents calls to rcu_torture_updown_hrt() from happening after the test has moved on. Yes, the srcu_barrier() at the end of the test will wait for them, but this could result in confusing states, statistics, and diagnostic information. So explicitly wait for them before we get to the end-of-test output.

[ paulmck: Apply kernel test robot feedback. ]
[ joel: Apply Boqun's fix for counter increment ordering. ]

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Tested-by: kernel test robot <oliver.sang@intel.com>
Signed-off-by: Joel Fernandes <joelagnelf@nvidia.com>
Signed-off-by: Neeraj Upadhyay (AMD) <neeraj.upadhyay@kernel.org>
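The check itself is a simple pair of counters: rtorsu_ndowns is bumped when ->down_read() is called, and every completion path bumps rtorsu_nups only after warning if the two would go out of balance (the increment ordering that Boqun's fix addresses). A minimal userspace sketch of the same invariant, with hypothetical names and plain counters rather than the rcutorture code itself, could look like:

#include <stdio.h>

/* Hypothetical analogue of the rtorsu_ndowns/rtorsu_nups pair. */
struct updown_counters {
	unsigned long ndowns;	/* calls to the down (lock) side */
	unsigned long nups;	/* calls to the up (unlock) side */
};

static void record_down(struct updown_counters *c)
{
	c->ndowns++;		/* counted before the reader runs */
}

static void record_up(struct updown_counters *c)
{
	/* An up without a matching down means nups would catch up to ndowns. */
	if (c->nups >= c->ndowns)
		fprintf(stderr, "Up without matching down!\n");
	c->nups++;		/* incremented only after the check, as in the patch */
}

int main(void)
{
	struct updown_counters c = { 0, 0 };

	record_down(&c);
	record_up(&c);		/* balanced: no warning */
	record_up(&c);		/* unbalanced: triggers the warning */
	return 0;
}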
-rw-r--r--  kernel/rcu/rcutorture.c  15
1 file changed, 13 insertions(+), 2 deletions(-)
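The rcu_torture_updown_cleanup() hunk below follows a cancel-or-finish pattern: if hrtimer_cancel() reports that the timer was still armed (or rtorsu_inuse shows its handler never completed), the cleanup code performs the handler's teardown itself, so no rcu_torture_updown_hrt() call can fire after the test has moved on. A self-contained userspace analogue of that idea, with hypothetical names and a plain flag standing in for the hrtimer, might be:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the armed timer and its completion handler. */
struct deferred_work {
	bool pending;	/* analogous to an armed hrtimer */
	bool in_use;	/* analogous to rtorsu_inuse */
};

/* What the timer handler would have done had it fired. */
static void finish_work(struct deferred_work *w)
{
	printf("finishing work the handler never got to run\n");
	w->in_use = false;
}

/* Returns true if the work was still pending, mirroring hrtimer_cancel(). */
static bool cancel_work(struct deferred_work *w)
{
	bool was_pending = w->pending;

	w->pending = false;
	return was_pending;
}

int main(void)
{
	struct deferred_work w = { .pending = true, .in_use = true };

	/* Cancel-or-finish: if cancellation won the race, do the teardown here. */
	if (cancel_work(&w) || w.in_use)
		finish_work(&w);
	return 0;
}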
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 22288efc8c67..f773d4f8f2ae 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -2449,6 +2449,8 @@ struct rcu_torture_one_read_state_updown {
bool rtorsu_inuse;
ktime_t rtorsu_kt;
unsigned long rtorsu_j;
+ unsigned long rtorsu_ndowns;
+ unsigned long rtorsu_nups;
struct torture_random_state rtorsu_trs;
struct rcu_torture_one_read_state rtorsu_rtors;
};
@@ -2463,6 +2465,8 @@ static enum hrtimer_restart rcu_torture_updown_hrt(struct hrtimer *hrtp)
rtorsup = container_of(hrtp, struct rcu_torture_one_read_state_updown, rtorsu_hrt);
rcu_torture_one_read_end(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs, -1);
+ WARN_ONCE(rtorsup->rtorsu_nups >= rtorsup->rtorsu_ndowns, "%s: Up without matching down #%zu.\n", __func__, rtorsup - updownreaders);
+ rtorsup->rtorsu_nups++;
smp_store_release(&rtorsup->rtorsu_inuse, false);
return HRTIMER_NORESTART;
}
@@ -2507,8 +2511,12 @@ static void rcu_torture_updown_cleanup(void)
for (rtorsup = updownreaders; rtorsup < &updownreaders[n_up_down]; rtorsup++) {
if (!smp_load_acquire(&rtorsup->rtorsu_inuse))
continue;
- if (!hrtimer_cancel(&rtorsup->rtorsu_hrt))
- WARN_ON_ONCE(rtorsup->rtorsu_inuse);
+ if (hrtimer_cancel(&rtorsup->rtorsu_hrt) || WARN_ON_ONCE(rtorsup->rtorsu_inuse)) {
+ rcu_torture_one_read_end(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs, -1);
+ WARN_ONCE(rtorsup->rtorsu_nups >= rtorsup->rtorsu_ndowns, "%s: Up without matching down #%zu.\n", __func__, rtorsup - updownreaders);
+ rtorsup->rtorsu_nups++;
+ smp_store_release(&rtorsup->rtorsu_inuse, false);
+ }
}
kfree(updownreaders);
@@ -2524,10 +2532,13 @@ static void rcu_torture_updown_one(struct rcu_torture_one_read_state_updown *rto
init_rcu_torture_one_read_state(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs);
rawidx = cur_ops->down_read();
+ rtorsup->rtorsu_ndowns++;
idx = (rawidx << RCUTORTURE_RDR_SHIFT_1) & RCUTORTURE_RDR_MASK_1;
rtorsup->rtorsu_rtors.readstate = idx | RCUTORTURE_RDR_UPDOWN;
rtorsup->rtorsu_rtors.rtrsp++;
if (!rcu_torture_one_read_start(&rtorsup->rtorsu_rtors, &rtorsup->rtorsu_trs, -1)) {
+ WARN_ONCE(rtorsup->rtorsu_nups >= rtorsup->rtorsu_ndowns, "%s: Up without matching down #%zu.\n", __func__, rtorsup - updownreaders);
+ rtorsup->rtorsu_nups++;
schedule_timeout_idle(HZ);
return;
}