-rw-r--r--  MAINTAINERS                                            10
-rw-r--r--  include/linux/srcu.h                                   15
-rw-r--r--  include/linux/srcutree.h                                2
-rw-r--r--  kernel/rcu/refscale.c                                  40
-rw-r--r--  kernel/rcu/srcutree.c                                   4
-rw-r--r--  kernel/rcu/tree.c                                       3
-rw-r--r--  kernel/rcu/tree_exp.h                                  72
-rw-r--r--  kernel/rcu/tree_plugin.h                                1
-rwxr-xr-x  tools/testing/selftests/rcutorture/bin/kvm-remote.sh  25
9 files changed, 124 insertions, 48 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 17daa9ee9384..6ff7ba6cc270 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -13301,7 +13301,7 @@ L: linux-kernel@vger.kernel.org
L: linux-arch@vger.kernel.org
L: lkmm@lists.linux.dev
S: Supported
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rcu/linux.git rcu/dev
F: Documentation/atomic_bitops.txt
F: Documentation/atomic_t.txt
F: Documentation/core-api/refcount-vs-atomic.rst
@@ -19603,7 +19603,7 @@ R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
R: Lai Jiangshan <jiangshanlai@gmail.com>
L: rcu@vger.kernel.org
S: Supported
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rcu/linux.git rcu/dev
F: tools/testing/selftests/rcutorture
RDACM20 Camera Sensor
@@ -19682,7 +19682,7 @@ R: Zqiang <qiang.zhang1211@gmail.com>
L: rcu@vger.kernel.org
S: Supported
W: http://www.rdrop.com/users/paulmck/RCU/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rcu/linux.git rcu/dev
F: Documentation/RCU/
F: include/linux/rcu*
F: kernel/rcu/
@@ -21578,7 +21578,7 @@ R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
L: rcu@vger.kernel.org
S: Supported
W: http://www.rdrop.com/users/paulmck/RCU/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rcu/linux.git rcu/dev
F: include/linux/srcu*.h
F: kernel/rcu/srcu*.c
@@ -23707,7 +23707,7 @@ M: "Paul E. McKenney" <paulmck@kernel.org>
M: Josh Triplett <josh@joshtriplett.org>
L: linux-kernel@vger.kernel.org
S: Supported
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rcu/linux.git rcu/dev
F: Documentation/RCU/torture.rst
F: kernel/locking/locktorture.c
F: kernel/rcu/rcuscale.c
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index da8224d0f71c..d7ba46e74f58 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -238,13 +238,14 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
* a mutex that is held elsewhere while calling synchronize_srcu() or
* synchronize_srcu_expedited().
*
- * The return value from srcu_read_lock() must be passed unaltered
- * to the matching srcu_read_unlock(). Note that srcu_read_lock() and
- * the matching srcu_read_unlock() must occur in the same context, for
- * example, it is illegal to invoke srcu_read_unlock() in an irq handler
- * if the matching srcu_read_lock() was invoked in process context. Or,
- * for that matter to invoke srcu_read_unlock() from one task and the
- * matching srcu_read_lock() from another.
+ * The return value from srcu_read_lock() is guaranteed to be
+ * non-negative. This value must be passed unaltered to the matching
+ * srcu_read_unlock(). Note that srcu_read_lock() and the matching
+ * srcu_read_unlock() must occur in the same context, for example, it is
+ * illegal to invoke srcu_read_unlock() in an irq handler if the matching
+ * srcu_read_lock() was invoked in process context. Or, for that matter to
+ * invoke srcu_read_unlock() from one task and the matching srcu_read_lock()
+ * from another.
*/
static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
{
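
A minimal usage sketch of the contract the updated comment describes (my_srcu
and my_read() are hypothetical names, not part of this patch): the index
returned by srcu_read_lock() is non-negative and must reach the matching
srcu_read_unlock() unaltered, in the same context.

	DEFINE_SRCU(my_srcu);

	static void my_read(void)
	{
		int idx;

		idx = srcu_read_lock(&my_srcu);		/* Non-negative index. */
		/* SRCU read-side critical section goes here. */
		srcu_read_unlock(&my_srcu, idx);	/* Same index, same context. */
	}
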
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 80016bbed672..b17814c9d1c7 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -254,7 +254,7 @@ static inline void srcu_check_read_flavor_lite(struct srcu_struct *ssp)
if (likely(READ_ONCE(sdp->srcu_reader_flavor) & SRCU_READ_FLAVOR_LITE))
return;
- // Note that the cmpxchg() in srcu_check_read_flavor() is fully ordered.
+ // Note that the cmpxchg() in __srcu_check_read_flavor() is fully ordered.
__srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_LITE);
}
diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c
index aacfcc9838b3..1b47376acdc4 100644
--- a/kernel/rcu/refscale.c
+++ b/kernel/rcu/refscale.c
@@ -36,6 +36,7 @@
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/types.h>
+#include <linux/sched/clock.h>
#include "rcu.h"
@@ -531,6 +532,39 @@ static const struct ref_scale_ops acqrel_ops = {
static volatile u64 stopopts;
+static void ref_sched_clock_section(const int nloops)
+{
+ u64 x = 0;
+ int i;
+
+ preempt_disable();
+ for (i = nloops; i >= 0; i--)
+ x += sched_clock();
+ preempt_enable();
+ stopopts = x;
+}
+
+static void ref_sched_clock_delay_section(const int nloops, const int udl, const int ndl)
+{
+ u64 x = 0;
+ int i;
+
+ preempt_disable();
+ for (i = nloops; i >= 0; i--) {
+ x += sched_clock();
+ un_delay(udl, ndl);
+ }
+ preempt_enable();
+ stopopts = x;
+}
+
+static const struct ref_scale_ops sched_clock_ops = {
+ .readsection = ref_sched_clock_section,
+ .delaysection = ref_sched_clock_delay_section,
+ .name = "sched-clock"
+};
+
+
static void ref_clock_section(const int nloops)
{
u64 x = 0;
@@ -1130,9 +1164,9 @@ ref_scale_init(void)
int firsterr = 0;
static const struct ref_scale_ops *scale_ops[] = {
&rcu_ops, &srcu_ops, &srcu_lite_ops, RCU_TRACE_OPS RCU_TASKS_OPS
- &refcnt_ops, &rwlock_ops, &rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops,
- &clock_ops, &jiffies_ops, &typesafe_ref_ops, &typesafe_lock_ops,
- &typesafe_seqlock_ops,
+ &refcnt_ops, &rwlock_ops, &rwsem_ops, &lock_ops, &lock_irq_ops,
+ &acqrel_ops, &sched_clock_ops, &clock_ops, &jiffies_ops,
+ &typesafe_ref_ops, &typesafe_lock_ops, &typesafe_seqlock_ops,
};
if (!torture_init_begin(scale_type, verbose))
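
A quick way to exercise the new readers, assuming refscale's existing
scale_type module parameter (the same one the hunk above passes to
torture_init_begin()) and the "sched-clock" name registered in sched_clock_ops:

	modprobe refscale scale_type=sched-clock
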
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 5e2e53464794..b83c74c4dcc0 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -738,7 +738,8 @@ EXPORT_SYMBOL_GPL(__srcu_check_read_flavor);
/*
* Counts the new reader in the appropriate per-CPU element of the
* srcu_struct.
- * Returns an index that must be passed to the matching srcu_read_unlock().
+ * Returns a guaranteed non-negative index that must be passed to the
+ * matching __srcu_read_unlock().
*/
int __srcu_read_lock(struct srcu_struct *ssp)
{
@@ -1076,7 +1077,6 @@ static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
/* If grace period not already in progress, start it. */
if (!WARN_ON_ONCE(rcu_seq_done(&sup->srcu_gp_seq, s)) &&
rcu_seq_state(sup->srcu_gp_seq) == SRCU_STATE_IDLE) {
- WARN_ON_ONCE(ULONG_CMP_GE(sup->srcu_gp_seq, sup->srcu_gp_seq_needed));
srcu_gp_start(ssp);
// And how can that list_add() in the "else" clause
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index ff98233d4aa5..24f1cb292a92 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3084,8 +3084,11 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
head->func = func;
head->next = NULL;
kasan_record_aux_stack_noalloc(head);
+
local_irq_save(flags);
rdp = this_cpu_ptr(&rcu_data);
+ RCU_LOCKDEP_WARN(!rcu_rdp_cpu_online(rdp), "Callback enqueued on offline CPU!");
+
lazy = lazy_in && !rcu_async_should_hurry();
/* Add the callback to our list. */
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index fb664d3a01c9..77efed89c79e 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -227,16 +227,16 @@ static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
/*
* Report expedited quiescent state for multiple CPUs, all covered by the
- * specified leaf rcu_node structure.
+ * specified leaf rcu_node structure, which is acquired by the caller.
*/
-static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
+static void rcu_report_exp_cpu_mult(struct rcu_node *rnp, unsigned long flags,
unsigned long mask, bool wake)
+ __releases(rnp->lock)
{
int cpu;
- unsigned long flags;
struct rcu_data *rdp;
- raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ raw_lockdep_assert_held_rcu_node(rnp);
if (!(rnp->expmask & mask)) {
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
return;
@@ -257,8 +257,13 @@ static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
*/
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
+ unsigned long flags;
+ struct rcu_node *rnp = rdp->mynode;
+
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
WRITE_ONCE(rdp->cpu_no_qs.b.exp, false);
- rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
+ ASSERT_EXCLUSIVE_WRITER(rdp->cpu_no_qs.b.exp);
+ rcu_report_exp_cpu_mult(rnp, flags, rdp->grpmask, true);
}
/* Common code for work-done checking. */
@@ -432,8 +437,10 @@ retry_ipi:
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
/* Report quiescent states for those that went offline. */
- if (mask_ofl_test)
- rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
+ if (mask_ofl_test) {
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ rcu_report_exp_cpu_mult(rnp, flags, mask_ofl_test, false);
+ }
}
static void rcu_exp_sel_wait_wake(unsigned long s);
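
The resulting calling convention, sketched for clarity (rnp and mask stand in
for a leaf rcu_node pointer and a CPU mask; this illustration is not part of
the patch): the caller now acquires the leaf rcu_node lock, and
rcu_report_exp_cpu_mult() releases it on every return path.

	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	/*
	 * Drops rnp->lock on all paths, including the early return
	 * taken when none of the mask bits remain in rnp->expmask.
	 */
	rcu_report_exp_cpu_mult(rnp, flags, mask, false);
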
@@ -712,6 +719,18 @@ static void rcu_exp_sel_wait_wake(unsigned long s)
rcu_exp_wait_wake(s);
}
+/* Request an expedited quiescent state. */
+static void rcu_exp_need_qs(void)
+{
+ lockdep_assert_irqs_disabled();
+ ASSERT_EXCLUSIVE_WRITER_SCOPED(*this_cpu_ptr(&rcu_data.cpu_no_qs.b.exp));
+ __this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
+ /* Store .exp before .rcu_urgent_qs. */
+ smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
+ set_tsk_need_resched(current);
+ set_preempt_need_resched();
+}
+
#ifdef CONFIG_PREEMPT_RCU
/*
@@ -730,24 +749,34 @@ static void rcu_exp_handler(void *unused)
struct task_struct *t = current;
/*
- * First, the common case of not being in an RCU read-side
+ * First, is there no need for a quiescent state from this CPU,
+ * or is this CPU already looking for a quiescent state for the
+ * current grace period? If either is the case, just leave.
+ * However, this should not happen due to the preemptible
+ * sync_sched_exp_online_cleanup() implementation being a no-op,
+ * so warn if this does happen.
+ */
+ ASSERT_EXCLUSIVE_WRITER_SCOPED(rdp->cpu_no_qs.b.exp);
+ if (WARN_ON_ONCE(!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
+ READ_ONCE(rdp->cpu_no_qs.b.exp)))
+ return;
+
+ /*
+ * Second, the common case of not being in an RCU read-side
* critical section. If also enabled or idle, immediately
* report the quiescent state, otherwise defer.
*/
if (!depth) {
if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
- rcu_is_cpu_rrupt_from_idle()) {
+ rcu_is_cpu_rrupt_from_idle())
rcu_report_exp_rdp(rdp);
- } else {
- WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
- set_tsk_need_resched(t);
- set_preempt_need_resched();
- }
+ else
+ rcu_exp_need_qs();
return;
}
/*
- * Second, the less-common case of being in an RCU read-side
+ * Third, the less-common case of being in an RCU read-side
* critical section. In this case we can count on a future
* rcu_read_unlock(). However, this rcu_read_unlock() might
* execute on some other CPU, but in that case there will be
@@ -768,7 +797,7 @@ static void rcu_exp_handler(void *unused)
return;
}
- // Finally, negative nesting depth should not happen.
+ // Fourth and finally, negative nesting depth should not happen.
WARN_ON_ONCE(1);
}
@@ -835,16 +864,6 @@ static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp)
#else /* #ifdef CONFIG_PREEMPT_RCU */
-/* Request an expedited quiescent state. */
-static void rcu_exp_need_qs(void)
-{
- __this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
- /* Store .exp before .rcu_urgent_qs. */
- smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
- set_tsk_need_resched(current);
- set_preempt_need_resched();
-}
-
/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void rcu_exp_handler(void *unused)
{
@@ -852,6 +871,7 @@ static void rcu_exp_handler(void *unused)
struct rcu_node *rnp = rdp->mynode;
bool preempt_bh_enabled = !(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));
+ ASSERT_EXCLUSIVE_WRITER_SCOPED(rdp->cpu_no_qs.b.exp);
if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
__this_cpu_read(rcu_data.cpu_no_qs.b.exp))
return;
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 3927ea5f7955..bb7ca6eb9ef0 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -275,6 +275,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
rcu_report_exp_rdp(rdp);
else
WARN_ON_ONCE(rdp->cpu_no_qs.b.exp);
+ ASSERT_EXCLUSIVE_WRITER_SCOPED(rdp->cpu_no_qs.b.exp);
}
/*
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-remote.sh b/tools/testing/selftests/rcutorture/bin/kvm-remote.sh
index 134cdef5a6e0..48a8052d5dae 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-remote.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-remote.sh
@@ -181,10 +181,11 @@ done
# Function to check for presence of a file on the specified system.
# Complain if the system cannot be reached, and retry after a wait.
-# Currently just waits forever if a machine disappears.
+# Currently just waits 15 minutes if a machine disappears.
#
# Usage: checkremotefile system pathname
checkremotefile () {
+ local nsshfails=0
local ret
local sleeptime=60
@@ -195,6 +196,11 @@ checkremotefile () {
if test "$ret" -eq 255
then
echo " ---" ssh failure to $1 checking for file $2, retry after $sleeptime seconds. `date` | tee -a "$oldrun/remote-log"
+ nsshfails=$((nsshfails+1))
+ if ((nsshfails > 15))
+ then
+ return 255
+ fi
elif test "$ret" -eq 0
then
return 0
@@ -268,12 +274,23 @@ echo All batches started. `date` | tee -a "$oldrun/remote-log"
for i in $systems
do
echo " ---" Waiting for $i `date` | tee -a "$oldrun/remote-log"
- while checkremotefile "$i" "$resdir/$ds/remote.run"
+ while :
do
+ checkremotefile "$i" "$resdir/$ds/remote.run"
+ ret=$?
+ if test "$ret" -eq 1
+ then
+ echo " ---" Collecting results from $i `date` | tee -a "$oldrun/remote-log"
+ ( cd "$oldrun"; ssh -o BatchMode=yes $i "cd $rundir; tar -czf - kvm-remote-*.sh.out */console.log */kvm-test-1-run*.sh.out */qemu[_-]pid */qemu-retval */qemu-affinity; rm -rf $T > /dev/null 2>&1" | tar -xzf - )
+ break;
+ fi
+ if test "$ret" -eq 255
+ then
+ echo System $i persistent ssh failure, lost results `date` | tee -a "$oldrun/remote-log"
+ break;
+ fi
sleep 30
done
- echo " ---" Collecting results from $i `date` | tee -a "$oldrun/remote-log"
- ( cd "$oldrun"; ssh -o BatchMode=yes $i "cd $rundir; tar -czf - kvm-remote-*.sh.out */console.log */kvm-test-1-run*.sh.out */qemu[_-]pid */qemu-retval */qemu-affinity; rm -rf $T > /dev/null 2>&1" | tar -xzf - )
done
( kvm-end-run-stats.sh "$oldrun" "$starttime"; echo $? > $T/exitcode ) | tee -a "$oldrun/remote-log"