author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2014-07-01 18:16:30 -0700
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2014-09-07 16:27:26 -0700
commit     52db30ab23b6d00cf80b22a510c4ea4be4458031 (patch)
tree       98fdbf7de60a00768af1c62662f6f3e1e9ec6b27 /kernel/rcu
parent     f1a828f5fa3537456c417a81ad534c14022c268c (diff)
rcu: Add stall-warning checks for RCU-tasks
This commit adds a ten-minute RCU-tasks stall warning. The timeout is
controlled by the boot/sysfs parameter rcu_task_stall_timeout, expressed
in jiffies, with values less than or equal to zero disabling the stall
warnings.
The default value is ten minutes, which means that the tasks that have
not yet responded will get their stacks dumped every ten minutes, until
they pass through a voluntary context switch.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
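The reporting logic introduced here is a rate-limited "print a header once,
then one line per offender" pattern. The sketch below restates it as a
self-contained userspace C program, purely for illustration: struct holdout,
the seconds-based clock, and the other names are hypothetical stand-ins, and
where the kernel removes finished tasks with list_del_rcu() and dumps stacks
with sched_show_task(), the sketch just prints.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Illustrative stand-in for a task that has not yet passed through a
 * voluntary context switch. */
struct holdout {
	const char *name;
	bool holding_out;
};

/* Mimics rcu_task_stall_timeout: <= 0 disables reports (seconds here,
 * jiffies in the kernel). */
static int stall_timeout = 5;

/* Mimics check_holdout_task(): skip tasks that are done, report the
 * rest, and print the header only before the first one. */
static void check_holdout(struct holdout *h, bool needreport, bool *firstreport)
{
	if (!h->holding_out)
		return;			/* kernel: remove from the holdout list */
	if (!needreport)
		return;
	if (*firstreport) {
		fprintf(stderr, "INFO: detected stalls on tasks:\n");
		*firstreport = false;
	}
	fprintf(stderr, "  task %s is still holding out\n", h->name);
}

int main(void)
{
	struct holdout holdouts[] = {
		{ "reader-1", true },
		{ "reader-2", true },
	};
	time_t lastreport = time(NULL);
	int rounds = 12;		/* bounded so the demo terminates */

	while (rounds-- > 0) {
		bool firstreport = true;
		bool needreport;

		sleep(1);		/* kernel: schedule_timeout_interruptible(HZ) */
		needreport = stall_timeout > 0 &&
			     time(NULL) > lastreport + stall_timeout;
		if (needreport)
			lastreport = time(NULL);	/* rearm for the next report */
		for (size_t i = 0; i < sizeof(holdouts) / sizeof(holdouts[0]); i++)
			check_holdout(&holdouts[i], needreport, &firstreport);
	}
	return 0;
}

Because module_param(rcu_task_stall_timeout, int, 0644) makes the knob
writable, the timeout should also be tunable at runtime via
/sys/module/rcupdate/parameters/rcu_task_stall_timeout and at boot via
rcupdate.rcu_task_stall_timeout= (a value in jiffies); the "rcupdate."
prefix assumes the usual built-in naming for kernel/rcu/update.c, so treat
those paths as the common case rather than a guarantee.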
Diffstat (limited to 'kernel/rcu')
-rw-r--r--	kernel/rcu/update.c	29
1 file changed, 25 insertions, 4 deletions
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index aef8109152ce..bad7dbd4c2e3 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -371,7 +371,7 @@ static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);
 DEFINE_SRCU(tasks_rcu_exit_srcu);
 
 /* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
-static int rcu_task_stall_timeout __read_mostly = HZ * 60 * 3;
+static int rcu_task_stall_timeout __read_mostly = HZ * 60 * 10;
 module_param(rcu_task_stall_timeout, int, 0644);
 
 /* Post an RCU-tasks callback. */
@@ -445,8 +445,9 @@ void rcu_barrier_tasks(void)
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
 
-/* See if the current task has stopped holding out, remove from list if so. */
-static void check_holdout_task(struct task_struct *t)
+/* See if tasks are still holding out, complain if so. */
+static void check_holdout_task(struct task_struct *t,
+			       bool needreport, bool *firstreport)
 {
 	if (!ACCESS_ONCE(t->rcu_tasks_holdout) ||
 	    t->rcu_tasks_nvcsw != ACCESS_ONCE(t->nvcsw) ||
@@ -454,7 +455,15 @@ static void check_holdout_task(struct task_struct *t)
 		ACCESS_ONCE(t->rcu_tasks_holdout) = false;
 		list_del_rcu(&t->rcu_tasks_holdout_list);
 		put_task_struct(t);
+		return;
 	}
+	if (!needreport)
+		return;
+	if (*firstreport) {
+		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
+		*firstreport = false;
+	}
+	sched_show_task(t);
 }
 
 /* RCU-tasks kthread that detects grace periods and invokes callbacks. */
@@ -462,6 +471,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 {
 	unsigned long flags;
 	struct task_struct *g, *t;
+	unsigned long lastreport;
 	struct rcu_head *list;
 	struct rcu_head *next;
 	LIST_HEAD(rcu_tasks_holdouts);
@@ -540,13 +550,24 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 		 * of holdout tasks, removing any that are no longer
 		 * holdouts. When the list is empty, we are done.
 		 */
+		lastreport = jiffies;
 		while (!list_empty(&rcu_tasks_holdouts)) {
+			bool firstreport;
+			bool needreport;
+			int rtst;
+
 			schedule_timeout_interruptible(HZ);
+			rtst = ACCESS_ONCE(rcu_task_stall_timeout);
+			needreport = rtst > 0 &&
+				     time_after(jiffies, lastreport + rtst);
+			if (needreport)
+				lastreport = jiffies;
+			firstreport = true;
 			WARN_ON(signal_pending(current));
 			rcu_read_lock();
 			list_for_each_entry_rcu(t, &rcu_tasks_holdouts,
						rcu_tasks_holdout_list)
-				check_holdout_task(t);
+				check_holdout_task(t, needreport, &firstreport);
 			rcu_read_unlock();
 		}
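A note on the stall-time arithmetic above: needreport compares jiffies
against lastreport + rtst using time_after(), whose signed-difference trick
keeps the comparison correct even when the jiffies counter wraps around. The
snippet below is a userspace restatement of that idiom for illustration; the
real macro lives in include/linux/jiffies.h and operates on unsigned long
with a (long) cast, where this sketch assumes a 32-bit unsigned tick counter.

#include <assert.h>
#include <stdio.h>

/* Restatement of the kernel's time_after() idiom: subtract modulo 2^32,
 * then reinterpret as signed, so "a after b" is decided by the sign of
 * the difference rather than by raw magnitude. */
#define time_after(a, b)	((int)((unsigned)(b) - (unsigned)(a)) < 0)

int main(void)
{
	unsigned lastreport = 0xfffffff0u;	/* just before wraparound */
	unsigned timeout = 0x20;
	unsigned now = 0x18;			/* counter has since wrapped */

	/* lastreport + timeout wraps to 0x10, and now (0x18) is logically
	 * later, so a stall report is due despite now < lastreport. */
	assert(time_after(now, lastreport + timeout));
	printf("stall report due\n");
	return 0;
}

Because the comparison holds whenever the two timestamps are within half the
counter's range of each other, the kthread can simply rearm
lastreport = jiffies each time it reports, with no special overflow handling.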