-rw-r--r--  include/linux/hrtimer.h   |   3
-rw-r--r--  kernel/hrtimer.c          | 190
-rw-r--r--  kernel/sched/core.c       |   1
-rw-r--r--  kernel/sched/rt.c         |   1
-rw-r--r--  kernel/time/tick-sched.c  |   1
-rw-r--r--  kernel/watchdog.c         |   1
6 files changed, 179 insertions, 18 deletions
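Overview: on PREEMPT_RT, hrtimer callbacks may need to take sleeping locks, which is not allowed in hard interrupt context. This patch therefore defers expired timers whose callbacks are not explicitly marked irqsafe onto a new per-clock-base expired list and runs them from the HRTIMER_SOFTIRQ with interrupts enabled. Callbacks that are safe to run in hard-irq context (the sched tick emulation, the hrtick, the RT period timer, the watchdog timer and hrtimer sleepers) opt back in by setting timer->irqsafe = 1, as the hunks below do. A minimal sketch of that opt-in pattern, with a hypothetical handler that is not part of the patch:

static enum hrtimer_restart my_handler(struct hrtimer *timer)
{
	/*
	 * Once irqsafe is set this runs in hard-irq context on -rt,
	 * so it must not take sleeping locks; wakeup-style work only.
	 */
	return HRTIMER_NORESTART;
}

static void my_timer_setup(struct hrtimer *timer)
{
	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = my_handler;
	timer->irqsafe = 1;	/* opt in to hard-irq execution */
}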
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index e8b395d556b9..0e3708636ce0 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -111,6 +111,8 @@ struct hrtimer {
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
unsigned long state;
+ struct list_head cb_entry;
+ int irqsafe;
#ifdef CONFIG_TIMER_STATS
int start_pid;
void *start_site;
@@ -147,6 +149,7 @@ struct hrtimer_clock_base {
int index;
clockid_t clockid;
struct timerqueue_head active;
+ struct list_head expired;
ktime_t resolution;
ktime_t (*get_time)(void);
ktime_t softirq_time;
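The two new fields work together: cb_entry stays self-linked (via INIT_LIST_HEAD) while the timer is not deferred, so !list_empty(&timer->cb_entry) doubles as the "timer sits on base->expired" test that __remove_hrtimer() performs below. A small user-space illustration of that self-linked-means-idle convention, with the list primitives re-implemented for the example (this is not the kernel's list.h):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

int main(void)
{
	struct list_head expired, cb_entry;

	INIT_LIST_HEAD(&expired);
	INIT_LIST_HEAD(&cb_entry);
	printf("deferred? %d\n", !list_empty(&cb_entry));	/* 0 */

	list_add_tail(&cb_entry, &expired);			/* defer */
	printf("deferred? %d\n", !list_empty(&cb_entry));	/* 1 */

	list_del_init(&cb_entry);				/* remove */
	printf("deferred? %d\n", !list_empty(&cb_entry));	/* 0 */
	return 0;
}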
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 905e2cd2af72..1dd627ba799e 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -589,8 +589,7 @@ static int hrtimer_reprogram(struct hrtimer *timer,
* When the callback is running, we do not reprogram the clock event
* device. The timer callback is either running on a different CPU or
* the callback is executed in the hrtimer_interrupt context. The
- * reprogramming is handled either by the softirq, which called the
- * callback or at the end of the hrtimer_interrupt.
+ * reprogramming is handled at the end of the hrtimer_interrupt.
*/
if (hrtimer_callback_running(timer))
return 0;
@@ -625,6 +624,9 @@ static int hrtimer_reprogram(struct hrtimer *timer,
return res;
}
+static void __run_hrtimer(struct hrtimer *timer, ktime_t *now);
+static int hrtimer_rt_defer(struct hrtimer *timer);
+
/*
* Initialize the high resolution related parts of cpu_base
*/
@@ -644,7 +646,29 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
struct hrtimer_clock_base *base,
int wakeup)
{
+#ifdef CONFIG_PREEMPT_RT_BASE
+again:
+ if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
+ /*
+ * Move softirq based timers away from the rbtree in
+ * case they have already expired. Otherwise we would
+ * have a stale base->first entry until the softirq runs.
+ */
+ if (!hrtimer_rt_defer(timer)) {
+ ktime_t now = ktime_get();
+
+ __run_hrtimer(timer, &now);
+ /*
+ * __run_hrtimer might have requeued timer and
+ * it could be base->first again.
+ */
+ if (&timer->node == base->active.next)
+ goto again;
+ return 1;
+ }
+#else
if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
+#endif
if (wakeup) {
raw_spin_unlock(&base->cpu_base->lock);
raise_softirq_irqoff(HRTIMER_SOFTIRQ);
@@ -733,6 +757,11 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void retrigger_next_event(void *arg) { }
+static inline int hrtimer_reprogram(struct hrtimer *timer,
+ struct hrtimer_clock_base *base)
+{
+ return 0;
+}
#endif /* CONFIG_HIGH_RES_TIMERS */
@@ -864,9 +893,9 @@ void hrtimer_wait_for_timer(const struct hrtimer *timer)
{
struct hrtimer_clock_base *base = timer->base;
- if (base && base->cpu_base && !hrtimer_hres_active(base->cpu_base))
+ if (base && base->cpu_base && !timer->irqsafe)
wait_event(base->cpu_base->wait,
- !(timer->state & HRTIMER_STATE_CALLBACK));
+ !(timer->state & HRTIMER_STATE_CALLBACK));
}
#else
@@ -916,6 +945,11 @@ static void __remove_hrtimer(struct hrtimer *timer,
if (!(timer->state & HRTIMER_STATE_ENQUEUED))
goto out;
+ if (unlikely(!list_empty(&timer->cb_entry))) {
+ list_del_init(&timer->cb_entry);
+ goto out;
+ }
+
next_timer = timerqueue_getnext(&base->active);
timerqueue_del(&base->active, &timer->node);
if (&timer->node == next_timer) {
@@ -1178,6 +1212,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
base = hrtimer_clockid_to_base(clock_id);
timer->base = &cpu_base->clock_base[base];
+ INIT_LIST_HEAD(&timer->cb_entry);
timerqueue_init(&timer->node);
#ifdef CONFIG_TIMER_STATS
@@ -1261,10 +1296,118 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
timer->state &= ~HRTIMER_STATE_CALLBACK;
}
-#ifdef CONFIG_HIGH_RES_TIMERS
-
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
+#ifdef CONFIG_PREEMPT_RT_BASE
+static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer,
+ struct hrtimer_clock_base *base)
+{
+ /*
+ * Note: we clear the callback flag before we requeue the
+ * timer; otherwise we would trigger the callback_running()
+ * check in hrtimer_reprogram().
+ */
+ timer->state &= ~HRTIMER_STATE_CALLBACK;
+
+ if (restart != HRTIMER_NORESTART) {
+ BUG_ON(hrtimer_active(timer));
+ /*
+ * Enqueue the timer. If it's the leftmost timer then
+ * we need to reprogram it.
+ */
+ if (!enqueue_hrtimer(timer, base))
+ return;
+
+ if (hrtimer_reprogram(timer, base))
+ goto requeue;
+
+ } else if (hrtimer_active(timer)) {
+ /*
+ * If the timer was rearmed on another CPU, reprogram
+ * the event device.
+ */
+ if (&timer->node == base->active.next &&
+ hrtimer_reprogram(timer, base))
+ goto requeue;
+ }
+ return;
+
+requeue:
+ /*
+ * Timer is expired. Thus move it from tree to pending list
+ * again.
+ */
+ __remove_hrtimer(timer, base, timer->state, 0);
+ list_add_tail(&timer->cb_entry, &base->expired);
+}
+
+/*
+ * The changes in mainline which removed the callback modes from
+ * hrtimer are not yet working with -rt. The non-wakeup_process()
+ * based callbacks which involve sleeping locks need to be treated
+ * separately.
+ */
+static void hrtimer_rt_run_pending(void)
+{
+ enum hrtimer_restart (*fn)(struct hrtimer *);
+ struct hrtimer_cpu_base *cpu_base;
+ struct hrtimer_clock_base *base;
+ struct hrtimer *timer;
+ int index, restart;
+
+ local_irq_disable();
+ cpu_base = &per_cpu(hrtimer_bases, smp_processor_id());
+
+ raw_spin_lock(&cpu_base->lock);
+
+ for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
+ base = &cpu_base->clock_base[index];
+
+ while (!list_empty(&base->expired)) {
+ timer = list_first_entry(&base->expired,
+ struct hrtimer, cb_entry);
+
+ /*
+ * Same as the __run_hrtimer() function above, except
+ * that here we run with interrupts enabled.
+ */
+ debug_hrtimer_deactivate(timer);
+ __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
+ timer_stats_account_hrtimer(timer);
+ fn = timer->function;
+
+ raw_spin_unlock_irq(&cpu_base->lock);
+ restart = fn(timer);
+ raw_spin_lock_irq(&cpu_base->lock);
+
+ hrtimer_rt_reprogram(restart, timer, base);
+ }
+ }
+
+ raw_spin_unlock_irq(&cpu_base->lock);
+
+ wake_up_timer_waiters(cpu_base);
+}
+
+static int hrtimer_rt_defer(struct hrtimer *timer)
+{
+ if (timer->irqsafe)
+ return 0;
+
+ __remove_hrtimer(timer, timer->base, timer->state, 0);
+ list_add_tail(&timer->cb_entry, &timer->base->expired);
+ return 1;
+}
+
+#else
+
+static inline void hrtimer_rt_run_pending(void) { }
+static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
+
+#endif
+
+#ifdef CONFIG_HIGH_RES_TIMERS
+
/*
* High resolution timer interrupt
* Called with interrupts disabled
@@ -1273,7 +1416,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
{
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
ktime_t expires_next, now, entry_time, delta;
- int i, retries = 0;
+ int i, retries = 0, raise = 0;
BUG_ON(!cpu_base->hres_active);
cpu_base->nr_events++;
@@ -1340,7 +1483,10 @@ retry:
break;
}
- __run_hrtimer(timer, &basenow);
+ if (!hrtimer_rt_defer(timer))
+ __run_hrtimer(timer, &basenow);
+ else
+ raise = 1;
}
}
@@ -1355,6 +1501,10 @@ retry:
if (expires_next.tv64 == KTIME_MAX ||
!tick_program_event(expires_next, 0)) {
cpu_base->hang_detected = 0;
+
+ if (raise)
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+
return;
}
@@ -1430,17 +1580,17 @@ void hrtimer_peek_ahead_timers(void)
local_irq_restore(flags);
}
-static void run_hrtimer_softirq(struct softirq_action *h)
-{
- hrtimer_peek_ahead_timers();
-}
-
#else /* CONFIG_HIGH_RES_TIMERS */
static inline void __hrtimer_peek_ahead_timers(void) { }
#endif /* !CONFIG_HIGH_RES_TIMERS */
+static void run_hrtimer_softirq(struct softirq_action *h)
+{
+ hrtimer_rt_run_pending();
+}
+
/*
* Called from timer softirq every jiffy, expire hrtimers:
*
@@ -1473,7 +1623,7 @@ void hrtimer_run_queues(void)
struct timerqueue_node *node;
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
struct hrtimer_clock_base *base;
- int index, gettime = 1;
+ int index, gettime = 1, raise = 0;
if (hrtimer_hres_active())
return;
@@ -1498,12 +1648,16 @@ void hrtimer_run_queues(void)
hrtimer_get_expires_tv64(timer))
break;
- __run_hrtimer(timer, &base->softirq_time);
+ if (!hrtimer_rt_defer(timer))
+ __run_hrtimer(timer, &base->softirq_time);
+ else
+ raise = 1;
}
raw_spin_unlock(&cpu_base->lock);
}
- wake_up_timer_waiters(cpu_base);
+ if (raise)
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
}
/*
@@ -1525,6 +1679,7 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
sl->timer.function = hrtimer_wakeup;
+ sl->timer.irqsafe = 1;
sl->task = task;
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
@@ -1663,6 +1818,7 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
cpu_base->clock_base[i].cpu_base = cpu_base;
timerqueue_init_head(&cpu_base->clock_base[i].active);
+ INIT_LIST_HEAD(&cpu_base->clock_base[i].expired);
}
hrtimer_init_hres(cpu_base);
@@ -1781,9 +1937,7 @@ void __init hrtimers_init(void)
hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
(void *)(long)smp_processor_id());
register_cpu_notifier(&hrtimers_nb);
-#ifdef CONFIG_HIGH_RES_TIMERS
open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
-#endif
}
/**
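The heart of the change is hrtimer_rt_run_pending() above: expired non-irqsafe timers are collected on base->expired under the base lock, and each callback then runs with that lock dropped and interrupts enabled, so the callback may take sleeping locks. A compilable user-space sketch of this defer-and-run pattern, with a pthread mutex standing in for cpu_base->lock (all names here are illustrative, not kernel API):

#include <pthread.h>
#include <stdio.h>

struct entry {
	struct entry *next;
	void (*fn)(struct entry *);
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *expired;		/* pending-callback list */

/* "Interrupt" side: queue the expired entry instead of calling fn. */
static void defer(struct entry *e)
{
	pthread_mutex_lock(&lock);
	e->next = expired;
	expired = e;
	pthread_mutex_unlock(&lock);
}

/* "Softirq" side: pop entries and run each callback unlocked. */
static void run_pending(void)
{
	pthread_mutex_lock(&lock);
	while (expired) {
		struct entry *e = expired;

		expired = e->next;
		pthread_mutex_unlock(&lock);	/* callback may block */
		e->fn(e);
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}

static void hello(struct entry *e)
{
	puts("deferred callback ran");
}

int main(void)
{
	struct entry e = { .next = NULL, .fn = hello };

	defer(&e);
	run_pending();
	return 0;
}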
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index df06ea68e2b8..65c44bf92679 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -480,6 +480,7 @@ static void init_rq_hrtick(struct rq *rq)
hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rq->hrtick_timer.function = hrtick;
+ rq->hrtick_timer.irqsafe = 1;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 44af55e6d5d0..8bb9f00f8b49 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -41,6 +41,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
hrtimer_init(&rt_b->rt_period_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ rt_b->rt_period_timer.irqsafe = 1;
rt_b->rt_period_timer.function = sched_rt_period_timer;
}
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 6d44f87f83ed..e94b6b33123a 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -830,6 +830,7 @@ void tick_setup_sched_timer(void)
* Emulate tick processing via per-CPU hrtimers:
*/
hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ ts->sched_timer.irqsafe = 1;
ts->sched_timer.function = tick_sched_timer;
/* Get the next period (per cpu) */
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index de08263ff8ae..87192eb940c0 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -441,6 +441,7 @@ static void watchdog_prepare_cpu(int cpu)
WARN_ON(per_cpu(softlockup_watchdog, cpu));
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer->function = watchdog_timer_fn;
+ hrtimer->irqsafe = 1;
}
static int watchdog_enable(int cpu)
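Finally, a hypothetical test module (not part of the patch; the expected output is an assumption based on the hunks above) that could be used to observe where callbacks run: with this patch applied on -rt, the non-irqsafe callback should report softirq context with interrupts enabled, while the irqsafe one should report hard-irq context.

#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>

static struct hrtimer soft_timer, hard_timer;

static enum hrtimer_restart report_ctx(struct hrtimer *timer)
{
	pr_info("%s timer: hardirq=%d softirq=%d irqs_disabled=%d\n",
		timer == &hard_timer ? "irqsafe" : "deferred",
		!!in_irq(), !!in_serving_softirq(), !!irqs_disabled());
	return HRTIMER_NORESTART;
}

static int __init ctxtest_init(void)
{
	hrtimer_init(&soft_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	soft_timer.function = report_ctx;	/* irqsafe stays 0 */

	hrtimer_init(&hard_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hard_timer.function = report_ctx;
	hard_timer.irqsafe = 1;			/* opt in to hard-irq */

	hrtimer_start(&soft_timer, ktime_set(0, 100 * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
	hrtimer_start(&hard_timer, ktime_set(0, 100 * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
	return 0;
}

static void __exit ctxtest_exit(void)
{
	hrtimer_cancel(&soft_timer);
	hrtimer_cancel(&hard_timer);
}

module_init(ctxtest_init);
module_exit(ctxtest_exit);
MODULE_LICENSE("GPL");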