-rw-r--r--  kernel/time/timekeeping.c | 19
1 file changed, 19 insertions(+), 0 deletions(-)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 523670ec0d2e..568ba1ffba0b 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -131,9 +131,11 @@ static struct tk_fast tk_fast_raw ____cacheline_aligned = {
#ifdef CONFIG_POSIX_AUX_CLOCKS
static __init void tk_aux_setup(void);
static void tk_aux_update_clocksource(void);
+static void tk_aux_advance(void);
#else
static inline void tk_aux_setup(void) { }
static inline void tk_aux_update_clocksource(void) { }
+static inline void tk_aux_advance(void) { }
#endif
unsigned long timekeeper_lock_irqsave(void)
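The hunk above follows the kernel's usual config-stub idiom: when CONFIG_POSIX_AUX_CLOCKS is not set, tk_aux_advance() becomes an empty static inline stub, so the caller added below can invoke it unconditionally without an #ifdef at the call site and the compiler drops the call. A minimal standalone sketch of the same idiom follows; CONFIG_EXAMPLE_FEATURE and all function names in it are invented for illustration, not kernel code.

/* Illustrative config-stub pattern; CONFIG_EXAMPLE_FEATURE and the
 * function names below are made up for this sketch. */
#include <stdio.h>

#ifdef CONFIG_EXAMPLE_FEATURE
static void feature_advance(void)
{
	/* Real work exists only when the feature is compiled in */
	puts("advancing optional feature state");
}
#else
/* Empty stub: callers need no #ifdef and the call is optimized away */
static inline void feature_advance(void) { }
#endif

static void periodic_tick(void)
{
	/* Unconditional call, mirroring update_wall_time() -> tk_aux_advance() */
	feature_advance();
}

int main(void)
{
	periodic_tick();
	return 0;
}

Building the sketch with and without -DCONFIG_EXAMPLE_FEATURE exercises both branches, which is how the real code behaves with CONFIG_POSIX_AUX_CLOCKS toggled.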
@@ -2317,11 +2319,13 @@ static bool timekeeping_advance(enum timekeeping_adv_mode mode)
/**
* update_wall_time - Uses the current clocksource to increment the wall time
*
+ * It also updates the enabled auxiliary clock timekeepers
*/
void update_wall_time(void)
{
if (timekeeping_advance(TK_ADV_TICK))
clock_was_set_delayed();
+ tk_aux_advance();
}
/**
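The extra call in update_wall_time() means the periodic tick now advances every enabled auxiliary timekeeper alongside the wall-clock timekeeper. The toy model below illustrates why that periodic advancement matters: each timekeeper folds the clocksource delta since its last update into an accumulated base. Every name and type in the sketch (toy_timekeeper, toy_advance, toy_update_wall_time, read_cycles) is invented for illustration and is not a kernel API.

/* Toy model of periodic timekeeper advancement; not kernel code. */
#include <stdint.h>

struct toy_timekeeper {
	uint64_t base_ns;	/* time accumulated so far */
	uint64_t last_cycles;	/* counter reading at the last advance */
};

/* Stand-in for reading a hardware clocksource */
static uint64_t read_cycles(void)
{
	static uint64_t fake;
	return fake += 1000;
}

/* Fold the cycles elapsed since the last tick into the accumulated base,
 * roughly the per-timekeeper job of __timekeeping_advance(). */
static void toy_advance(struct toy_timekeeper *tk)
{
	uint64_t now = read_cycles();

	tk->base_ns += now - tk->last_cycles;	/* pretend 1 cycle == 1 ns */
	tk->last_cycles = now;
}

static struct toy_timekeeper wall_tk, aux_tk[2];

/* After this patch the periodic update covers the wall timekeeper and
 * every enabled auxiliary timekeeper. */
static void toy_update_wall_time(void)
{
	toy_advance(&wall_tk);
	for (unsigned int i = 0; i < 2; i++)
		toy_advance(&aux_tk[i]);
}

int main(void)
{
	toy_update_wall_time();		/* one simulated tick */
	return 0;
}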
@@ -2764,6 +2768,21 @@ static void tk_aux_update_clocksource(void)
}
}
+static void tk_aux_advance(void)
+{
+ unsigned long active = READ_ONCE(aux_timekeepers);
+ unsigned int id;
+
+ /* Lockless quick check to avoid extra cache lines */
+ for_each_set_bit(id, &active, BITS_PER_LONG) {
+ struct tk_data *aux_tkd = &timekeeper_data[id + TIMEKEEPER_AUX_FIRST];
+
+ guard(raw_spinlock)(&aux_tkd->lock);
+ if (aux_tkd->shadow_timekeeper.clock_valid)
+ __timekeeping_advance(aux_tkd, TK_ADV_TICK);
+ }
+}
+
/**
* ktime_get_aux - Get time for an AUX clock
* @id: ID of the clock to read (CLOCK_AUX...)
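The tk_aux_advance() addition pairs a lockless snapshot of the aux_timekeepers bitmap with a per-timekeeper raw spinlock, so timekeepers that were never enabled cost nothing beyond the single bitmap read on each tick. Below is a userspace analogue of that pattern; all names are invented, a pthread mutex stands in for guard(raw_spinlock), and a plain bit-test loop stands in for for_each_set_bit().

/* Userspace analogue of the snapshot-then-lock pattern; not kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

#define MAX_ENTRIES 8

struct entry {
	pthread_mutex_t lock;
	bool valid;			/* mirrors shadow_timekeeper.clock_valid */
	unsigned long long ticks;
};

static _Atomic unsigned long active_map;	/* one bit per enabled entry */
static struct entry entries[MAX_ENTRIES];

static void entries_init(void)
{
	for (unsigned int i = 0; i < MAX_ENTRIES; i++)
		pthread_mutex_init(&entries[i].lock, NULL);
}

static void advance_active_entries(void)
{
	/* Lockless snapshot, as with READ_ONCE(aux_timekeepers) */
	unsigned long active = atomic_load_explicit(&active_map, memory_order_relaxed);

	for (unsigned int id = 0; id < MAX_ENTRIES; id++) {
		if (!(active & (1UL << id)))
			continue;

		pthread_mutex_lock(&entries[id].lock);
		if (entries[id].valid)
			entries[id].ticks++;	/* stand-in for __timekeeping_advance() */
		pthread_mutex_unlock(&entries[id].lock);
	}
}

int main(void)
{
	entries_init();
	entries[0].valid = entries[2].valid = true;
	atomic_store(&active_map, 0x5UL);	/* pretend entries 0 and 2 are enabled */
	advance_active_entries();
	return 0;
}

Snapshotting the bitmap once keeps the hot tick path from touching the cache lines of disabled timekeepers, which is what the "Lockless quick check" comment in the hunk is about.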