summaryrefslogtreecommitdiff
path: root/include
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2026-04-15 17:15:18 -0700
committerLinus Torvalds <torvalds@linux-foundation.org>2026-04-15 17:15:18 -0700
commitfdbfee9fc56e13a1307868829d438ad66ab308a4 (patch)
tree228c066fa11b1fdf44cff5d2df75c597580e5993 /include
parent5ed19574ebf0ba857c8a0d3d80ee409ff9498363 (diff)
parent00f0dadde8c5036fe6462621a6920549036dce70 (diff)
Merge tag 'trace-rv-v7.1' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace
Pull runtime verification updates from Steven Rostedt: - Refactor da_monitor header to share handlers across monitor types No functional changes, only less code duplication. - Add Hybrid Automata model class Add a new model class that extends deterministic automata by adding constraints on transitions and states. Those constraints can take into account wall-clock time and as such allow RV monitors to make assertions on real time. Add documentation and code generation scripts. - Add stall monitor as hybrid automaton example Add a monitor that triggers a violation when a task is stalling as an example of automaton working with real time variables. - Convert the opid monitor to a hybrid automaton The opid monitor can be heavily simplified if written as a hybrid automaton: instead of tracking preempt and interrupt enable/disable events, it can just run constraints on the preemption/interrupt states when events like wakeup and need_resched occur. - Add support for per-object monitors in DA/HA Allow writing deterministic and hybrid automata monitors for generic objects (e.g. any struct), by exploiting a hash table where objects are saved. This allows tracking more than just tasks in RV. For instance it will be used to track deadline entities in deadline monitors. - Add deadline tracepoints and move some deadline utilities Prepare the ground for deadline monitors by defining events and exporting helpers. - Add nomiss deadline monitor Add first example of deadline monitor asserting all entities complete before their deadline. - Improve rvgen error handling Introduce AutomataError exception class and better handle expected exceptions while showing a backtrace for unexpected ones. 
- Improve python code quality in rvgen Refactor the rvgen generation scripts to align with python best practices: use f-strings instead of %, use len() instead of __len__(), remove semicolons, use context managers for file operations, fix whitespace violations, extract magic strings into constants, remove unused imports and methods. - Fix small bugs in rvgen The generator scripts presented some corner case bugs: logical error in validating what a correct dot file looks like, fix an isinstance() check, enforce a dot file has an initial state, fix type annotations and typos in comments. - rvgen refactoring Refactor automata.py to use iterator-based parsing and handle required arguments directly in argparse. - Allow epoll in rtapp-sleep monitor The epoll_wait call is now rt-friendly so it should be allowed in the sleep monitor as a valid sleep method. * tag 'trace-rv-v7.1' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace: (32 commits) rv: Allow epoll in rtapp-sleep monitor rv/rvgen: fix _fill_states() return type annotation rv/rvgen: fix unbound loop variable warning rv/rvgen: enforce presence of initial state rv/rvgen: extract node marker string to class constant rv/rvgen: fix isinstance check in Variable.expand() rv/rvgen: make monitor arguments required in rvgen rv/rvgen: remove unused __get_main_name method rv/rvgen: remove unused sys import from dot2c rv/rvgen: refactor automata.py to use iterator-based parsing rv/rvgen: use class constant for init marker rv/rvgen: fix DOT file validation logic error rv/rvgen: fix PEP 8 whitespace violations rv/rvgen: fix typos in automata and generator docstring and comments rv/rvgen: use context managers for file operations rv/rvgen: remove unnecessary semicolons rv/rvgen: replace __len__() calls with len() rv/rvgen: replace % string formatting with f-strings rv/rvgen: remove bare except clauses in generator rv/rvgen: introduce AutomataError exception class ...
Diffstat (limited to 'include')
-rw-r--r--include/linux/rv.h39
-rw-r--r--include/linux/sched/deadline.h27
-rw-r--r--include/rv/da_monitor.h644
-rw-r--r--include/rv/ha_monitor.h478
-rw-r--r--include/trace/events/sched.h26
5 files changed, 1050 insertions, 164 deletions
diff --git a/include/linux/rv.h b/include/linux/rv.h
index 58774eb3aecf..541ba404926a 100644
--- a/include/linux/rv.h
+++ b/include/linux/rv.h
@@ -13,6 +13,7 @@
#define RV_MON_GLOBAL 0
#define RV_MON_PER_CPU 1
#define RV_MON_PER_TASK 2
+#define RV_MON_PER_OBJ 3
#ifdef CONFIG_RV
#include <linux/array_size.h>
@@ -81,11 +82,49 @@ struct ltl_monitor {};
#endif /* CONFIG_RV_LTL_MONITOR */
+#ifdef CONFIG_RV_HA_MONITOR
+/*
+ * In the future, hybrid automata may rely on multiple
+ * environment variables, e.g. different clocks started at
+ * different times or running at different speed.
+ * For now we support only 1 variable.
+ */
+#define MAX_HA_ENV_LEN 1
+
+/*
+ * Monitors can pick the preferred timer implementation:
+ * No timer: if monitors don't have state invariants.
+ * Timer wheel: lightweight invariants check but far less precise.
+ * Hrtimer: accurate invariants check with higher overhead.
+ */
+#define HA_TIMER_NONE 0
+#define HA_TIMER_WHEEL 1
+#define HA_TIMER_HRTIMER 2
+
+/*
+ * Hybrid automaton per-object variables.
+ */
+struct ha_monitor {
+ struct da_monitor da_mon;
+ u64 env_store[MAX_HA_ENV_LEN];
+ union {
+ struct hrtimer hrtimer;
+ struct timer_list timer;
+ };
+};
+
+#else
+
+struct ha_monitor { };
+
+#endif /* CONFIG_RV_HA_MONITOR */
+
#define RV_PER_TASK_MONITOR_INIT (CONFIG_RV_PER_TASK_MONITORS)
union rv_task_monitor {
struct da_monitor da_mon;
struct ltl_monitor ltl_mon;
+ struct ha_monitor ha_mon;
};
#ifdef CONFIG_RV_REACTORS
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
index c40115d4e34d..1198138cb839 100644
--- a/include/linux/sched/deadline.h
+++ b/include/linux/sched/deadline.h
@@ -37,4 +37,31 @@ extern void dl_clear_root_domain_cpu(int cpu);
extern u64 dl_cookie;
extern bool dl_bw_visited(int cpu, u64 cookie);
+static inline bool dl_server(struct sched_dl_entity *dl_se)
+{
+ return dl_se->dl_server;
+}
+
+static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
+{
+ BUG_ON(dl_server(dl_se));
+ return container_of(dl_se, struct task_struct, dl);
+}
+
+/*
+ * Regarding the deadline, a task with implicit deadline has a relative
+ * deadline == relative period. A task with constrained deadline has a
+ * relative deadline <= relative period.
+ *
+ * We support constrained deadline tasks. However, there are some restrictions
+ * applied only for tasks which do not have an implicit deadline. See
+ * update_dl_entity() to know more about such restrictions.
+ *
+ * The dl_is_implicit() returns true if the task has an implicit deadline.
+ */
+static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
+{
+ return dl_se->dl_deadline == dl_se->dl_period;
+}
+
#endif /* _LINUX_SCHED_DEADLINE_H */
diff --git a/include/rv/da_monitor.h b/include/rv/da_monitor.h
index 7511f5464c48..39765ff6f098 100644
--- a/include/rv/da_monitor.h
+++ b/include/rv/da_monitor.h
@@ -3,9 +3,9 @@
* Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org>
*
* Deterministic automata (DA) monitor functions, to be used together
- * with automata models in C generated by the dot2k tool.
+ * with automata models in C generated by the rvgen tool.
*
- * The dot2k tool is available at tools/verification/dot2k/
+ * The rvgen tool is available at tools/verification/rvgen/
*
* For further information, see:
* Documentation/trace/rv/monitor_synthesis.rst
@@ -19,6 +19,8 @@
#include <linux/stringify.h>
#include <linux/bug.h>
#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/hashtable.h>
/*
* Per-cpu variables require a unique name although static in some
@@ -28,6 +30,43 @@
static struct rv_monitor rv_this;
+/*
+ * Hook to allow the implementation of hybrid automata: define it with a
+ * function that takes curr_state, event and next_state and returns true if the
+ * environment constraints (e.g. timing) are satisfied, false otherwise.
+ */
+#ifndef da_monitor_event_hook
+#define da_monitor_event_hook(...) true
+#endif
+
+/*
+ * Hook to allow the implementation of hybrid automata: define it with a
+ * function that takes the da_monitor and performs further initialisation
+ * (e.g. set up timers).
+ */
+#ifndef da_monitor_init_hook
+#define da_monitor_init_hook(da_mon)
+#endif
+
+/*
+ * Hook to allow the implementation of hybrid automata: define it with a
+ * function that takes the da_monitor and performs further reset (e.g. reset
+ * all clocks).
+ */
+#ifndef da_monitor_reset_hook
+#define da_monitor_reset_hook(da_mon)
+#endif
+
+/*
+ * Type for the target id, default to int but can be overridden.
+ * A long type can work as hash table key (PER_OBJ) but will be downgraded to
+ * int in the event tracepoint.
+ * Unused for implicit monitors.
+ */
+#ifndef da_id_type
+#define da_id_type int
+#endif
+
static void react(enum states curr_state, enum events event)
{
rv_react(&rv_this,
@@ -42,6 +81,7 @@ static void react(enum states curr_state, enum events event)
*/
static inline void da_monitor_reset(struct da_monitor *da_mon)
{
+ da_monitor_reset_hook(da_mon);
da_mon->monitoring = 0;
da_mon->curr_state = model_get_initial_state();
}
@@ -56,6 +96,7 @@ static inline void da_monitor_start(struct da_monitor *da_mon)
{
da_mon->curr_state = model_get_initial_state();
da_mon->monitoring = 1;
+ da_monitor_init_hook(da_mon);
}
/*
@@ -97,90 +138,6 @@ static inline bool da_monitor_handling_event(struct da_monitor *da_mon)
return 1;
}
-#if RV_MON_TYPE == RV_MON_GLOBAL || RV_MON_TYPE == RV_MON_PER_CPU
-/*
- * Event handler for implicit monitors. Implicit monitor is the one which the
- * handler does not need to specify which da_monitor to manipulate. Examples
- * of implicit monitor are the per_cpu or the global ones.
- *
- * Retry in case there is a race between getting and setting the next state,
- * warn and reset the monitor if it runs out of retries. The monitor should be
- * able to handle various orders.
- */
-
-static inline bool da_event(struct da_monitor *da_mon, enum events event)
-{
- enum states curr_state, next_state;
-
- curr_state = READ_ONCE(da_mon->curr_state);
- for (int i = 0; i < MAX_DA_RETRY_RACING_EVENTS; i++) {
- next_state = model_get_next_state(curr_state, event);
- if (next_state == INVALID_STATE) {
- react(curr_state, event);
- CONCATENATE(trace_error_, MONITOR_NAME)(
- model_get_state_name(curr_state),
- model_get_event_name(event));
- return false;
- }
- if (likely(try_cmpxchg(&da_mon->curr_state, &curr_state, next_state))) {
- CONCATENATE(trace_event_, MONITOR_NAME)(
- model_get_state_name(curr_state),
- model_get_event_name(event),
- model_get_state_name(next_state),
- model_is_final_state(next_state));
- return true;
- }
- }
-
- trace_rv_retries_error(__stringify(MONITOR_NAME), model_get_event_name(event));
- pr_warn("rv: " __stringify(MAX_DA_RETRY_RACING_EVENTS)
- " retries reached for event %s, resetting monitor %s",
- model_get_event_name(event), __stringify(MONITOR_NAME));
- return false;
-}
-
-#elif RV_MON_TYPE == RV_MON_PER_TASK
-/*
- * Event handler for per_task monitors.
- *
- * Retry in case there is a race between getting and setting the next state,
- * warn and reset the monitor if it runs out of retries. The monitor should be
- * able to handle various orders.
- */
-
-static inline bool da_event(struct da_monitor *da_mon, struct task_struct *tsk,
- enum events event)
-{
- enum states curr_state, next_state;
-
- curr_state = READ_ONCE(da_mon->curr_state);
- for (int i = 0; i < MAX_DA_RETRY_RACING_EVENTS; i++) {
- next_state = model_get_next_state(curr_state, event);
- if (next_state == INVALID_STATE) {
- react(curr_state, event);
- CONCATENATE(trace_error_, MONITOR_NAME)(tsk->pid,
- model_get_state_name(curr_state),
- model_get_event_name(event));
- return false;
- }
- if (likely(try_cmpxchg(&da_mon->curr_state, &curr_state, next_state))) {
- CONCATENATE(trace_event_, MONITOR_NAME)(tsk->pid,
- model_get_state_name(curr_state),
- model_get_event_name(event),
- model_get_state_name(next_state),
- model_is_final_state(next_state));
- return true;
- }
- }
-
- trace_rv_retries_error(__stringify(MONITOR_NAME), model_get_event_name(event));
- pr_warn("rv: " __stringify(MAX_DA_RETRY_RACING_EVENTS)
- " retries reached for event %s, resetting monitor %s",
- model_get_event_name(event), __stringify(MONITOR_NAME));
- return false;
-}
-#endif /* RV_MON_TYPE */
-
#if RV_MON_TYPE == RV_MON_GLOBAL
/*
* Functions to define, init and get a global monitor.
@@ -219,7 +176,10 @@ static inline int da_monitor_init(void)
/*
* da_monitor_destroy - destroy the monitor
*/
-static inline void da_monitor_destroy(void) { }
+static inline void da_monitor_destroy(void)
+{
+ da_monitor_reset_all();
+}
#elif RV_MON_TYPE == RV_MON_PER_CPU
/*
@@ -265,7 +225,10 @@ static inline int da_monitor_init(void)
/*
* da_monitor_destroy - destroy the monitor
*/
-static inline void da_monitor_destroy(void) { }
+static inline void da_monitor_destroy(void)
+{
+ da_monitor_reset_all();
+}
#elif RV_MON_TYPE == RV_MON_PER_TASK
/*
@@ -286,6 +249,24 @@ static inline struct da_monitor *da_get_monitor(struct task_struct *tsk)
return &tsk->rv[task_mon_slot].da_mon;
}
+/*
+ * da_get_target - return the task associated to the monitor
+ */
+static inline struct task_struct *da_get_target(struct da_monitor *da_mon)
+{
+ return container_of(da_mon, struct task_struct, rv[task_mon_slot].da_mon);
+}
+
+/*
+ * da_get_id - return the id associated to the monitor
+ *
+ * For per-task monitors, the id is the task's PID.
+ */
+static inline da_id_type da_get_id(struct da_monitor *da_mon)
+{
+ return da_get_target(da_mon)->pid;
+}
+
static void da_monitor_reset_all(void)
{
struct task_struct *g, *p;
@@ -330,120 +311,411 @@ static inline void da_monitor_destroy(void)
}
rv_put_task_monitor_slot(task_mon_slot);
task_mon_slot = RV_PER_TASK_MONITOR_INIT;
+
+ da_monitor_reset_all();
}
-#endif /* RV_MON_TYPE */
-#if RV_MON_TYPE == RV_MON_GLOBAL || RV_MON_TYPE == RV_MON_PER_CPU
+#elif RV_MON_TYPE == RV_MON_PER_OBJ
/*
- * Handle event for implicit monitor: da_get_monitor() will figure out
- * the monitor.
+ * Functions to define, init and get a per-object monitor.
*/
-static inline void __da_handle_event(struct da_monitor *da_mon,
- enum events event)
+struct da_monitor_storage {
+ da_id_type id;
+ monitor_target target;
+ union rv_task_monitor rv;
+ struct hlist_node node;
+ struct rcu_head rcu;
+};
+
+#ifndef DA_MONITOR_HT_BITS
+#define DA_MONITOR_HT_BITS 10
+#endif
+static DEFINE_HASHTABLE(da_monitor_ht, DA_MONITOR_HT_BITS);
+
+/*
+ * da_create_empty_storage - pre-allocate an empty storage
+ */
+static inline struct da_monitor_storage *da_create_empty_storage(da_id_type id)
{
- bool retval;
+ struct da_monitor_storage *mon_storage;
- retval = da_event(da_mon, event);
- if (!retval)
- da_monitor_reset(da_mon);
+ mon_storage = kmalloc_nolock(sizeof(struct da_monitor_storage),
+ __GFP_ZERO, NUMA_NO_NODE);
+ if (!mon_storage)
+ return NULL;
+
+ hash_add_rcu(da_monitor_ht, &mon_storage->node, id);
+ mon_storage->id = id;
+ return mon_storage;
}
/*
- * da_handle_event - handle an event
+ * da_create_storage - create the per-object storage
+ *
+ * The caller is responsible for synchronising writers, either with locks or
+ * implicitly. For instance, if da_create_storage is only called from a single
+ * event for target (e.g. sched_switch), it's safe to call this without locks.
*/
-static inline void da_handle_event(enum events event)
+static inline struct da_monitor *da_create_storage(da_id_type id,
+ monitor_target target,
+ struct da_monitor *da_mon)
{
- struct da_monitor *da_mon = da_get_monitor();
- bool retval;
+ struct da_monitor_storage *mon_storage;
- retval = da_monitor_handling_event(da_mon);
- if (!retval)
- return;
+ if (da_mon)
+ return da_mon;
- __da_handle_event(da_mon, event);
+ mon_storage = da_create_empty_storage(id);
+ if (!mon_storage)
+ return NULL;
+
+ mon_storage->target = target;
+ return &mon_storage->rv.da_mon;
}
/*
- * da_handle_start_event - start monitoring or handle event
+ * __da_get_mon_storage - get the monitor storage from the hash table
+ */
+static inline struct da_monitor_storage *__da_get_mon_storage(da_id_type id)
+{
+ struct da_monitor_storage *mon_storage;
+
+ lockdep_assert_in_rcu_read_lock();
+ hash_for_each_possible_rcu(da_monitor_ht, mon_storage, node, id) {
+ if (mon_storage->id == id)
+ return mon_storage;
+ }
+
+ return NULL;
+}
+
+/*
+ * da_get_monitor - return the monitor for target
+ */
+static struct da_monitor *da_get_monitor(da_id_type id, monitor_target target)
+{
+ struct da_monitor_storage *mon_storage;
+
+ mon_storage = __da_get_mon_storage(id);
+ return mon_storage ? &mon_storage->rv.da_mon : NULL;
+}
+
+/*
+ * da_get_target - return the object associated to the monitor
+ */
+static inline monitor_target da_get_target(struct da_monitor *da_mon)
+{
+ return container_of(da_mon, struct da_monitor_storage, rv.da_mon)->target;
+}
+
+/*
+ * da_get_id - return the id associated to the monitor
+ */
+static inline da_id_type da_get_id(struct da_monitor *da_mon)
+{
+ return container_of(da_mon, struct da_monitor_storage, rv.da_mon)->id;
+}
+
+/*
+ * da_create_or_get - create the per-object storage if not already there
*
- * This function is used to notify the monitor that the system is returning
- * to the initial state, so the monitor can start monitoring in the next event.
- * Thus:
+ * This needs a lookup so should be guarded by RCU, the condition is checked
+ * directly in da_create_storage()
+ */
+static inline void da_create_or_get(da_id_type id, monitor_target target)
+{
+ guard(rcu)();
+ da_create_storage(id, target, da_get_monitor(id, target));
+}
+
+/*
+ * da_fill_empty_storage - store the target in a pre-allocated storage
*
- * If the monitor already started, handle the event.
- * If the monitor did not start yet, start the monitor but skip the event.
+ * Can be used as a substitute of da_create_storage when starting a monitor in
+ * an environment where allocation is unsafe.
*/
-static inline bool da_handle_start_event(enum events event)
+static inline struct da_monitor *da_fill_empty_storage(da_id_type id,
+ monitor_target target,
+ struct da_monitor *da_mon)
{
- struct da_monitor *da_mon;
+ if (unlikely(da_mon && !da_get_target(da_mon)))
+ container_of(da_mon, struct da_monitor_storage, rv.da_mon)->target = target;
+ return da_mon;
+}
- if (!da_monitor_enabled())
- return 0;
+/*
+ * da_get_target_by_id - return the object associated to the id
+ */
+static inline monitor_target da_get_target_by_id(da_id_type id)
+{
+ struct da_monitor_storage *mon_storage;
- da_mon = da_get_monitor();
+ guard(rcu)();
+ mon_storage = __da_get_mon_storage(id);
- if (unlikely(!da_monitoring(da_mon))) {
- da_monitor_start(da_mon);
- return 0;
+ if (unlikely(!mon_storage))
+ return NULL;
+ return mon_storage->target;
+}
+
+/*
+ * da_destroy_storage - destroy the per-object storage
+ *
+ * The caller is responsible for synchronising writers, either with locks or
+ * implicitly. For instance, if da_destroy_storage is called at sched_exit and
+ * da_create_storage can never occur after that, it's safe to call this without
+ * locks.
+ * This function includes an RCU read-side critical section to synchronise
+ * against da_monitor_destroy().
+ */
+static inline void da_destroy_storage(da_id_type id)
+{
+ struct da_monitor_storage *mon_storage;
+
+ guard(rcu)();
+ mon_storage = __da_get_mon_storage(id);
+
+ if (!mon_storage)
+ return;
+ da_monitor_reset_hook(&mon_storage->rv.da_mon);
+ hash_del_rcu(&mon_storage->node);
+ kfree_rcu(mon_storage, rcu);
+}
+
+static void da_monitor_reset_all(void)
+{
+ struct da_monitor_storage *mon_storage;
+ int bkt;
+
+ rcu_read_lock();
+ hash_for_each_rcu(da_monitor_ht, bkt, mon_storage, node)
+ da_monitor_reset(&mon_storage->rv.da_mon);
+ rcu_read_unlock();
+}
+
+static inline int da_monitor_init(void)
+{
+ hash_init(da_monitor_ht);
+ return 0;
+}
+
+static inline void da_monitor_destroy(void)
+{
+ struct da_monitor_storage *mon_storage;
+ struct hlist_node *tmp;
+ int bkt;
+
+ /*
+ * This function is called after all probes are disabled, we need only
+ * worry about concurrency against old events.
+ */
+ synchronize_rcu();
+ hash_for_each_safe(da_monitor_ht, bkt, tmp, mon_storage, node) {
+ da_monitor_reset_hook(&mon_storage->rv.da_mon);
+ hash_del_rcu(&mon_storage->node);
+ kfree(mon_storage);
}
+}
- __da_handle_event(da_mon, event);
+/*
+ * Allow the per-object monitors to run allocation manually, necessary if the
+ * start condition is in a context problematic for allocation (e.g. scheduling).
+ * In such case, if the storage was pre-allocated without a target, set it now.
+ */
+#ifdef DA_SKIP_AUTO_ALLOC
+#define da_prepare_storage da_fill_empty_storage
+#else
+#define da_prepare_storage da_create_storage
+#endif /* DA_SKIP_AUTO_ALLOC */
- return 1;
+#endif /* RV_MON_TYPE */
+
+#if RV_MON_TYPE == RV_MON_GLOBAL || RV_MON_TYPE == RV_MON_PER_CPU
+/*
+ * Trace events for implicit monitors. Implicit monitor is the one which the
+ * handler does not need to specify which da_monitor to manipulate. Examples
+ * of implicit monitor are the per_cpu or the global ones.
+ */
+
+static inline void da_trace_event(struct da_monitor *da_mon,
+ char *curr_state, char *event,
+ char *next_state, bool is_final,
+ da_id_type id)
+{
+ CONCATENATE(trace_event_, MONITOR_NAME)(curr_state, event, next_state,
+ is_final);
+}
+
+static inline void da_trace_error(struct da_monitor *da_mon,
+ char *curr_state, char *event,
+ da_id_type id)
+{
+ CONCATENATE(trace_error_, MONITOR_NAME)(curr_state, event);
}
/*
- * da_handle_start_run_event - start monitoring and handle event
+ * da_get_id - unused for implicit monitors
+ */
+static inline da_id_type da_get_id(struct da_monitor *da_mon)
+{
+ return 0;
+}
+
+#elif RV_MON_TYPE == RV_MON_PER_TASK || RV_MON_TYPE == RV_MON_PER_OBJ
+/*
+ * Trace events for per_task/per_object monitors, report the target id.
+ */
+
+static inline void da_trace_event(struct da_monitor *da_mon,
+ char *curr_state, char *event,
+ char *next_state, bool is_final,
+ da_id_type id)
+{
+ CONCATENATE(trace_event_, MONITOR_NAME)(id, curr_state, event,
+ next_state, is_final);
+}
+
+static inline void da_trace_error(struct da_monitor *da_mon,
+ char *curr_state, char *event,
+ da_id_type id)
+{
+ CONCATENATE(trace_error_, MONITOR_NAME)(id, curr_state, event);
+}
+#endif /* RV_MON_TYPE */
+
+/*
+ * da_event - handle an event for the da_mon
*
- * This function is used to notify the monitor that the system is in the
- * initial state, so the monitor can start monitoring and handling event.
+ * This function is valid for both implicit and id monitors.
+ * Retry in case there is a race between getting and setting the next state,
+ * warn and reset the monitor if it runs out of retries. The monitor should be
+ * able to handle various orders.
*/
-static inline bool da_handle_start_run_event(enum events event)
+static inline bool da_event(struct da_monitor *da_mon, enum events event, da_id_type id)
{
- struct da_monitor *da_mon;
+ enum states curr_state, next_state;
+ curr_state = READ_ONCE(da_mon->curr_state);
+ for (int i = 0; i < MAX_DA_RETRY_RACING_EVENTS; i++) {
+ next_state = model_get_next_state(curr_state, event);
+ if (next_state == INVALID_STATE) {
+ react(curr_state, event);
+ da_trace_error(da_mon, model_get_state_name(curr_state),
+ model_get_event_name(event), id);
+ return false;
+ }
+ if (likely(try_cmpxchg(&da_mon->curr_state, &curr_state, next_state))) {
+ if (!da_monitor_event_hook(da_mon, curr_state, event, next_state, id))
+ return false;
+ da_trace_event(da_mon, model_get_state_name(curr_state),
+ model_get_event_name(event),
+ model_get_state_name(next_state),
+ model_is_final_state(next_state), id);
+ return true;
+ }
+ }
+
+ trace_rv_retries_error(__stringify(MONITOR_NAME), model_get_event_name(event));
+ pr_warn("rv: " __stringify(MAX_DA_RETRY_RACING_EVENTS)
+ " retries reached for event %s, resetting monitor %s",
+ model_get_event_name(event), __stringify(MONITOR_NAME));
+ return false;
+}
+
+static inline void __da_handle_event_common(struct da_monitor *da_mon,
+ enum events event, da_id_type id)
+{
+ if (!da_event(da_mon, event, id))
+ da_monitor_reset(da_mon);
+}
+
+static inline void __da_handle_event(struct da_monitor *da_mon,
+ enum events event, da_id_type id)
+{
+ if (da_monitor_handling_event(da_mon))
+ __da_handle_event_common(da_mon, event, id);
+}
+
+static inline bool __da_handle_start_event(struct da_monitor *da_mon,
+ enum events event, da_id_type id)
+{
if (!da_monitor_enabled())
return 0;
+ if (unlikely(!da_monitoring(da_mon))) {
+ da_monitor_start(da_mon);
+ return 0;
+ }
+
+ __da_handle_event_common(da_mon, event, id);
- da_mon = da_get_monitor();
+ return 1;
+}
+static inline bool __da_handle_start_run_event(struct da_monitor *da_mon,
+ enum events event, da_id_type id)
+{
+ if (!da_monitor_enabled())
+ return 0;
if (unlikely(!da_monitoring(da_mon)))
da_monitor_start(da_mon);
- __da_handle_event(da_mon, event);
+ __da_handle_event_common(da_mon, event, id);
return 1;
}
-#elif RV_MON_TYPE == RV_MON_PER_TASK
+#if RV_MON_TYPE == RV_MON_GLOBAL || RV_MON_TYPE == RV_MON_PER_CPU
/*
- * Handle event for per task.
+ * Handle event for implicit monitor: da_get_monitor() will figure out
+ * the monitor.
*/
-static inline void __da_handle_event(struct da_monitor *da_mon,
- struct task_struct *tsk, enum events event)
+/*
+ * da_handle_event - handle an event
+ */
+static inline void da_handle_event(enum events event)
{
- bool retval;
+ __da_handle_event(da_get_monitor(), event, 0);
+}
- retval = da_event(da_mon, tsk, event);
- if (!retval)
- da_monitor_reset(da_mon);
+/*
+ * da_handle_start_event - start monitoring or handle event
+ *
+ * This function is used to notify the monitor that the system is returning
+ * to the initial state, so the monitor can start monitoring in the next event.
+ * Thus:
+ *
+ * If the monitor already started, handle the event.
+ * If the monitor did not start yet, start the monitor but skip the event.
+ */
+static inline bool da_handle_start_event(enum events event)
+{
+ return __da_handle_start_event(da_get_monitor(), event, 0);
}
/*
- * da_handle_event - handle an event
+ * da_handle_start_run_event - start monitoring and handle event
+ *
+ * This function is used to notify the monitor that the system is in the
+ * initial state, so the monitor can start monitoring and handling event.
*/
-static inline void da_handle_event(struct task_struct *tsk, enum events event)
+static inline bool da_handle_start_run_event(enum events event)
{
- struct da_monitor *da_mon = da_get_monitor(tsk);
- bool retval;
+ return __da_handle_start_run_event(da_get_monitor(), event, 0);
+}
- retval = da_monitor_handling_event(da_mon);
- if (!retval)
- return;
+#elif RV_MON_TYPE == RV_MON_PER_TASK
+/*
+ * Handle event for per task.
+ */
- __da_handle_event(da_mon, tsk, event);
+/*
+ * da_handle_event - handle an event
+ */
+static inline void da_handle_event(struct task_struct *tsk, enum events event)
+{
+ __da_handle_event(da_get_monitor(tsk), event, tsk->pid);
}
/*
@@ -459,21 +731,60 @@ static inline void da_handle_event(struct task_struct *tsk, enum events event)
static inline bool da_handle_start_event(struct task_struct *tsk,
enum events event)
{
- struct da_monitor *da_mon;
+ return __da_handle_start_event(da_get_monitor(tsk), event, tsk->pid);
+}
- if (!da_monitor_enabled())
- return 0;
+/*
+ * da_handle_start_run_event - start monitoring and handle event
+ *
+ * This function is used to notify the monitor that the system is in the
+ * initial state, so the monitor can start monitoring and handling event.
+ */
+static inline bool da_handle_start_run_event(struct task_struct *tsk,
+ enum events event)
+{
+ return __da_handle_start_run_event(da_get_monitor(tsk), event, tsk->pid);
+}
- da_mon = da_get_monitor(tsk);
+#elif RV_MON_TYPE == RV_MON_PER_OBJ
+/*
+ * Handle event for per object.
+ */
- if (unlikely(!da_monitoring(da_mon))) {
- da_monitor_start(da_mon);
- return 0;
- }
+/*
+ * da_handle_event - handle an event
+ */
+static inline void da_handle_event(da_id_type id, monitor_target target, enum events event)
+{
+ struct da_monitor *da_mon;
- __da_handle_event(da_mon, tsk, event);
+ guard(rcu)();
+ da_mon = da_get_monitor(id, target);
+ if (likely(da_mon))
+ __da_handle_event(da_mon, event, id);
+}
- return 1;
+/*
+ * da_handle_start_event - start monitoring or handle event
+ *
+ * This function is used to notify the monitor that the system is returning
+ * to the initial state, so the monitor can start monitoring in the next event.
+ * Thus:
+ *
+ * If the monitor already started, handle the event.
+ * If the monitor did not start yet, start the monitor but skip the event.
+ */
+static inline bool da_handle_start_event(da_id_type id, monitor_target target,
+ enum events event)
+{
+ struct da_monitor *da_mon;
+
+ guard(rcu)();
+ da_mon = da_get_monitor(id, target);
+ da_mon = da_prepare_storage(id, target, da_mon);
+ if (unlikely(!da_mon))
+ return 0;
+ return __da_handle_start_event(da_mon, event, id);
}
/*
@@ -482,22 +793,27 @@ static inline bool da_handle_start_event(struct task_struct *tsk,
* This function is used to notify the monitor that the system is in the
* initial state, so the monitor can start monitoring and handling event.
*/
-static inline bool da_handle_start_run_event(struct task_struct *tsk,
+static inline bool da_handle_start_run_event(da_id_type id, monitor_target target,
enum events event)
{
struct da_monitor *da_mon;
- if (!da_monitor_enabled())
+ guard(rcu)();
+ da_mon = da_get_monitor(id, target);
+ da_mon = da_prepare_storage(id, target, da_mon);
+ if (unlikely(!da_mon))
return 0;
+ return __da_handle_start_run_event(da_mon, event, id);
+}
- da_mon = da_get_monitor(tsk);
-
- if (unlikely(!da_monitoring(da_mon)))
- da_monitor_start(da_mon);
-
- __da_handle_event(da_mon, tsk, event);
+static inline void da_reset(da_id_type id, monitor_target target)
+{
+ struct da_monitor *da_mon;
- return 1;
+ guard(rcu)();
+ da_mon = da_get_monitor(id, target);
+ if (likely(da_mon))
+ da_monitor_reset(da_mon);
}
#endif /* RV_MON_TYPE */
diff --git a/include/rv/ha_monitor.h b/include/rv/ha_monitor.h
new file mode 100644
index 000000000000..d59507e8cb30
--- /dev/null
+++ b/include/rv/ha_monitor.h
@@ -0,0 +1,478 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025-2028 Red Hat, Inc. Gabriele Monaco <gmonaco@redhat.com>
+ *
+ * Hybrid automata (HA) monitor functions, to be used together
+ * with automata models in C generated by the rvgen tool.
+ *
+ * This type of monitor extends the Deterministic automata (DA) class by
+ * adding a set of environment variables (e.g. clocks) that can be used to
+ * constrain the valid transitions.
+ *
+ * The rvgen tool is available at tools/verification/rvgen/
+ *
+ * For further information, see:
+ * Documentation/trace/rv/monitor_synthesis.rst
+ */
+
+#ifndef _RV_HA_MONITOR_H
+#define _RV_HA_MONITOR_H
+
+#include <rv/automata.h>
+
+#ifndef da_id_type
+#define da_id_type int
+#endif
+
+static inline void ha_monitor_init_env(struct da_monitor *da_mon);
+static inline void ha_monitor_reset_env(struct da_monitor *da_mon);
+static inline void ha_setup_timer(struct ha_monitor *ha_mon);
+static inline bool ha_cancel_timer(struct ha_monitor *ha_mon);
+static bool ha_monitor_handle_constraint(struct da_monitor *da_mon,
+ enum states curr_state,
+ enum events event,
+ enum states next_state,
+ da_id_type id);
+#define da_monitor_event_hook ha_monitor_handle_constraint
+#define da_monitor_init_hook ha_monitor_init_env
+#define da_monitor_reset_hook ha_monitor_reset_env
+
+#include <rv/da_monitor.h>
+#include <linux/seq_buf.h>
+
+/* This simplifies things since da_mon and ha_mon coexist in the same union */
+_Static_assert(offsetof(struct ha_monitor, da_mon) == 0,
+ "da_mon must be the first element in an ha_mon!");
+#define to_ha_monitor(da) container_of(da, struct ha_monitor, da_mon)
+
+#define ENV_MAX CONCATENATE(env_max_, MONITOR_NAME)
+#define ENV_MAX_STORED CONCATENATE(env_max_stored_, MONITOR_NAME)
+#define envs CONCATENATE(envs_, MONITOR_NAME)
+
+/* Environment storage before being reset */
+#define ENV_INVALID_VALUE U64_MAX
+/* Error with no event occurs only on timeouts */
+#define EVENT_NONE EVENT_MAX
+#define EVENT_NONE_LBL "none"
+#define ENV_BUFFER_SIZE 64
+
+#ifdef CONFIG_RV_REACTORS
+
+/*
+ * ha_react - trigger the reaction after a failed environment constraint
+ *
+ * The transition from curr_state with event is otherwise valid, but the
+ * environment constraint is false. This function can be called also with no
+ * event from a timer (state constraints only).
+ */
+static void ha_react(enum states curr_state, enum events event, char *env)
+{
+ rv_react(&rv_this,
+ "rv: monitor %s does not allow event %s on state %s with env %s\n",
+ __stringify(MONITOR_NAME),
+ event == EVENT_NONE ? EVENT_NONE_LBL : model_get_event_name(event),
+ model_get_state_name(curr_state), env);
+}
+
+#else /* CONFIG_RV_REACTORS */
+
+static void ha_react(enum states curr_state, enum events event, char *env) { }
+#endif
+
+/*
+ * model_get_env_name - return the (string) name of the given environment variable
+ */
+static char *model_get_env_name(enum envs env)
+{
+ if ((env < 0) || (env >= ENV_MAX))
+ return "INVALID";
+
+ return RV_AUTOMATON_NAME.env_names[env];
+}
+
+/*
+ * Monitors requiring a timer implementation need to request it explicitly.
+ */
+#ifndef HA_TIMER_TYPE
+#define HA_TIMER_TYPE HA_TIMER_NONE
+#endif
+
+#if HA_TIMER_TYPE == HA_TIMER_WHEEL
+static void ha_monitor_timer_callback(struct timer_list *timer);
+#elif HA_TIMER_TYPE == HA_TIMER_HRTIMER
+static enum hrtimer_restart ha_monitor_timer_callback(struct hrtimer *hrtimer);
+#endif
+
+/*
+ * ktime_get_ns is expensive, since we usually don't require precise accounting
+ * of changes within the same event, cache the current time at the beginning of
+ * the constraint handler and use the cache for subsequent calls.
+ * Monitors without ns clocks automatically skip this.
+ */
+#ifdef HA_CLK_NS
+#define ha_get_ns() ktime_get_ns()
+#else
+#define ha_get_ns() 0
+#endif /* HA_CLK_NS */
+
+/* Should be supplied by the monitor */
+static u64 ha_get_env(struct ha_monitor *ha_mon, enum envs env, u64 time_ns);
+static bool ha_verify_constraint(struct ha_monitor *ha_mon,
+ enum states curr_state,
+ enum events event,
+ enum states next_state,
+ u64 time_ns);
+
+/*
+ * ha_monitor_reset_all_stored - reset all environment variables in the monitor
+ */
+static inline void ha_monitor_reset_all_stored(struct ha_monitor *ha_mon)
+{
+ for (int i = 0; i < ENV_MAX_STORED; i++)
+ WRITE_ONCE(ha_mon->env_store[i], ENV_INVALID_VALUE);
+}
+
+/*
+ * ha_monitor_init_env - setup timer and reset all environment
+ *
+ * Called from a hook in the DA start functions, it supplies the da_mon
+ * corresponding to the current ha_mon.
+ * Not all hybrid automata require the timer, still set it for simplicity.
+ */
+static inline void ha_monitor_init_env(struct da_monitor *da_mon)
+{
+ struct ha_monitor *ha_mon = to_ha_monitor(da_mon);
+
+ ha_monitor_reset_all_stored(ha_mon);
+ ha_setup_timer(ha_mon);
+}
+
+/*
+ * ha_monitor_reset_env - stop timer and reset all environment
+ *
+ * Called from a hook in the DA reset functions, it supplies the da_mon
+ * corresponding to the current ha_mon.
+ * Not all hybrid automata require the timer, still clear it for simplicity.
+ */
+static inline void ha_monitor_reset_env(struct da_monitor *da_mon)
+{
+ struct ha_monitor *ha_mon = to_ha_monitor(da_mon);
+
+ /* Initialisation resets the monitor before initialising the timer */
+ if (likely(da_monitoring(da_mon)))
+ ha_cancel_timer(ha_mon);
+}
+
+/*
+ * ha_monitor_env_invalid - return true if env has not been initialised
+ */
+static inline bool ha_monitor_env_invalid(struct ha_monitor *ha_mon, enum envs env)
+{
+ return READ_ONCE(ha_mon->env_store[env]) == ENV_INVALID_VALUE;
+}
+
+static inline void ha_get_env_string(struct seq_buf *s,
+ struct ha_monitor *ha_mon, u64 time_ns)
+{
+ const char *format_str = "%s=%llu";
+
+ for (int i = 0; i < ENV_MAX; i++) {
+ seq_buf_printf(s, format_str, model_get_env_name(i),
+ ha_get_env(ha_mon, i, time_ns));
+ format_str = ",%s=%llu";
+ }
+}
+
+#if RV_MON_TYPE == RV_MON_GLOBAL || RV_MON_TYPE == RV_MON_PER_CPU
+static inline void ha_trace_error_env(struct ha_monitor *ha_mon,
+ char *curr_state, char *event, char *env,
+ da_id_type id)
+{
+ CONCATENATE(trace_error_env_, MONITOR_NAME)(curr_state, event, env);
+}
+#elif RV_MON_TYPE == RV_MON_PER_TASK || RV_MON_TYPE == RV_MON_PER_OBJ
+
+#define ha_get_target(ha_mon) da_get_target(&ha_mon->da_mon)
+
+static inline void ha_trace_error_env(struct ha_monitor *ha_mon,
+ char *curr_state, char *event, char *env,
+ da_id_type id)
+{
+ CONCATENATE(trace_error_env_, MONITOR_NAME)(id, curr_state, event, env);
+}
+#endif /* RV_MON_TYPE */
+
+/*
+ * ha_get_monitor - return the current monitor
+ */
+#define ha_get_monitor(...) to_ha_monitor(da_get_monitor(__VA_ARGS__))
+
+/*
+ * ha_monitor_handle_constraint - handle the constraint on the current transition
+ *
+ * If the monitor implementation defines a constraint in the transition from
+ * curr_state to event, react and trace appropriately as well as return false.
+ * This function is called from the hook in the DA event handle function and
+ * triggers a failure in the monitor.
+ */
+static bool ha_monitor_handle_constraint(struct da_monitor *da_mon,
+ enum states curr_state,
+ enum events event,
+ enum states next_state,
+ da_id_type id)
+{
+ struct ha_monitor *ha_mon = to_ha_monitor(da_mon);
+ u64 time_ns = ha_get_ns();
+ DECLARE_SEQ_BUF(env_string, ENV_BUFFER_SIZE);
+
+ if (ha_verify_constraint(ha_mon, curr_state, event, next_state, time_ns))
+ return true;
+
+ ha_get_env_string(&env_string, ha_mon, time_ns);
+ ha_react(curr_state, event, env_string.buffer);
+ ha_trace_error_env(ha_mon,
+ model_get_state_name(curr_state),
+ model_get_event_name(event),
+ env_string.buffer, id);
+ return false;
+}
+
+static inline void __ha_monitor_timer_callback(struct ha_monitor *ha_mon)
+{
+ enum states curr_state = READ_ONCE(ha_mon->da_mon.curr_state);
+ DECLARE_SEQ_BUF(env_string, ENV_BUFFER_SIZE);
+ u64 time_ns = ha_get_ns();
+
+ ha_get_env_string(&env_string, ha_mon, time_ns);
+ ha_react(curr_state, EVENT_NONE, env_string.buffer);
+ ha_trace_error_env(ha_mon, model_get_state_name(curr_state),
+ EVENT_NONE_LBL, env_string.buffer,
+ da_get_id(&ha_mon->da_mon));
+
+ da_monitor_reset(&ha_mon->da_mon);
+}
+
+/*
+ * The clock variables have 2 different representations in the env_store:
+ * - The guard representation is the timestamp of the last reset
+ * - The invariant representation is the timestamp when the invariant expires
+ * As the representations are incompatible, care must be taken when switching
+ * between them: the invariant representation can only be used when starting a
+ * timer when the previous representation was guard (e.g. no other invariant
+ * started since the last reset operation).
+ * Likewise, switching from invariant to guard representation without a reset
+ * can be done only by subtracting the exact value used to start the invariant.
+ *
+ * Reading the environment variable (ha_get_clk) also reflects this difference
+ * any reads in states that have an invariant return the (possibly negative)
+ * time since expiration, other reads return the time since last reset.
+ */
+
+/*
+ * Helper functions for env variables describing clocks with ns granularity
+ */
+static inline u64 ha_get_clk_ns(struct ha_monitor *ha_mon, enum envs env, u64 time_ns)
+{
+ return time_ns - READ_ONCE(ha_mon->env_store[env]);
+}
+static inline void ha_reset_clk_ns(struct ha_monitor *ha_mon, enum envs env, u64 time_ns)
+{
+ WRITE_ONCE(ha_mon->env_store[env], time_ns);
+}
+static inline void ha_set_invariant_ns(struct ha_monitor *ha_mon, enum envs env,
+ u64 value, u64 time_ns)
+{
+ WRITE_ONCE(ha_mon->env_store[env], time_ns + value);
+}
+static inline bool ha_check_invariant_ns(struct ha_monitor *ha_mon,
+ enum envs env, u64 time_ns)
+{
+ return READ_ONCE(ha_mon->env_store[env]) >= time_ns;
+}
+/*
+ * ha_invariant_passed_ns - prepare the invariant and return the time since reset
+ */
+static inline u64 ha_invariant_passed_ns(struct ha_monitor *ha_mon, enum envs env,
+ u64 expire, u64 time_ns)
+{
+ u64 passed = 0;
+
+ if (env < 0 || env >= ENV_MAX_STORED)
+ return 0;
+ if (ha_monitor_env_invalid(ha_mon, env))
+ return 0;
+ passed = ha_get_env(ha_mon, env, time_ns);
+ ha_set_invariant_ns(ha_mon, env, expire - passed, time_ns);
+ return passed;
+}
+
+/*
+ * Helper functions for env variables describing clocks with jiffy granularity
+ */
+static inline u64 ha_get_clk_jiffy(struct ha_monitor *ha_mon, enum envs env)
+{
+ return get_jiffies_64() - READ_ONCE(ha_mon->env_store[env]);
+}
+static inline void ha_reset_clk_jiffy(struct ha_monitor *ha_mon, enum envs env)
+{
+ WRITE_ONCE(ha_mon->env_store[env], get_jiffies_64());
+}
+static inline void ha_set_invariant_jiffy(struct ha_monitor *ha_mon,
+ enum envs env, u64 value)
+{
+ WRITE_ONCE(ha_mon->env_store[env], get_jiffies_64() + value);
+}
+static inline bool ha_check_invariant_jiffy(struct ha_monitor *ha_mon,
+ enum envs env, u64 time_ns)
+{
+ return time_after64(READ_ONCE(ha_mon->env_store[env]), get_jiffies_64());
+
+}
+/*
+ * ha_invariant_passed_jiffy - prepare the invariant and return the time since reset
+ */
+static inline u64 ha_invariant_passed_jiffy(struct ha_monitor *ha_mon, enum envs env,
+ u64 expire, u64 time_ns)
+{
+ u64 passed = 0;
+
+ if (env < 0 || env >= ENV_MAX_STORED)
+ return 0;
+ if (ha_monitor_env_invalid(ha_mon, env))
+ return 0;
+ passed = ha_get_env(ha_mon, env, time_ns);
+ ha_set_invariant_jiffy(ha_mon, env, expire - passed);
+ return passed;
+}
+
+/*
+ * Retrieve the last reset time (guard representation) from the invariant
+ * representation (expiration).
+ * It is the caller's responsibility to make sure the storage was actually in the
+ * invariant representation (e.g. the current state has an invariant).
+ * The provided value must be the same used when starting the invariant.
+ *
+ * This function's access to the storage is NOT atomic, given how rarely it
+ * is used. If a monitor allows writes concurrent with this, other things are
+ * likely broken and the model needs rethinking or additional locking.
+ */
+static inline void ha_inv_to_guard(struct ha_monitor *ha_mon, enum envs env,
+ u64 value, u64 time_ns)
+{
+ WRITE_ONCE(ha_mon->env_store[env], READ_ONCE(ha_mon->env_store[env]) - value);
+}
+
+#if HA_TIMER_TYPE == HA_TIMER_WHEEL
+/*
+ * Helper functions to handle the monitor timer.
+ * Not all monitors require a timer, in such case the timer will be set up but
+ * never armed.
+ * Timers start since the last reset of the supplied env or from now if env is
+ * not an environment variable. If env was not initialised no timer starts.
+ * Timers can expire on any CPU unless the monitor is per-cpu,
+ * where we assume every event occurs on the local CPU.
+ */
+static void ha_monitor_timer_callback(struct timer_list *timer)
+{
+ struct ha_monitor *ha_mon = container_of(timer, struct ha_monitor, timer);
+
+ __ha_monitor_timer_callback(ha_mon);
+}
+static inline void ha_setup_timer(struct ha_monitor *ha_mon)
+{
+ int mode = 0;
+
+ if (RV_MON_TYPE == RV_MON_PER_CPU)
+ mode |= TIMER_PINNED;
+ timer_setup(&ha_mon->timer, ha_monitor_timer_callback, mode);
+}
+static inline void ha_start_timer_jiffy(struct ha_monitor *ha_mon, enum envs env,
+ u64 expire, u64 time_ns)
+{
+ u64 passed = ha_invariant_passed_jiffy(ha_mon, env, expire, time_ns);
+
+ mod_timer(&ha_mon->timer, get_jiffies_64() + expire - passed);
+}
+static inline void ha_start_timer_ns(struct ha_monitor *ha_mon, enum envs env,
+ u64 expire, u64 time_ns)
+{
+ u64 passed = ha_invariant_passed_ns(ha_mon, env, expire, time_ns);
+
+ ha_start_timer_jiffy(ha_mon, ENV_MAX_STORED,
+ nsecs_to_jiffies(expire - passed + TICK_NSEC - 1), time_ns);
+}
+/*
+ * ha_cancel_timer - Cancel the timer
+ *
+ * Returns:
+ * * 1 when the timer was active
+ * * 0 when the timer was not active or running a callback
+ */
+static inline bool ha_cancel_timer(struct ha_monitor *ha_mon)
+{
+ return timer_delete(&ha_mon->timer);
+}
+#elif HA_TIMER_TYPE == HA_TIMER_HRTIMER
+/*
+ * Helper functions to handle the monitor timer.
+ * Not all monitors require a timer, in such case the timer will be set up but
+ * never armed.
+ * Timers start since the last reset of the supplied env or from now if env is
+ * not an environment variable. If env was not initialised no timer starts.
+ * Timers can expire on any CPU unless the monitor is per-cpu,
+ * where we assume every event occurs on the local CPU.
+ */
+static enum hrtimer_restart ha_monitor_timer_callback(struct hrtimer *hrtimer)
+{
+ struct ha_monitor *ha_mon = container_of(hrtimer, struct ha_monitor, hrtimer);
+
+ __ha_monitor_timer_callback(ha_mon);
+ return HRTIMER_NORESTART;
+}
+static inline void ha_setup_timer(struct ha_monitor *ha_mon)
+{
+ hrtimer_setup(&ha_mon->hrtimer, ha_monitor_timer_callback,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
+}
+static inline void ha_start_timer_ns(struct ha_monitor *ha_mon, enum envs env,
+ u64 expire, u64 time_ns)
+{
+ int mode = HRTIMER_MODE_REL_HARD;
+ u64 passed = ha_invariant_passed_ns(ha_mon, env, expire, time_ns);
+
+ if (RV_MON_TYPE == RV_MON_PER_CPU)
+ mode |= HRTIMER_MODE_PINNED;
+ hrtimer_start(&ha_mon->hrtimer, ns_to_ktime(expire - passed), mode);
+}
+static inline void ha_start_timer_jiffy(struct ha_monitor *ha_mon, enum envs env,
+ u64 expire, u64 time_ns)
+{
+ u64 passed = ha_invariant_passed_jiffy(ha_mon, env, expire, time_ns);
+
+ ha_start_timer_ns(ha_mon, ENV_MAX_STORED,
+ jiffies_to_nsecs(expire - passed), time_ns);
+}
+/*
+ * ha_cancel_timer - Cancel the timer
+ *
+ * Returns:
+ * * 1 when the timer was active
+ * * 0 when the timer was not active or running a callback
+ */
+static inline bool ha_cancel_timer(struct ha_monitor *ha_mon)
+{
+ return hrtimer_try_to_cancel(&ha_mon->hrtimer) == 1;
+}
+#else /* HA_TIMER_NONE */
+/*
+ * Start function is intentionally not defined, monitors using timers must
+ * set HA_TIMER_TYPE to either HA_TIMER_WHEEL or HA_TIMER_HRTIMER.
+ */
+static inline void ha_setup_timer(struct ha_monitor *ha_mon) { }
+static inline bool ha_cancel_timer(struct ha_monitor *ha_mon)
+{
+ return false;
+}
+#endif
+
+#endif
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 7b2645b50e78..535860581f15 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -896,6 +896,32 @@ DECLARE_TRACE(sched_set_need_resched,
TP_PROTO(struct task_struct *tsk, int cpu, int tif),
TP_ARGS(tsk, cpu, tif));
+#define DL_OTHER 0
+#define DL_TASK 1
+#define DL_SERVER_FAIR 2
+#define DL_SERVER_EXT 3
+
+DECLARE_TRACE(sched_dl_throttle,
+ TP_PROTO(struct sched_dl_entity *dl_se, int cpu, u8 type),
+ TP_ARGS(dl_se, cpu, type));
+
+DECLARE_TRACE(sched_dl_replenish,
+ TP_PROTO(struct sched_dl_entity *dl_se, int cpu, u8 type),
+ TP_ARGS(dl_se, cpu, type));
+
+/* Call to update_curr_dl_se not involving throttle or replenish */
+DECLARE_TRACE(sched_dl_update,
+ TP_PROTO(struct sched_dl_entity *dl_se, int cpu, u8 type),
+ TP_ARGS(dl_se, cpu, type));
+
+DECLARE_TRACE(sched_dl_server_start,
+ TP_PROTO(struct sched_dl_entity *dl_se, int cpu, u8 type),
+ TP_ARGS(dl_se, cpu, type));
+
+DECLARE_TRACE(sched_dl_server_stop,
+ TP_PROTO(struct sched_dl_entity *dl_se, int cpu, u8 type),
+ TP_ARGS(dl_se, cpu, type));
+
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */