author    Linus Torvalds <torvalds@linux-foundation.org>    2014-10-12 07:27:19 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-10-12 07:27:19 -0400
commit    9837acff77f51e40ab21521e914aa19f85beb312 (patch)
tree      76e9363fd72afea51e6634fb38cc76e6d4be8767 /include
parent    ca321885b0511a85e2d1cd40caafedbeb18f4af6 (diff)
parent    3ddee63a099ebbdc8f84697fe46730b58240c09d (diff)
Merge tag 'trace-3.18' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
 "This set has a few minor updates, but the big change is the redesign of
  the trampoline logic.

  The trampoline logic of 3.17 required a descriptor for every function
  that is registered to be traced and uses a trampoline. Currently, only
  the function graph tracer uses a trampoline, but if you were to trace
  all 32,000 (give or take a few thousand) functions with the function
  graph tracer, it would create 32,000 descriptors to let us know that
  there's a trampoline associated with it. This takes up a bit of memory
  when there's a better way to do it.

  The redesign now reuses the ftrace_ops' (what registers the function
  graph tracer) hash tables. The hash tables tell ftrace what the tracer
  wants to trace or doesn't want to trace. There's two of them: one that
  tells us what to trace, the other tells us what not to trace. If the
  first one is empty, it means all functions should be traced, otherwise
  only the ones that are listed should be. The second hash table tells us
  what not to trace, and if it is empty, all functions may be traced, and
  if there's any listed, then those should not be traced even if they
  exist in the first hash table.

  It took a bit of massaging, but now these hashes can be used to keep
  track of what has a trampoline and what does not, and allows the ftrace
  accounting to work. Now we can trace all functions when using the
  function graph trampoline, and avoid needing to create any special
  descriptors to hold all the functions that are being traced"

* tag 'trace-3.18' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  ftrace: Only disable ftrace_enabled to test buffer in selftest
  ftrace: Add sanity check when unregistering last ftrace_ops
  kernel: trace_syscalls: Replace rcu_assign_pointer() with RCU_INIT_POINTER()
  tracing: generate RCU warnings even when tracepoints are disabled
  ftrace: Replace tramp_hash with old_*_hash to save space
  ftrace: Annotate the ops operation on update
  ftrace: Grab any ops for a rec for enabled_functions output
  ftrace: Remove freeing of old_hash from ftrace_hash_move()
  ftrace: Set callback to ftrace_stub when no ops are registered
  ftrace: Add helper function ftrace_ops_get_func()
  ftrace: Add separate function for non recursive callbacks
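As a concrete illustration of the two-hash filtering described in the pull message, here is a minimal, self-contained C sketch of the decision rule. The func_set type and the should_trace() helper are illustrative stand-ins only, not the kernel's actual ftrace_hash data structures:

    #include <stdbool.h>
    #include <stddef.h>

    /* Illustrative stand-in for a hash of function addresses. */
    struct func_set {
            const unsigned long *ips;       /* function addresses in the set */
            size_t nr;                      /* number of entries; 0 means "empty" */
    };

    static bool func_set_contains(const struct func_set *set, unsigned long ip)
    {
            for (size_t i = 0; i < set->nr; i++)
                    if (set->ips[i] == ip)
                            return true;
            return false;
    }

    /*
     * Decide whether the function at 'ip' should be traced, following the
     * two-hash rules from the changelog: an empty filter set means "trace
     * everything"; the notrace set always wins when it lists the function.
     */
    static bool should_trace(const struct func_set *filter,
                             const struct func_set *notrace, unsigned long ip)
    {
            if (filter->nr != 0 && !func_set_contains(filter, ip))
                    return false;           /* not in a non-empty filter set */
            if (func_set_contains(notrace, ip))
                    return false;           /* explicitly excluded */
            return true;
    }

Reusing these per-ops sets to also track which functions have a trampoline is what lets the redesign drop the 32,000 one-per-function descriptors.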
Diffstat (limited to 'include')
-rw-r--r--  include/linux/ftrace.h     | 10
-rw-r--r--  include/linux/tracepoint.h | 11
2 files changed, 20 insertions(+), 1 deletion(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index f0b0edbf55a9..662697babd48 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -56,6 +56,8 @@ struct ftrace_ops;
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *regs);
+ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
+
/*
* FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
* set in the flags member.
@@ -89,6 +91,9 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
* INITIALIZED - The ftrace_ops has already been initialized (first use time
* register_ftrace_function() is called, it will initialized the ops)
* DELETED - The ops are being deleted, do not let them be registered again.
+ * ADDING - The ops is in the process of being added.
+ * REMOVING - The ops is in the process of being removed.
+ * MODIFYING - The ops is in the process of changing its filter functions.
*/
enum {
FTRACE_OPS_FL_ENABLED = 1 << 0,
@@ -100,6 +105,9 @@ enum {
FTRACE_OPS_FL_STUB = 1 << 6,
FTRACE_OPS_FL_INITIALIZED = 1 << 7,
FTRACE_OPS_FL_DELETED = 1 << 8,
+ FTRACE_OPS_FL_ADDING = 1 << 9,
+ FTRACE_OPS_FL_REMOVING = 1 << 10,
+ FTRACE_OPS_FL_MODIFYING = 1 << 11,
};
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -132,7 +140,7 @@ struct ftrace_ops {
int nr_trampolines;
struct ftrace_ops_hash local_hash;
struct ftrace_ops_hash *func_hash;
- struct ftrace_hash *tramp_hash;
+ struct ftrace_ops_hash old_hash;
unsigned long trampoline;
#endif
};
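The hunk above also declares ftrace_ops_get_func(), the helper added by the "Add helper function ftrace_ops_get_func()" and "Add separate function for non recursive callbacks" commits. As a rough sketch of that idea only (the demo_* names, the flag, and the recursion guard below are assumptions for illustration, not the kernel's implementation), such a helper can hand back either the ops' own callback or a wrapper that guards against recursion:

    /* Illustrative sketch only -- names and types are assumptions, not kernel code. */
    struct demo_ops;
    typedef void (*demo_func_t)(unsigned long ip, unsigned long parent_ip,
                                struct demo_ops *op);

    struct demo_ops {
            demo_func_t func;               /* the tracer's own callback */
            unsigned int flags;
    };

    #define DEMO_FL_RECURSION_SAFE  (1 << 0)  /* callback handles its own recursion */

    static __thread int demo_in_callback;     /* per-thread recursion guard */

    /* Wrapper used when a callback is not marked recursion safe. */
    static void demo_recursion_guard(unsigned long ip, unsigned long parent_ip,
                                     struct demo_ops *op)
    {
            if (demo_in_callback)
                    return;                 /* already inside a callback: bail out */
            demo_in_callback = 1;
            op->func(ip, parent_ip, op);
            demo_in_callback = 0;
    }

    /* Return the function that should actually be called for this ops. */
    static demo_func_t demo_ops_get_func(struct demo_ops *op)
    {
            if (!(op->flags & DEMO_FL_RECURSION_SAFE))
                    return demo_recursion_guard;
            return op->func;
    }

Selecting a different function pointer per ops keeps the recursion check out of callbacks that already handle recursion themselves.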
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index b1293f15f592..e08e21e5f601 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -157,6 +157,12 @@ extern void syscall_unregfunc(void);
* Make sure the alignment of the structure in the __tracepoints section will
* not add unwanted padding between the beginning of the section and the
* structure. Force alignment to the same alignment as the section start.
+ *
+ * When lockdep is enabled, we make sure to always do the RCU portions of
+ * the tracepoint code, regardless of whether tracing is on or we match the
+ * condition. This lets us find RCU issues triggered with tracepoints even
+ * when this tracepoint is off. This code has no purpose other than poking
+ * RCU a bit.
*/
#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
extern struct tracepoint __tracepoint_##name; \
@@ -167,6 +173,11 @@ extern void syscall_unregfunc(void);
TP_PROTO(data_proto), \
TP_ARGS(data_args), \
TP_CONDITION(cond),,); \
+ if (IS_ENABLED(CONFIG_LOCKDEP)) { \
+ rcu_read_lock_sched_notrace(); \
+ rcu_dereference_sched(__tracepoint_##name.funcs);\
+ rcu_read_unlock_sched_notrace(); \
+ } \
} \
__DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \
PARAMS(cond), PARAMS(data_proto), PARAMS(data_args)) \