author		Linus Torvalds <torvalds@linux-foundation.org>	2017-11-17 14:58:01 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-11-17 14:58:01 -0800
commit		2dcd9c71c1ffa9a036e09047f60e08383bb0abb6 (patch)
tree		8caabaf493288a3d5adb46ac49ad5097d0c907c7 /include/linux
parent		b1c2a344cc19b40d2f1e7cbd9c2f4f205ae6d650 (diff)
parent		a96a5037ed0f52e2d86739f4a1ef985bd036e575 (diff)
Merge tag 'trace-v4.15' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
- allow module init functions to be traced
- clean up events that are unused, or not enabled by the config (saves space)
- clean up the trace histogram code
- add support for preempt and interrupt enable/disable events (a usage sketch follows this list)
- various other cleanups
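As a sketch of how the new preempt/irq events are consumed: on a kernel built with CONFIG_PREEMPTIRQ_EVENTS, they should be togglable through tracefs like any other trace event. The tracefs mount point and the events/preemptirq/ path below are assumptions (they depend on the distribution and build), not something verified against this tree:

/* Hedged sketch: enable the preempt_disable trace event from userspace.
 * Assumes tracefs is reachable at /sys/kernel/debug/tracing and that the
 * new events live under events/preemptirq/ -- both are assumptions. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/tracing/events/preemptirq/preempt_disable/enable";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");	/* event not built in, or tracefs elsewhere */
		return EXIT_FAILURE;
	}
	fputs("1", f);			/* "1" enables the event, "0" disables it */
	return fclose(f) ? EXIT_FAILURE : EXIT_SUCCESS;
}

The emitted events then appear in the trace ring buffer (e.g. the 'trace' file) with the callsites that disabled or enabled preemption or interrupts.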
* tag 'trace-v4.15' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (30 commits)
tracing, thermal: Hide cpu cooling trace events when not in use
tracing, thermal: Hide devfreq trace events when not in use
ftrace: Kill FTRACE_OPS_FL_PER_CPU
perf/ftrace: Small cleanup
perf/ftrace: Fix function trace events
perf/ftrace: Revert ("perf/ftrace: Fix double traces of perf on ftrace:function")
tracing, dma-buf: Remove unused trace event dma_fence_annotate_wait_on
tracing, memcg, vmscan: Hide trace events when not in use
tracing/xen: Hide events that are not used when X86_PAE is not defined
tracing: mark trace_test_buffer as __maybe_unused
printk: Remove superfluous memory barriers from printk_safe
ftrace: Clear hashes of stale ips of init memory
tracing: Add support for preempt and irq enable/disable events
tracing: Prepare to add preempt and irq trace events
ftrace/kallsyms: Have /proc/kallsyms show saved mod init functions
ftrace: Add freeing algorithm to free ftrace_mod_maps
ftrace: Save module init functions kallsyms symbols for tracing
ftrace: Allow module init functions to be traced
ftrace: Add a ftrace_free_mem() function for modules to use
tracing: Reimplement log2
...
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/ftrace.h		| 113
-rw-r--r--	include/linux/init.h		|   4
-rw-r--r--	include/linux/perf_event.h	|   2
-rw-r--r--	include/linux/trace_events.h	|   9
4 files changed, 52 insertions(+), 76 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index e54d257983f2..2bab81951ced 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -52,6 +52,30 @@ static inline void early_trace_init(void) { }
 struct module;
 struct ftrace_hash;
 
+#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
+	defined(CONFIG_DYNAMIC_FTRACE)
+const char *
+ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
+			   unsigned long *off, char **modname, char *sym);
+int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
+			   char *type, char *name,
+			   char *module_name, int *exported);
+#else
+static inline const char *
+ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
+			   unsigned long *off, char **modname, char *sym)
+{
+	return NULL;
+}
+static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
+					 char *type, char *name,
+					 char *module_name, int *exported)
+{
+	return -1;
+}
+#endif
+
+
 #ifdef CONFIG_FUNCTION_TRACER
 
 extern int ftrace_enabled;
@@ -79,10 +103,6 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
  * ENABLED - set/unset when ftrace_ops is registered/unregistered
  * DYNAMIC - set when ftrace_ops is registered to denote dynamically
  *           allocated ftrace_ops which need special care
- * PER_CPU - set manualy by ftrace_ops user to denote the ftrace_ops
- *           could be controlled by following calls:
- *             ftrace_function_local_enable
- *             ftrace_function_local_disable
  * SAVE_REGS - The ftrace_ops wants regs saved at each function called
  *             and passed to the callback. If this flag is set, but the
  *             architecture does not support passing regs
@@ -126,21 +146,20 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
 enum {
 	FTRACE_OPS_FL_ENABLED			= 1 << 0,
 	FTRACE_OPS_FL_DYNAMIC			= 1 << 1,
-	FTRACE_OPS_FL_PER_CPU			= 1 << 2,
-	FTRACE_OPS_FL_SAVE_REGS			= 1 << 3,
-	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 4,
-	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 5,
-	FTRACE_OPS_FL_STUB			= 1 << 6,
-	FTRACE_OPS_FL_INITIALIZED		= 1 << 7,
-	FTRACE_OPS_FL_DELETED			= 1 << 8,
-	FTRACE_OPS_FL_ADDING			= 1 << 9,
-	FTRACE_OPS_FL_REMOVING			= 1 << 10,
-	FTRACE_OPS_FL_MODIFYING			= 1 << 11,
-	FTRACE_OPS_FL_ALLOC_TRAMP		= 1 << 12,
-	FTRACE_OPS_FL_IPMODIFY			= 1 << 13,
-	FTRACE_OPS_FL_PID			= 1 << 14,
-	FTRACE_OPS_FL_RCU			= 1 << 15,
-	FTRACE_OPS_FL_TRACE_ARRAY		= 1 << 16,
+	FTRACE_OPS_FL_SAVE_REGS			= 1 << 2,
+	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 3,
+	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 4,
+	FTRACE_OPS_FL_STUB			= 1 << 5,
+	FTRACE_OPS_FL_INITIALIZED		= 1 << 6,
+	FTRACE_OPS_FL_DELETED			= 1 << 7,
+	FTRACE_OPS_FL_ADDING			= 1 << 8,
+	FTRACE_OPS_FL_REMOVING			= 1 << 9,
+	FTRACE_OPS_FL_MODIFYING			= 1 << 10,
+	FTRACE_OPS_FL_ALLOC_TRAMP		= 1 << 11,
+	FTRACE_OPS_FL_IPMODIFY			= 1 << 12,
+	FTRACE_OPS_FL_PID			= 1 << 13,
+	FTRACE_OPS_FL_RCU			= 1 << 14,
+	FTRACE_OPS_FL_TRACE_ARRAY		= 1 << 15,
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -152,8 +171,10 @@ struct ftrace_ops_hash {
 };
 
 void ftrace_free_init_mem(void);
+void ftrace_free_mem(struct module *mod, void *start, void *end);
 #else
 static inline void ftrace_free_init_mem(void) { }
+static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
 #endif
 
 /*
@@ -173,7 +194,6 @@ struct ftrace_ops {
 	unsigned long			flags;
 	void				*private;
 	ftrace_func_t			saved_func;
-	int __percpu			*disabled;
 #ifdef CONFIG_DYNAMIC_FTRACE
 	struct ftrace_ops_hash		local_hash;
 	struct ftrace_ops_hash		*func_hash;
@@ -205,55 +225,6 @@ int register_ftrace_function(struct ftrace_ops *ops);
 int unregister_ftrace_function(struct ftrace_ops *ops);
 
 void clear_ftrace_function(void);
 
-/**
- * ftrace_function_local_enable - enable ftrace_ops on current cpu
- *
- * This function enables tracing on current cpu by decreasing
- * the per cpu control variable.
- * It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
- * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
- */
-static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
-{
-	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
-		return;
-
-	(*this_cpu_ptr(ops->disabled))--;
-}
-
-/**
- * ftrace_function_local_disable - disable ftrace_ops on current cpu
- *
- * This function disables tracing on current cpu by increasing
- * the per cpu control variable.
- * It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
- * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
- */
-static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
-{
-	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
-		return;
-
-	(*this_cpu_ptr(ops->disabled))++;
-}
-
-/**
- * ftrace_function_local_disabled - returns ftrace_ops disabled value
- *                                  on current cpu
- *
- * This function returns value of ftrace_ops::disabled on current cpu.
- * It must be called with preemption disabled and only on ftrace_ops
- * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
- * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
- */
-static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
-{
-	WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU));
-	return *this_cpu_ptr(ops->disabled);
-}
-
 extern void ftrace_stub(unsigned long a0, unsigned long a1,
 			struct ftrace_ops *op, struct pt_regs *regs);
 
@@ -271,6 +242,7 @@ static inline int ftrace_nr_registered_ops(void)
 static inline void clear_ftrace_function(void) { }
 static inline void ftrace_kill(void) { }
 static inline void ftrace_free_init_mem(void) { }
+static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_STACK_TRACER
@@ -743,7 +715,8 @@ static inline unsigned long get_lock_parent_ip(void)
 static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
 #endif
 
-#ifdef CONFIG_PREEMPT_TRACER
+#if defined(CONFIG_PREEMPT_TRACER) || \
+	(defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
 extern void trace_preempt_on(unsigned long a0, unsigned long a1);
 extern void trace_preempt_off(unsigned long a0, unsigned long a1);
 #else
diff --git a/include/linux/init.h b/include/linux/init.h
index f38b993edacb..ea1b31101d9e 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -40,7 +40,7 @@
 
 /* These are for everybody (although not all archs will actually
    discard it in modules) */
-#define __init		__section(.init.text) __cold __inittrace __latent_entropy
+#define __init		__section(.init.text) __cold __latent_entropy
 #define __initdata	__section(.init.data)
 #define __initconst	__section(.init.rodata)
 #define __exitdata	__section(.exit.data)
@@ -69,10 +69,8 @@
 
 #ifdef MODULE
 #define __exitused
-#define __inittrace notrace
 #else
 #define __exitused __used
-#define __inittrace
 #endif
 
 #define __exit __section(.exit.text) __exitused __cold notrace
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 874b71a70058..2c9c87d8a0c1 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1169,7 +1169,7 @@ extern void perf_event_init(void);
 extern void perf_tp_event(u16 event_type, u64 count, void *record,
 			  int entry_size, struct pt_regs *regs,
 			  struct hlist_head *head, int rctx,
-			  struct task_struct *task, struct perf_event *event);
+			  struct task_struct *task);
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 84014ecfa67f..af44e7c2d577 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -174,6 +174,11 @@ enum trace_reg {
 	TRACE_REG_PERF_UNREGISTER,
 	TRACE_REG_PERF_OPEN,
 	TRACE_REG_PERF_CLOSE,
+	/*
+	 * These (ADD/DEL) use a 'boolean' return value, where 1 (true) means a
+	 * custom action was taken and the default action is not to be
+	 * performed.
+	 */
 	TRACE_REG_PERF_ADD,
 	TRACE_REG_PERF_DEL,
 #endif
@@ -542,9 +547,9 @@ void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
 static inline void
 perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
 		      u64 count, struct pt_regs *regs, void *head,
-		      struct task_struct *task, struct perf_event *event)
+		      struct task_struct *task)
 {
-	perf_tp_event(type, count, raw_data, size, regs, head, rctx, task, event);
+	perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
 }
 #endif
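The ftrace_free_mem() hook added in the ftrace.h hunks above is what lets the module loader tell ftrace that a range of init memory is going away, so stale records are dropped rather than patched after the memory is reused. A minimal sketch of the intended call pattern follows; example_free_module_init() is a hypothetical caller (the real call site lives in the module loader), and the init_layout fields are assumed to match struct module of this era:

/* Hypothetical caller: drop ftrace records covering a module's init
 * region before that memory is freed and handed back. */
static void example_free_module_init(struct module *mod)
{
	void *start = mod->init_layout.base;
	void *end = mod->init_layout.base + mod->init_layout.size;

	/* Any mcount/fentry record inside [start, end) must be removed,
	 * or ftrace could later try to patch freed memory. */
	ftrace_free_mem(mod, start, end);
}

Because the !CONFIG_DYNAMIC_FTRACE and !CONFIG_FUNCTION_TRACER fallbacks above are empty static inlines, such a caller compiles away entirely when the tracer is not built in.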
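The new comment in trace_events.h pins down the return convention for TRACE_REG_PERF_ADD/DEL. A hedged sketch of a .reg callback honoring that contract; the function name and the decision to take a custom action are made up for illustration:

/* Hypothetical .reg callback: returning 1 for TRACE_REG_PERF_ADD signals
 * that a custom add action was taken and the default perf behavior should
 * be skipped; returning 0 keeps the default path. */
static int example_event_reg(struct trace_event_call *call,
			     enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_PERF_ADD:
		return 1;	/* custom action performed, skip default */
	case TRACE_REG_PERF_DEL:
	default:
		return 0;	/* no custom action, run the default */
	}
}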