author     Linus Torvalds <torvalds@linux-foundation.org>   2010-08-06 09:30:52 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-08-06 09:30:52 -0700
commit     4aed2fd8e3181fea7c09ba79cf64e7e3f4413bf9 (patch)
tree       1f69733e5daab4915a76a41de0e4d1dc61e12cfb /include
parent     3a3527b6461b1298cc53ce72f336346739297ac8 (diff)
parent     fc9ea5a1e53ee54f681e226d735008e2a6f8f470 (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (162 commits)
tracing/kprobes: unregister_trace_probe needs to be called under mutex
perf: expose event__process function
perf events: Fix mmap offset determination
perf, powerpc: fsl_emb: Restore setting perf_sample_data.period
perf, powerpc: Convert the FSL driver to use local64_t
perf tools: Don't keep unreferenced maps when unmaps are detected
perf session: Invalidate last_match when removing threads from rb_tree
perf session: Free the ref_reloc_sym memory at the right place
x86,mmiotrace: Add support for tracing STOS instruction
perf, sched migration: Librarize task states and event headers helpers
perf, sched migration: Librarize the GUI class
perf, sched migration: Make the GUI class client agnostic
perf, sched migration: Make it vertically scrollable
perf, sched migration: Parameterize cpu height and spacing
perf, sched migration: Fix key bindings
perf, sched migration: Ignore unhandled task states
perf, sched migration: Handle ignored migrate out events
perf: New migration tool overview
tracing: Drop cpparg() macro
perf: Use tracepoint_synchronize_unregister() to flush any pending tracepoint call
...
Fix up trivial conflicts in Makefile and drivers/cpufreq/cpufreq.c
Diffstat (limited to 'include')

-rw-r--r--  include/asm-generic/local64.h        96
-rw-r--r--  include/asm-generic/vmlinux.lds.h     4
-rw-r--r--  include/linux/ftrace.h                5
-rw-r--r--  include/linux/ftrace_event.h         18
-rw-r--r--  include/linux/kernel.h                5
-rw-r--r--  include/linux/kmemtrace.h            25
-rw-r--r--  include/linux/nmi.h                  13
-rw-r--r--  include/linux/perf_event.h           95
-rw-r--r--  include/linux/sched.h                24
-rw-r--r--  include/linux/slab_def.h              3
-rw-r--r--  include/linux/slub_def.h              3
-rw-r--r--  include/linux/syscalls.h              2
-rw-r--r--  include/trace/boot.h                 60
-rw-r--r--  include/trace/events/sched.h         32
-rw-r--r--  include/trace/events/timer.h         80
-rw-r--r--  include/trace/ftrace.h               23
-rw-r--r--  include/trace/syscall.h               1

17 files changed, 228 insertions, 261 deletions
diff --git a/include/asm-generic/local64.h b/include/asm-generic/local64.h
new file mode 100644
index 000000000000..02ac760c1a8b
--- /dev/null
+++ b/include/asm-generic/local64.h
@@ -0,0 +1,96 @@
+#ifndef _ASM_GENERIC_LOCAL64_H
+#define _ASM_GENERIC_LOCAL64_H
+
+#include <linux/percpu.h>
+#include <asm/types.h>
+
+/*
+ * A signed long type for operations which are atomic for a single CPU.
+ * Usually used in combination with per-cpu variables.
+ *
+ * This is the default implementation, which uses atomic64_t. Which is
+ * rather pointless. The whole point behind local64_t is that some processors
+ * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
+ * running on this CPU. local64_t allows exploitation of such capabilities.
+ */
+
+/* Implement in terms of atomics. */
+
+#if BITS_PER_LONG == 64
+
+#include <asm/local.h>
+
+typedef struct {
+	local_t a;
+} local64_t;
+
+#define LOCAL64_INIT(i)	{ LOCAL_INIT(i) }
+
+#define local64_read(l)		local_read(&(l)->a)
+#define local64_set(l,i)	local_set((&(l)->a),(i))
+#define local64_inc(l)		local_inc(&(l)->a)
+#define local64_dec(l)		local_dec(&(l)->a)
+#define local64_add(i,l)	local_add((i),(&(l)->a))
+#define local64_sub(i,l)	local_sub((i),(&(l)->a))
+
+#define local64_sub_and_test(i, l) local_sub_and_test((i), (&(l)->a))
+#define local64_dec_and_test(l) local_dec_and_test(&(l)->a)
+#define local64_inc_and_test(l) local_inc_and_test(&(l)->a)
+#define local64_add_negative(i, l) local_add_negative((i), (&(l)->a))
+#define local64_add_return(i, l) local_add_return((i), (&(l)->a))
+#define local64_sub_return(i, l) local_sub_return((i), (&(l)->a))
+#define local64_inc_return(l)	local_inc_return(&(l)->a)
+
+#define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n))
+#define local64_xchg(l, n)	local_xchg((&(l)->a), (n))
+#define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u))
+#define local64_inc_not_zero(l)	local_inc_not_zero(&(l)->a)
+
+/* Non-atomic variants, ie. preemption disabled and won't be touched
+ * in interrupt, etc.  Some archs can optimize this case well. */
+#define __local64_inc(l)	local64_set((l), local64_read(l) + 1)
+#define __local64_dec(l)	local64_set((l), local64_read(l) - 1)
+#define __local64_add(i,l)	local64_set((l), local64_read(l) + (i))
+#define __local64_sub(i,l)	local64_set((l), local64_read(l) - (i))
+
+#else /* BITS_PER_LONG != 64 */
+
+#include <asm/atomic.h>
+
+/* Don't use typedef: don't want them to be mixed with atomic_t's. */
+typedef struct {
+	atomic64_t a;
+} local64_t;
+
+#define LOCAL64_INIT(i)	{ ATOMIC_LONG_INIT(i) }
+
+#define local64_read(l)		atomic64_read(&(l)->a)
+#define local64_set(l,i)	atomic64_set((&(l)->a),(i))
+#define local64_inc(l)		atomic64_inc(&(l)->a)
+#define local64_dec(l)		atomic64_dec(&(l)->a)
+#define local64_add(i,l)	atomic64_add((i),(&(l)->a))
+#define local64_sub(i,l)	atomic64_sub((i),(&(l)->a))
+
+#define local64_sub_and_test(i, l) atomic64_sub_and_test((i), (&(l)->a))
+#define local64_dec_and_test(l) atomic64_dec_and_test(&(l)->a)
+#define local64_inc_and_test(l) atomic64_inc_and_test(&(l)->a)
+#define local64_add_negative(i, l) atomic64_add_negative((i), (&(l)->a))
+#define local64_add_return(i, l) atomic64_add_return((i), (&(l)->a))
+#define local64_sub_return(i, l) atomic64_sub_return((i), (&(l)->a))
+#define local64_inc_return(l)	atomic64_inc_return(&(l)->a)
+
+#define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n))
+#define local64_xchg(l, n)	atomic64_xchg((&(l)->a), (n))
+#define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u))
+#define local64_inc_not_zero(l)	atomic64_inc_not_zero(&(l)->a)
+
+/* Non-atomic variants, ie. preemption disabled and won't be touched
+ * in interrupt, etc.  Some archs can optimize this case well. */
+#define __local64_inc(l)	local64_set((l), local64_read(l) + 1)
+#define __local64_dec(l)	local64_set((l), local64_read(l) - 1)
+#define __local64_add(i,l)	local64_set((l), local64_read(l) + (i))
+#define __local64_sub(i,l)	local64_set((l), local64_read(l) - (i))
+
+#endif /* BITS_PER_LONG != 64 */
+
+#endif /* _ASM_GENERIC_LOCAL64_H */
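The header comment above states the contract: updates are atomic only with respect to interrupts on the local CPU, so the intended pattern pairs a local64_t with per-cpu data. A minimal sketch of that pattern (the counter and the two functions are hypothetical; only DEFINE_PER_CPU and the local64_* calls come from the kernel API):

	#include <linux/percpu.h>
	#include <asm/local64.h>

	static DEFINE_PER_CPU(local64_t, my_event_count);

	static void my_count_event(void)
	{
		/* Caller is assumed non-preemptible (e.g. IRQ context),
		 * so the "local CPU" guarantee applies to this counter. */
		local64_inc(this_cpu_ptr(&my_event_count));
	}

	static u64 my_read_total(void)
	{
		u64 sum = 0;
		int cpu;

		/* Cross-CPU reads race with updates; fine for statistics. */
		for_each_possible_cpu(cpu)
			sum += local64_read(&per_cpu(my_event_count, cpu));
		return sum;
	}

On 64-bit this compiles down to local_t operations; on 32-bit it falls back to atomic64_t, which is exactly the "rather pointless" default the comment mentions.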
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 4e7ae6002056..8a92a170fb7d 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -156,10 +156,6 @@
 	CPU_KEEP(exit.data)						\
 	MEM_KEEP(init.data)						\
 	MEM_KEEP(exit.data)						\
-	. = ALIGN(8);							\
-	VMLINUX_SYMBOL(__start___markers) = .;				\
-	*(__markers)							\
-	VMLINUX_SYMBOL(__stop___markers) = .;				\
 	. = ALIGN(32);							\
 	VMLINUX_SYMBOL(__start___tracepoints) = .;			\
 	*(__tracepoints)						\
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 41e46330d9be..dcd6a7c3a435 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1,3 +1,8 @@
+/*
+ * Ftrace header. For implementation details beyond the random comments
+ * scattered below, see: Documentation/trace/ftrace-design.txt
+ */
+
 #ifndef _LINUX_FTRACE_H
 #define _LINUX_FTRACE_H
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 3167f2df4126..02b8b24f8f51 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -11,8 +11,6 @@ struct trace_array;
 struct tracer;
 struct dentry;
 
-DECLARE_PER_CPU(struct trace_seq, ftrace_event_seq);
-
 struct trace_print_flags {
 	unsigned long		mask;
 	const char		*name;
@@ -58,6 +56,9 @@ struct trace_iterator {
 	struct ring_buffer_iter	*buffer_iter[NR_CPUS];
 	unsigned long		iter_flags;
 
+	/* trace_seq for __print_flags() and __print_symbolic() etc. */
+	struct trace_seq	tmp_seq;
+
 	/* The below is zeroed out in pipe_read */
 	struct trace_seq	seq;
 	struct trace_entry	*ent;
@@ -146,14 +147,19 @@ struct ftrace_event_class {
 	int			(*raw_init)(struct ftrace_event_call *);
 };
 
+extern int ftrace_event_reg(struct ftrace_event_call *event,
+			    enum trace_reg type);
+
 enum {
 	TRACE_EVENT_FL_ENABLED_BIT,
 	TRACE_EVENT_FL_FILTERED_BIT,
+	TRACE_EVENT_FL_RECORDED_CMD_BIT,
 };
 
 enum {
-	TRACE_EVENT_FL_ENABLED	= (1 << TRACE_EVENT_FL_ENABLED_BIT),
-	TRACE_EVENT_FL_FILTERED	= (1 << TRACE_EVENT_FL_FILTERED_BIT),
+	TRACE_EVENT_FL_ENABLED		= (1 << TRACE_EVENT_FL_ENABLED_BIT),
+	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
+	TRACE_EVENT_FL_RECORDED_CMD	= (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
 };
 
 struct ftrace_event_call {
@@ -171,6 +177,7 @@ struct ftrace_event_call {
 	 * 32 bit flags:
 	 *   bit 1:		enabled
 	 *   bit 2:		filter_active
+	 *   bit 3:		enabled cmd record
 	 *
 	 * Changes to flags must hold the event_mutex.
 	 *
@@ -257,8 +264,7 @@ static inline void
 perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
 		       u64 count, struct pt_regs *regs, void *head)
 {
-	perf_tp_event(addr, count, raw_data, size, regs, head);
-	perf_swevent_put_recursion_context(rctx);
+	perf_tp_event(addr, count, raw_data, size, regs, head, rctx);
 }
 #endif
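Note the perf_trace_buf_submit() change above: the recursion context rctx now travels into perf_tp_event(), which releases it internally, instead of the helper calling perf_swevent_put_recursion_context() itself. A hedged sketch of the prepare/fill/submit pairing as a probe would use it (the entry layout and event type are invented; perf_trace_buf_prepare() is assumed to be the matching helper in this era of the API):

	static void my_probe(u64 addr, u64 count, struct pt_regs *regs,
			     struct hlist_head *head)
	{
		struct my_entry *entry;		/* hypothetical record layout */
		int size = sizeof(*entry);
		int rctx;

		entry = perf_trace_buf_prepare(size, MY_EVENT_TYPE, regs, &rctx);
		if (!entry)
			return;			/* recursing, or no buffer */

		/* ... fill *entry ... */

		/* rctx is handed off; perf_tp_event() puts it when done */
		perf_trace_buf_submit(entry, size, rctx, addr, count, regs, head);
	}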
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 5de838b0fc1a..38e462e00594 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -513,9 +513,6 @@ extern void tracing_start(void);
 extern void tracing_stop(void);
 extern void ftrace_off_permanent(void);
 
-extern void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
-
 static inline void __attribute__ ((format (printf, 1, 2)))
 ____trace_printk_check_format(const char *fmt, ...)
 {
@@ -591,8 +588,6 @@ __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
 
 extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
 #else
-static inline void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
 static inline int
 trace_printk(const char *fmt, ...)
     __attribute__ ((format (printf, 1, 2)));
diff --git a/include/linux/kmemtrace.h b/include/linux/kmemtrace.h
deleted file mode 100644
index b616d3930c3b..000000000000
--- a/include/linux/kmemtrace.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2008 Eduard - Gabriel Munteanu
- *
- * This file is released under GPL version 2.
- */
-
-#ifndef _LINUX_KMEMTRACE_H
-#define _LINUX_KMEMTRACE_H
-
-#ifdef __KERNEL__
-
-#include <trace/events/kmem.h>
-
-#ifdef CONFIG_KMEMTRACE
-extern void kmemtrace_init(void);
-#else
-static inline void kmemtrace_init(void)
-{
-}
-#endif
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_KMEMTRACE_H */
-
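With kmemtrace gone, allocation tracing is carried entirely by the kmem tracepoints; the slab_def.h and slub_def.h hunks further down switch the slab headers from <linux/kmemtrace.h> to <trace/events/kmem.h> accordingly. A simplified sketch of how an allocator fast path fires one of those events (the wrapper is invented for illustration; trace_kmalloc() and _THIS_IP_ are the real tracepoint and helper):

	#include <linux/kernel.h>
	#include <trace/events/kmem.h>

	static __always_inline void *my_kmalloc(size_t size, gfp_t flags)
	{
		void *ret = __kmalloc(size, flags);	/* real allocation */

		/* static tracepoint; compiles to almost nothing when off */
		trace_kmalloc(_THIS_IP_, ret, size, ksize(ret), flags);
		return ret;
	}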
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index b752e807adde..06aab5eee134 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -20,10 +20,14 @@ extern void touch_nmi_watchdog(void);
 extern void acpi_nmi_disable(void);
 extern void acpi_nmi_enable(void);
 #else
+#ifndef CONFIG_HARDLOCKUP_DETECTOR
 static inline void touch_nmi_watchdog(void)
 {
 	touch_softlockup_watchdog();
 }
+#else
+extern void touch_nmi_watchdog(void);
+#endif
 static inline void acpi_nmi_disable(void) { }
 static inline void acpi_nmi_enable(void) { }
 #endif
@@ -47,4 +51,13 @@ static inline bool trigger_all_cpu_backtrace(void)
 }
 #endif
 
+#ifdef CONFIG_LOCKUP_DETECTOR
+int hw_nmi_is_cpu_stuck(struct pt_regs *);
+u64 hw_nmi_get_sample_period(void);
+extern int watchdog_enabled;
+struct ctl_table;
+extern int proc_dowatchdog_enabled(struct ctl_table *, int ,
+				   void __user *, size_t *, loff_t *);
+#endif
+
 #endif
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 5d0266d94985..937495c25073 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -214,8 +214,9 @@ struct perf_event_attr {
 				 *  See also PERF_RECORD_MISC_EXACT_IP
 				 */
 				precise_ip     :  2, /* skid constraint       */
+				mmap_data      :  1, /* non-exec mmap data    */
 
-				__reserved_1   : 47;
+				__reserved_1   : 46;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
@@ -461,6 +462,7 @@ enum perf_callchain_context {
 
 #ifdef CONFIG_PERF_EVENTS
 # include <asm/perf_event.h>
+# include <asm/local64.h>
 #endif
 
 struct perf_guest_info_callbacks {
@@ -531,14 +533,16 @@ struct hw_perf_event {
 			struct hrtimer	hrtimer;
 		};
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
-		/* breakpoint */
-		struct arch_hw_breakpoint	info;
+		struct { /* breakpoint */
+			struct arch_hw_breakpoint	info;
+			struct list_head		bp_list;
+		};
 #endif
 	};
-	atomic64_t			prev_count;
+	local64_t			prev_count;
 	u64				sample_period;
 	u64				last_period;
-	atomic64_t			period_left;
+	local64_t			period_left;
 	u64				interrupts;
 
 	u64				freq_time_stamp;
@@ -548,7 +552,10 @@ struct hw_perf_event {
 
 struct perf_event;
 
-#define PERF_EVENT_TXN_STARTED 1
+/*
+ * Common implementation detail of pmu::{start,commit,cancel}_txn
+ */
+#define PERF_EVENT_TXN 0x1
 
 /**
  * struct pmu - generic performance monitoring unit
@@ -562,14 +569,28 @@ struct pmu {
 	void (*unthrottle)		(struct perf_event *event);
 
 	/*
-	 * group events scheduling is treated as a transaction,
-	 * add group events as a whole and perform one schedulability test.
-	 * If test fails, roll back the whole group
+	 * Group events scheduling is treated as a transaction, add group
+	 * events as a whole and perform one schedulability test. If the test
+	 * fails, roll back the whole group
 	 */
+
+	/*
+	 * Start the transaction, after this ->enable() doesn't need
+	 * to do schedulability tests.
+	 */
 	void (*start_txn)	(const struct pmu *pmu);
-	void (*cancel_txn)	(const struct pmu *pmu);
+	/*
+	 * If ->start_txn() disabled the ->enable() schedulability test
+	 * then ->commit_txn() is required to perform one. On success
+	 * the transaction is closed. On error the transaction is kept
+	 * open until ->cancel_txn() is called.
+	 */
 	int  (*commit_txn)	(const struct pmu *pmu);
+	/*
+	 * Will cancel the transaction, assumes ->disable() is called for
+	 * each successfull ->enable() during the transaction.
+	 */
+	void (*cancel_txn)	(const struct pmu *pmu);
 };
 
 /**
@@ -584,7 +605,9 @@ enum perf_event_active_state {
 
 struct file;
 
-struct perf_mmap_data {
+#define PERF_BUFFER_WRITABLE		0x01
+
+struct perf_buffer {
 	atomic_t			refcount;
 	struct rcu_head			rcu_head;
 #ifdef CONFIG_PERF_USE_VMALLOC
@@ -650,7 +673,8 @@ struct perf_event {
 	enum perf_event_active_state	state;
 	unsigned int			attach_state;
-	atomic64_t			count;
+	local64_t			count;
+	atomic64_t			child_count;
 
 	/*
 	 * These are the total time in nanoseconds that the event
@@ -709,7 +733,7 @@ struct perf_event {
 	atomic_t			mmap_count;
 	int				mmap_locked;
 	struct user_struct		*mmap_user;
-	struct perf_mmap_data		*data;
+	struct perf_buffer		*buffer;
 
 	/* poll related */
 	wait_queue_head_t		waitq;
@@ -807,7 +831,7 @@ struct perf_cpu_context {
 
 struct perf_output_handle {
 	struct perf_event		*event;
-	struct perf_mmap_data		*data;
+	struct perf_buffer		*buffer;
 	unsigned long			wakeup;
 	unsigned long			size;
 	void				*addr;
@@ -910,8 +934,10 @@ extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
 
-extern void
-perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
+#ifndef perf_arch_fetch_caller_regs
+static inline void
+perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
+#endif
 
 /*
  * Take a snapshot of the regs. Skip ip and frame pointer to
@@ -921,31 +947,11 @@ perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
  * - bp for callchains
  * - eflags, for future purposes, just in case
  */
-static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip)
+static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 {
-	unsigned long ip;
-
 	memset(regs, 0, sizeof(*regs));
 
-	switch (skip) {
-	case 1 :
-		ip = CALLER_ADDR0;
-		break;
-	case 2 :
-		ip = CALLER_ADDR1;
-		break;
-	case 3 :
-		ip = CALLER_ADDR2;
-		break;
-	case 4:
-		ip = CALLER_ADDR3;
-		break;
-	/* No need to support further for now */
-	default:
-		ip = 0;
-	}
-
-	return perf_arch_fetch_caller_regs(regs, ip, skip);
+	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
 }
 
 static inline void
@@ -955,21 +961,14 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 		struct pt_regs hot_regs;
 
 		if (!regs) {
-			perf_fetch_caller_regs(&hot_regs, 1);
+			perf_fetch_caller_regs(&hot_regs);
 			regs = &hot_regs;
 		}
 		__perf_sw_event(event_id, nr, nmi, regs, addr);
 	}
 }
 
-extern void __perf_event_mmap(struct vm_area_struct *vma);
-
-static inline void perf_event_mmap(struct vm_area_struct *vma)
-{
-	if (vma->vm_flags & VM_EXEC)
-		__perf_event_mmap(vma);
-}
-
+extern void perf_event_mmap(struct vm_area_struct *vma);
 extern struct perf_guest_info_callbacks *perf_guest_cbs;
 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
 extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
@@ -1001,7 +1000,7 @@ static inline bool perf_paranoid_kernel(void)
 extern void perf_event_init(void);
 extern void perf_tp_event(u64 addr, u64 count, void *record,
 			  int entry_size, struct pt_regs *regs,
-			  struct hlist_head *head);
+			  struct hlist_head *head, int rctx);
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
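The new comments spell out the transaction contract for group scheduling. A hedged sketch of how a caller is expected to drive it (the function and variable names are illustrative, not the actual kernel call sites; only the pmu callbacks come from the struct above):

	/* Sketch: schedule a group of events as one transaction. */
	static int sched_in_group(const struct pmu *pmu,
				  struct perf_event *events[], int n)
	{
		int i, err;

		pmu->start_txn(pmu);	/* ->enable() may skip per-event tests */

		for (i = 0; i < n; i++) {
			err = pmu->enable(events[i]);
			if (err)
				goto rollback;
		}

		if (pmu->commit_txn(pmu))	/* one test for the whole group */
			goto rollback;
		return 0;			/* success closes the transaction */

	rollback:
		while (--i >= 0)
			pmu->disable(events[i]);	/* undo each ->enable() */
		pmu->cancel_txn(pmu);
		return -EAGAIN;
	}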
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0478888c6899..3992f50de614 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -316,20 +316,16 @@ extern void scheduler_tick(void);
 
 extern void sched_show_task(struct task_struct *p);
 
-#ifdef CONFIG_DETECT_SOFTLOCKUP
-extern void softlockup_tick(void);
+#ifdef CONFIG_LOCKUP_DETECTOR
 extern void touch_softlockup_watchdog(void);
 extern void touch_softlockup_watchdog_sync(void);
 extern void touch_all_softlockup_watchdogs(void);
-extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
-				    void __user *buffer,
-				    size_t *lenp, loff_t *ppos);
+extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
+				  void __user *buffer,
+				  size_t *lenp, loff_t *ppos);
 extern unsigned int  softlockup_panic;
 extern int softlockup_thresh;
 #else
-static inline void softlockup_tick(void)
-{
-}
 static inline void touch_softlockup_watchdog(void)
 {
 }
@@ -2435,18 +2431,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_TRACING
-extern void
-__trace_special(void *__tr, void *__data,
-		unsigned long arg1, unsigned long arg2, unsigned long arg3);
-#else
-static inline void
-__trace_special(void *__tr, void *__data,
-		unsigned long arg1, unsigned long arg2, unsigned long arg3)
-{
-}
-#endif
-
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
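proc_dowatchdog_thresh() replaces proc_dosoftlockup_thresh() as the sysctl handler for the unified lockup detector. A hedged sketch of the kind of ctl_table entry that would wire it up (the table name and placement are illustrative; the real entry lives in kernel/sysctl.c):

	#include <linux/sysctl.h>

	static struct ctl_table watchdog_table[] = {
		{
			.procname	= "watchdog_thresh",
			.data		= &softlockup_thresh,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dowatchdog_thresh,
		},
		{}	/* sentinel */
	};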
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 1812dac8c496..1acfa73ce2ac 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -14,7 +14,8 @@
 #include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
 #include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
 #include <linux/compiler.h>
-#include <linux/kmemtrace.h>
+
+#include <trace/events/kmem.h>
 
 #ifndef ARCH_KMALLOC_MINALIGN
 /*
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 4ba59cfc1f75..6447a723ecb1 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,9 +10,10 @@
 #include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
-#include <linux/kmemtrace.h>
 #include <linux/kmemleak.h>
 
+#include <trace/events/kmem.h>
+
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
 	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 13ebb5413a79..a6bfd1367d2a 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -167,7 +167,6 @@ extern struct trace_event_functions exit_syscall_print_funcs;
 		.enter_event	= &event_enter_##sname,		\
 		.exit_event	= &event_exit_##sname,		\
 		.enter_fields	= LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \
-		.exit_fields	= LIST_HEAD_INIT(__syscall_meta_##sname.exit_fields), \
 	};
 
 #define SYSCALL_DEFINE0(sname)					\
@@ -182,7 +181,6 @@ extern struct trace_event_functions exit_syscall_print_funcs;
 		.enter_event	= &event_enter__##sname,	\
 		.exit_event	= &event_exit__##sname,		\
 		.enter_fields	= LIST_HEAD_INIT(__syscall_meta__##sname.enter_fields), \
-		.exit_fields	= LIST_HEAD_INIT(__syscall_meta__##sname.exit_fields), \
 	};							\
 	asmlinkage long sys_##sname(void)
 #else
diff --git a/include/trace/boot.h b/include/trace/boot.h
deleted file mode 100644
index 088ea089e31d..000000000000
--- a/include/trace/boot.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef _LINUX_TRACE_BOOT_H
-#define _LINUX_TRACE_BOOT_H
-
-#include <linux/module.h>
-#include <linux/kallsyms.h>
-#include <linux/init.h>
-
-/*
- * Structure which defines the trace of an initcall
- * while it is called.
- * You don't have to fill the func field since it is
- * only used internally by the tracer.
- */
-struct boot_trace_call {
-	pid_t			caller;
-	char			func[KSYM_SYMBOL_LEN];
-};
-
-/*
- * Structure which defines the trace of an initcall
- * while it returns.
- */
-struct boot_trace_ret {
-	char			func[KSYM_SYMBOL_LEN];
-	int			result;
-	unsigned long long	duration;		/* nsecs */
-};
-
-#ifdef CONFIG_BOOT_TRACER
-/* Append the traces on the ring-buffer */
-extern void trace_boot_call(struct boot_trace_call *bt, initcall_t fn);
-extern void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn);
-
-/* Tells the tracer that smp_pre_initcall is finished.
- * So we can start the tracing
- */
-extern void start_boot_trace(void);
-
-/* Resume the tracing of other necessary events
- * such as sched switches
- */
-extern void enable_boot_trace(void);
-
-/* Suspend this tracing. Actually, only sched_switches tracing have
- * to be suspended. Initcalls doesn't need it.)
- */
-extern void disable_boot_trace(void);
-#else
-static inline
-void trace_boot_call(struct boot_trace_call *bt, initcall_t fn) { }
-
-static inline
-void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn) { }
-
-static inline void start_boot_trace(void) { }
-static inline void enable_boot_trace(void) { }
-static inline void disable_boot_trace(void) { }
-#endif /* CONFIG_BOOT_TRACER */
-
-#endif /* __LINUX_TRACE_BOOT_H */
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index b9e1dd6c6208..9208c92aeab5 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -50,31 +50,6 @@ TRACE_EVENT(sched_kthread_stop_ret,
 );
 
 /*
- * Tracepoint for waiting on task to unschedule:
- */
-TRACE_EVENT(sched_wait_task,
-
-	TP_PROTO(struct task_struct *p),
-
-	TP_ARGS(p),
-
-	TP_STRUCT__entry(
-		__array(	char,	comm,	TASK_COMM_LEN	)
-		__field(	pid_t,	pid			)
-		__field(	int,	prio			)
-	),
-
-	TP_fast_assign(
-		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
-		__entry->pid	= p->pid;
-		__entry->prio	= p->prio;
-	),
-
-	TP_printk("comm=%s pid=%d prio=%d",
-		  __entry->comm, __entry->pid, __entry->prio)
-);
-
-/*
  * Tracepoint for waking up a task:
  */
 DECLARE_EVENT_CLASS(sched_wakeup_template,
@@ -240,6 +215,13 @@ DEFINE_EVENT(sched_process_template, sched_process_exit,
 	     TP_ARGS(p));
 
 /*
+ * Tracepoint for waiting on task to unschedule:
+ */
+DEFINE_EVENT(sched_process_template, sched_wait_task,
+	     TP_PROTO(struct task_struct *p),
+	     TP_ARGS(p));
+
+/*
  * Tracepoint for a waiting task:
  */
 TRACE_EVENT(sched_process_wait,
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 9496b965d62a..c624126a9c8a 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -8,11 +8,7 @@
 #include <linux/hrtimer.h>
 #include <linux/timer.h>
 
-/**
- * timer_init - called when the timer is initialized
- * @timer:	pointer to struct timer_list
- */
-TRACE_EVENT(timer_init,
+DECLARE_EVENT_CLASS(timer_class,
 
 	TP_PROTO(struct timer_list *timer),
 
@@ -30,6 +26,17 @@ TRACE_EVENT(timer_init,
 );
 
 /**
+ * timer_init - called when the timer is initialized
+ * @timer:	pointer to struct timer_list
+ */
+DEFINE_EVENT(timer_class, timer_init,
+
+	TP_PROTO(struct timer_list *timer),
+
+	TP_ARGS(timer)
+);
+
+/**
  * timer_start - called when the timer is started
  * @timer:	pointer to struct timer_list
  * @expires:	the timers expiry time
@@ -94,42 +101,22 @@ TRACE_EVENT(timer_expire_entry,
  * NOTE: Do NOT derefernce timer in TP_fast_assign. The pointer might
  *       be invalid. We solely track the pointer.
  */
-TRACE_EVENT(timer_expire_exit,
+DEFINE_EVENT(timer_class, timer_expire_exit,
 
 	TP_PROTO(struct timer_list *timer),
 
-	TP_ARGS(timer),
-
-	TP_STRUCT__entry(
-		__field(void *, timer	)
-	),
-
-	TP_fast_assign(
-		__entry->timer	= timer;
-	),
-
-	TP_printk("timer=%p", __entry->timer)
+	TP_ARGS(timer)
 );
 
 /**
  * timer_cancel - called when the timer is canceled
  * @timer:	pointer to struct timer_list
  */
-TRACE_EVENT(timer_cancel,
+DEFINE_EVENT(timer_class, timer_cancel,
 
 	TP_PROTO(struct timer_list *timer),
 
-	TP_ARGS(timer),
-
-	TP_STRUCT__entry(
-		__field( void *,	timer	)
-	),
-
-	TP_fast_assign(
-		__entry->timer	= timer;
-	),
-
-	TP_printk("timer=%p", __entry->timer)
+	TP_ARGS(timer)
 );
 
 /**
@@ -224,14 +211,7 @@ TRACE_EVENT(hrtimer_expire_entry,
 		  (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now }))
 );
 
-/**
- * hrtimer_expire_exit - called immediately after the hrtimer callback returns
- * @timer:	pointer to struct hrtimer
- *
- * When used in combination with the hrtimer_expire_entry tracepoint we can
- * determine the runtime of the callback function.
- */
-TRACE_EVENT(hrtimer_expire_exit,
+DECLARE_EVENT_CLASS(hrtimer_class,
 
 	TP_PROTO(struct hrtimer *hrtimer),
 
@@ -249,24 +229,28 @@ TRACE_EVENT(hrtimer_expire_exit,
 );
 
 /**
- * hrtimer_cancel - called when the hrtimer is canceled
- * @hrtimer:	pointer to struct hrtimer
+ * hrtimer_expire_exit - called immediately after the hrtimer callback returns
+ * @timer:	pointer to struct hrtimer
+ *
+ * When used in combination with the hrtimer_expire_entry tracepoint we can
+ * determine the runtime of the callback function.
 */
-TRACE_EVENT(hrtimer_cancel,
+DEFINE_EVENT(hrtimer_class, hrtimer_expire_exit,
 
 	TP_PROTO(struct hrtimer *hrtimer),
 
-	TP_ARGS(hrtimer),
+	TP_ARGS(hrtimer)
+);
 
-	TP_STRUCT__entry(
-		__field( void *,	hrtimer	)
-	),
+/**
+ * hrtimer_cancel - called when the hrtimer is canceled
+ * @hrtimer:	pointer to struct hrtimer
+ */
+DEFINE_EVENT(hrtimer_class, hrtimer_cancel,
 
-	TP_fast_assign(
-		__entry->hrtimer	= hrtimer;
-	),
+	TP_PROTO(struct hrtimer *hrtimer),
 
-	TP_printk("hrtimer=%p", __entry->hrtimer)
+	TP_ARGS(hrtimer)
 );
 
 /**
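The timer.h hunks above are the standard DECLARE_EVENT_CLASS/DEFINE_EVENT consolidation: several TRACE_EVENTs with identical bodies collapse into one class, and each event becomes a three-line instantiation. A sketch of the same pattern outside this file (the subsystem, struct and event names are invented for illustration; the macros are the real ones):

	/* One class describes the record layout and print format once ... */
	DECLARE_EVENT_CLASS(mydev_req_class,

		TP_PROTO(struct mydev_request *req),

		TP_ARGS(req),

		TP_STRUCT__entry(
			__field(	void *,	req	)
		),

		TP_fast_assign(
			__entry->req	= req;
		),

		TP_printk("req=%p", __entry->req)
	);

	/* ... and each concrete tracepoint reuses it in three lines. */
	DEFINE_EVENT(mydev_req_class, mydev_req_submit,
		     TP_PROTO(struct mydev_request *req),
		     TP_ARGS(req));

	DEFINE_EVENT(mydev_req_class, mydev_req_complete,
		     TP_PROTO(struct mydev_request *req),
		     TP_ARGS(req));

Besides removing duplication, every DEFINE_EVENT shares the class's generated output code, which is what shrinks this header by roughly half.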
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 5a64905d7278..a9377c0083ad 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -75,15 +75,12 @@
 #define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
 	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
 
-#undef __cpparg
-#define __cpparg(arg...) arg
-
 /* Callbacks are meaningless to ftrace. */
 #undef TRACE_EVENT_FN
 #define TRACE_EVENT_FN(name, proto, args, tstruct,		\
 		assign, print, reg, unreg)			\
-	TRACE_EVENT(name, __cpparg(proto), __cpparg(args),	\
-		__cpparg(tstruct), __cpparg(assign), __cpparg(print))	\
+	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),		\
+		PARAMS(tstruct), PARAMS(assign), PARAMS(print))	\
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -145,7 +142,7 @@
  *	struct trace_seq *s = &iter->seq;
  *	struct ftrace_raw_<call> *field; <-- defined in stage 1
  *	struct trace_entry *entry;
- *	struct trace_seq *p;
+ *	struct trace_seq *p = &iter->tmp_seq;
 *	int ret;
 *
 *	entry = iter->ent;
@@ -157,12 +154,10 @@
 *
 *	field = (typeof(field))entry;
 *
- *	p = &get_cpu_var(ftrace_event_seq);
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, "%s: ", <call>);
 *	if (ret)
 *		ret = trace_seq_printf(s, <TP_printk> "\n");
- *	put_cpu();
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
@@ -216,7 +211,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
 	struct trace_seq *s = &iter->seq;				\
 	struct ftrace_raw_##call *field;				\
 	struct trace_entry *entry;					\
-	struct trace_seq *p;						\
+	struct trace_seq *p = &iter->tmp_seq;				\
 	int ret;							\
 									\
 	event = container_of(trace_event, struct ftrace_event_call,	\
@@ -231,12 +226,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
 									\
 	field = (typeof(field))entry;					\
 									\
-	p = &get_cpu_var(ftrace_event_seq);				\
 	trace_seq_init(p);						\
 	ret = trace_seq_printf(s, "%s: ", event->name);			\
 	if (ret)							\
 		ret = trace_seq_printf(s, print);			\
-	put_cpu();							\
 	if (!ret)							\
 		return TRACE_TYPE_PARTIAL_LINE;				\
 									\
@@ -255,7 +248,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
 	struct trace_seq *s = &iter->seq;				\
 	struct ftrace_raw_##template *field;				\
 	struct trace_entry *entry;					\
-	struct trace_seq *p;						\
+	struct trace_seq *p = &iter->tmp_seq;				\
 	int ret;							\
 									\
 	entry = iter->ent;						\
@@ -267,12 +260,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
 									\
 	field = (typeof(field))entry;					\
 									\
-	p = &get_cpu_var(ftrace_event_seq);				\
 	trace_seq_init(p);						\
 	ret = trace_seq_printf(s, "%s: ", #call);			\
 	if (ret)							\
 		ret = trace_seq_printf(s, print);			\
-	put_cpu();							\
 	if (!ret)							\
 		return TRACE_TYPE_PARTIAL_LINE;				\
 									\
@@ -439,6 +430,7 @@ static inline notrace int ftrace_get_offsets_##call(			\
 *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
 *	.raw_init		= trace_event_raw_init,
 *	.probe			= ftrace_raw_event_##call,
+ *	.reg			= ftrace_event_reg,
 * };
 *
 * static struct ftrace_event_call __used
@@ -567,6 +559,7 @@ static struct ftrace_event_class __used event_class_##call = {		\
 	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
 	.raw_init		= trace_event_raw_init,		\
 	.probe			= ftrace_raw_event_##call,	\
+	.reg			= ftrace_event_reg,		\
 	_TRACE_PERF_INIT(call)					\
 };
 
@@ -705,7 +698,7 @@ perf_trace_##call(void *__data, proto)					\
 	int __data_size;						\
 	int rctx;							\
 									\
-	perf_fetch_caller_regs(&__regs, 1);				\
+	perf_fetch_caller_regs(&__regs);				\
 									\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 257e08960d7b..31966a4fb8cc 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -26,7 +26,6 @@ struct syscall_metadata {
 	const char	**types;
 	const char	**args;
 	struct list_head enter_fields;
-	struct list_head exit_fields;
 
 	struct ftrace_event_call *enter_event;
 	struct ftrace_event_call *exit_event;