From 4f41c013f553957765902fb01475972f0af3e8e7 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 18 May 2010 18:08:32 +0200
Subject: perf/ftrace: Optimize perf/tracepoint interaction for single events

When we have but a single event per tracepoint there is no reason
to try and multiplex it, so don't.

Signed-off-by: Peter Zijlstra
Tested-by: Ingo Molnar
Cc: Steven Rostedt
Cc: Frederic Weisbecker
Cc: Mike Galbraith
Cc: Paul Mackerras
Cc: Arnaldo Carvalho de Melo
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 kernel/perf_event.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

(limited to 'kernel/perf_event.c')

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index a4fa381db3c2..17ac47f4bce6 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -4468,8 +4468,9 @@ static int swevent_hlist_get(struct perf_event *event)
 #ifdef CONFIG_EVENT_TRACING

 void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
-                   int entry_size, struct pt_regs *regs)
+                   int entry_size, struct pt_regs *regs, void *event)
 {
+        const int type = PERF_TYPE_TRACEPOINT;
         struct perf_sample_data data;
         struct perf_raw_record raw = {
                 .size = entry_size,
@@ -4479,9 +4480,13 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
         perf_sample_data_init(&data, addr);
         data.raw = &raw;

-        /* Trace events already protected against recursion */
-        do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
-                         &data, regs);
+        if (!event) {
+                do_perf_sw_event(type, event_id, count, 1, &data, regs);
+                return;
+        }
+
+        if (perf_swevent_match(event, type, event_id, &data, regs))
+                perf_swevent_add(event, count, 1, &data, regs);
 }
 EXPORT_SYMBOL_GPL(perf_tp_event);

@@ -4514,7 +4519,7 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
             !capable(CAP_SYS_ADMIN))
                 return ERR_PTR(-EPERM);

-        if (perf_trace_enable(event->attr.config))
+        if (perf_trace_enable(event->attr.config, event))
                 return NULL;

         event->destroy = tp_perf_event_destroy;
--
cgit v1.2.3


From 00d1d0b095ba4e5c0958cb228b2a9c445d4a339d Mon Sep 17 00:00:00 2001
From: Stephane Eranian
Date: Mon, 17 May 2010 12:46:01 +0200
Subject: perf: Fix error path in perf_output_begin()

In case the sampling buffer has no "payload" pages, nr_pages is 0.
The problem is that the error path in perf_output_begin() skips to a
label which assumes perf_output_lock() has been issued, which is not
the case. That triggers a WARN_ON() in perf_output_unlock().

This patch fixes the problem by skipping perf_output_unlock() in case
data->nr_pages is 0.

Signed-off-by: Stephane Eranian
Signed-off-by: Peter Zijlstra
Cc: Mike Galbraith
Cc: Paul Mackerras
Cc: Arnaldo Carvalho de Melo
Cc: Frederic Weisbecker
LKML-Reference: <4bf13674.014fd80a.6c82.ffffb20c@mx.google.com>
Signed-off-by: Ingo Molnar
---
 kernel/perf_event.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel/perf_event.c')

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 17ac47f4bce6..8d61d292f719 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -3036,7 +3036,7 @@ int perf_output_begin(struct perf_output_handle *handle,
         handle->sample  = sample;

         if (!data->nr_pages)
-                goto fail;
+                goto out;

         have_lost = atomic_read(&data->lost);
         if (have_lost)
--
cgit v1.2.3


From a19d35c11fd559dd7dfd5a2078df7c9af74a5d88 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 17 May 2010 18:48:00 +0200
Subject: perf: Optimize buffer placement by allocating buffers NUMA aware

Ensure that CPU-bound buffers live on the right NUMA node.
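
(As a rough userspace analogue of the allocation policy -- an editorial
sketch, not part of the patch; buffer_alloc_page() and its parameters
are invented for illustration -- the same idea can be expressed with
libnuma: allocate from the node that owns the CPU the buffer is bound
to, and fall back to "any node" for cpu == -1, mirroring the
perf_mmap_alloc_page() helper added below.)

    #include <stdlib.h>
    #include <numa.h>   /* libnuma; link with -lnuma */

    static void *buffer_alloc_page(int cpu, size_t page_size)
    {
            /* cpu == -1 means the buffer is not bound to any CPU */
            if (cpu == -1 || numa_available() < 0)
                    return calloc(1, page_size);

            /* zero-filled memory from the node backing this CPU */
            return numa_alloc_onnode(page_size, numa_node_of_cpu(cpu));
    }
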
Suggested-by: Stephane Eranian
Signed-off-by: Peter Zijlstra
Cc: Mike Galbraith
Cc: Paul Mackerras
Cc: Arnaldo Carvalho de Melo
Cc: Frederic Weisbecker
LKML-Reference: <1274114880.5605.5236.camel@twins>
Signed-off-by: Ingo Molnar
---
 kernel/perf_event.c | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

(limited to 'kernel/perf_event.c')

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 8d61d292f719..6ae62186dd0c 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2320,6 +2320,19 @@ perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
         return virt_to_page(data->data_pages[pgoff - 1]);
 }

+static void *perf_mmap_alloc_page(int cpu)
+{
+        struct page *page;
+        int node;
+
+        node = (cpu == -1) ? cpu : cpu_to_node(cpu);
+        page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
+        if (!page)
+                return NULL;
+
+        return page_address(page);
+}
+
 static struct perf_mmap_data *
 perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
 {
@@ -2336,12 +2349,12 @@ perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
         if (!data)
                 goto fail;

-        data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
+        data->user_page = perf_mmap_alloc_page(event->cpu);
         if (!data->user_page)
                 goto fail_user_page;

         for (i = 0; i < nr_pages; i++) {
-                data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
+                data->data_pages[i] = perf_mmap_alloc_page(event->cpu);
                 if (!data->data_pages[i])
                         goto fail_data_pages;
         }
--
cgit v1.2.3


From c7920614cebbf269a7c8397ff959a8dcf727465c Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 18 May 2010 10:33:24 +0200
Subject: perf: Disallow mmap() on per-task inherited events

Since we have had working per-task-per-cpu events for a while now,
disallow mmap() on per-task inherited events. Those things were a
performance problem anyway, and doing away with them allows us to
optimize the buffer somewhat by assuming there is only a single
writer.

Signed-off-by: Peter Zijlstra
Cc: Mike Galbraith
Cc: Paul Mackerras
Cc: Arnaldo Carvalho de Melo
Cc: Frederic Weisbecker
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 kernel/perf_event.c | 8 ++++++++
 1 file changed, 8 insertions(+)

(limited to 'kernel/perf_event.c')

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 6ae62186dd0c..ff5d430d45a7 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2593,6 +2593,14 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
         long user_extra, extra;
         int ret = 0;

+        /*
+         * Don't allow mmap() of inherited per-task counters. This would
+         * create a performance issue due to all children writing to the
+         * same buffer.
+         */
+        if (event->cpu == -1 && event->attr.inherit)
+                return -EINVAL;
+
         if (!(vma->vm_flags & VM_SHARED))
                 return -EINVAL;
--
cgit v1.2.3


From ef60777c9abd999db5eb4e338aae3eb593ae8e10 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 18 May 2010 10:50:41 +0200
Subject: perf: Optimize the perf_output() path by removing IRQ-disables

Since we can now assume there is only a single writer to each buffer,
we can remove the per-cpu lock and use a simple nest count to the same
effect. This removes the need to disable IRQs.
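
(The nest-count scheme can be modelled in portable C11 -- an editorial
sketch of the algorithm only; the kernel code below uses atomic_t and
preempt_disable() rather than <stdatomic.h>, and all names here are
illustrative.)

    #include <stdatomic.h>

    struct ring {
            atomic_long head;      /* next write position       */
            atomic_int  nest;      /* writer nesting depth      */
            long        user_head; /* head published to readers */
    };

    static void output_get(struct ring *r)
    {
            atomic_fetch_add(&r->nest, 1); /* outer-most writer sees 0 -> 1 */
    }

    static void output_put(struct ring *r)
    {
            long head;
    again:
            head = atomic_load(&r->head);

            /* nested writers defer publishing to the outer-most one */
            if (atomic_fetch_sub(&r->nest, 1) != 1)
                    return;

            r->user_head = head;   /* publish the known-good head */

            /* a nested (IRQ/NMI-like) writer may have moved head meanwhile */
            if (head != atomic_load(&r->head)) {
                    atomic_fetch_add(&r->nest, 1);
                    goto again;
            }
    }
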
Signed-off-by: Peter Zijlstra
Cc: Mike Galbraith
Cc: Paul Mackerras
Cc: Arnaldo Carvalho de Melo
Cc: Frederic Weisbecker
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 kernel/perf_event.c | 94 ++++++++++++++++-------------------------------------
 1 file changed, 28 insertions(+), 66 deletions(-)

(limited to 'kernel/perf_event.c')

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index ff5d430d45a7..8cf737da3ec4 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2519,8 +2519,6 @@ perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data)
 {
         long max_size = perf_data_size(data);

-        atomic_set(&data->lock, -1);
-
         if (event->attr.watermark) {
                 data->watermark = min_t(long, max_size,
                                         event->attr.wakeup_watermark);
@@ -2906,82 +2904,56 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
 }

 /*
- * Curious locking construct.
- *
  * We need to ensure a later event_id doesn't publish a head when a former
- * event_id isn't done writing. However since we need to deal with NMIs we
+ * event isn't done writing. However since we need to deal with NMIs we
  * cannot fully serialize things.
  *
- * What we do is serialize between CPUs so we only have to deal with NMI
- * nesting on a single CPU.
- *
  * We only publish the head (and generate a wakeup) when the outer-most
- * event_id completes.
+ * event completes.
  */
-static void perf_output_lock(struct perf_output_handle *handle)
+static void perf_output_get_handle(struct perf_output_handle *handle)
 {
         struct perf_mmap_data *data = handle->data;
-        int cur, cpu = get_cpu();

-        handle->locked = 0;
-
-        for (;;) {
-                cur = atomic_cmpxchg(&data->lock, -1, cpu);
-                if (cur == -1) {
-                        handle->locked = 1;
-                        break;
-                }
-                if (cur == cpu)
-                        break;
-
-                cpu_relax();
-        }
+        preempt_disable();
+        atomic_inc(&data->nest);
 }

-static void perf_output_unlock(struct perf_output_handle *handle)
+static void perf_output_put_handle(struct perf_output_handle *handle)
 {
         struct perf_mmap_data *data = handle->data;
         unsigned long head;
-        int cpu;
-
-        data->done_head = data->head;
-
-        if (!handle->locked)
-                goto out;

 again:
-        /*
-         * The xchg implies a full barrier that ensures all writes are done
-         * before we publish the new head, matched by a rmb() in userspace when
-         * reading this position.
-         */
-        while ((head = atomic_long_xchg(&data->done_head, 0)))
-                data->user_page->data_head = head;
+        head = atomic_long_read(&data->head);

         /*
-         * NMI can happen here, which means we can miss a done_head update.
+         * IRQ/NMI can happen here, which means we can miss a head update.
         */
-        cpu = atomic_xchg(&data->lock, -1);
-        WARN_ON_ONCE(cpu != smp_processor_id());
+        if (!atomic_dec_and_test(&data->nest))
+                return;

         /*
-         * Therefore we have to validate we did not indeed do so.
+         * Publish the known good head. Rely on the full barrier implied
+         * by atomic_dec_and_test() to order the data->head read and this
+         * write.
         */
-        if (unlikely(atomic_long_read(&data->done_head))) {
-                /*
-                 * Since we had it locked, we can lock it again.
-                 */
-                while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
-                        cpu_relax();
+        data->user_page->data_head = head;

+        /*
+         * Now check if we missed an update, rely on the (compiler)
+         * barrier in atomic_dec_and_test() to re-read data->head.
+         */
+        if (unlikely(head != atomic_long_read(&data->head))) {
+                atomic_inc(&data->nest);
                 goto again;
         }

         if (atomic_xchg(&data->wakeup, 0))
                 perf_output_wakeup(handle);
-out:
-        put_cpu();
+
+        preempt_enable();
 }

 void perf_output_copy(struct perf_output_handle *handle,
@@ -3063,7 +3035,7 @@ int perf_output_begin(struct perf_output_handle *handle,
         if (have_lost)
                 size += sizeof(lost_event);

-        perf_output_lock(handle);
+        perf_output_get_handle(handle);

         do {
                 /*
@@ -3083,7 +3055,7 @@ int perf_output_begin(struct perf_output_handle *handle,
         handle->head    = head;

         if (head - tail > data->watermark)
-                atomic_set(&data->wakeup, 1);
+                atomic_inc(&data->wakeup);

         if (have_lost) {
                 lost_event.header.type   = PERF_RECORD_LOST;
@@ -3099,7 +3071,7 @@ int perf_output_begin(struct perf_output_handle *handle,

 fail:
         atomic_inc(&data->lost);
-        perf_output_unlock(handle);
+        perf_output_put_handle(handle);
 out:
         rcu_read_unlock();

@@ -3117,11 +3089,11 @@ void perf_output_end(struct perf_output_handle *handle)
                 int events = atomic_inc_return(&data->events);
                 if (events >= wakeup_events) {
                         atomic_sub(wakeup_events, &data->events);
-                        atomic_set(&data->wakeup, 1);
+                        atomic_inc(&data->wakeup);
                 }
         }

-        perf_output_unlock(handle);
+        perf_output_put_handle(handle);
         rcu_read_unlock();
 }

@@ -3457,22 +3429,13 @@ static void perf_event_task_output(struct perf_event *event,
 {
         struct perf_output_handle handle;
         struct task_struct *task = task_event->task;
-        unsigned long flags;
         int size, ret;

-        /*
-         * If this CPU attempts to acquire an rq lock held by a CPU spinning
-         * in perf_output_lock() from interrupt context, it's game over.
-         */
-        local_irq_save(flags);
-
         size  = task_event->event_id.header.size;
         ret = perf_output_begin(&handle, event, size, 0, 0);

-        if (ret) {
-                local_irq_restore(flags);
+        if (ret)
                 return;
-        }

         task_event->event_id.pid = perf_event_pid(event, task);
         task_event->event_id.ppid = perf_event_pid(event, current);
@@ -3483,7 +3446,6 @@ static void perf_event_task_output(struct perf_event *event,
         perf_output_put(&handle, task_event->event_id);

         perf_output_end(&handle);
-        local_irq_restore(flags);
 }

 static int perf_event_task_match(struct perf_event *event)
--
cgit v1.2.3


From fa5881514ef9c9bcb29319aad85cf2d8889d91f1 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 18 May 2010 10:54:20 +0200
Subject: perf: Optimize the hotpath by converting the perf output buffer to
 local_t

Since there is now only a single writer, we can use local_t instead
and avoid all those pesky LOCK instructions.

Signed-off-by: Peter Zijlstra
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 kernel/perf_event.c | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

(limited to 'kernel/perf_event.c')

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 8cf737da3ec4..1f98c78c3343 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2916,7 +2916,7 @@ static void perf_output_get_handle(struct perf_output_handle *handle)
         struct perf_mmap_data *data = handle->data;

         preempt_disable();
-        atomic_inc(&data->nest);
+        local_inc(&data->nest);
 }

 static void perf_output_put_handle(struct perf_output_handle *handle)
@@ -2925,13 +2925,13 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
         unsigned long head;

 again:
-        head = atomic_long_read(&data->head);
+        head = local_read(&data->head);

         /*
          * IRQ/NMI can happen here, which means we can miss a head update.
          */
-        if (!atomic_dec_and_test(&data->nest))
+        if (!local_dec_and_test(&data->nest))
                 return;

         /*
@@ -2945,12 +2945,12 @@ again:
          * Now check if we missed an update, rely on the (compiler)
          * barrier in atomic_dec_and_test() to re-read data->head.
          */
-        if (unlikely(head != atomic_long_read(&data->head))) {
-                atomic_inc(&data->nest);
+        if (unlikely(head != local_read(&data->head))) {
+                local_inc(&data->nest);
                 goto again;
         }

-        if (atomic_xchg(&data->wakeup, 0))
+        if (local_xchg(&data->wakeup, 0))
                 perf_output_wakeup(handle);

         preempt_enable();
@@ -3031,7 +3031,7 @@ int perf_output_begin(struct perf_output_handle *handle,
         if (!data->nr_pages)
                 goto out;

-        have_lost = atomic_read(&data->lost);
+        have_lost = local_read(&data->lost);
         if (have_lost)
                 size += sizeof(lost_event);

@@ -3045,24 +3045,24 @@ int perf_output_begin(struct perf_output_handle *handle,
                  */
                 tail = ACCESS_ONCE(data->user_page->data_tail);
                 smp_rmb();
-                offset = head = atomic_long_read(&data->head);
+                offset = head = local_read(&data->head);
                 head += size;
                 if (unlikely(!perf_output_space(data, tail, offset, head)))
                         goto fail;
-        } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
+        } while (local_cmpxchg(&data->head, offset, head) != offset);

         handle->offset  = offset;
         handle->head    = head;

         if (head - tail > data->watermark)
-                atomic_inc(&data->wakeup);
+                local_inc(&data->wakeup);

         if (have_lost) {
                 lost_event.header.type   = PERF_RECORD_LOST;
                 lost_event.header.misc   = 0;
                 lost_event.header.size   = sizeof(lost_event);
                 lost_event.id            = event->id;
-                lost_event.lost          = atomic_xchg(&data->lost, 0);
+                lost_event.lost          = local_xchg(&data->lost, 0);

                 perf_output_put(handle, lost_event);
         }
@@ -3070,7 +3070,7 @@ int perf_output_begin(struct perf_output_handle *handle,
         return 0;

 fail:
-        atomic_inc(&data->lost);
+        local_inc(&data->lost);
         perf_output_put_handle(handle);
 out:
         rcu_read_unlock();
@@ -3086,10 +3086,10 @@ void perf_output_end(struct perf_output_handle *handle)
         int wakeup_events = event->attr.wakeup_events;

         if (handle->sample && wakeup_events) {
-                int events = atomic_inc_return(&data->events);
+                int events = local_inc_return(&data->events);
                 if (events >= wakeup_events) {
-                        atomic_sub(wakeup_events, &data->events);
-                        atomic_inc(&data->wakeup);
+                        local_sub(wakeup_events, &data->events);
+                        local_inc(&data->wakeup);
                 }
         }
--
cgit v1.2.3


From 6d1acfd5c6bfd5231c13a8f2858d7f2afbaa1b62 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 18 May 2010 11:12:48 +0200
Subject: perf: Optimize perf_output_*() by avoiding local_xchg()

Since the x86 XCHG instruction implies LOCK, avoid its use by keeping
a sequence count instead.
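
(Sketched standalone with illustrative names only; in the patch below
the snapshot lives in perf_output_handle and the counter in
perf_mmap_data. Take a plain-read snapshot of the wakeup count when
the handle is acquired; a wakeup is due iff the count moved while the
handle was held, so the hot path needs no atomic read-modify-write:)

    struct out_handle {
            long wakeup_snap;                 /* counter value at get time */
    };

    static void handle_get(struct out_handle *h, const long *wakeup)
    {
            h->wakeup_snap = *wakeup;         /* plain read, no LOCK */
    }

    static int wakeup_due(const struct out_handle *h, const long *wakeup)
    {
            return h->wakeup_snap != *wakeup; /* compare, don't xchg */
    }
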
Signed-off-by: Peter Zijlstra
LKML-Reference:
Signed-off-by: Ingo Molnar
---
 kernel/perf_event.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'kernel/perf_event.c')

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 1f98c78c3343..7e3bcf1a29f0 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2917,6 +2917,7 @@ static void perf_output_get_handle(struct perf_output_handle *handle)

         preempt_disable();
         local_inc(&data->nest);
+        handle->wakeup = local_read(&data->wakeup);
 }

 static void perf_output_put_handle(struct perf_output_handle *handle)
@@ -2950,7 +2951,7 @@ again:
                 goto again;
         }

-        if (local_xchg(&data->wakeup, 0))
+        if (handle->wakeup != local_read(&data->wakeup))
                 perf_output_wakeup(handle);

         preempt_enable();
--
cgit v1.2.3


From 49f135ed02828a58b2401f149926c2e3c9cb0116 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Thu, 20 May 2010 10:17:46 +0200
Subject: perf: Comply with new rcu checks API

The software events hlist doesn't fully comply with the new RCU checks
API. We need to consider three different sides that access the hlist:

- the hlist allocation/release side. This side happens when an event
  is created or released; accesses to the hlist are serialized under
  the cpuctx mutex.

- event insertion/removal in the hlist. This side is always serialized
  against the above one. The hlist is always present during such
  operations. This side happens when a software event is scheduled
  in/out. The serialization that ensures the software event is really
  attached to the context is made under the ctx->lock.

- event triggering. This is the read side; it can happen concurrently
  with any update side.

This patch deals with them one by one and anticipates the separate RCU
memory space patches in preparation.

This patch fixes various annoying rcu warnings.

Reported-by: Paul E. McKenney
Signed-off-by: Frederic Weisbecker
Cc: Ingo Molnar
Cc: Peter Zijlstra
Cc: Arnaldo Carvalho de Melo
Cc: Paul Mackerras
---
 kernel/perf_event.c | 58 ++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 46 insertions(+), 12 deletions(-)

(limited to 'kernel/perf_event.c')

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index a4fa381db3c2..511677bc1c6a 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -4066,19 +4066,46 @@ static inline u64 swevent_hash(u64 type, u32 event_id)
         return hash_64(val, SWEVENT_HLIST_BITS);
 }

-static struct hlist_head *
-find_swevent_head(struct perf_cpu_context *ctx, u64 type, u32 event_id)
+static inline struct hlist_head *
+__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
 {
-        u64 hash;
-        struct swevent_hlist *hlist;
+        u64 hash = swevent_hash(type, event_id);
+
+        return &hlist->heads[hash];
+}

-        hash = swevent_hash(type, event_id);
+/* For the read side: events when they trigger */
+static inline struct hlist_head *
+find_swevent_head_rcu(struct perf_cpu_context *ctx, u64 type, u32 event_id)
+{
+        struct swevent_hlist *hlist;

         hlist = rcu_dereference(ctx->swevent_hlist);
         if (!hlist)
                 return NULL;

-        return &hlist->heads[hash];
+        return __find_swevent_head(hlist, type, event_id);
+}
+
+/* For the event head insertion and removal in the hlist */
+static inline struct hlist_head *
+find_swevent_head(struct perf_cpu_context *ctx, struct perf_event *event)
+{
+        struct swevent_hlist *hlist;
+        u32 event_id = event->attr.config;
+        u64 type = event->attr.type;
+
+        /*
+         * Event scheduling is always serialized against hlist allocation
+         * and release. Which makes the protected version suitable here.
+         * The context lock guarantees that.
+         */
+        hlist = rcu_dereference_protected(ctx->swevent_hlist,
+                                          lockdep_is_held(&event->ctx->lock));
+        if (!hlist)
+                return NULL;
+
+        return __find_swevent_head(hlist, type, event_id);
 }

 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
@@ -4095,7 +4122,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,

         rcu_read_lock();

-        head = find_swevent_head(cpuctx, type, event_id);
+        head = find_swevent_head_rcu(cpuctx, type, event_id);

         if (!head)
                 goto end;
@@ -4178,7 +4205,7 @@ static int perf_swevent_enable(struct perf_event *event)
                 perf_swevent_set_period(event);
         }

-        head = find_swevent_head(cpuctx, event->attr.type, event->attr.config);
+        head = find_swevent_head(cpuctx, event);
         if (WARN_ON_ONCE(!head))
                 return -EINVAL;

@@ -4366,6 +4393,14 @@ static const struct pmu perf_ops_task_clock = {
         .read           = task_clock_perf_event_read,
 };

+/* Deref the hlist from the update side */
+static inline struct swevent_hlist *
+swevent_hlist_deref(struct perf_cpu_context *cpuctx)
+{
+        return rcu_dereference_protected(cpuctx->swevent_hlist,
+                                         lockdep_is_held(&cpuctx->hlist_mutex));
+}
+
 static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
 {
         struct swevent_hlist *hlist;
@@ -4376,12 +4411,11 @@ static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)

 static void swevent_hlist_release(struct perf_cpu_context *cpuctx)
 {
-        struct swevent_hlist *hlist;
+        struct swevent_hlist *hlist = swevent_hlist_deref(cpuctx);

-        if (!cpuctx->swevent_hlist)
+        if (!hlist)
                 return;

-        hlist = cpuctx->swevent_hlist;
         rcu_assign_pointer(cpuctx->swevent_hlist, NULL);
         call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
 }
@@ -4418,7 +4452,7 @@ static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)

         mutex_lock(&cpuctx->hlist_mutex);

-        if (!cpuctx->swevent_hlist && cpu_online(cpu)) {
+        if (!swevent_hlist_deref(cpuctx) && cpu_online(cpu)) {
                 struct swevent_hlist *hlist;

                 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
--
cgit v1.2.3


From acd35a463cb2a8d2b28e094d718cf6e653ad7191 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Thu, 20 May 2010 21:28:34 +0200
Subject: perf: Fix forgotten preempt_enable by nested writers

A writer that gets a reference to the buffer handle disables
preemption. When we put that reference, we check if we are the
outermost writer and if not, we simply return and defer the head
update to the outermost writer.

The problem here is that preemption is only re-enabled by the
outermost writer, which produces a preemption count imbalance for
every nested writer that exits.

So just don't forget to always re-enable preemption when we put the
buffer reference, whoever we are.

Fixes lots of sleeping-in-atomic warnings, visible with lock events
recording.

Signed-off-by: Frederic Weisbecker
Cc: Ingo Molnar
Cc: Peter Zijlstra
Cc: Arnaldo Carvalho de Melo
Cc: Paul Mackerras
Cc: Stephane Eranian
Cc: Robert Richter
---
 kernel/perf_event.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'kernel/perf_event.c')

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 2a060be3b07f..45b7aec55458 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2933,7 +2933,7 @@ again:
          */
         if (!local_dec_and_test(&data->nest))
-                return;
+                goto out;

         /*
          * Publish the known good head. Rely on the full barrier implied
@@ -2954,6 +2954,7 @@ again:
         if (handle->wakeup != local_read(&data->wakeup))
                 perf_output_wakeup(handle);

+ out:
         preempt_enable();
 }
--
cgit v1.2.3


From 1c024eca51fdc965290acf342ae16a476c2189d0 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 19 May 2010 14:02:22 +0200
Subject: perf, trace: Optimize tracepoints by using per-tracepoint-per-cpu
 hlist to track events

Avoid the swevent hash-table by using per-tracepoint hlists.

Also, avoid conditionals on the fast path by ordering with probe
unregister so that we should never get on the callback path without
the data being there.

Signed-off-by: Peter Zijlstra
Cc: Arnaldo Carvalho de Melo
Cc: Frederic Weisbecker
Cc: Paul Mackerras
Cc: Mike Galbraith
Cc: Steven Rostedt
LKML-Reference: <20100521090710.473188012@chello.nl>
Signed-off-by: Ingo Molnar
---
 kernel/perf_event.c | 94 +++++++++++++++++++++++++++--------------------------
 1 file changed, 48 insertions(+), 46 deletions(-)

(limited to 'kernel/perf_event.c')

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 45b7aec55458..3f2cc313ee25 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -4005,9 +4005,6 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
                 perf_swevent_overflow(event, 0, nmi, data, regs);
 }

-static int perf_tp_event_match(struct perf_event *event,
-                                struct perf_sample_data *data);
-
 static int perf_exclude_event(struct perf_event *event,
                               struct pt_regs *regs)
 {
@@ -4037,10 +4034,6 @@ static int perf_swevent_match(struct perf_event *event,
         if (perf_exclude_event(event, regs))
                 return 0;

-        if (event->attr.type == PERF_TYPE_TRACEPOINT &&
-            !perf_tp_event_match(event, data))
-                return 0;
-
         return 1;
 }

@@ -4122,7 +4115,7 @@ end:

 int perf_swevent_get_recursion_context(void)
 {
-        struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
+        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
         int rctx;

         if (in_nmi())
@@ -4134,10 +4127,8 @@ int perf_swevent_get_recursion_context(void)
         else
                 rctx = 0;

-        if (cpuctx->recursion[rctx]) {
-                put_cpu_var(perf_cpu_context);
+        if (cpuctx->recursion[rctx])
                 return -1;
-        }

         cpuctx->recursion[rctx]++;
         barrier();
@@ -4151,7 +4142,6 @@ void perf_swevent_put_recursion_context(int rctx)
         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
         barrier();
         cpuctx->recursion[rctx]--;
-        put_cpu_var(perf_cpu_context);
 }
 EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);

@@ -4162,6 +4152,7 @@ void __perf_sw_event(u32 event_id, u64 nr, int nmi,
         struct perf_sample_data data;
         int rctx;

+        preempt_disable_notrace();
         rctx = perf_swevent_get_recursion_context();
         if (rctx < 0)
                 return;
@@ -4171,6 +4162,7 @@ void __perf_sw_event(u32 event_id, u64 nr, int nmi,
         do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);

         perf_swevent_put_recursion_context(rctx);
+        preempt_enable_notrace();
 }

 static void perf_swevent_read(struct perf_event *event)
@@ -4486,11 +4478,43 @@ static int swevent_hlist_get(struct perf_event *event)

 #ifdef CONFIG_EVENT_TRACING

-void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
-                   int entry_size, struct pt_regs *regs, void *event)
+static const struct pmu perf_ops_tracepoint = {
+        .enable         = perf_trace_enable,
+        .disable        = perf_trace_disable,
+        .read           = perf_swevent_read,
+        .unthrottle     = perf_swevent_unthrottle,
+};
+
+static int perf_tp_filter_match(struct perf_event *event,
+                                struct perf_sample_data *data)
+{
+        void *record = data->raw->data;
+
+        if (likely(!event->filter) || filter_match_preds(event->filter, record))
+                return 1;
+        return 0;
+}
+
+static int perf_tp_event_match(struct perf_event *event,
+                               struct perf_sample_data *data,
+                               struct pt_regs *regs)
+{
+        if (perf_exclude_event(event, regs))
+                return 0;
+
+        if (!perf_tp_filter_match(event, data))
+                return 0;
+
+        return 1;
+}
+
+void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
+                   struct pt_regs *regs, struct hlist_head *head)
 {
-        const int type = PERF_TYPE_TRACEPOINT;
         struct perf_sample_data data;
+        struct perf_event *event;
+        struct hlist_node *node;
+
         struct perf_raw_record raw = {
                 .size = entry_size,
                 .data = record,
@@ -4499,30 +4523,18 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
         perf_sample_data_init(&data, addr);
         data.raw = &raw;

-        if (!event) {
-                do_perf_sw_event(type, event_id, count, 1, &data, regs);
-                return;
+        rcu_read_lock();
+        hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
+                if (perf_tp_event_match(event, &data, regs))
+                        perf_swevent_add(event, count, 1, &data, regs);
         }
-
-        if (perf_swevent_match(event, type, event_id, &data, regs))
-                perf_swevent_add(event, count, 1, &data, regs);
+        rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(perf_tp_event);

-static int perf_tp_event_match(struct perf_event *event,
-                                struct perf_sample_data *data)
-{
-        void *record = data->raw->data;
-
-        if (likely(!event->filter) || filter_match_preds(event->filter, record))
-                return 1;
-        return 0;
-}
-
 static void tp_perf_event_destroy(struct perf_event *event)
 {
-        perf_trace_disable(event->attr.config);
-        swevent_hlist_put(event);
+        perf_trace_destroy(event);
 }

 static const struct pmu *tp_perf_event_init(struct perf_event *event)
@@ -4538,17 +4550,13 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
             !capable(CAP_SYS_ADMIN))
                 return ERR_PTR(-EPERM);

-        if (perf_trace_enable(event->attr.config, event))
+        err = perf_trace_init(event);
+        if (err)
                 return NULL;

         event->destroy = tp_perf_event_destroy;

-        err = swevent_hlist_get(event);
-        if (err) {
-                perf_trace_disable(event->attr.config);
-                return ERR_PTR(err);
-        }
-
-        return &perf_ops_generic;
+        return &perf_ops_tracepoint;
 }

 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
@@ -4576,12 +4584,6 @@ static void perf_event_free_filter(struct perf_event *event)

 #else

-static int perf_tp_event_match(struct perf_event *event,
-                                struct perf_sample_data *data)
-{
-        return 1;
-}
-
 static const struct pmu *tp_perf_event_init(struct perf_event *event)
 {
         return NULL;
--
cgit v1.2.3


From 0f139300c9057c16b5833a4636b715b104fe0baa Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 20 May 2010 14:35:15 +0200
Subject: perf: Ensure that IOC_OUTPUT isn't used to create multi-writer
 buffers

Since we want to ensure buffers only have a single writer, we must
avoid creating buffers with multiple writers.

Signed-off-by: Peter Zijlstra
Cc: Arnaldo Carvalho de Melo
Cc: Frederic Weisbecker
Cc: Paul Mackerras
Cc: Mike Galbraith
Cc: Steven Rostedt
LKML-Reference: <20100521090710.528215873@chello.nl>
Signed-off-by: Ingo Molnar
---
 kernel/perf_event.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

(limited to 'kernel/perf_event.c')

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 3f2cc313ee25..7a932526946f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -4920,6 +4920,13 @@ static int perf_event_set_output(struct perf_event *event, int output_fd)
         int fput_needed = 0;
         int ret = -EINVAL;

+        /*
+         * Don't allow output of inherited per-task events. This would
+         * create performance issues due to cross cpu access.
+         */
+        if (event->cpu == -1 && event->attr.inherit)
+                return -EINVAL;
+
         if (!output_fd)
                 goto set;

@@ -4940,6 +4947,18 @@ static int perf_event_set_output(struct perf_event *event, int output_fd)
         if (event->data)
                 goto out;

+        /*
+         * Don't allow cross-cpu buffers
+         */
+        if (output_event->cpu != event->cpu)
+                goto out;
+
+        /*
+         * If it's not a per-cpu buffer, it must be the same task.
+         */
+        if (output_event->cpu == -1 && output_event->ctx != event->ctx)
+                goto out;
+
         atomic_long_inc(&output_file->f_count);

 set:
--
cgit v1.2.3


From adb8e118f288dc4c569ac9a89010b81a4745fbf0 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 20 May 2010 16:21:55 +0200
Subject: perf: Fix wakeup storm for RO mmap()s

RO mmap()s don't update the tail pointer, so comparing against it for
determining the written data size doesn't really do any good. Keep
track of when we last did a wakeup, and compare against that.

Signed-off-by: Peter Zijlstra
Cc: Arnaldo Carvalho de Melo
Cc: Frederic Weisbecker
Cc: Paul Mackerras
Cc: Mike Galbraith
Cc: Steven Rostedt
LKML-Reference: <20100521090710.684479310@chello.nl>
Signed-off-by: Ingo Molnar
---
 kernel/perf_event.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'kernel/perf_event.c')

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 7a932526946f..1531e0b409a5 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -3056,8 +3056,8 @@ int perf_output_begin(struct perf_output_handle *handle,
         handle->offset  = offset;
         handle->head    = head;

-        if (head - tail > data->watermark)
-                local_inc(&data->wakeup);
+        if (head - local_read(&data->wakeup) > data->watermark)
+                local_add(data->watermark, &data->wakeup);

         if (have_lost) {
                 lost_event.header.type   = PERF_RECORD_LOST;
--
cgit v1.2.3


From 5d967a8be636a4f301a8daad642bd1007299d9ec Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 20 May 2010 16:46:39 +0200
Subject: perf: Optimize perf_output_copy()

Reduce the clutter in perf_output_copy() by keeping an iterator in
perf_output_handle.

Signed-off-by: Peter Zijlstra
Cc: Arnaldo Carvalho de Melo
Cc: Frederic Weisbecker
Cc: Paul Mackerras
Cc: Mike Galbraith
Cc: Steven Rostedt
LKML-Reference: <20100521090710.742809176@chello.nl>
Signed-off-by: Ingo Molnar
---
 kernel/perf_event.c | 54 ++++++++++++++++++++++++++--------------------------
 1 file changed, 26 insertions(+), 28 deletions(-)

(limited to 'kernel/perf_event.c')

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 1531e0b409a5..b67549a08626 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2961,39 +2961,30 @@ again:
 void perf_output_copy(struct perf_output_handle *handle,
                       const void *buf, unsigned int len)
 {
-        unsigned int pages_mask;
-        unsigned long offset;
-        unsigned int size;
-        void **pages;
-
-        offset          = handle->offset;
-        pages_mask      = handle->data->nr_pages - 1;
-        pages           = handle->data->data_pages;
-
-        do {
-                unsigned long page_offset;
-                unsigned long page_size;
-                int nr;
-
-                nr          = (offset >> PAGE_SHIFT) & pages_mask;
-                page_size   = 1UL << (handle->data->data_order + PAGE_SHIFT);
-                page_offset = offset & (page_size - 1);
-                size        = min_t(unsigned int, page_size - page_offset, len);
-
-                memcpy(pages[nr] + page_offset, buf, size);
-
-                len         -= size;
-                buf         += size;
-                offset      += size;
-        } while (len);
-
-        handle->offset = offset;
+        handle->offset += len;

         /*
          * Check we didn't copy past our reservation window, taking the
          * possible unsigned int wrap into account.
          */
-        WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
+        if (WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0))
+                return;
+
+        do {
+                unsigned long size = min(handle->size, len);
+
+                memcpy(handle->addr, buf, size);
+
+                len -= size;
+                handle->addr += size;
+                handle->size -= size;
+                if (!handle->size) {
+                        handle->page++;
+                        handle->page &= handle->data->nr_pages - 1;
+                        handle->addr = handle->data->data_pages[handle->page];
+                        handle->size = PAGE_SIZE << handle->data->data_order;
+                }
+        } while (len);
 }

 int perf_output_begin(struct perf_output_handle *handle,
@@ -3050,6 +3041,13 @@ int perf_output_begin(struct perf_output_handle *handle,
         if (head - local_read(&data->wakeup) > data->watermark)
                 local_add(data->watermark, &data->wakeup);

+        handle->page = handle->offset >> (PAGE_SHIFT + data->data_order);
+        handle->page &= data->nr_pages - 1;
+        handle->size = handle->offset & ((PAGE_SIZE << data->data_order) - 1);
+        handle->addr = data->data_pages[handle->page];
+        handle->addr += handle->size;
+        handle->size = (PAGE_SIZE << data->data_order) - handle->size;
+
         if (have_lost) {
                 lost_event.header.type   = PERF_RECORD_LOST;
                 lost_event.header.misc   = 0;
--
cgit v1.2.3


From 3cafa9fbb5c1d564b7b8e7224f493effbf04ffee Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 20 May 2010 19:07:56 +0200
Subject: perf: Optimize the !vmalloc backed buffer

Reduce code and data by using the knowledge that for
!PERF_USE_VMALLOC data_order is always 0.

Signed-off-by: Peter Zijlstra
Cc: Arnaldo Carvalho de Melo
Cc: Frederic Weisbecker
Cc: Paul Mackerras
Cc: Mike Galbraith
Cc: Steven Rostedt
LKML-Reference: <20100521090710.795019386@chello.nl>
Signed-off-by: Ingo Molnar
---
 kernel/perf_event.c | 41 ++++++++++++++++++++++++++--------------
 1 file changed, 26 insertions(+), 15 deletions(-)

(limited to 'kernel/perf_event.c')

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index b67549a08626..953ce46d7b2f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2297,11 +2297,6 @@ unlock:
         rcu_read_unlock();
 }

-static unsigned long perf_data_size(struct perf_mmap_data *data)
-{
-        return data->nr_pages << (PAGE_SHIFT + data->data_order);
-}
-
 #ifndef CONFIG_PERF_USE_VMALLOC

 /*
@@ -2359,7 +2354,6 @@ perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
                 goto fail_data_pages;
         }

-        data->data_order = 0;
         data->nr_pages = nr_pages;

         return data;
@@ -2395,6 +2389,11 @@ static void perf_mmap_data_free(struct perf_mmap_data *data)
         kfree(data);
 }

+static inline int page_order(struct perf_mmap_data *data)
+{
+        return 0;
+}
+
 #else

 /*
@@ -2403,10 +2402,15 @@ static void perf_mmap_data_free(struct perf_mmap_data *data)
  * Required for architectures that have d-cache aliasing issues.
  */

+static inline int page_order(struct perf_mmap_data *data)
+{
+        return data->page_order;
+}
+
 static struct page *
 perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
 {
-        if (pgoff > (1UL << data->data_order))
+        if (pgoff > (1UL << page_order(data)))
                 return NULL;

         return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE);
@@ -2426,7 +2430,7 @@ static void perf_mmap_data_free_work(struct work_struct *work)
         int i, nr;

         data = container_of(work, struct perf_mmap_data, work);
-        nr = 1 << data->data_order;
+        nr = 1 << page_order(data);

         base = data->user_page;
         for (i = 0; i < nr + 1; i++)
@@ -2465,7 +2469,7 @@ perf_mmap_data_alloc(struct perf_event *event, int nr_pages)

         data->user_page = all_buf;
         data->data_pages[0] = all_buf + PAGE_SIZE;
-        data->data_order = ilog2(nr_pages);
+        data->page_order = ilog2(nr_pages);
         data->nr_pages = 1;

         return data;
@@ -2479,6 +2483,11 @@ fail:

 #endif

+static unsigned long perf_data_size(struct perf_mmap_data *data)
+{
+        return data->nr_pages << (PAGE_SHIFT + page_order(data));
+}
+
 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
         struct perf_event *event = vma->vm_file->private_data;
@@ -2979,10 +2988,12 @@ void perf_output_copy(struct perf_output_handle *handle,
                 handle->addr += size;
                 handle->size -= size;
                 if (!handle->size) {
+                        struct perf_mmap_data *data = handle->data;
+
                         handle->page++;
-                        handle->page &= handle->data->nr_pages - 1;
-                        handle->addr = handle->data->data_pages[handle->page];
-                        handle->size = PAGE_SIZE << handle->data->data_order;
+                        handle->page &= data->nr_pages - 1;
+                        handle->addr = data->data_pages[handle->page];
+                        handle->size = PAGE_SIZE << page_order(data);
                 }
         } while (len);
 }
@@ -3050,12 +3061,12 @@ int perf_output_begin(struct perf_output_handle *handle,
         if (head - local_read(&data->wakeup) > data->watermark)
                 local_add(data->watermark, &data->wakeup);

-        handle->page = handle->offset >> (PAGE_SHIFT + data->data_order);
+        handle->page = handle->offset >> (PAGE_SHIFT + page_order(data));
         handle->page &= data->nr_pages - 1;
-        handle->size = handle->offset & ((PAGE_SIZE << data->data_order) - 1);
+        handle->size = handle->offset & ((PAGE_SIZE << page_order(data)) - 1);
         handle->addr = data->data_pages[handle->page];
         handle->addr += handle->size;
-        handle->size = (PAGE_SIZE << data->data_order) - handle->size;
+        handle->size = (PAGE_SIZE << page_order(data)) - handle->size;
--
cgit v1.2.3


From a94ffaaf55552769af328eaca9260fe6291c66c7 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 20 May 2010 19:50:07 +0200
Subject: perf: Remove more code from the fastpath

Sanity checks cost instructions.
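
(With the reservation check gone, perf_output_copy() reduces to the
bare iterator loop. As a standalone C model of that loop -- an
editorial sketch with invented names, assuming a power-of-two ring of
equally sized pages:)

    #include <string.h>

    struct ring_iter {
            unsigned long page;      /* current page index         */
            char *addr;              /* write cursor in that page  */
            unsigned long space;     /* bytes left in current page */
            char **pages;            /* the ring's backing pages   */
            unsigned long nr_pages;  /* power of two               */
            unsigned long page_size;
    };

    static void ring_iter_copy(struct ring_iter *it, const void *buf,
                               unsigned long len)
    {
            do {
                    unsigned long n = len < it->space ? len : it->space;

                    memcpy(it->addr, buf, n);
                    buf = (const char *)buf + n;
                    len -= n;
                    it->addr += n;
                    it->space -= n;
                    if (!it->space) { /* wrap to the next ring page */
                            it->page = (it->page + 1) & (it->nr_pages - 1);
                            it->addr = it->pages[it->page];
                            it->space = it->page_size;
                    }
            } while (len);
    }
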
Signed-off-by: Peter Zijlstra
Cc: Arnaldo Carvalho de Melo
Cc: Frederic Weisbecker
Cc: Paul Mackerras
Cc: Mike Galbraith
Cc: Steven Rostedt
LKML-Reference: <20100521090710.852926930@chello.nl>
Signed-off-by: Ingo Molnar
---
 kernel/perf_event.c | 20 ++++----------------
 1 file changed, 4 insertions(+), 16 deletions(-)

(limited to 'kernel/perf_event.c')

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 953ce46d7b2f..d25c864cadbf 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2967,20 +2967,11 @@ again:
         preempt_enable();
 }

-void perf_output_copy(struct perf_output_handle *handle,
+__always_inline void perf_output_copy(struct perf_output_handle *handle,
                       const void *buf, unsigned int len)
 {
-        handle->offset += len;
-
-        /*
-         * Check we didn't copy past our reservation window, taking the
-         * possible unsigned int wrap into account.
-         */
-        if (WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0))
-                return;
-
         do {
-                unsigned long size = min(handle->size, len);
+                unsigned long size = min_t(unsigned long, handle->size, len);

                 memcpy(handle->addr, buf, size);

@@ -3055,15 +3046,12 @@ int perf_output_begin(struct perf_output_handle *handle,
                 goto fail;
         } while (local_cmpxchg(&data->head, offset, head) != offset);

-        handle->offset  = offset;
-        handle->head    = head;
-
         if (head - local_read(&data->wakeup) > data->watermark)
                 local_add(data->watermark, &data->wakeup);

-        handle->page = handle->offset >> (PAGE_SHIFT + page_order(data));
+        handle->page = offset >> (PAGE_SHIFT + page_order(data));
         handle->page &= data->nr_pages - 1;
-        handle->size = handle->offset & ((PAGE_SIZE << page_order(data)) - 1);
+        handle->size = offset & ((PAGE_SIZE << page_order(data)) - 1);
         handle->addr = data->data_pages[handle->page];
         handle->addr += handle->size;
         handle->size = (PAGE_SIZE << page_order(data)) - handle->size;
--
cgit v1.2.3


From 580d607cd666dfabfc1c7b0fb08c8ac690c7c87f Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 20 May 2010 20:54:31 +0200
Subject: perf: Optimize perf_tp_event_match()

Since we know tracepoints come from kernel context, avoid conditionals
that try and establish that very fact.

Signed-off-by: Peter Zijlstra
Cc: Arnaldo Carvalho de Melo
Cc: Frederic Weisbecker
Cc: Paul Mackerras
Cc: Mike Galbraith
Cc: Steven Rostedt
LKML-Reference: <20100521090710.904944001@chello.nl>
Signed-off-by: Ingo Molnar
---
 kernel/perf_event.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'kernel/perf_event.c')

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index d25c864cadbf..e099650cd249 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -4496,7 +4496,10 @@ static int perf_tp_event_match(struct perf_event *event,
                                struct perf_sample_data *data,
                                struct pt_regs *regs)
 {
-        if (perf_exclude_event(event, regs))
+        /*
+         * All tracepoints are from kernel-space.
+         */
+        if (event->attr.exclude_kernel)
                 return 0;

         if (!perf_tp_filter_match(event, data))
--
cgit v1.2.3


From ea635c64e007061f6468ece5cc9cc62d41d4ecf2 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Wed, 26 May 2010 17:40:29 -0400
Subject: Fix racy use of anon_inode_getfd() in perf_event.c

Once anon_inode_getfd() is called, you can't expect *anything* about
the struct file that descriptor points to - another thread might be
doing whatever it likes with the descriptor table at that point.
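
(The race-free pattern the fix below switches to, as a sketch built
from the real in-kernel primitives -- example_create_fd() itself is
invented: reserve a descriptor number first, create the struct file
privately, do all fallible setup, and only then publish the file into
the descriptor table, after which nothing may fail:)

    #include <linux/anon_inodes.h>
    #include <linux/err.h>
    #include <linux/file.h>
    #include <linux/fs.h>

    static int example_create_fd(const struct file_operations *fops, void *priv)
    {
            struct file *file;
            int fd;

            fd = get_unused_fd_flags(O_RDWR); /* reserved, not yet visible */
            if (fd < 0)
                    return fd;

            file = anon_inode_getfile("[example]", fops, priv, O_RDWR);
            if (IS_ERR(file)) {
                    put_unused_fd(fd);
                    return PTR_ERR(file);
            }

            /* ... fallible setup using `file` goes here ... */

            fd_install(fd, file); /* publish: only now user-visible */
            return fd;
    }
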
Cc: stable
Signed-off-by: Al Viro
---
 kernel/perf_event.c | 40 ++++++++++++++++++++++------------------
 1 file changed, 22 insertions(+), 18 deletions(-)

(limited to 'kernel/perf_event.c')

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index e099650cd249..bd7ce8ca5bb9 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -4999,8 +4999,8 @@ SYSCALL_DEFINE5(perf_event_open,
         struct perf_event_context *ctx;
         struct file *event_file = NULL;
         struct file *group_file = NULL;
+        int event_fd;
         int fput_needed = 0;
-        int fput_needed2 = 0;
         int err;

         /* for future expandability... */
@@ -5021,12 +5021,18 @@ SYSCALL_DEFINE5(perf_event_open,
                 return -EINVAL;
         }

+        event_fd = get_unused_fd_flags(O_RDWR);
+        if (event_fd < 0)
+                return event_fd;
+
         /*
          * Get the target context (task or percpu):
          */
         ctx = find_get_context(pid, cpu);
-        if (IS_ERR(ctx))
-                return PTR_ERR(ctx);
+        if (IS_ERR(ctx)) {
+                err = PTR_ERR(ctx);
+                goto err_fd;
+        }

         /*
          * Look up the group leader (we will attach this event to it):
@@ -5066,13 +5072,11 @@ SYSCALL_DEFINE5(perf_event_open,
         if (IS_ERR(event))
                 goto err_put_context;

-        err = anon_inode_getfd("[perf_event]", &perf_fops, event, O_RDWR);
-        if (err < 0)
-                goto err_free_put_context;
-
-        event_file = fget_light(err, &fput_needed2);
-        if (!event_file)
+        event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
+        if (IS_ERR(event_file)) {
+                err = PTR_ERR(event_file);
                 goto err_free_put_context;
+        }

         if (flags & PERF_FLAG_FD_OUTPUT) {
                 err = perf_event_set_output(event, group_fd);
@@ -5093,19 +5097,19 @@ SYSCALL_DEFINE5(perf_event_open,
         list_add_tail(&event->owner_entry, &current->perf_event_list);
         mutex_unlock(&current->perf_event_mutex);

-err_fput_free_put_context:
-        fput_light(event_file, fput_needed2);
+        fput_light(group_file, fput_needed);
+        fd_install(event_fd, event_file);
+        return event_fd;

+err_fput_free_put_context:
+        fput(event_file);
 err_free_put_context:
-        if (err < 0)
-                free_event(event);
-
+        free_event(event);
 err_put_context:
-        if (err < 0)
-                put_ctx(ctx);
-        fput_light(group_file, fput_needed);
-
+        put_ctx(ctx);
+err_fd:
+        put_unused_fd(event_fd);
         return err;
 }
--
cgit v1.2.3
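
(A user-visible consequence of the series, shown as an editorial
userspace example rather than anything from the log itself: an
inherited per-task event can still be opened, but mmap() of its buffer
is now rejected with EINVAL by the perf_mmap() check added above.)

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    int main(void)
    {
            struct perf_event_attr attr;
            void *buf;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_SOFTWARE;
            attr.config = PERF_COUNT_SW_TASK_CLOCK;
            attr.inherit = 1;

            /* pid == 0, cpu == -1: a per-task event on the current task */
            fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0) {
                    perror("perf_event_open");
                    return 1;
            }

            /* user page + one data page; expected to fail with EINVAL */
            buf = mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);
            if (buf == MAP_FAILED)
                    perror("mmap");

            close(fd);
            return 0;
    }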