diff options
| author | Peter Zijlstra <peterz@infradead.org> | 2024-11-04 14:39:23 +0100 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2025-03-04 09:43:05 +0100 |
| commit | 954878377bc81459b95937a05f01e8ebf6a05083 (patch) | |
| tree | 9d93b71878ca0caf3ab37ea2fd098c6fdd90f577 /kernel | |
| parent | c5b96789575b670b1e776071bb243e0ed3d3abaa (diff) | |
perf/core: Simplify the perf_mmap() control flow
Identity-transform:
if (c) {
X1;
} else {
Y;
goto l;
}
X2;
l:
into the simpler:
if (c) {
X1;
X2;
} else {
Y;
}
[ mingo: Forward ported it ]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Ravi Bangoria <ravi.bangoria@amd.com>
Link: https://lore.kernel.org/r/20241104135519.095904637@infradead.org
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/events/core.c | 75 |

1 file changed, 36 insertions(+), 39 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index ab4e497087da..d1b04c850881 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6701,6 +6701,42 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 	if (vma->vm_pgoff == 0) {
 		nr_pages = (vma_size / PAGE_SIZE) - 1;
+
+		/*
+		 * If we have rb pages ensure they're a power-of-two number, so we
+		 * can do bitmasks instead of modulo.
+		 */
+		if (nr_pages != 0 && !is_power_of_2(nr_pages))
+			return -EINVAL;
+
+		if (vma_size != PAGE_SIZE * (1 + nr_pages))
+			return -EINVAL;
+
+		WARN_ON_ONCE(event->ctx->parent_ctx);
+again:
+		mutex_lock(&event->mmap_mutex);
+		if (event->rb) {
+			if (data_page_nr(event->rb) != nr_pages) {
+				ret = -EINVAL;
+				goto unlock;
+			}
+
+			if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
+				/*
+				 * Raced against perf_mmap_close(); remove the
+				 * event and try again.
+				 */
+				ring_buffer_attach(event, NULL);
+				mutex_unlock(&event->mmap_mutex);
+				goto again;
+			}
+
+			/* We need the rb to map pages. */
+			rb = event->rb;
+			goto unlock;
+		}
+
+		user_extra = nr_pages + 1;
 	} else {
 		/*
 		 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
@@ -6760,47 +6796,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 		atomic_set(&rb->aux_mmap_count, 1);
 		user_extra = nr_pages;
-
-		goto accounting;
-	}
-
-	/*
-	 * If we have rb pages ensure they're a power-of-two number, so we
-	 * can do bitmasks instead of modulo.
-	 */
-	if (nr_pages != 0 && !is_power_of_2(nr_pages))
-		return -EINVAL;
-
-	if (vma_size != PAGE_SIZE * (1 + nr_pages))
-		return -EINVAL;
-
-	WARN_ON_ONCE(event->ctx->parent_ctx);
-again:
-	mutex_lock(&event->mmap_mutex);
-	if (event->rb) {
-		if (data_page_nr(event->rb) != nr_pages) {
-			ret = -EINVAL;
-			goto unlock;
-		}
-
-		if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
-			/*
-			 * Raced against perf_mmap_close(); remove the
-			 * event and try again.
-			 */
-			ring_buffer_attach(event, NULL);
-			mutex_unlock(&event->mmap_mutex);
-			goto again;
-		}
-
-		/* We need the rb to map pages. */
-		rb = event->rb;
-		goto unlock;
 	}
-
-	user_extra = nr_pages + 1;
-
-accounting:
 	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
 
 	/*
