| field | value | date |
|---|---|---|
| author | Peter Zijlstra <peterz@infradead.org> | 2023-04-21 13:24:18 +0200 |
| committer | Peter Zijlstra <peterz@infradead.org> | 2023-04-21 13:24:18 +0200 |
| commit | 5a4d3b38ed0cd5bbb03eccea6d9949136abc45c3 (patch) | |
| tree | 1e653da8837c52ed1d24126974a0d7dbbdf34e02 /kernel/trace/ring_buffer.c | |
| parent | 9b8e17813aeccc29c2f9f2e6e68997a6eac2d26d (diff) | |
| parent | 6a8f57ae2eb07ab39a6f0ccad60c760743051026 (diff) | |
Merge branch 'v6.3-rc7'
Sync with the urgent patches; in particular:
a53ce18cacb4 ("sched/fair: Sanitize vruntime of entity being migrated")
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Diffstat (limited to 'kernel/trace/ring_buffer.c')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | kernel/trace/ring_buffer.c | 13 |
1 file changed, 12 insertions, 1 deletion
```diff
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index c6f47b6cfd5f..76a2d91eecad 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3098,6 +3098,10 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
 		if (RB_WARN_ON(cpu_buffer,
 			       rb_is_reader_page(cpu_buffer->tail_page)))
 			return;
+		/*
+		 * No need for a memory barrier here, as the update
+		 * of the tail_page did it for this page.
+		 */
 		local_set(&cpu_buffer->commit_page->page->commit,
 			  rb_page_write(cpu_buffer->commit_page));
 		rb_inc_page(&cpu_buffer->commit_page);
@@ -3107,6 +3111,8 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
 	while (rb_commit_index(cpu_buffer) !=
 	       rb_page_write(cpu_buffer->commit_page)) {
 
+		/* Make sure the readers see the content of what is committed. */
+		smp_wmb();
 		local_set(&cpu_buffer->commit_page->page->commit,
 			  rb_page_write(cpu_buffer->commit_page));
 		RB_WARN_ON(cpu_buffer,
@@ -4684,7 +4690,12 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 
 	/*
 	 * Make sure we see any padding after the write update
-	 * (see rb_reset_tail())
+	 * (see rb_reset_tail()).
+	 *
+	 * In addition, a writer may be writing on the reader page
+	 * if the page has not been fully filled, so the read barrier
+	 * is also needed to make sure we see the content of what is
+	 * committed by the writer (see rb_set_commit_to_write()).
 	 */
 	smp_rmb();
 
```
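The comments added by this diff describe a publish/consume barrier pairing: the writer orders its data stores before the commit-index update with smp_wmb(), and the reader orders its read of that index before reading the data with smp_rmb(). The sketch below is a rough illustration only, not the ring-buffer code itself; the buffer, index, and function names (buf, commit, writer_publish, reader_consume) are invented for this example.

```c
#include <linux/string.h>    /* memcpy() */
#include <linux/compiler.h>  /* READ_ONCE(), WRITE_ONCE() */
#include <asm/barrier.h>     /* smp_wmb(), smp_rmb() */

static char buf[64];          /* stands in for the page data area */
static unsigned long commit;  /* stands in for page->commit       */

/* Writer side: fill in the data, then publish the new commit index. */
static void writer_publish(const char *event, unsigned long len)
{
	memcpy(buf + commit, event, len);       /* store the event payload      */
	smp_wmb();                              /* order payload before index   */
	WRITE_ONCE(commit, commit + len);       /* readers may now use the data */
}

/* Reader side: read the commit index, then consume data up to it. */
static unsigned long reader_consume(char *dst)
{
	unsigned long c = READ_ONCE(commit);    /* snapshot the published index  */
	smp_rmb();                              /* order index read before data  */
	memcpy(dst, buf, c);                    /* everything below c is visible */
	return c;
}
```

In the patch itself the writer-side smp_wmb() sits in rb_set_commit_to_write() just before the commit update, and the matching smp_rmb() in rb_get_reader_page() runs before the reader looks at the page content.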
