| author | Steven Rostedt <rostedt@goodmis.org> | 2008-09-29 23:02:40 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-10-14 10:38:56 +0200 |
| commit | ed56829cb3195de499f97fa6108fe9134319bae6 | |
| tree | e038bfb9898146dead5a20d2a1ffb44f8fb24676 /kernel/trace | |
| parent | a7b1374333407f409cf8df7e623b12490f073c84 | |
ring_buffer: reset buffer page when freeing
Mathieu Desnoyers pointed out that the page frame needs to be reset before
it is freed, otherwise we might trigger a BUG_ON in the page free code.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
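For quick reference, the helper this patch introduces is shown below in isolation, copied from the diff further down with explanatory comments added. The rationale in the comments (that the ring buffer reuses struct page fields for its own bookkeeping, as mm/slob.c does) is inferred from the "Also stolen from mm/slob.c" remark in the patch rather than spelled out in the commit message.

```c
/*
 * Helper introduced by this patch (see the diff below). Like mm/slob.c,
 * the ring buffer overlays its own bookkeeping on struct page fields, so
 * the page's mapcount and mapping must be put back into their "free"
 * state before the page frame is returned, otherwise the sanity checks
 * in the page free code can trigger a BUG_ON.
 */
static inline void free_buffer_page(struct buffer_page *bpage)
{
	reset_page_mapcount(&bpage->page);	/* set _mapcount back to -1 */
	bpage->page.mapping = NULL;		/* clear the reused mapping field */
	__free_page(&bpage->page);		/* now safe to free the page frame */
}
```

Each of the four `__free_page(&page->page)` call sites in kernel/trace/ring_buffer.c is then switched over to `free_buffer_page(page)`, as the diffstat and diff below show.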
Diffstat (limited to 'kernel/trace')
-rw-r--r-- | kernel/trace/ring_buffer.c | 19 |
1 file changed, 15 insertions(+), 4 deletions(-)
```diff
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 95ca9338cb6c..cfa711374d9a 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -128,6 +128,17 @@ struct buffer_page {
 };
 
 /*
+ * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
+ * this issue out.
+ */
+static inline void free_buffer_page(struct buffer_page *bpage)
+{
+	reset_page_mapcount(&bpage->page);
+	bpage->page.mapping = NULL;
+	__free_page(&bpage->page);
+}
+
+/*
  * We need to fit the time_stamp delta into 27 bits.
  */
 static inline int test_time_stamp(u64 delta)
@@ -240,7 +251,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 free_pages:
 	list_for_each_entry_safe(page, tmp, &pages, list) {
 		list_del_init(&page->list);
-		__free_page(&page->page);
+		free_buffer_page(page);
 	}
 	return -ENOMEM;
 }
@@ -284,7 +295,7 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
 
 	list_for_each_entry_safe(page, tmp, head, list) {
 		list_del_init(&page->list);
-		__free_page(&page->page);
+		free_buffer_page(page);
 	}
 	kfree(cpu_buffer);
 }
@@ -393,7 +404,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 		p = cpu_buffer->pages.next;
 		page = list_entry(p, struct buffer_page, list);
 		list_del_init(&page->list);
-		__free_page(&page->page);
+		free_buffer_page(page);
 	}
 	BUG_ON(list_empty(&cpu_buffer->pages));
 
@@ -520,7 +531,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 free_pages:
 	list_for_each_entry_safe(page, tmp, &pages, list) {
 		list_del_init(&page->list);
-		__free_page(&page->page);
+		free_buffer_page(page);
 	}
 	return -ENOMEM;
 }
```