author     Ingo Molnar <mingo@elte.hu>  2010-04-08 13:36:36 +0200
committer  Ingo Molnar <mingo@elte.hu>  2010-04-08 13:37:18 +0200
commit     ca7e0c612005937a4a5a75d3fed90459993de65c (patch)
tree       b574fc0f0189b52ffc87ba20c418228db556faa1 /kernel/trace/ring_buffer.c
parent     8141d0050d76e5695011b5ab577ec66fb51a998c (diff)
parent     f5284e7635787224dda1a2bf82a4c56b1c75671f (diff)
Merge branch 'linus' into perf/core
Semantic conflict: arch/x86/kernel/cpu/perf_event_intel_ds.c

Merge reason: pick up latest fixes, fix the conflict

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c  |  9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d1187ef20caf..41ca394feb22 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
+#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
@@ -1209,18 +1210,19 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-			return;
+			goto out;
		p = cpu_buffer->pages->next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-		return;
+		goto out;
	rb_reset_cpu(cpu_buffer);
	rb_check_pages(cpu_buffer);
+out:
	spin_unlock_irq(&cpu_buffer->reader_lock);
}
@@ -1237,7 +1239,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
-			return;
+			goto out;
		p = pages->next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
@@ -1246,6 +1248,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
	rb_reset_cpu(cpu_buffer);
	rb_check_pages(cpu_buffer);
+out:
	spin_unlock_irq(&cpu_buffer->reader_lock);
}
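
Note on the change: both hunks above sit inside critical sections that end with spin_unlock_irq(&cpu_buffer->reader_lock). The early returns taken when RB_WARN_ON() fires skipped that unlock and left reader_lock held; converting them to goto out funnels every exit path through the single unlock. A minimal sketch of the same unlock-on-all-paths pattern, using hypothetical names (struct example_buffer, example_remove_pages) rather than anything from this file:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical type, for illustration only. */
struct example_buffer {
	spinlock_t		lock;
	struct list_head	pages;
};

static void example_remove_pages(struct example_buffer *buf, unsigned int nr_pages)
{
	unsigned int i;

	spin_lock_irq(&buf->lock);

	for (i = 0; i < nr_pages; i++) {
		if (WARN_ON(list_empty(&buf->pages)))
			goto out;		/* bail out, but still unlock below */
		list_del_init(buf->pages.next);	/* drop one page from the list */
	}

out:
	spin_unlock_irq(&buf->lock);		/* single unlock for every exit path */
}

Returning before the out: label would leave the lock held (and local interrupts disabled), which is exactly the leak the patch above closes.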