author	Frederic Weisbecker <fweisbec@gmail.com>	2009-11-22 05:26:55 +0100
committer	Ingo Molnar <mingo@elte.hu>	2009-11-22 09:03:42 +0100
commit	ce71b9df8893ec954e56c5979df6da274f20f65e (patch)
tree	76e8a5e33393c2f4fca4083628fc142dcbb55250 /include/trace
parent	e25613683bd5c46d3e8c8ae6416dccc9f357dcdc (diff)
tracing: Use the perf recursion protection from trace event
When we commit a trace to perf, we first check whether we are recursing
in the same buffer, so that we don't mess up the buffer with a recursive
trace. But later on, we do the same check from perf to avoid commit
recursion. The recursion check is desired early, before we touch the
buffer, but we want to do this check only once.

So export the recursion protection from perf and use it from the trace
events before submitting a trace.

v2: Put appropriate Reported-by tag

Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Jason Baron <jbaron@redhat.com>
LKML-Reference: <1258864015-10579-1-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
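For illustration, the protection pattern this patch moves into the trace
event path looks roughly like the sketch below. This is a hedged outline,
not the generated kernel code: example_profile_event() is a hypothetical
stand-in for the ftrace_profile_##call functions that the ftrace.h macro
expands to.

	/*
	 * Sketch of the pattern introduced by this patch: take the perf
	 * recursion context before touching the per-cpu trace buffer, and
	 * release it on every exit path where it was acquired.
	 */
	extern int perf_swevent_get_recursion_context(int **recursion);
	extern void perf_swevent_put_recursion_context(int *recursion);

	static void example_profile_event(void)
	{
		unsigned long irq_flags;
		int *recursion;

		local_irq_save(irq_flags);

		/* Bail out early if this context is already committing a trace. */
		if (perf_swevent_get_recursion_context(&recursion))
			goto out;

		/* ... build the event in the per-cpu buffer and submit it ... */

		perf_swevent_put_recursion_context(recursion);
	out:
		local_irq_restore(irq_flags);
	}

Note how the patch also swaps the end and end_recursion labels in the
macro: a later failure (goto end) must still drop the recursion context
it already acquired, while a failed perf_swevent_get_recursion_context()
(goto end_recursion) must only restore interrupts.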
Diffstat (limited to 'include/trace')
-rw-r--r--	include/trace/ftrace.h	23
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 4945d1c9986..c222ef5238b 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -724,16 +724,19 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
static void ftrace_profile_##call(proto) \
{ \
struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
+ extern int perf_swevent_get_recursion_context(int **recursion); \
+ extern void perf_swevent_put_recursion_context(int *recursion); \
struct ftrace_event_call *event_call = &event_##call; \
extern void perf_tp_event(int, u64, u64, void *, int); \
struct ftrace_raw_##call *entry; \
- struct perf_trace_buf *trace_buf; \
u64 __addr = 0, __count = 1; \
unsigned long irq_flags; \
struct trace_entry *ent; \
int __entry_size; \
int __data_size; \
+ char *trace_buf; \
char *raw_data; \
+ int *recursion; \
int __cpu; \
int pc; \
\
@@ -749,6 +752,10 @@ static void ftrace_profile_##call(proto) \
return; \
\
local_irq_save(irq_flags); \
+ \
+ if (perf_swevent_get_recursion_context(&recursion)) \
+ goto end_recursion; \
+ \
__cpu = smp_processor_id(); \
\
if (in_nmi()) \
@@ -759,13 +766,7 @@ static void ftrace_profile_##call(proto) \
if (!trace_buf) \
goto end; \
\
- trace_buf = per_cpu_ptr(trace_buf, __cpu); \
- if (trace_buf->recursion++) \
- goto end_recursion; \
- \
- barrier(); \
- \
- raw_data = trace_buf->buf; \
+ raw_data = per_cpu_ptr(trace_buf, __cpu); \
\
*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
entry = (struct ftrace_raw_##call *)raw_data; \
@@ -780,9 +781,9 @@ static void ftrace_profile_##call(proto) \
perf_tp_event(event_call->id, __addr, __count, entry, \
__entry_size); \
\
-end_recursion: \
- trace_buf->recursion--; \
-end: \
+end: \
+ perf_swevent_put_recursion_context(recursion); \
+end_recursion: \
local_irq_restore(irq_flags); \
\
}
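For context, perf_swevent_get_recursion_context() works by keeping one
recursion counter per CPU and per interruption context, so an NMI firing
on top of a traced IRQ is still allowed while a genuine same-context
recursion is refused. The following is a minimal sketch of that idea;
the example_cpu_context structure and its recursion[4] layout are
assumptions made for the example, not a verbatim copy of
kernel/perf_event.c.

	#include <linux/percpu.h>
	#include <linux/hardirq.h>

	/* One recursion counter per interruption context on each CPU. */
	struct example_cpu_context {
		int recursion[4];	/* task, softirq, hardirq, NMI */
	};

	static DEFINE_PER_CPU(struct example_cpu_context, example_cpu_context);

	int perf_swevent_get_recursion_context(int **recursion)
	{
		struct example_cpu_context *cpuctx = &get_cpu_var(example_cpu_context);

		/* Pick the counter matching the current context. */
		if (in_nmi())
			*recursion = &cpuctx->recursion[3];
		else if (in_irq())
			*recursion = &cpuctx->recursion[2];
		else if (in_softirq())
			*recursion = &cpuctx->recursion[1];
		else
			*recursion = &cpuctx->recursion[0];

		/* Already inside a commit in this context: refuse. */
		if (**recursion) {
			put_cpu_var(example_cpu_context);
			return -1;
		}

		(**recursion)++;
		return 0;
	}

	void perf_swevent_put_recursion_context(int *recursion)
	{
		(*recursion)--;
		put_cpu_var(example_cpu_context);
	}

The get/put pairing mirrors get_cpu_var()/put_cpu_var(): preemption stays
disabled between the two calls, which is harmless here since the trace
event callers run with interrupts disabled anyway.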