Commit d4c40383 authored by Ingo Molnar's avatar Ingo Molnar

Merge branch 'tip/tracing/urgent-1' of...

Merge branch 'tip/tracing/urgent-1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/urgent
parents 3daeb4da 71e308a2
...@@ -605,7 +605,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) ...@@ -605,7 +605,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
return; return;
} }
if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) { if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY) {
*parent = old; *parent = old;
return; return;
} }
......
...@@ -190,7 +190,7 @@ unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent) ...@@ -190,7 +190,7 @@ unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
goto out; goto out;
if (unlikely(atomic_read(&current->tracing_graph_pause))) if (unlikely(atomic_read(&current->tracing_graph_pause)))
goto out; goto out;
if (ftrace_push_return_trace(parent, ip, &trace.depth) == -EBUSY) if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
goto out; goto out;
trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN; trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
/* Only trace if the calling function expects to. */ /* Only trace if the calling function expects to. */
......
...@@ -33,6 +33,7 @@ config X86 ...@@ -33,6 +33,7 @@ config X86
select HAVE_DYNAMIC_FTRACE select HAVE_DYNAMIC_FTRACE
select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_GRAPH_FP_TEST
select HAVE_FUNCTION_TRACE_MCOUNT_TEST select HAVE_FUNCTION_TRACE_MCOUNT_TEST
select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
select HAVE_FTRACE_SYSCALLS select HAVE_FTRACE_SYSCALLS
......
...@@ -1154,6 +1154,7 @@ ENTRY(ftrace_graph_caller) ...@@ -1154,6 +1154,7 @@ ENTRY(ftrace_graph_caller)
pushl %edx pushl %edx
movl 0xc(%esp), %edx movl 0xc(%esp), %edx
lea 0x4(%ebp), %eax lea 0x4(%ebp), %eax
movl (%ebp), %ecx
subl $MCOUNT_INSN_SIZE, %edx subl $MCOUNT_INSN_SIZE, %edx
call prepare_ftrace_return call prepare_ftrace_return
popl %edx popl %edx
...@@ -1168,6 +1169,7 @@ return_to_handler: ...@@ -1168,6 +1169,7 @@ return_to_handler:
pushl %eax pushl %eax
pushl %ecx pushl %ecx
pushl %edx pushl %edx
movl %ebp, %eax
call ftrace_return_to_handler call ftrace_return_to_handler
movl %eax, 0xc(%esp) movl %eax, 0xc(%esp)
popl %edx popl %edx
......
...@@ -135,6 +135,7 @@ ENTRY(ftrace_graph_caller) ...@@ -135,6 +135,7 @@ ENTRY(ftrace_graph_caller)
leaq 8(%rbp), %rdi leaq 8(%rbp), %rdi
movq 0x38(%rsp), %rsi movq 0x38(%rsp), %rsi
movq (%rbp), %rdx
subq $MCOUNT_INSN_SIZE, %rsi subq $MCOUNT_INSN_SIZE, %rsi
call prepare_ftrace_return call prepare_ftrace_return
...@@ -150,6 +151,7 @@ GLOBAL(return_to_handler) ...@@ -150,6 +151,7 @@ GLOBAL(return_to_handler)
/* Save the return values */ /* Save the return values */
movq %rax, (%rsp) movq %rax, (%rsp)
movq %rdx, 8(%rsp) movq %rdx, 8(%rsp)
movq %rbp, %rdi
call ftrace_return_to_handler call ftrace_return_to_handler
......
...@@ -408,7 +408,8 @@ int ftrace_disable_ftrace_graph_caller(void) ...@@ -408,7 +408,8 @@ int ftrace_disable_ftrace_graph_caller(void)
* Hook the return address and push it in the stack of return addrs * Hook the return address and push it in the stack of return addrs
* in current thread info. * in current thread info.
*/ */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
unsigned long frame_pointer)
{ {
unsigned long old; unsigned long old;
int faulted; int faulted;
...@@ -453,7 +454,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) ...@@ -453,7 +454,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
return; return;
} }
if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) { if (ftrace_push_return_trace(old, self_addr, &trace.depth,
frame_pointer) == -EBUSY) {
*parent = old; *parent = old;
return; return;
} }
......
...@@ -362,6 +362,7 @@ struct ftrace_ret_stack { ...@@ -362,6 +362,7 @@ struct ftrace_ret_stack {
unsigned long func; unsigned long func;
unsigned long long calltime; unsigned long long calltime;
unsigned long long subtime; unsigned long long subtime;
unsigned long fp;
}; };
/* /*
...@@ -372,7 +373,8 @@ struct ftrace_ret_stack { ...@@ -372,7 +373,8 @@ struct ftrace_ret_stack {
extern void return_to_handler(void); extern void return_to_handler(void);
extern int extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth); ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
unsigned long frame_pointer);
/* /*
* Sometimes we don't want to trace a function with the function * Sometimes we don't want to trace a function with the function
......
...@@ -18,6 +18,13 @@ config HAVE_FUNCTION_TRACER ...@@ -18,6 +18,13 @@ config HAVE_FUNCTION_TRACER
config HAVE_FUNCTION_GRAPH_TRACER config HAVE_FUNCTION_GRAPH_TRACER
bool bool
config HAVE_FUNCTION_GRAPH_FP_TEST
bool
help
An arch may pass in a unique value (frame pointer) to both the
entering and exiting of a function. On exit, the value is compared
and if it does not match, then it will panic the kernel.
config HAVE_FUNCTION_TRACE_MCOUNT_TEST config HAVE_FUNCTION_TRACE_MCOUNT_TEST
bool bool
help help
...@@ -121,6 +128,7 @@ config FUNCTION_GRAPH_TRACER ...@@ -121,6 +128,7 @@ config FUNCTION_GRAPH_TRACER
bool "Kernel Function Graph Tracer" bool "Kernel Function Graph Tracer"
depends on HAVE_FUNCTION_GRAPH_TRACER depends on HAVE_FUNCTION_GRAPH_TRACER
depends on FUNCTION_TRACER depends on FUNCTION_TRACER
depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
default y default y
help help
Enable the kernel to trace a function at both its return Enable the kernel to trace a function at both its return
......
...@@ -57,7 +57,8 @@ static struct tracer_flags tracer_flags = { ...@@ -57,7 +57,8 @@ static struct tracer_flags tracer_flags = {
/* Add a function return address to the trace stack on thread info.*/ /* Add a function return address to the trace stack on thread info.*/
int int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth) ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
unsigned long frame_pointer)
{ {
unsigned long long calltime; unsigned long long calltime;
int index; int index;
...@@ -85,6 +86,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth) ...@@ -85,6 +86,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
current->ret_stack[index].func = func; current->ret_stack[index].func = func;
current->ret_stack[index].calltime = calltime; current->ret_stack[index].calltime = calltime;
current->ret_stack[index].subtime = 0; current->ret_stack[index].subtime = 0;
current->ret_stack[index].fp = frame_pointer;
*depth = index; *depth = index;
return 0; return 0;
...@@ -92,7 +94,8 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth) ...@@ -92,7 +94,8 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
/* Retrieve a function return address to the trace stack on thread info.*/ /* Retrieve a function return address to the trace stack on thread info.*/
static void static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret) ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
unsigned long frame_pointer)
{ {
int index; int index;
...@@ -106,6 +109,31 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret) ...@@ -106,6 +109,31 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
return; return;
} }
#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
/*
* The arch may choose to record the frame pointer used
* and check it here to make sure that it is what we expect it
* to be. If gcc does not set the place holder of the return
* address in the frame pointer, and does a copy instead, then
* the function graph trace will fail. This test detects this
* case.
*
* Currently, x86_32 with optimize for size (-Os) makes the latest
* gcc do the above.
*/
if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
ftrace_graph_stop();
WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
" from func %pF return to %lx\n",
current->ret_stack[index].fp,
frame_pointer,
(void *)current->ret_stack[index].func,
current->ret_stack[index].ret);
*ret = (unsigned long)panic;
return;
}
#endif
*ret = current->ret_stack[index].ret; *ret = current->ret_stack[index].ret;
trace->func = current->ret_stack[index].func; trace->func = current->ret_stack[index].func;
trace->calltime = current->ret_stack[index].calltime; trace->calltime = current->ret_stack[index].calltime;
...@@ -117,12 +145,12 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret) ...@@ -117,12 +145,12 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
* Send the trace to the ring-buffer. * Send the trace to the ring-buffer.
* @return the original return address. * @return the original return address.
*/ */
unsigned long ftrace_return_to_handler(void) unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{ {
struct ftrace_graph_ret trace; struct ftrace_graph_ret trace;
unsigned long ret; unsigned long ret;
ftrace_pop_return_trace(&trace, &ret); ftrace_pop_return_trace(&trace, &ret, frame_pointer);
trace.rettime = trace_clock_local(); trace.rettime = trace_clock_local();
ftrace_graph_return(&trace); ftrace_graph_return(&trace);
barrier(); barrier();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment