Commit cf3271a7 authored by Soeren Sandmann, committed by Thomas Gleixner

ftrace/sysprof: don't trace the user stack if we are a kernel thread.

Check that current->mm is non-NULL before attempting to trace the user
stack.

Also take the depth of the kernel stack into account when comparing
against sample_max_depth.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 8a9e94c1
@@ -95,13 +95,12 @@ const static struct stacktrace_ops backtrace_ops = {
 	.address		= backtrace_address,
 };
 
-static struct pt_regs *
+static int
 trace_kernel(struct pt_regs *regs, struct trace_array *tr,
 	     struct trace_array_cpu *data)
 {
 	struct backtrace_info info;
 	unsigned long bp;
-	char *user_stack;
 	char *stack;
 
 	info.tr = tr;
@@ -119,10 +118,7 @@ trace_kernel(struct pt_regs *regs, struct trace_array *tr,
 
 	dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, &info);
 
-	/* Now trace the user stack */
-	user_stack = ((char *)current->thread.sp0 - sizeof(struct pt_regs));
-
-	return (struct pt_regs *)user_stack;
+	return info.pos;
 }
 
 static void timer_notify(struct pt_regs *regs, int cpu)
@@ -150,32 +146,44 @@ static void timer_notify(struct pt_regs *regs, int cpu)
 	__trace_special(tr, data, 0, 0, current->pid);
 
 	if (!is_user)
-		regs = trace_kernel(regs, tr, data);
-
-	fp = (void __user *)regs->bp;
-
-	__trace_special(tr, data, 2, regs->ip, 0);
-
-	for (i = 0; i < sample_max_depth; i++) {
-		frame.next_fp = 0;
-		frame.return_address = 0;
-		if (!copy_stack_frame(fp, &frame))
-			break;
-		if ((unsigned long)fp < regs->sp)
-			break;
-
-		__trace_special(tr, data, 2, frame.return_address,
-				(unsigned long)fp);
-		fp = frame.next_fp;
-	}
-
-	__trace_special(tr, data, 3, current->pid, i);
+		i = trace_kernel(regs, tr, data);
+	else
+		i = 0;
+
+	/*
+	 * Trace user stack if we are not a kernel thread
+	 */
+	if (current->mm && i < sample_max_depth) {
+		regs = (struct pt_regs *)current->thread.sp0 - 1;
+
+		fp = (void __user *)regs->bp;
+
+		__trace_special(tr, data, 2, regs->ip, 0);
+
+		while (i < sample_max_depth) {
+			frame.next_fp = 0;
+			frame.return_address = 0;
+			if (!copy_stack_frame(fp, &frame))
+				break;
+			if ((unsigned long)fp < regs->sp)
+				break;
+
+			__trace_special(tr, data, 2, frame.return_address,
+					(unsigned long)fp);
+			fp = frame.next_fp;
+			i++;
+		}
+	}
 
 	/*
 	 * Special trace entry if we overflow the max depth:
 	 */
 	if (i == sample_max_depth)
 		__trace_special(tr, data, -1, -1, -1);
+
+	__trace_special(tr, data, 3, current->pid, i);
 }
 
 static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
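For orientation (not part of the commit): the patch makes trace_kernel() report how many frames it has already recorded, and the user-stack walk then continues from that count instead of restarting at zero; kernel threads are skipped entirely because they have no user address space, which is what the current->mm check detects. Below is a minimal, standalone userspace sketch of that control flow. The names SAMPLE_MAX_DEPTH, struct task, sample(), walk_kernel_frames() and walk_user_frames() are made-up stand-ins for the kernel's sample_max_depth, task_struct, timer_notify(), dump_trace() and the copy_stack_frame() loop; it models only the depth accounting, not the real stack walking.

#include <stdio.h>
#include <stddef.h>

#define SAMPLE_MAX_DEPTH 8          /* stand-in for sample_max_depth */

struct task {
	void *mm;                   /* NULL models a kernel thread */
};

/* Pretend kernel-stack walk: records some frames, returns how many. */
static int walk_kernel_frames(void)
{
	return 3;                   /* imagine the backtrace visited 3 addresses */
}

/* Pretend user-stack walk: keeps counting from 'depth', never past the cap. */
static int walk_user_frames(int depth)
{
	while (depth < SAMPLE_MAX_DEPTH) {
		/* ...copy one frame from user memory and emit it... */
		depth++;
		if (depth == 6)
			break;      /* pretend the frame chain ended here */
	}
	return depth;
}

static void sample(struct task *current_task, int in_user_mode)
{
	int depth = in_user_mode ? 0 : walk_kernel_frames();

	/*
	 * Only trace user frames when the task has a user address space
	 * and the kernel walk left room under the shared depth cap.
	 */
	if (current_task->mm && depth < SAMPLE_MAX_DEPTH)
		depth = walk_user_frames(depth);

	if (depth == SAMPLE_MAX_DEPTH)
		printf("overflow marker\n");
	printf("sampled %d frames total\n", depth);
}

int main(void)
{
	struct task user_task = { .mm = (void *)1 };
	struct task kernel_thread = { .mm = NULL };

	sample(&user_task, 0);      /* kernel + user frames share one count */
	sample(&kernel_thread, 0);  /* kernel frames only: mm is NULL */
	return 0;
}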