Commit b5fae128 authored by Ingo Molnar

perf sched: Clean up PID sorting logic

Use a sort list for thread atoms insertion as well, instead of
hard-coding the comparison to PID.

Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent b1ffe8f3
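
At the heart of the change is a chained-comparator pattern: thread_lat_cmp() walks a list of sort dimensions in order, and the first comparison that returns non-zero decides. Below is a minimal, self-contained userspace sketch of that pattern, not code from the patch: the names (struct item, chain_cmp) are made up, and a plain array stands in for the kernel's list_head-based sort list.

	#include <stdio.h>

	struct item {
		int pid;
	};

	typedef int (*sort_fn_t)(struct item *, struct item *);

	static int pid_cmp(struct item *l, struct item *r)
	{
		if (l->pid < r->pid)
			return -1;
		if (l->pid > r->pid)
			return 1;
		return 0;
	}

	/*
	 * Walk the sort dimensions in order; the first non-zero
	 * comparison decides, so later dimensions only break ties.
	 */
	static int chain_cmp(sort_fn_t *dims, int ndims,
			     struct item *l, struct item *r)
	{
		int i, ret = 0;

		for (i = 0; i < ndims; i++) {
			ret = dims[i](l, r);
			if (ret)
				break;
		}
		return ret;
	}

	int main(void)
	{
		struct item a = { .pid = 10 }, b = { .pid = 20 };
		sort_fn_t dims[] = { pid_cmp };

		printf("%d\n", chain_cmp(dims, 1, &a, &b)); /* -1: a sorts first */
		return 0;
	}

In the patch, cmp_pid is evidently meant to be a single-dimension sort list holding just the PID comparison (its population is not visible in the hunks shown here), so a search keyed on it reproduces the previously hardcoded PID ordering.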
@@ -144,7 +144,7 @@ struct task_atoms {
 	u64			total_runtime;
 };
 
-typedef int (*sort_thread_lat)(struct task_atoms *, struct task_atoms *);
+typedef int (*sort_fn_t)(struct task_atoms *, struct task_atoms *);
 
 static struct rb_root atom_root, sorted_atom_root;
 
@@ -869,41 +869,22 @@ static struct trace_sched_handler replay_ops = {
 	.fork_event		= replay_fork_event,
 };
 
-static struct task_atoms *
-thread_atoms_search(struct rb_root *root, struct thread *thread)
-{
-	struct rb_node *node = root->rb_node;
-
-	while (node) {
-		struct task_atoms *atoms;
-
-		atoms = container_of(node, struct task_atoms, node);
-		if (thread->pid > atoms->thread->pid)
-			node = node->rb_left;
-		else if (thread->pid < atoms->thread->pid)
-			node = node->rb_right;
-		else {
-			return atoms;
-		}
-	}
-	return NULL;
-}
-
 struct sort_dimension {
 	const char		*name;
-	sort_thread_lat		cmp;
+	sort_fn_t		cmp;
 	struct list_head	list;
 };
 
+static LIST_HEAD(cmp_pid);
+
 static int
-thread_lat_cmp(struct list_head *list, struct task_atoms *l,
-	       struct task_atoms *r)
+thread_lat_cmp(struct list_head *list, struct task_atoms *l, struct task_atoms *r)
 {
 	struct sort_dimension *sort;
 	int ret = 0;
 
+	BUG_ON(list_empty(list));
+
 	list_for_each_entry(sort, list, list) {
 		ret = sort->cmp(l, r);
 		if (ret)
@@ -913,6 +894,32 @@ thread_lat_cmp(struct list_head *list, struct task_atoms *l,
 	return ret;
 }
 
+static struct task_atoms *
+thread_atoms_search(struct rb_root *root, struct thread *thread,
+		    struct list_head *sort_list)
+{
+	struct rb_node *node = root->rb_node;
+	struct task_atoms key = { .thread = thread };
+
+	while (node) {
+		struct task_atoms *atoms;
+		int cmp;
+
+		atoms = container_of(node, struct task_atoms, node);
+
+		cmp = thread_lat_cmp(sort_list, &key, atoms);
+		if (cmp > 0)
+			node = node->rb_left;
+		else if (cmp < 0)
+			node = node->rb_right;
+		else {
+			BUG_ON(thread != atoms->thread);
+			return atoms;
+		}
+	}
+	return NULL;
+}
+
 static void
 __thread_latency_insert(struct rb_root *root, struct task_atoms *data,
 			struct list_head *sort_list)
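
The new thread_atoms_search() descends the tree using the same list-driven comparator as __thread_latency_insert(), comparing each node against an on-stack key whose ->thread field is the only one set. A rough standalone sketch of such a keyed, comparator-driven lookup follows (over a plain binary tree rather than the kernel rbtree; struct node and tree_search are illustrative names):

	#include <stdio.h>

	struct node {
		int key;
		struct node *left, *right;
	};

	static int int_cmp(int l, int r)
	{
		return (l < r) ? -1 : (l > r) ? 1 : 0;
	}

	/*
	 * Comparator-driven search with an on-stack key: the same
	 * compare function that ordered the tree steers the descent,
	 * so lookup can never disagree with insertion order.
	 * Positive -> go left, negative -> go right, mirroring the
	 * patch's convention.
	 */
	static struct node *tree_search(struct node *root, int key,
					int (*cmp)(int, int))
	{
		struct node *n = root;

		while (n) {
			int c = cmp(key, n->key);

			if (c > 0)
				n = n->left;
			else if (c < 0)
				n = n->right;
			else
				return n;
		}
		return NULL;
	}

	int main(void)
	{
		struct node leaf = { .key = 1 };
		struct node root = { .key = 2, .right = &leaf };

		printf("%s\n", tree_search(&root, 1, int_cmp) ? "found" : "missing");
		return 0;
	}

Keeping search and insert on one shared comparator is the point of the cleanup: as long as callers pass the same sort list (here cmp_pid) to both paths, the tree's order and its lookups cannot drift apart.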
@@ -1049,18 +1056,18 @@ latency_switch_event(struct trace_switch_event *switch_event,
 	sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
 	sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);
 
-	in_atoms = thread_atoms_search(&atom_root, sched_in);
+	in_atoms = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
 	if (!in_atoms) {
 		thread_atoms_insert(sched_in);
-		in_atoms = thread_atoms_search(&atom_root, sched_in);
+		in_atoms = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
 		if (!in_atoms)
 			die("in-atom: Internal tree error");
 	}
 
-	out_atoms = thread_atoms_search(&atom_root, sched_out);
+	out_atoms = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
 	if (!out_atoms) {
 		thread_atoms_insert(sched_out);
-		out_atoms = thread_atoms_search(&atom_root, sched_out);
+		out_atoms = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
 		if (!out_atoms)
 			die("out-atom: Internal tree error");
 	}
@@ -1085,7 +1092,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
 		return;
 
 	wakee = threads__findnew(wakeup_event->pid, &threads, &last_match);
-	atoms = thread_atoms_search(&atom_root, wakee);
+	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
 	if (!atoms) {
 		thread_atoms_insert(wakee);
 		return;
@@ -1136,7 +1143,6 @@ static void output_lat_thread(struct task_atoms *atom_list)
 
 static int pid_cmp(struct task_atoms *l, struct task_atoms *r)
 {
-
 	if (l->thread->pid < r->thread->pid)
 		return -1;
 	if (l->thread->pid > r->thread->pid)
@@ -1666,8 +1672,8 @@ int cmd_sched(int argc, const char **argv, const char *prefix __used)
 			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
 			if (argc)
 				usage_with_options(latency_usage, latency_options);
+			setup_sorting();
 		}
-		setup_sorting();
 		__cmd_lat();
 	} else if (!strncmp(argv[0], "rep", 3)) {
 		trace_handler = &replay_ops;
...