Commit 744e5326 authored by Thomas Gleixner

Merge branch 'rt/trace' into rt/head

parents 499027af a5d1c78f
@@ -239,9 +239,9 @@ ftrace_format_##call(struct ftrace_event_call *unused, \
 #undef __print_flags
 #define __print_flags(flag, delim, flag_array...) \
 ({ \
-        static const struct trace_print_flags flags[] = \
+        static const struct trace_print_flags __flags[] = \
         { flag_array, { -1, NULL }}; \
-        ftrace_print_flags_seq(p, delim, flag, flags); \
+        ftrace_print_flags_seq(p, delim, flag, __flags); \
 })
 #undef __print_symbolic
@@ -254,7 +254,7 @@ ftrace_format_##call(struct ftrace_event_call *unused, \
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
-enum print_line_t \
+static enum print_line_t \
 ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 { \
         struct trace_seq *s = &iter->seq; \
@@ -317,7 +317,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, func, print) \
-int \
+static int \
 ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
 { \
         struct ftrace_raw_##call field; \
...
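A side note on the __print_flags() hunk above: the macro-local array is renamed from flags to __flags so it cannot shadow a flags variable visible at the expansion site; as the second hunk shows, the generated ftrace_raw_output_##call() functions take an int flags parameter, and a TP_printk() argument list may reference it. A minimal user-space sketch of that shadowing hazard, using GCC statement expressions and made-up names and values purely for illustration:

#include <stdio.h>

/* Bad: the macro-local "flags" captures any "flags" the caller passes in. */
#define mask_bad(x) \
({ \
        unsigned long flags = 0xff; \
        (x) & flags; \
})

/* Good: the reserved-style name cannot collide with the caller's variables. */
#define mask_good(x) \
({ \
        unsigned long __flags = 0xff; \
        (x) & __flags; \
})

int main(void)
{
        unsigned long flags = 0x1234;

        printf("%#lx\n", mask_bad(flags));   /* prints 0xff: argument bound to the macro's local */
        printf("%#lx\n", mask_good(flags));  /* prints 0x34: caller's flags used as intended */
        return 0;
}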
@@ -470,6 +470,18 @@ config FTRACE_STARTUP_TEST
          functioning properly. It will do tests on all the configured
          tracers of ftrace.
+config EVENT_TRACE_TEST_SYSCALLS
+        bool "Run selftest on syscall events"
+        depends on FTRACE_STARTUP_TEST
+        help
+         This option will also enable testing every syscall event.
+         It only enables the event and disables it and runs various loads
+         with the event enabled. This adds a bit more time for kernel boot
+         up since it runs this on every system call defined.
+         TBD - enable a way to actually call the syscalls as we test their
+               events
 config MMIOTRACE
         bool "Memory mapped IO tracing"
         depends on HAVE_MMIOTRACE_SUPPORT && PCI
...
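Since the new option is gated on FTRACE_STARTUP_TEST, both symbols have to be enabled for the syscall-event selftests to run at boot; a minimal .config fragment, assuming the rest of the ftrace configuration is already in place:

CONFIG_FTRACE_STARTUP_TEST=y
CONFIG_EVENT_TRACE_TEST_SYSCALLS=y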
@@ -701,7 +701,7 @@ static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
         val &= ~RB_FLAG_MASK;
-        ret = (unsigned long)cmpxchg(&list->next,
+        ret = cmpxchg((unsigned long *)&list->next,
                         val | old_flag, val | new_flag);
         /* check if the reader took the page */
@@ -794,7 +794,7 @@ static int rb_head_page_replace(struct buffer_page *old,
         val = *ptr & ~RB_FLAG_MASK;
         val |= RB_PAGE_HEAD;
-        ret = cmpxchg(ptr, val, &new->list);
+        ret = cmpxchg(ptr, val, (unsigned long)&new->list);
         return ret == val;
 }
...
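On the two ring-buffer hunks above: the ring buffer keeps flag bits in the low bits of the head page's list pointer, and both fixes make the cmpxchg() operands consistently unsigned long, casting the location (or the replacement pointer) rather than the return value. A compressed user-space sketch of that tagged-pointer update, with __sync_val_compare_and_swap() standing in for the kernel's cmpxchg() and all names invented for illustration (the kernel builds with -fno-strict-aliasing, which this kind of type punning relies on):

#include <stdio.h>

struct list_head {
        struct list_head *next;
};

#define RB_FLAG_MASK 3UL

/*
 * Atomically switch the low flag bits of list->next from old_flag to
 * new_flag.  The pointer word is operated on as an unsigned long so the
 * compared and stored values have the same type as the memory location.
 * Returns the value found in the word before the swap.
 */
static unsigned long set_head_flag(struct list_head *list,
                                   unsigned long old_flag,
                                   unsigned long new_flag)
{
        unsigned long val = (unsigned long)list->next & ~RB_FLAG_MASK;

        return __sync_val_compare_and_swap((unsigned long *)&list->next,
                                           val | old_flag, val | new_flag);
}

int main(void)
{
        struct list_head a;

        /* tag the self-pointer with flag 1, then swap the tag to 2 */
        a.next = (struct list_head *)((unsigned long)&a | 1UL);

        printf("before: %#lx\n", set_head_flag(&a, 1UL, 2UL));
        printf("after:  %#lx\n", (unsigned long)a.next);
        return 0;
}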
@@ -78,7 +78,7 @@ FTRACE_ENTRY(funcgraph_entry, ftrace_graph_ent_entry,
         __field_desc( int, graph_ent, depth )
         ),
-        F_printk("--> %lx (%d)", __entry->graph_ent.func, __entry->depth)
+        F_printk("--> %lx (%d)", __entry->func, __entry->depth)
 );
 /* Function return entry */
@@ -97,8 +97,8 @@ FTRACE_ENTRY(funcgraph_exit, ftrace_graph_ret_entry,
         F_printk("<-- %lx (%d) (start: %llx end: %llx) over: %d",
                  __entry->func, __entry->depth,
-                 __entry->calltime, __entry->rettim,
-                 __entrty->depth)
+                 __entry->calltime, __entry->rettime,
+                 __entry->depth)
 );
 /*
@@ -116,15 +116,6 @@ FTRACE_ENTRY(funcgraph_exit, ftrace_graph_ret_entry,
         __field( unsigned char, next_state ) \
         __field( unsigned int, next_cpu )
-#if 0
-FTRACE_ENTRY_STRUCT_ONLY(ctx_switch_entry,
-        F_STRUCT(
-                FTRACE_CTX_FIELDS
-        )
-);
-#endif
 FTRACE_ENTRY(context_switch, ctx_switch_entry,
         TRACE_CTX,
@@ -133,7 +124,7 @@ FTRACE_ENTRY(context_switch, ctx_switch_entry,
                 FTRACE_CTX_FIELDS
         ),
-        F_printk(b"%u:%u:%u ==> %u:%u:%u [%03u]",
+        F_printk("%u:%u:%u ==> %u:%u:%u [%03u]",
                  __entry->prev_pid, __entry->prev_prio, __entry->prev_state,
                  __entry->next_pid, __entry->next_prio, __entry->next_state,
                  __entry->next_cpu
@@ -257,8 +248,8 @@ FTRACE_ENTRY(mmiotrace_rw, trace_mmiotrace_rw,
         __field_desc( unsigned char, rw, width )
         ),
-        F_printk("%lx %lx %lx %d %lx %lx",
-                 __entry->phs, __entry->value, __entry->pc,
+        F_printk("%lx %lx %lx %d %x %x",
+                 (unsigned long)__entry->phys, __entry->value, __entry->pc,
                  __entry->map_id, __entry->opcode, __entry->width)
 );
@@ -275,8 +266,8 @@ FTRACE_ENTRY(mmiotrace_map, trace_mmiotrace_map,
         __field_desc( unsigned char, map, opcode )
         ),
-        F_printk("%lx %lx %lx %d %lx",
-                 __entry->phs, __entry->virt, __entry->len,
+        F_printk("%lx %lx %lx %d %x",
+                 (unsigned long)__entry->phys, __entry->virt, __entry->len,
                  __entry->map_id, __entry->opcode)
 );
@@ -370,7 +361,7 @@ FTRACE_ENTRY(kmem_alloc, kmemtrace_alloc_entry,
         __field( int, node )
         ),
-        F_printk("type:%u call_site:%lx ptr:%p req:%lu alloc:%lu"
+        F_printk("type:%u call_site:%lx ptr:%p req:%zi alloc:%zi"
                  " flags:%x node:%d",
                  __entry->type_id, __entry->call_site, __entry->ptr,
                  __entry->bytes_req, __entry->bytes_alloc,
...
@@ -1154,7 +1154,7 @@ static int trace_module_notify(struct notifier_block *self,
 }
 #endif /* CONFIG_MODULES */
-struct notifier_block trace_module_nb = {
+static struct notifier_block trace_module_nb = {
         .notifier_call = trace_module_notify,
         .priority = 0,
 };
@@ -1326,6 +1326,18 @@ static __init void event_trace_self_tests(void)
                 if (!call->regfunc)
                         continue;
+                /*
+                 * Testing syscall events here is pretty useless, but
+                 * we still do it if configured. But this is time consuming.
+                 * What we really need is a user thread to perform the
+                 * syscalls as we test.
+                 */
+#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
+                if (call->system &&
+                    strcmp(call->system, "syscalls") == 0)
+                        continue;
+#endif
                 pr_info("Testing event %s: ", call->name);
                 /*
...
@@ -22,6 +22,47 @@
 #undef __field_struct
 #define __field_struct(type, item)
+#undef __field
+#define __field(type, item) type item;
+#undef __field_desc
+#define __field_desc(type, container, item) type item;
+#undef __array
+#define __array(type, item, size) type item[size];
+#undef __array_desc
+#define __array_desc(type, container, item, size) type item[size];
+#undef __dynamic_array
+#define __dynamic_array(type, item) type item[];
+#undef F_STRUCT
+#define F_STRUCT(args...) args
+#undef F_printk
+#define F_printk(fmt, args...) fmt, args
+#undef FTRACE_ENTRY
+#define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \
+struct ____ftrace_##name { \
+        tstruct \
+}; \
+static void __used ____ftrace_check_##name(void) \
+{ \
+        struct ____ftrace_##name *__entry = NULL; \
+        \
+        /* force compile-time check on F_printk() */ \
+        printk(print); \
+}
+#undef FTRACE_ENTRY_DUP
+#define FTRACE_ENTRY_DUP(name, struct_name, id, tstruct, print) \
+        FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
+#include "trace_entries.h"
 #undef __field
 #define __field(type, item) \
         ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
@@ -88,10 +129,6 @@ ftrace_format_##name(struct ftrace_event_call *unused, \
         return ret; \
 }
-#undef FTRACE_ENTRY_DUP
-#define FTRACE_ENTRY_DUP(name, struct_name, id, tstruct, print) \
-        FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
 #include "trace_entries.h"
@@ -172,32 +209,6 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \
 #undef __dynamic_array
 #define __dynamic_array(type, item)
-#undef TRACE_ZERO_CHAR
-#define TRACE_ZERO_CHAR(arg)
-#undef TRACE_FIELD
-#define TRACE_FIELD(type, item, assign)\
-        entry->item = assign;
-#undef TRACE_FIELD
-#define TRACE_FIELD(type, item, assign)\
-        entry->item = assign;
-#undef TRACE_FIELD_SIGN
-#define TRACE_FIELD_SIGN(type, item, assign, is_signed) \
-        TRACE_FIELD(type, item, assign)
-#undef TP_CMD
-#define TP_CMD(cmd...) cmd
-#undef TRACE_ENTRY
-#define TRACE_ENTRY entry
-#undef TRACE_FIELD_SPECIAL
-#define TRACE_FIELD_SPECIAL(type_item, item, len, cmd) \
-        cmd;
 #undef FTRACE_ENTRY
 #define FTRACE_ENTRY(call, struct_name, type, tstruct, print) \
 static int ftrace_raw_init_event_##call(void); \
...
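Taken together, the hunks above make every entry in trace_entries.h also expand, via the new FTRACE_ENTRY stub, into a throwaway struct plus a __used but never-called function whose printk(print) lets the compiler's format checking validate each F_printk() string against the entry's fields; that is what flushed out the rettim, __entrty, and phs typos fixed earlier in this series. A stripped-down user-space sketch of the same trick, with macro and field names invented for illustration:

#include <stdio.h>

#define F_STRUCT(args...)      args
#define F_printk(fmt, args...) fmt, args

/*
 * Expand the field list into a throwaway struct and a "used" but
 * never-called function.  Handing the F_printk() format and arguments to
 * printf() lets -Wformat verify them against the struct members at
 * compile time; nothing here runs.
 */
#define CHECK_ENTRY(name, tstruct, print)                       \
struct ____check_##name { tstruct };                            \
static void __attribute__((used)) ____check_##name##_fn(void)  \
{                                                               \
        struct ____check_##name *__entry = NULL;                \
                                                                \
        printf(print);                                          \
}

/* Example entry: with -Wall, changing %lx to %s below draws a format warning. */
CHECK_ENTRY(funcgraph_sample,
        F_STRUCT(unsigned long func; int depth;),
        F_printk("--> %lx (%d)", __entry->func, __entry->depth))

int main(void)
{
        return 0;
}

Because the check function is never called, the NULL __entry pointer is never dereferenced at run time; it exists only so the format arguments have the right types for the compiler to inspect.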