Commit e309b41d authored by Ingo Molnar, committed by Thomas Gleixner

ftrace: remove notrace

now that we have a kbuild method for notrace, no need to pollute the
C code with the annotations.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent b53dde9d
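
The "kbuild method" the message refers to removes the -pg profiling flag for whole files, so the tracer's own code is never compiled with mcount calls and no per-function annotation is required. A minimal sketch of that approach, assuming the usual kernel/trace/Makefile placement (the Makefile change itself is not part of this diff):

    # Compile this directory without -pg so that none of the tracer's
    # own objects are instrumented with calls to mcount.
    ifdef CONFIG_FTRACE
    ORIG_CFLAGS := $(KBUILD_CFLAGS)
    KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
    endif

With the exclusion done per file at build time, the per-function notrace attribute (which expands to __attribute__((no_instrument_function))) is redundant inside the tracing code itself, so the hunks below simply drop it.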
@@ -53,7 +53,7 @@ ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);
-notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
+void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
struct ftrace_ops *op = ftrace_list;
@@ -79,7 +79,7 @@ void clear_ftrace_function(void)
ftrace_trace_function = ftrace_stub;
}
-static int notrace __register_ftrace_function(struct ftrace_ops *ops)
+static int __register_ftrace_function(struct ftrace_ops *ops)
{
/* Should never be called by interrupts */
spin_lock(&ftrace_lock);
@@ -110,7 +110,7 @@ static int notrace __register_ftrace_function(struct ftrace_ops *ops)
return 0;
}
-static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
+static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
struct ftrace_ops **p;
int ret = 0;
@@ -197,7 +197,7 @@ static int ftrace_record_suspend;
static struct dyn_ftrace *ftrace_free_records;
-static inline int notrace
+static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
struct dyn_ftrace *p;
@@ -214,13 +214,13 @@ ftrace_ip_in_hash(unsigned long ip, unsigned long key)
return found;
}
-static inline void notrace
+static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
hlist_add_head(&node->node, &ftrace_hash[key]);
}
-static notrace void ftrace_free_rec(struct dyn_ftrace *rec)
+static void ftrace_free_rec(struct dyn_ftrace *rec)
{
/* no locking, only called from kstop_machine */
@@ -229,7 +229,7 @@ static notrace void ftrace_free_rec(struct dyn_ftrace *rec)
rec->flags |= FTRACE_FL_FREE;
}
-static notrace struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
+static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
struct dyn_ftrace *rec;
@@ -259,7 +259,7 @@ static notrace struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
return &ftrace_pages->records[ftrace_pages->index++];
}
-static void notrace
+static void
ftrace_record_ip(unsigned long ip)
{
struct dyn_ftrace *node;
@@ -329,7 +329,7 @@ ftrace_record_ip(unsigned long ip)
#define FTRACE_ADDR ((long)(ftrace_caller))
#define MCOUNT_ADDR ((long)(mcount))
-static void notrace
+static void
__ftrace_replace_code(struct dyn_ftrace *rec,
unsigned char *old, unsigned char *new, int enable)
{
@@ -405,7 +405,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec,
}
}
-static void notrace ftrace_replace_code(int enable)
+static void ftrace_replace_code(int enable)
{
unsigned char *new = NULL, *old = NULL;
struct dyn_ftrace *rec;
@@ -430,7 +430,7 @@ static void notrace ftrace_replace_code(int enable)
}
}
-static notrace void ftrace_shutdown_replenish(void)
+static void ftrace_shutdown_replenish(void)
{
if (ftrace_pages->next)
return;
@@ -439,7 +439,7 @@ static notrace void ftrace_shutdown_replenish(void)
ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}
-static notrace void
+static void
ftrace_code_disable(struct dyn_ftrace *rec)
{
unsigned long ip;
@@ -458,7 +458,7 @@ ftrace_code_disable(struct dyn_ftrace *rec)
}
}
-static int notrace __ftrace_modify_code(void *data)
+static int __ftrace_modify_code(void *data)
{
unsigned long addr;
int *command = data;
@@ -482,14 +482,14 @@ static int notrace __ftrace_modify_code(void *data)
return 0;
}
-static void notrace ftrace_run_update_code(int command)
+static void ftrace_run_update_code(int command)
{
stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}
static ftrace_func_t saved_ftrace_func;
-static void notrace ftrace_startup(void)
+static void ftrace_startup(void)
{
int command = 0;
@@ -514,7 +514,7 @@ static void notrace ftrace_startup(void)
mutex_unlock(&ftraced_lock);
}
-static void notrace ftrace_shutdown(void)
+static void ftrace_shutdown(void)
{
int command = 0;
@@ -539,7 +539,7 @@ static void notrace ftrace_shutdown(void)
mutex_unlock(&ftraced_lock);
}
-static void notrace ftrace_startup_sysctl(void)
+static void ftrace_startup_sysctl(void)
{
int command = FTRACE_ENABLE_MCOUNT;
@@ -557,7 +557,7 @@ static void notrace ftrace_startup_sysctl(void)
mutex_unlock(&ftraced_lock);
}
-static void notrace ftrace_shutdown_sysctl(void)
+static void ftrace_shutdown_sysctl(void)
{
int command = FTRACE_DISABLE_MCOUNT;
@@ -577,7 +577,7 @@ static cycle_t ftrace_update_time;
static unsigned long ftrace_update_cnt;
unsigned long ftrace_update_tot_cnt;
-static int notrace __ftrace_update_code(void *ignore)
+static int __ftrace_update_code(void *ignore)
{
struct dyn_ftrace *p;
struct hlist_head head;
@@ -618,7 +618,7 @@ static int notrace __ftrace_update_code(void *ignore)
return 0;
}
-static void notrace ftrace_update_code(void)
+static void ftrace_update_code(void)
{
if (unlikely(ftrace_disabled))
return;
@@ -626,7 +626,7 @@ static void notrace ftrace_update_code(void)
stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}
-static int notrace ftraced(void *ignore)
+static int ftraced(void *ignore)
{
unsigned long usecs;
@@ -733,7 +733,7 @@ struct ftrace_iterator {
unsigned filtered;
};
-static void notrace *
+static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
struct ftrace_iterator *iter = m->private;
@@ -806,7 +806,7 @@ static struct seq_operations show_ftrace_seq_ops = {
.show = t_show,
};
-static int notrace
+static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
struct ftrace_iterator *iter;
@@ -845,7 +845,7 @@ int ftrace_avail_release(struct inode *inode, struct file *file)
return 0;
}
-static void notrace ftrace_filter_reset(void)
+static void ftrace_filter_reset(void)
{
struct ftrace_page *pg;
struct dyn_ftrace *rec;
@@ -867,7 +867,7 @@ static void notrace ftrace_filter_reset(void)
preempt_enable();
}
-static int notrace
+static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
struct ftrace_iterator *iter;
@@ -903,7 +903,7 @@ ftrace_filter_open(struct inode *inode, struct file *file)
return ret;
}
-static ssize_t notrace
+static ssize_t
ftrace_filter_read(struct file *file, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
@@ -913,7 +913,7 @@ ftrace_filter_read(struct file *file, char __user *ubuf,
return -EPERM;
}
-static loff_t notrace
+static loff_t
ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
{
loff_t ret;
@@ -933,7 +933,7 @@ enum {
MATCH_END_ONLY,
};
-static void notrace
+static void
ftrace_match(unsigned char *buff, int len)
{
char str[KSYM_SYMBOL_LEN];
@@ -1002,7 +1002,7 @@ ftrace_match(unsigned char *buff, int len)
preempt_enable();
}
-static ssize_t notrace
+static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
@@ -1094,7 +1094,7 @@ ftrace_filter_write(struct file *file, const char __user *ubuf,
* Filters denote which functions should be enabled when tracing is enabled.
* If @buf is NULL and reset is set, all functions will be enabled for tracing.
*/
-notrace void ftrace_set_filter(unsigned char *buf, int len, int reset)
+void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
if (unlikely(ftrace_disabled))
return;
@@ -1107,7 +1107,7 @@ notrace void ftrace_set_filter(unsigned char *buf, int len, int reset)
mutex_unlock(&ftrace_filter_lock);
}
-static int notrace
+static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
struct seq_file *m = (struct seq_file *)file->private_data;
@@ -1242,7 +1242,7 @@ static __init int ftrace_init_debugfs(void)
fs_initcall(ftrace_init_debugfs);
-static int __init notrace ftrace_dynamic_init(void)
+static int __init ftrace_dynamic_init(void)
{
struct task_struct *p;
unsigned long addr;
@@ -1352,7 +1352,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
return ret;
}
-notrace int
+int
ftrace_enable_sysctl(struct ctl_table *table, int write,
struct file *file, void __user *buffer, size_t *lenp,
loff_t *ppos)
This diff is collapsed.
@@ -150,7 +150,7 @@ struct trace_iterator {
long idx;
};
-void notrace tracing_reset(struct trace_array_cpu *data);
+void tracing_reset(struct trace_array_cpu *data);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void ftrace(struct trace_array *tr,
@@ -189,10 +189,10 @@ void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
struct task_struct *tsk, int cpu);
-extern notrace cycle_t ftrace_now(int cpu);
+extern cycle_t ftrace_now(int cpu);
#ifdef CONFIG_SCHED_TRACER
-extern void notrace
+extern void
wakeup_sched_switch(struct task_struct *prev, struct task_struct *next);
#else
static inline void
@@ -16,7 +16,7 @@
#include "trace.h"
-static notrace void function_reset(struct trace_array *tr)
+static void function_reset(struct trace_array *tr)
{
int cpu;
@@ -26,30 +26,30 @@ static notrace void function_reset(struct trace_array *tr)
tracing_reset(tr->data[cpu]);
}
-static notrace void start_function_trace(struct trace_array *tr)
+static void start_function_trace(struct trace_array *tr)
{
function_reset(tr);
tracing_start_function_trace();
}
-static notrace void stop_function_trace(struct trace_array *tr)
+static void stop_function_trace(struct trace_array *tr)
{
tracing_stop_function_trace();
}
-static notrace void function_trace_init(struct trace_array *tr)
+static void function_trace_init(struct trace_array *tr)
{
if (tr->ctrl)
start_function_trace(tr);
}
-static notrace void function_trace_reset(struct trace_array *tr)
+static void function_trace_reset(struct trace_array *tr)
{
if (tr->ctrl)
stop_function_trace(tr);
}
-static notrace void function_trace_ctrl_update(struct trace_array *tr)
+static void function_trace_ctrl_update(struct trace_array *tr)
{
if (tr->ctrl)
start_function_trace(tr);
@@ -33,7 +33,7 @@ enum {
static int trace_type __read_mostly;
#ifdef CONFIG_PREEMPT_TRACER
-static inline int notrace
+static inline int
preempt_trace(void)
{
return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
@@ -43,7 +43,7 @@ preempt_trace(void)
#endif
#ifdef CONFIG_IRQSOFF_TRACER
-static inline int notrace
+static inline int
irq_trace(void)
{
return ((trace_type & TRACER_IRQS_OFF) &&
@@ -67,7 +67,7 @@ static __cacheline_aligned_in_smp unsigned long max_sequence;
/*
* irqsoff uses its own tracer function to keep the overhead down:
*/
-static void notrace
+static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
struct trace_array *tr = irqsoff_trace;
@@ -109,7 +109,7 @@ static struct ftrace_ops trace_ops __read_mostly =
/*
* Should this new latency be reported/recorded?
*/
-static int notrace report_latency(cycle_t delta)
+static int report_latency(cycle_t delta)
{
if (tracing_thresh) {
if (delta < tracing_thresh)
@@ -121,7 +121,7 @@ static int notrace report_latency(cycle_t delta)
return 1;
}
-static void notrace
+static void
check_critical_timing(struct trace_array *tr,
struct trace_array_cpu *data,
unsigned long parent_ip,
@@ -191,7 +191,7 @@ out:
trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
}
-static inline void notrace
+static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
int cpu;
@@ -228,7 +228,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
atomic_dec(&data->disabled);
}
-static inline void notrace
+static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
int cpu;
@@ -261,13 +261,13 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
}
/* start and stop critical timings used to for stoppage (in idle) */
-void notrace start_critical_timings(void)
+void start_critical_timings(void)
{
if (preempt_trace() || irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
-void notrace stop_critical_timings(void)
+void stop_critical_timings(void)
{
if (preempt_trace() || irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
@@ -275,13 +275,13 @@ void notrace stop_critical_timings(void)
#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
-void notrace time_hardirqs_on(unsigned long a0, unsigned long a1)
+void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
if (!preempt_trace() && irq_trace())
stop_critical_timing(a0, a1);
}
-void notrace time_hardirqs_off(unsigned long a0, unsigned long a1)
+void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(a0, a1);
@@ -309,35 +309,35 @@ void trace_softirqs_off(unsigned long ip)
{
}
-inline notrace void print_irqtrace_events(struct task_struct *curr)
+inline void print_irqtrace_events(struct task_struct *curr)
{
}
/*
* We are only interested in hardirq on/off events:
*/
-void notrace trace_hardirqs_on(void)
+void trace_hardirqs_on(void)
{
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);
-void notrace trace_hardirqs_off(void)
+void trace_hardirqs_off(void)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);
-void notrace trace_hardirqs_on_caller(unsigned long caller_addr)
+void trace_hardirqs_on_caller(unsigned long caller_addr)
{
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);
-void notrace trace_hardirqs_off_caller(unsigned long caller_addr)
+void trace_hardirqs_off_caller(unsigned long caller_addr)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, caller_addr);
@@ -348,12 +348,12 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
#endif /* CONFIG_IRQSOFF_TRACER */
#ifdef CONFIG_PREEMPT_TRACER
-void notrace trace_preempt_on(unsigned long a0, unsigned long a1)
+void trace_preempt_on(unsigned long a0, unsigned long a1)
{
stop_critical_timing(a0, a1);
}
-void notrace trace_preempt_off(unsigned long a0, unsigned long a1)
+void trace_preempt_off(unsigned long a0, unsigned long a1)
{
start_critical_timing(a0, a1);
}
@@ -395,14 +395,14 @@ static void irqsoff_tracer_ctrl_update(struct trace_array *tr)
stop_irqsoff_tracer(tr);
}
-static void notrace irqsoff_tracer_open(struct trace_iterator *iter)
+static void irqsoff_tracer_open(struct trace_iterator *iter)
{
/* stop the trace while dumping */
if (iter->tr->ctrl)
stop_irqsoff_tracer(iter->tr);
}
-static void notrace irqsoff_tracer_close(struct trace_iterator *iter)
+static void irqsoff_tracer_close(struct trace_iterator *iter)
{
if (iter->tr->ctrl)
start_irqsoff_tracer(iter->tr);
@@ -17,7 +17,7 @@
static struct trace_array *ctx_trace;
static int __read_mostly tracer_enabled;
-static void notrace
+static void
ctx_switch_func(struct task_struct *prev, struct task_struct *next)
{
struct trace_array *tr = ctx_trace;
@@ -57,7 +57,7 @@ void ftrace_ctx_switch(struct task_struct *prev, struct task_struct *next)
wakeup_sched_switch(prev, next);
}
-static notrace void sched_switch_reset(struct trace_array *tr)
+static void sched_switch_reset(struct trace_array *tr)
{
int cpu;
@@ -67,18 +67,18 @@ static notrace void sched_switch_reset(struct trace_array *tr)
tracing_reset(tr->data[cpu]);
}
-static notrace void start_sched_trace(struct trace_array *tr)
+static void start_sched_trace(struct trace_array *tr)
{
sched_switch_reset(tr);
tracer_enabled = 1;
}
-static notrace void stop_sched_trace(struct trace_array *tr)
+static void stop_sched_trace(struct trace_array *tr)
{
tracer_enabled = 0;
}
-static notrace void sched_switch_trace_init(struct trace_array *tr)
+static void sched_switch_trace_init(struct trace_array *tr)
{
ctx_trace = tr;
@@ -86,7 +86,7 @@ static notrace void sched_switch_trace_init(struct trace_array *tr)
start_sched_trace(tr);
}
-static notrace void sched_switch_trace_reset(struct trace_array *tr)
+static void sched_switch_trace_reset(struct trace_array *tr)
{
if (tr->ctrl)
stop_sched_trace(tr);
@@ -27,12 +27,12 @@ static unsigned wakeup_prio = -1;
static DEFINE_SPINLOCK(wakeup_lock);
-static void notrace __wakeup_reset(struct trace_array *tr);
+static void __wakeup_reset(struct trace_array *tr);
/*
* Should this new latency be reported/recorded?
*/
-static int notrace report_latency(cycle_t delta)
+static int report_latency(cycle_t delta)
{
if (tracing_thresh) {
if (delta < tracing_thresh)
@@ -44,7 +44,7 @@ static int notrace report_latency(cycle_t delta)
return 1;
}
-void notrace
+void
wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
{
unsigned long latency = 0, t0 = 0, t1 = 0;
@@ -126,7 +126,7 @@ out:
atomic_dec(&tr->data[cpu]->disabled);
}
-static void notrace __wakeup_reset(struct trace_array *tr)
+static void __wakeup_reset(struct trace_array *tr)
{
struct trace_array_cpu *data;
int cpu;
@@ -147,7 +147,7 @@ static void notrace __wakeup_reset(struct trace_array *tr)
wakeup_task = NULL;
}
-static void notrace wakeup_reset(struct trace_array *tr)
+static void wakeup_reset(struct trace_array *tr)
{
unsigned long flags;
@@ -156,7 +156,7 @@ static void notrace wakeup_reset(struct trace_array *tr)
spin_unlock_irqrestore(&wakeup_lock, flags);
}
-static notrace void
+static void
wakeup_check_start(struct trace_array *tr, struct task_struct *p,
struct task_struct *curr)
{
@@ -201,7 +201,7 @@ out:
atomic_dec(&tr->data[cpu]->disabled);
}
-notrace void
+void
ftrace_wake_up_task(struct task_struct *wakee, struct task_struct *curr)
{
if (likely(!tracer_enabled))
@@ -210,7 +210,7 @@ ftrace_wake_up_task(struct task_struct *wakee, struct task_struct *curr)
wakeup_check_start(wakeup_trace, wakee, curr);
}
-notrace void
+void
ftrace_wake_up_new_task(struct task_struct *wakee, struct task_struct *curr)
{
if (likely(!tracer_enabled))
@@ -219,7 +219,7 @@ ftrace_wake_up_new_task(struct task_struct *wakee, struct task_struct *curr)
wakeup_check_start(wakeup_trace, wakee, curr);
}
-static notrace void start_wakeup_tracer(struct trace_array *tr)
+static void start_wakeup_tracer(struct trace_array *tr)
{
wakeup_reset(tr);
@@ -237,12 +237,12 @@ static notrace void start_wakeup_tracer(struct trace_array *tr)
return;
}
-static notrace void stop_wakeup_tracer(struct trace_array *tr)
+static void stop_wakeup_tracer(struct trace_array *tr)
{
tracer_enabled = 0;
}
-static notrace void wakeup_tracer_init(struct trace_array *tr)
+static void wakeup_tracer_init(struct trace_array *tr)
{
wakeup_trace = tr;
@@ -250,7 +250,7 @@ static notrace void wakeup_tracer_init(struct trace_array *tr)
start_wakeup_tracer(tr);
}
-static notrace void wakeup_tracer_reset(struct trace_array *tr)
+static void wakeup_tracer_reset(struct trace_array *tr)
{
if (tr->ctrl) {
stop_wakeup_tracer(tr);
@@ -267,14 +267,14 @@ static void wakeup_tracer_ctrl_update(struct trace_array *tr)
stop_wakeup_tracer(tr);
}
-static void notrace wakeup_tracer_open(struct trace_iterator *iter)
+static void wakeup_tracer_open(struct trace_iterator *iter)
{
/* stop the trace while dumping */
if (iter->tr->ctrl)
stop_wakeup_tracer(iter->tr);
}
-static void notrace wakeup_tracer_close(struct trace_iterator *iter)
+static void wakeup_tracer_close(struct trace_iterator *iter)
{
/* forget about any processes we were recording */
if (iter->tr->ctrl)
@@ -3,7 +3,7 @@
#include <linux/kthread.h>
#include <linux/delay.h>
-static notrace inline int trace_valid_entry(struct trace_entry *entry)
+static inline int trace_valid_entry(struct trace_entry *entry)
{
switch (entry->type) {
case TRACE_FN: