Commit 54852508 authored by Thomas Gleixner

timer_stats: Convert to atomic_spin_lock

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 3a6e55cb
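
In the preempt-rt tree this commit belongs to, a plain spinlock_t can become a sleeping lock, so locks that must keep genuine spin-and-disable-interrupts semantics are moved to the atomic_spinlock_t type (the ancestor of mainline's raw_spinlock_t). The sketch below shows the conversion pattern the diff applies throughout; the lock and function names are illustrative, and the atomic_spin_* API shape is assumed to mirror the classic spin_* API, as the substitutions in the diff suggest.

/*
 * Minimal sketch of the conversion pattern (illustrative names;
 * atomic_spin_* assumed to mirror the spin_* API, as the diff shows).
 */
static DEFINE_ATOMIC_SPINLOCK(demo_lock);	/* was: DEFINE_SPINLOCK(demo_lock) */

static void demo_critical_section(void)
{
	unsigned long flags;

	/* Truly spins with IRQs off, even where a spinlock_t may sleep: */
	atomic_spin_lock_irqsave(&demo_lock, flags);
	/* ... critical section ... */
	atomic_spin_unlock_irqrestore(&demo_lock, flags);
}
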
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -81,12 +81,12 @@ struct entry {
 /*
  * Spinlock protecting the tables - not taken during lookup:
  */
-static DEFINE_SPINLOCK(table_lock);
+static DEFINE_ATOMIC_SPINLOCK(table_lock);
 
 /*
  * Per-CPU lookup locks for fast hash lookup:
  */
-static DEFINE_PER_CPU(spinlock_t, lookup_lock);
+static DEFINE_PER_CPU(atomic_spinlock_t, lookup_lock);
 
 /*
  * Mutex to serialize state changes with show-stats activities:
@@ -188,7 +188,7 @@ static struct entry *tstat_lookup(struct entry *entry, char *comm)
 	prev = NULL;
 	curr = *head;
 
-	spin_lock(&table_lock);
+	atomic_spin_lock(&table_lock);
 	/*
 	 * Make sure we have not raced with another CPU:
 	 */
@@ -215,7 +215,7 @@ static struct entry *tstat_lookup(struct entry *entry, char *comm)
 			*head = curr;
 	}
  out_unlock:
-	spin_unlock(&table_lock);
+	atomic_spin_unlock(&table_lock);
 
 	return curr;
 }
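
The two hunks above keep tstat_lookup()'s scheme intact: the hash walk itself runs without table_lock, and the lock is only taken on the insertion slow path, where the list is re-scanned under the lock in case another CPU inserted the same entry first. A condensed sketch of that check-lock-recheck pattern follows; find_entry() and alloc_entry() are hypothetical stand-ins, while curr, head, input and table_lock are as in the diff.

/*
 * Sketch of the insertion slow path guarded by table_lock
 * (find_entry()/alloc_entry() are hypothetical helpers).
 */
atomic_spin_lock(&table_lock);
/* Re-scan: another CPU may have inserted the entry between our
 * lockless lookup and taking table_lock. */
curr = find_entry(head, &input);
if (!curr) {
	curr = alloc_entry(&input);
	if (curr) {
		curr->next = *head;
		smp_mb();	/* initialize entry before publishing it */
		*head = curr;
	}
}
atomic_spin_unlock(&table_lock);
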
@@ -238,7 +238,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
 	/*
 	 * It doesnt matter which lock we take:
 	 */
-	spinlock_t *lock;
+	atomic_spinlock_t *lock;
 	struct entry *entry, input;
 	unsigned long flags;
 
@@ -253,7 +253,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
 	input.pid = pid;
 	input.timer_flag = timer_flag;
 
-	spin_lock_irqsave(lock, flags);
+	atomic_spin_lock_irqsave(lock, flags);
 	if (!timer_stats_active)
 		goto out_unlock;
 
@@ -264,7 +264,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
 		atomic_inc(&overflow_count);
 
  out_unlock:
-	spin_unlock_irqrestore(lock, flags);
+	atomic_spin_unlock_irqrestore(lock, flags);
 }
 
 static void print_name_offset(struct seq_file *m, unsigned long addr)
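
In timer_stats_update_stats() above, any CPU's lookup_lock will do ("It doesnt matter which lock we take") because the lock only needs to be visible to sync_access(), which cycles through all of them. The line that actually picks the lock is elided from the diff; the sketch below shows a plausible form for this file, with the exact accessor an assumption.

/* Sketch: pick the current CPU's lock (exact accessor assumed,
 * since the diff elides this line). */
lock = &per_cpu(lookup_lock, raw_smp_processor_id());

atomic_spin_lock_irqsave(lock, flags);
/* ... account the timer event in the hash table ... */
atomic_spin_unlock_irqrestore(lock, flags);
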
@@ -348,9 +348,9 @@ static void sync_access(void)
 	int cpu;
 
 	for_each_online_cpu(cpu) {
-		spin_lock_irqsave(&per_cpu(lookup_lock, cpu), flags);
+		atomic_spin_lock_irqsave(&per_cpu(lookup_lock, cpu), flags);
 		/* nothing */
-		spin_unlock_irqrestore(&per_cpu(lookup_lock, cpu), flags);
+		atomic_spin_unlock_irqrestore(&per_cpu(lookup_lock, cpu), flags);
 	}
 }
 
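
sync_access() above is a lock-cycling barrier: acquiring and immediately releasing every online CPU's lookup_lock guarantees that any updater that held one of those locks when the state flag changed has since left its critical section. A sketch of the intended calling pattern on the stop path; the surrounding code is illustrative and not part of this diff.

/* Sketch: quiescing updaters before touching the tables. */
timer_stats_active = 0;	/* new updates bail out early */
sync_access();		/* wait out CPUs already under a lookup_lock */
/* the tables can now be read or reset safely */
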
@@ -408,7 +408,7 @@ void __init init_timer_stats(void)
 	int cpu;
 
 	for_each_possible_cpu(cpu)
-		spin_lock_init(&per_cpu(lookup_lock, cpu));
+		atomic_spin_lock_init(&per_cpu(lookup_lock, cpu));
 }
 
 static int __init init_tstats_procfs(void)