Commit 0fc7741c authored by Thomas Gleixner

proportions: Convert to atomic_spinlock

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 87047afa
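
Context: in the preempt-rt tree this commit comes from, spinlock_t can become a sleeping lock, while atomic_spinlock_t always busy-waits with interrupts disabled, so snapshot state that is presumably updated from non-sleeping contexts is moved over to the atomic variant. A minimal sketch of the conversion pattern, using only the lock identifiers that appear in the diff below (struct snapshot and snapshot_update() are hypothetical stand-ins for illustration, not kernel code):

	struct snapshot {
		unsigned long period;
		atomic_spinlock_t lock;		/* was: spinlock_t lock; */
	};

	static void snapshot_update(struct snapshot *s, unsigned long global_period)
	{
		unsigned long flags;

		/* was: spin_lock_irqsave(&s->lock, flags); */
		atomic_spin_lock_irqsave(&s->lock, flags);
		s->period = global_period;
		atomic_spin_unlock_irqrestore(&s->lock, flags);
	}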
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -58,7 +58,7 @@ struct prop_local_percpu {
 	 */
 	int shift;
 	unsigned long period;
-	spinlock_t lock;		/* protect the snapshot state */
+	atomic_spinlock_t lock;		/* protect the snapshot state */
 };
 
 int prop_local_init_percpu(struct prop_local_percpu *pl);
@@ -106,11 +106,11 @@ struct prop_local_single {
 	 */
 	unsigned long period;
 	int shift;
-	spinlock_t lock;		/* protect the snapshot state */
+	atomic_spinlock_t lock;		/* protect the snapshot state */
 };
 
 #define INIT_PROP_LOCAL_SINGLE(name)	\
-{	.lock = __SPIN_LOCK_UNLOCKED(name.lock),	\
+{	.lock = __ATOMIC_SPIN_LOCK_UNLOCKED(name.lock),	\
 }
 
 int prop_local_init_single(struct prop_local_single *pl);
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -190,7 +190,7 @@ prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift)
 
 int prop_local_init_percpu(struct prop_local_percpu *pl)
 {
-	spin_lock_init(&pl->lock);
+	atomic_spin_lock_init(&pl->lock);
 	pl->shift = 0;
 	pl->period = 0;
 	return percpu_counter_init(&pl->events, 0);
@@ -226,7 +226,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
 	if (pl->period == global_period)
 		return;
 
-	spin_lock_irqsave(&pl->lock, flags);
+	atomic_spin_lock_irqsave(&pl->lock, flags);
 	prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
 
 	/*
@@ -247,7 +247,7 @@ void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
 		percpu_counter_set(&pl->events, 0);
 
 	pl->period = global_period;
-	spin_unlock_irqrestore(&pl->lock, flags);
+	atomic_spin_unlock_irqrestore(&pl->lock, flags);
 }
 
 /*
@@ -324,7 +324,7 @@ void prop_fraction_percpu(struct prop_descriptor *pd,
 
 int prop_local_init_single(struct prop_local_single *pl)
 {
-	spin_lock_init(&pl->lock);
+	atomic_spin_lock_init(&pl->lock);
 	pl->shift = 0;
 	pl->period = 0;
 	pl->events = 0;
@@ -356,7 +356,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
 	if (pl->period == global_period)
 		return;
 
-	spin_lock_irqsave(&pl->lock, flags);
+	atomic_spin_lock_irqsave(&pl->lock, flags);
 	prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
 	/*
 	 * For each missed period, we half the local counter.
@@ -367,7 +367,7 @@ void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
 	else
 		pl->events = 0;
 	pl->period = global_period;
-	spin_unlock_irqrestore(&pl->lock, flags);
+	atomic_spin_unlock_irqrestore(&pl->lock, flags);
 }
 
 /*
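
At the source level, callers only see this change through initialization: static instances declared with INIT_PROP_LOCAL_SINGLE() now get an __ATOMIC_SPIN_LOCK_UNLOCKED lock, and dynamic users still call prop_local_init_single() or prop_local_init_percpu(), which now use atomic_spin_lock_init(). A minimal usage sketch (the variable name demo_prop is hypothetical):

	/* Static init: INIT_PROP_LOCAL_SINGLE() now expands to
	 * __ATOMIC_SPIN_LOCK_UNLOCKED, per the header change above. */
	static struct prop_local_single demo_prop =
			INIT_PROP_LOCAL_SINGLE(demo_prop);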