Commit be71018d authored by Thomas Gleixner

sched: convert rt_runtime_lock to atomic_spin_lock

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent a3f22fd7
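
Editor's note on the pattern: in this tree, spinlock_t may be substituted by a sleeping lock under PREEMPT_RT. rt_runtime_lock nests inside the rq lock and therefore has to stay a true busy-waiting spinlock, so its type becomes atomic_spinlock_t and every locking call moves to the matching atomic_spin_*() variant. Below is a minimal, hypothetical sketch of the same before/after conversion; struct my_counter and its helpers are invented for illustration, only the atomic_spin_*() API comes from this patch series.

#include <linux/spinlock.h>
#include <linux/types.h>

/* Before: spinlock_t, which PREEMPT_RT may turn into a sleeping lock. */
struct my_counter {
	spinlock_t	lock;
	u64		val;
};

/* After: atomic_spinlock_t keeps busy-waiting even on PREEMPT_RT. */
struct my_counter_rt {
	atomic_spinlock_t	lock;
	u64			val;
};

static void my_counter_init(struct my_counter_rt *c)
{
	atomic_spin_lock_init(&c->lock);	/* was: spin_lock_init() */
	c->val = 0;
}

static void my_counter_add(struct my_counter_rt *c, u64 delta)
{
	atomic_spin_lock(&c->lock);		/* was: spin_lock() */
	c->val += delta;
	atomic_spin_unlock(&c->lock);		/* was: spin_unlock() */
}

The callers' locking rules are unchanged; only the lock type and the function prefix differ, which is why the diff below is almost entirely mechanical.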
@@ -166,7 +166,7 @@ struct rt_prio_array {
 
 struct rt_bandwidth {
 	/* nests inside the rq lock: */
-	spinlock_t		rt_runtime_lock;
+	atomic_spinlock_t	rt_runtime_lock;
 	ktime_t			rt_period;
 	u64			rt_runtime;
 	struct hrtimer		rt_period_timer;
@@ -203,7 +203,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
 	rt_b->rt_period = ns_to_ktime(period);
 	rt_b->rt_runtime = runtime;
 
-	spin_lock_init(&rt_b->rt_runtime_lock);
+	atomic_spin_lock_init(&rt_b->rt_runtime_lock);
 
 	hrtimer_init(&rt_b->rt_period_timer,
 			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -225,7 +225,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 	if (hrtimer_active(&rt_b->rt_period_timer))
 		return;
 
-	spin_lock(&rt_b->rt_runtime_lock);
+	atomic_spin_lock(&rt_b->rt_runtime_lock);
 	for (;;) {
 		unsigned long delta;
 		ktime_t soft, hard;
@@ -242,7 +242,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 		__hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
 				HRTIMER_MODE_ABS_PINNED, 0);
 	}
-	spin_unlock(&rt_b->rt_runtime_lock);
+	atomic_spin_unlock(&rt_b->rt_runtime_lock);
 }
 
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -501,7 +501,7 @@ struct rt_rq {
 	u64 rt_time;
 	u64 rt_runtime;
 	/* Nests inside the rq lock: */
-	spinlock_t rt_runtime_lock;
+	atomic_spinlock_t rt_runtime_lock;
 
 #ifdef CONFIG_RT_GROUP_SCHED
 	unsigned long rt_nr_boosted;
@@ -9106,7 +9106,7 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 	rt_rq->rt_time = 0;
 	rt_rq->rt_throttled = 0;
 	rt_rq->rt_runtime = 0;
-	spin_lock_init(&rt_rq->rt_runtime_lock);
+	atomic_spin_lock_init(&rt_rq->rt_runtime_lock);
 
 #ifdef CONFIG_RT_GROUP_SCHED
 	rt_rq->rt_nr_boosted = 0;
@@ -10029,18 +10029,18 @@ static int tg_set_bandwidth(struct task_group *tg,
 	if (err)
 		goto unlock;
 
-	spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+	atomic_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
 	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
 	tg->rt_bandwidth.rt_runtime = rt_runtime;
 
 	for_each_possible_cpu(i) {
 		struct rt_rq *rt_rq = tg->rt_rq[i];
 
-		spin_lock(&rt_rq->rt_runtime_lock);
+		atomic_spin_lock(&rt_rq->rt_runtime_lock);
 		rt_rq->rt_runtime = rt_runtime;
-		spin_unlock(&rt_rq->rt_runtime_lock);
+		atomic_spin_unlock(&rt_rq->rt_runtime_lock);
 	}
-	spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+	atomic_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
 unlock:
 	read_unlock(&tasklist_lock);
 	mutex_unlock(&rt_constraints_mutex);
@@ -10145,7 +10145,7 @@ static int sched_rt_global_constraints(void)
 	if (sysctl_sched_rt_runtime == 0)
 		return -EBUSY;
 
-	spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
+	atomic_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
 	for_each_possible_cpu(i) {
 		struct rt_rq *rt_rq = &cpu_rq(i)->rt;
 
@@ -10153,7 +10153,7 @@ static int sched_rt_global_constraints(void)
 		rt_rq->rt_runtime = global_rt_runtime();
 		spin_unlock(&rt_rq->rt_runtime_lock);
 	}
-	spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
+	atomic_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
 
 	return 0;
 }
...
@@ -314,7 +314,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 	weight = cpumask_weight(rd->span);
 
-	spin_lock(&rt_b->rt_runtime_lock);
+	atomic_spin_lock(&rt_b->rt_runtime_lock);
 	rt_period = ktime_to_ns(rt_b->rt_period);
 	for_each_cpu(i, rd->span) {
 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
@@ -323,7 +323,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 		if (iter == rt_rq)
 			continue;
 
-		spin_lock(&iter->rt_runtime_lock);
+		atomic_spin_lock(&iter->rt_runtime_lock);
 		/*
 		 * Either all rqs have inf runtime and there's nothing to steal
 		 * or __disable_runtime() below sets a specific rq to inf to
@@ -345,14 +345,14 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 				rt_rq->rt_runtime += diff;
 				more = 1;
 				if (rt_rq->rt_runtime == rt_period) {
-					spin_unlock(&iter->rt_runtime_lock);
+					atomic_spin_unlock(&iter->rt_runtime_lock);
 					break;
 				}
 			}
 next:
-		spin_unlock(&iter->rt_runtime_lock);
+		atomic_spin_unlock(&iter->rt_runtime_lock);
 	}
-	spin_unlock(&rt_b->rt_runtime_lock);
+	atomic_spin_unlock(&rt_b->rt_runtime_lock);
 
 	return more;
 }
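
Editor's locking note on do_balance_runtime() above: the group-wide rt_b->rt_runtime_lock is taken first, and each per-CPU rt_runtime_lock is then taken briefly inside it. Since preemption stays disabled while an atomic spinlock is held, any lock nested inside one must itself be non-sleeping, which is one reason both levels change type together. A minimal, hypothetical sketch of that outer-then-inner ordering (struct bw and bw_total() are invented for illustration):

#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/types.h>

struct bw {
	atomic_spinlock_t	lock;			/* outer: whole group */
	atomic_spinlock_t	cpu_lock[NR_CPUS];	/* inner: one per CPU */
	u64			cpu_runtime[NR_CPUS];
};

static u64 bw_total(struct bw *b)
{
	u64 sum = 0;
	int i;

	atomic_spin_lock(&b->lock);			/* outer lock first */
	for (i = 0; i < NR_CPUS; i++) {
		atomic_spin_lock(&b->cpu_lock[i]);	/* then inner, briefly */
		sum += b->cpu_runtime[i];
		atomic_spin_unlock(&b->cpu_lock[i]);
	}
	atomic_spin_unlock(&b->lock);
	return sum;
}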
@@ -373,8 +373,8 @@ static void __disable_runtime(struct rq *rq)
 		s64 want;
 		int i;
 
-		spin_lock(&rt_b->rt_runtime_lock);
-		spin_lock(&rt_rq->rt_runtime_lock);
+		atomic_spin_lock(&rt_b->rt_runtime_lock);
+		atomic_spin_lock(&rt_rq->rt_runtime_lock);
 		/*
 		 * Either we're all inf and nobody needs to borrow, or we're
 		 * already disabled and thus have nothing to do, or we have
@@ -383,7 +383,7 @@ static void __disable_runtime(struct rq *rq)
 		if (rt_rq->rt_runtime == RUNTIME_INF ||
 		    rt_rq->rt_runtime == rt_b->rt_runtime)
 			goto balanced;
-		spin_unlock(&rt_rq->rt_runtime_lock);
+		atomic_spin_unlock(&rt_rq->rt_runtime_lock);
 
 		/*
 		 * Calculate the difference between what we started out with
@@ -405,7 +405,7 @@ static void __disable_runtime(struct rq *rq)
 			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
 				continue;
 
-			spin_lock(&iter->rt_runtime_lock);
+			atomic_spin_lock(&iter->rt_runtime_lock);
 			if (want > 0) {
 				diff = min_t(s64, iter->rt_runtime, want);
 				iter->rt_runtime -= diff;
@@ -414,13 +414,13 @@ static void __disable_runtime(struct rq *rq)
 				iter->rt_runtime -= want;
 				want -= want;
 			}
-			spin_unlock(&iter->rt_runtime_lock);
+			atomic_spin_unlock(&iter->rt_runtime_lock);
 
 			if (!want)
 				break;
 		}
 
-		spin_lock(&rt_rq->rt_runtime_lock);
+		atomic_spin_lock(&rt_rq->rt_runtime_lock);
 		/*
 		 * We cannot be left wanting - that would mean some runtime
 		 * leaked out of the system.
@@ -432,8 +432,8 @@ balanced:
 		 * runtime - in which case borrowing doesn't make sense.
 		 */
 		rt_rq->rt_runtime = RUNTIME_INF;
-		spin_unlock(&rt_rq->rt_runtime_lock);
-		spin_unlock(&rt_b->rt_runtime_lock);
+		atomic_spin_unlock(&rt_rq->rt_runtime_lock);
+		atomic_spin_unlock(&rt_b->rt_runtime_lock);
 	}
 }
@@ -459,13 +459,13 @@ static void __enable_runtime(struct rq *rq)
 	for_each_leaf_rt_rq(rt_rq, rq) {
 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 
-		spin_lock(&rt_b->rt_runtime_lock);
-		spin_lock(&rt_rq->rt_runtime_lock);
+		atomic_spin_lock(&rt_b->rt_runtime_lock);
+		atomic_spin_lock(&rt_rq->rt_runtime_lock);
 		rt_rq->rt_runtime = rt_b->rt_runtime;
 		rt_rq->rt_time = 0;
 		rt_rq->rt_throttled = 0;
-		spin_unlock(&rt_rq->rt_runtime_lock);
-		spin_unlock(&rt_b->rt_runtime_lock);
+		atomic_spin_unlock(&rt_rq->rt_runtime_lock);
+		atomic_spin_unlock(&rt_b->rt_runtime_lock);
 	}
 }
@@ -483,9 +483,9 @@ static int balance_runtime(struct rt_rq *rt_rq)
 	int more = 0;
 
 	if (rt_rq->rt_time > rt_rq->rt_runtime) {
-		spin_unlock(&rt_rq->rt_runtime_lock);
+		atomic_spin_unlock(&rt_rq->rt_runtime_lock);
 		more = do_balance_runtime(rt_rq);
-		spin_lock(&rt_rq->rt_runtime_lock);
+		atomic_spin_lock(&rt_rq->rt_runtime_lock);
 	}
 
 	return more;
@@ -515,7 +515,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 		if (rt_rq->rt_time) {
 			u64 runtime;
 
-			spin_lock(&rt_rq->rt_runtime_lock);
+			atomic_spin_lock(&rt_rq->rt_runtime_lock);
 			if (rt_rq->rt_throttled)
 				balance_runtime(rt_rq);
 			runtime = rt_rq->rt_runtime;
@@ -526,7 +526,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			}
 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
 				idle = 0;
-			spin_unlock(&rt_rq->rt_runtime_lock);
+			atomic_spin_unlock(&rt_rq->rt_runtime_lock);
 		} else if (rt_rq->rt_nr_running)
 			idle = 0;
@@ -609,11 +609,11 @@ static void update_curr_rt(struct rq *rq)
 	rt_rq = rt_rq_of_se(rt_se);
 
 	if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
-		spin_lock(&rt_rq->rt_runtime_lock);
+		atomic_spin_lock(&rt_rq->rt_runtime_lock);
 		rt_rq->rt_time += delta_exec;
 		if (sched_rt_runtime_exceeded(rt_rq))
 			resched_task(curr);
-		spin_unlock(&rt_rq->rt_runtime_lock);
+		atomic_spin_unlock(&rt_rq->rt_runtime_lock);
 	}
 }
...