linux / linux-davinci

Commit be71018d, authored Jul 25, 2009 by Thomas Gleixner
sched: convert rt_runtime_lock to atomic_spin_lock

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

parent a3f22fd7
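The commit replaces every use of spinlock_t for rt_runtime_lock, along with the matching spin_lock*/spin_unlock* call sites, with the preempt-rt atomic_spinlock API so the lock keeps true busy-waiting semantics where ordinary spinlocks become sleepable. A minimal sketch of the pattern follows, using only the atomic_spin_* calls that appear in the hunks below; the struct and function names in the sketch are illustrative and not taken from the commit:

/* Illustrative only: mirrors the conversion pattern applied in the hunks below. */
struct example_bandwidth {
        atomic_spinlock_t       lock;           /* was: spinlock_t */
        u64                     runtime;
};

static void example_init(struct example_bandwidth *bw, u64 runtime)
{
        bw->runtime = runtime;
        atomic_spin_lock_init(&bw->lock);       /* was: spin_lock_init() */
}

static void example_charge(struct example_bandwidth *bw, u64 delta)
{
        unsigned long flags;

        /* was: spin_lock_irqsave() / spin_unlock_irqrestore() */
        atomic_spin_lock_irqsave(&bw->lock, flags);
        bw->runtime += delta;
        atomic_spin_unlock_irqrestore(&bw->lock, flags);
}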
Showing 2 changed files with 35 additions and 35 deletions:

kernel/sched.c     +12 -12
kernel/sched_rt.c  +23 -23
kernel/sched.c  (view file @ be71018d)

@@ -166,7 +166,7 @@ struct rt_prio_array {
 struct rt_bandwidth {
         /* nests inside the rq lock: */
-        spinlock_t              rt_runtime_lock;
+        atomic_spinlock_t       rt_runtime_lock;
         ktime_t                 rt_period;
         u64                     rt_runtime;
         struct hrtimer          rt_period_timer;
@@ -203,7 +203,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
         rt_b->rt_period = ns_to_ktime(period);
         rt_b->rt_runtime = runtime;
-        spin_lock_init(&rt_b->rt_runtime_lock);
+        atomic_spin_lock_init(&rt_b->rt_runtime_lock);
         hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -225,7 +225,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
         if (hrtimer_active(&rt_b->rt_period_timer))
                 return;
-        spin_lock(&rt_b->rt_runtime_lock);
+        atomic_spin_lock(&rt_b->rt_runtime_lock);
         for (;;) {
                 unsigned long delta;
                 ktime_t soft, hard;
@@ -242,7 +242,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
                 __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
                                 HRTIMER_MODE_ABS_PINNED, 0);
         }
-        spin_unlock(&rt_b->rt_runtime_lock);
+        atomic_spin_unlock(&rt_b->rt_runtime_lock);
 }
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -501,7 +501,7 @@ struct rt_rq {
         u64 rt_time;
         u64 rt_runtime;
         /* Nests inside the rq lock: */
-        spinlock_t rt_runtime_lock;
+        atomic_spinlock_t rt_runtime_lock;
 #ifdef CONFIG_RT_GROUP_SCHED
         unsigned long rt_nr_boosted;
@@ -9106,7 +9106,7 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
         rt_rq->rt_time = 0;
         rt_rq->rt_throttled = 0;
         rt_rq->rt_runtime = 0;
-        spin_lock_init(&rt_rq->rt_runtime_lock);
+        atomic_spin_lock_init(&rt_rq->rt_runtime_lock);
 #ifdef CONFIG_RT_GROUP_SCHED
         rt_rq->rt_nr_boosted = 0;
@@ -10029,18 +10029,18 @@ static int tg_set_bandwidth(struct task_group *tg,
         if (err)
                 goto unlock;
-        spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+        atomic_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
         tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
         tg->rt_bandwidth.rt_runtime = rt_runtime;
         for_each_possible_cpu(i) {
                 struct rt_rq *rt_rq = tg->rt_rq[i];
-                spin_lock(&rt_rq->rt_runtime_lock);
+                atomic_spin_lock(&rt_rq->rt_runtime_lock);
                 rt_rq->rt_runtime = rt_runtime;
-                spin_unlock(&rt_rq->rt_runtime_lock);
+                atomic_spin_unlock(&rt_rq->rt_runtime_lock);
         }
-        spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+        atomic_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
 unlock:
         read_unlock(&tasklist_lock);
         mutex_unlock(&rt_constraints_mutex);
@@ -10145,7 +10145,7 @@ static int sched_rt_global_constraints(void)
         if (sysctl_sched_rt_runtime == 0)
                 return -EBUSY;
-        spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
+        atomic_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
         for_each_possible_cpu(i) {
                 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
@@ -10153,7 +10153,7 @@ static int sched_rt_global_constraints(void)
                 rt_rq->rt_runtime = global_rt_runtime();
                 spin_unlock(&rt_rq->rt_runtime_lock);
         }
-        spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
+        atomic_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
         return 0;
 }
kernel/sched_rt.c  (view file @ be71018d)

@@ -314,7 +314,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
         weight = cpumask_weight(rd->span);
-        spin_lock(&rt_b->rt_runtime_lock);
+        atomic_spin_lock(&rt_b->rt_runtime_lock);
         rt_period = ktime_to_ns(rt_b->rt_period);
         for_each_cpu(i, rd->span) {
                 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
@@ -323,7 +323,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
                 if (iter == rt_rq)
                         continue;
-                spin_lock(&iter->rt_runtime_lock);
+                atomic_spin_lock(&iter->rt_runtime_lock);
                 /*
                  * Either all rqs have inf runtime and there's nothing to steal
                  * or __disable_runtime() below sets a specific rq to inf to
@@ -345,14 +345,14 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
                         rt_rq->rt_runtime += diff;
                         more = 1;
                         if (rt_rq->rt_runtime == rt_period) {
-                                spin_unlock(&iter->rt_runtime_lock);
+                                atomic_spin_unlock(&iter->rt_runtime_lock);
                                 break;
                         }
                 }
 next:
-                spin_unlock(&iter->rt_runtime_lock);
+                atomic_spin_unlock(&iter->rt_runtime_lock);
         }
-        spin_unlock(&rt_b->rt_runtime_lock);
+        atomic_spin_unlock(&rt_b->rt_runtime_lock);
         return more;
 }
@@ -373,8 +373,8 @@ static void __disable_runtime(struct rq *rq)
                 s64 want;
                 int i;
-                spin_lock(&rt_b->rt_runtime_lock);
-                spin_lock(&rt_rq->rt_runtime_lock);
+                atomic_spin_lock(&rt_b->rt_runtime_lock);
+                atomic_spin_lock(&rt_rq->rt_runtime_lock);
                 /*
                  * Either we're all inf and nobody needs to borrow, or we're
                  * already disabled and thus have nothing to do, or we have
@@ -383,7 +383,7 @@ static void __disable_runtime(struct rq *rq)
                 if (rt_rq->rt_runtime == RUNTIME_INF ||
                                 rt_rq->rt_runtime == rt_b->rt_runtime)
                         goto balanced;
-                spin_unlock(&rt_rq->rt_runtime_lock);
+                atomic_spin_unlock(&rt_rq->rt_runtime_lock);
                 /*
                  * Calculate the difference between what we started out with
@@ -405,7 +405,7 @@ static void __disable_runtime(struct rq *rq)
                         if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
                                 continue;
-                        spin_lock(&iter->rt_runtime_lock);
+                        atomic_spin_lock(&iter->rt_runtime_lock);
                         if (want > 0) {
                                 diff = min_t(s64, iter->rt_runtime, want);
                                 iter->rt_runtime -= diff;
@@ -414,13 +414,13 @@ static void __disable_runtime(struct rq *rq)
                                 iter->rt_runtime -= want;
                                 want -= want;
                         }
-                        spin_unlock(&iter->rt_runtime_lock);
+                        atomic_spin_unlock(&iter->rt_runtime_lock);
                         if (!want)
                                 break;
                 }
-                spin_lock(&rt_rq->rt_runtime_lock);
+                atomic_spin_lock(&rt_rq->rt_runtime_lock);
                 /*
                  * We cannot be left wanting - that would mean some runtime
                  * leaked out of the system.
@@ -432,8 +432,8 @@ balanced:
                  * runtime - in which case borrowing doesn't make sense.
                  */
                 rt_rq->rt_runtime = RUNTIME_INF;
-                spin_unlock(&rt_rq->rt_runtime_lock);
-                spin_unlock(&rt_b->rt_runtime_lock);
+                atomic_spin_unlock(&rt_rq->rt_runtime_lock);
+                atomic_spin_unlock(&rt_b->rt_runtime_lock);
         }
 }
@@ -459,13 +459,13 @@ static void __enable_runtime(struct rq *rq)
         for_each_leaf_rt_rq(rt_rq, rq) {
                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
-                spin_lock(&rt_b->rt_runtime_lock);
-                spin_lock(&rt_rq->rt_runtime_lock);
+                atomic_spin_lock(&rt_b->rt_runtime_lock);
+                atomic_spin_lock(&rt_rq->rt_runtime_lock);
                 rt_rq->rt_runtime = rt_b->rt_runtime;
                 rt_rq->rt_time = 0;
                 rt_rq->rt_throttled = 0;
-                spin_unlock(&rt_rq->rt_runtime_lock);
-                spin_unlock(&rt_b->rt_runtime_lock);
+                atomic_spin_unlock(&rt_rq->rt_runtime_lock);
+                atomic_spin_unlock(&rt_b->rt_runtime_lock);
         }
 }
@@ -483,9 +483,9 @@ static int balance_runtime(struct rt_rq *rt_rq)
         int more = 0;
         if (rt_rq->rt_time > rt_rq->rt_runtime) {
-                spin_unlock(&rt_rq->rt_runtime_lock);
+                atomic_spin_unlock(&rt_rq->rt_runtime_lock);
                 more = do_balance_runtime(rt_rq);
-                spin_lock(&rt_rq->rt_runtime_lock);
+                atomic_spin_lock(&rt_rq->rt_runtime_lock);
         }
         return more;
@@ -515,7 +515,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
                 if (rt_rq->rt_time) {
                         u64 runtime;
-                        spin_lock(&rt_rq->rt_runtime_lock);
+                        atomic_spin_lock(&rt_rq->rt_runtime_lock);
                         if (rt_rq->rt_throttled)
                                 balance_runtime(rt_rq);
                         runtime = rt_rq->rt_runtime;
@@ -526,7 +526,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
                         }
                         if (rt_rq->rt_time || rt_rq->rt_nr_running)
                                 idle = 0;
-                        spin_unlock(&rt_rq->rt_runtime_lock);
+                        atomic_spin_unlock(&rt_rq->rt_runtime_lock);
                 } else if (rt_rq->rt_nr_running)
                         idle = 0;
@@ -609,11 +609,11 @@ static void update_curr_rt(struct rq *rq)
                 rt_rq = rt_rq_of_se(rt_se);
                 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
-                        spin_lock(&rt_rq->rt_runtime_lock);
+                        atomic_spin_lock(&rt_rq->rt_runtime_lock);
                         rt_rq->rt_time += delta_exec;
                         if (sched_rt_runtime_exceeded(rt_rq))
                                 resched_task(curr);
-                        spin_unlock(&rt_rq->rt_runtime_lock);
+                        atomic_spin_unlock(&rt_rq->rt_runtime_lock);
                 }
         }
 }
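As the in-code comments note, rt_runtime_lock nests inside the rq lock, and the __disable_runtime()/__enable_runtime() hunks above take the per-bandwidth lock before the per-rt_rq lock, releasing them in reverse order. A short sketch of that ordering, which the conversion leaves unchanged; the field and struct names come from the hunks above, while the wrapper function itself is illustrative only:

/*
 * Illustrative only: the nesting order visible in the hunks above.
 *
 *   rq->lock  (per the "nests inside the rq lock" comments)
 *     rt_b->rt_runtime_lock
 *       rt_rq->rt_runtime_lock
 */
static void example_nesting(struct rt_bandwidth *rt_b, struct rt_rq *rt_rq)
{
        atomic_spin_lock(&rt_b->rt_runtime_lock);
        atomic_spin_lock(&rt_rq->rt_runtime_lock);

        /* ... adjust rt_rq->rt_runtime while holding both locks ... */

        atomic_spin_unlock(&rt_rq->rt_runtime_lock);
        atomic_spin_unlock(&rt_b->rt_runtime_lock);
}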