linux / linux-davinci · Commits

Commit 7fda20f1, authored Feb 29, 2008 by Ingo Molnar

x86: spinlock ops are always-inlined

Signed-off-by: Ingo Molnar <mingo@elte.hu>

parent d93c870b
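For readers outside the kernel tree: __always_inline is the kernel's wrapper around GCC's always_inline attribute, which forces the compiler to inline these hot-path lock operations even when its heuristics (or -Os) would otherwise emit out-of-line copies. A minimal sketch of the idea, assuming a GCC-compatible compiler; the macro definition shown matches the one in include/linux/compiler-gcc.h of this era, and demo_lock_op is a hypothetical stand-in, not kernel code:

/* The kernel's definition (include/linux/compiler-gcc.h, circa 2.6.25): */
#define __always_inline	inline __attribute__((always_inline))

/*
 * demo_lock_op is a hypothetical example: with plain "inline", gcc may
 * still decide to emit a real out-of-line function; with __always_inline
 * it must inline the body at every call site.
 */
static __always_inline int demo_lock_op(int x)
{
	return x + 1;
}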
Showing 1 changed file with 6 additions and 6 deletions
include/asm-x86/spinlock.h
@@ -78,7 +78,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
 	return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
 }

-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	short inc = 0x0100;

@@ -99,7 +99,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)

 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	int tmp;
 	short new;
@@ -120,7 +120,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }

-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
 		     : "+m" (lock->slock)
@@ -142,7 +142,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
 	return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
 }

-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	int inc = 0x00010000;
 	int tmp;
@@ -165,7 +165,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)

 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	int tmp;
 	int new;
@@ -187,7 +187,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	return tmp;
 }

-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
 		     : "+m" (lock->slock)
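Context for the hunks above: these are x86 ticket spinlocks. In the 8-bit variant the lock word packs the next ticket in the high byte and the current owner's ticket in the low byte, which is why the contention check compares (tmp >> 8) & 0xff against tmp & 0xff; the 16-bit variant does the same with halfwords. A minimal user-space sketch of that check, with illustrative names rather than kernel API:

#include <stdio.h>

/* Mirror of the 8-bit contention test in the diff's context lines:
 * (next - owner) > 1 means at least one CPU besides the holder is
 * waiting on a ticket. */
static int ticket8_is_contended(unsigned short slock)
{
	return ((((int)slock >> 8) & 0xff) - ((int)slock & 0xff)) > 1;
}

int main(void)
{
	/* next=2, owner=0: holder plus one waiter -> contended (1) */
	printf("%d\n", ticket8_is_contended(0x0200));
	/* next=1, owner=0: held but nobody waiting -> not contended (0) */
	printf("%d\n", ticket8_is_contended(0x0100));
	return 0;
}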
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment