Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
L
linux-davinci
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Redmine
Redmine
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Operations
Operations
Metrics
Environments
Analytics
Analytics
CI / CD
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
linux
linux-davinci
Commits
0c95fbb2
Commit
0c95fbb2
authored
Nov 10, 2005
by
Paul Mackerras
Browse files
Options
Browse Files
Download
Plain Diff
Merge
git://oak/home/sfr/kernels/iseries/work
parents
49b09853
06a98dba
Changes
3
Hide whitespace changes
Inline
Side-by-side
Showing
3 changed files
with
181 additions
and
4 deletions
+181
-4
arch/powerpc/kernel/prom.c
arch/powerpc/kernel/prom.c
+3
-3
arch/powerpc/platforms/iseries/setup.c
arch/powerpc/platforms/iseries/setup.c
+0
-1
include/asm-powerpc/atomic.h
include/asm-powerpc/atomic.h
+178
-0
No files found.
arch/powerpc/kernel/prom.c
View file @
0c95fbb2
...
...
@@ -1080,9 +1080,9 @@ void __init unflatten_device_tree(void)
static
int
__init
early_init_dt_scan_cpus
(
unsigned
long
node
,
const
char
*
uname
,
int
depth
,
void
*
data
)
{
char
*
type
=
of_get_flat_dt_prop
(
node
,
"device_type"
,
NULL
);
u32
*
prop
;
unsigned
long
size
=
0
;
unsigned
long
size
;
char
*
type
=
of_get_flat_dt_prop
(
node
,
"device_type"
,
&
size
);
/* We are scanning "cpu" nodes only */
if
(
type
==
NULL
||
strcmp
(
type
,
"cpu"
)
!=
0
)
...
...
@@ -1108,7 +1108,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
#ifdef CONFIG_ALTIVEC
/* Check if we have a VMX and eventually update CPU features */
prop
=
(
u32
*
)
of_get_flat_dt_prop
(
node
,
"ibm,vmx"
,
&
size
);
prop
=
(
u32
*
)
of_get_flat_dt_prop
(
node
,
"ibm,vmx"
,
NULL
);
if
(
prop
&&
(
*
prop
)
>
0
)
{
cur_cpu_spec
->
cpu_features
|=
CPU_FTR_ALTIVEC
;
cur_cpu_spec
->
cpu_user_features
|=
PPC_FEATURE_HAS_ALTIVEC
;
...
...
arch/powerpc/platforms/iseries/setup.c
View file @
0c95fbb2
...
...
@@ -704,7 +704,6 @@ static void iseries_shared_idle(void)
static
void
iseries_dedicated_idle
(
void
)
{
long
oldval
;
set_thread_flag
(
TIF_POLLING_NRFLAG
);
while
(
1
)
{
...
...
include/asm-powerpc/atomic.h
View file @
0c95fbb2
...
...
@@ -197,5 +197,183 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
#ifdef __powerpc64__
typedef
struct
{
volatile
long
counter
;
}
atomic64_t
;
#define ATOMIC64_INIT(i) { (i) }
#define atomic64_read(v) ((v)->counter)
#define atomic64_set(v,i) (((v)->counter) = (i))
/*
 * atomic64_add - atomically add @a to @v->counter.
 *
 * Implemented as a ldarx/stdcx. (load-reserve / store-conditional)
 * loop: if another CPU touches the reservation granule between the
 * load and the store, stdcx. fails and bne- retries from label 1.
 * No ordering barrier is implied here, unlike the *_return variants
 * below which wrap the loop in EIEIO_ON_SMP/ISYNC_ON_SMP.
 */
static __inline__ void atomic64_add(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%3		# atomic64_add\n\
	add	%0,%2,%0\n\
	stdcx.	%0,0,%3\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (a), "r" (&v->counter), "m" (v->counter)
	: "cc");
}
/*
 * atomic64_add_return - atomically add @a to @v->counter and return
 * the new value.
 *
 * EIEIO_ON_SMP / ISYNC_ON_SMP are barrier macros defined earlier in
 * this header (not visible in this hunk); together with the "memory"
 * clobber they presumably give this the ordering semantics of the
 * 32-bit atomic_add_return above — confirm against those macro
 * definitions.
 */
static __inline__ long atomic64_add_return(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	ldarx	%0,0,%2		# atomic64_add_return\n\
	add	%0,%1,%0\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

/* True if @v is negative after adding @a. */
#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
/*
 * atomic64_sub - atomically subtract @a from @v->counter.
 *
 * Same ldarx/stdcx. retry loop as atomic64_add, but using
 * subf (subtract-from: %0 = %0 - %2).  No barrier implied.
 */
static __inline__ void atomic64_sub(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%3		# atomic64_sub\n\
	subf	%0,%2,%0\n\
	stdcx.	%0,0,%3\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (a), "r" (&v->counter), "m" (v->counter)
	: "cc");
}
/*
 * atomic64_sub_return - atomically subtract @a from @v->counter and
 * return the new value.
 *
 * Barrier-bracketed (EIEIO_ON_SMP / ISYNC_ON_SMP, defined elsewhere
 * in this header) like the other *_return variants.
 */
static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
	subf	%0,%1,%0\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}
/*
 * atomic64_inc - atomically increment @v->counter by 1.
 *
 * ldarx/stdcx. retry loop; the increment is done with
 * addic %0,%0,1.  No barrier implied.
 */
static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (&v->counter), "m" (v->counter)
	: "cc");
}
/*
 * atomic64_inc_return - atomically increment @v->counter by 1 and
 * return the new value.  Barrier-bracketed like the other *_return
 * variants (EIEIO_ON_SMP / ISYNC_ON_SMP defined elsewhere in this
 * header).
 */
static __inline__ long atomic64_inc_return(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	ldarx	%0,0,%1		# atomic64_inc_return\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
/*
 * atomic64_dec - atomically decrement @v->counter by 1.
 *
 * Same shape as atomic64_inc, decrementing via
 * addic %0,%0,-1.  No barrier implied.
 */
static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (&v->counter), "m" (v->counter)
	: "cc");
}
/*
 * atomic64_dec_return - atomically decrement @v->counter by 1 and
 * return the new value.  Barrier-bracketed like the other *_return
 * variants (EIEIO_ON_SMP / ISYNC_ON_SMP defined elsewhere in this
 * header).
 */
static __inline__ long atomic64_dec_return(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	ldarx	%0,0,%1		# atomic64_dec_return\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}

/* True if the counter reached exactly zero after the operation. */
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 *
 * Note the blt- 2f: if the decremented value is negative, the
 * stdcx. store is skipped entirely, so *v is left unchanged and
 * the caller sees the (negative) would-be result.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#endif
/* __powerpc64__ */
#endif
/* __KERNEL__ */
#endif
/* _ASM_POWERPC_ATOMIC_H_ */
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment