Commit 1326092f, authored Jul 29, 2009 by Thomas Gleixner

Merge branch 'rt/powerpc' into rt/base

Parents: a9de8b7b b4f4919b

Showing 22 changed files with 196 additions and 187 deletions (+196 -187)
arch/powerpc/include/asm/mpic.h                 +1  -1
arch/powerpc/include/asm/pmac_feature.h         +1  -1
arch/powerpc/kernel/irq.c                       +11 -11
arch/powerpc/kernel/kprobes.c                   +3  -3
arch/powerpc/kernel/pmc.c                       +5  -5
arch/powerpc/kernel/prom.c                      +16 -16
arch/powerpc/kernel/traps.c                     +9  -4
arch/powerpc/lib/locks.c                        +2  -0
arch/powerpc/mm/hash_native_64.c                +7  -7
arch/powerpc/mm/mmu_context_nohash.c            +7  -7
arch/powerpc/mm/tlb_nohash.c                    +5  -3
arch/powerpc/platforms/cell/beat_htab.c         +12 -12
arch/powerpc/platforms/cell/beat_interrupt.c    +9  -9
arch/powerpc/platforms/powermac/feature.c       +3  -3
arch/powerpc/platforms/powermac/nvram.c         +7  -7
arch/powerpc/platforms/powermac/pfunc_base.c    +12 -12
arch/powerpc/platforms/powermac/pic.c           +19 -19
arch/powerpc/platforms/pseries/eeh.c            +7  -7
arch/powerpc/sysdev/i8259.c                     +11 -11
arch/powerpc/sysdev/ipic.c                      +9  -9
arch/powerpc/sysdev/mpic.c                      +19 -19
drivers/of/base.c                               +21 -21
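Almost every hunk below follows one mechanical pattern: locks that must keep spinning on PREEMPT_RT (where a plain spinlock_t is turned into a sleeping lock) move to the RT tree's atomic_spinlock_t type. A minimal sketch of the pattern, using only the declarators and lock calls that appear in this merge; the lock name and critical section are hypothetical:

/* Before the conversion:
 *   static DEFINE_SPINLOCK(demo_hw_lock);
 *   spin_lock_irqsave(&demo_hw_lock, flags);
 */
static DEFINE_ATOMIC_SPINLOCK(demo_hw_lock);	/* always a true spinning lock */

static void demo_hw_poke(void)
{
	unsigned long flags;

	atomic_spin_lock_irqsave(&demo_hw_lock, flags);
	/* ... low-level work that must not sleep (IRQ chips, TLB, NVRAM) ... */
	atomic_spin_unlock_irqrestore(&demo_hw_lock, flags);
}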
arch/powerpc/include/asm/mpic.h

@@ -289,7 +289,7 @@ struct mpic
 #ifdef CONFIG_MPIC_U3_HT_IRQS
 	/* The fixup table */
 	struct mpic_irq_fixup *fixups;
-	spinlock_t fixup_lock;
+	atomic_spinlock_t fixup_lock;
 #endif
 	/* Register access method */
arch/powerpc/include/asm/pmac_feature.h

@@ -378,7 +378,7 @@ extern struct macio_chip* macio_find(struct device_node* child, int type);
  * Those are exported by pmac feature for internal use by arch code
  * only like the platform function callbacks, do not use directly in drivers
  */
-extern spinlock_t feature_lock;
+extern atomic_spinlock_t feature_lock;
 extern struct device_node *uninorth_node;
 extern u32 __iomem *uninorth_base;
arch/powerpc/kernel/irq.c

@@ -453,7 +453,7 @@ void do_softirq(void)
  */
 static LIST_HEAD(irq_hosts);
-static DEFINE_SPINLOCK(irq_big_lock);
+static DEFINE_ATOMIC_SPINLOCK(irq_big_lock);
 static unsigned int revmap_trees_allocated;
 static DEFINE_MUTEX(revmap_trees_mutex);
 struct irq_map_entry irq_map[NR_IRQS];

@@ -499,14 +499,14 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
 	if (host->ops->match == NULL)
 		host->ops->match = default_irq_host_match;
-	spin_lock_irqsave(&irq_big_lock, flags);
+	atomic_spin_lock_irqsave(&irq_big_lock, flags);
 	/* If it's a legacy controller, check for duplicates and
 	 * mark it as allocated (we use irq 0 host pointer for that
 	 */
 	if (revmap_type == IRQ_HOST_MAP_LEGACY) {
 		if (irq_map[0].host != NULL) {
-			spin_unlock_irqrestore(&irq_big_lock, flags);
+			atomic_spin_unlock_irqrestore(&irq_big_lock, flags);
 			/* If we are early boot, we can't free the structure,
 			 * too bad...
 			 * this will be fixed once slab is made available early

@@ -520,7 +520,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
 	}
 	list_add(&host->link, &irq_hosts);
-	spin_unlock_irqrestore(&irq_big_lock, flags);
+	atomic_spin_unlock_irqrestore(&irq_big_lock, flags);
 	/* Additional setups per revmap type */
 	switch (revmap_type) {

@@ -571,13 +571,13 @@ struct irq_host *irq_find_host(struct device_node *node)
 	 * the absence of a device node. This isn't a problem so far
 	 * yet though...
 	 */
-	spin_lock_irqsave(&irq_big_lock, flags);
+	atomic_spin_lock_irqsave(&irq_big_lock, flags);
 	list_for_each_entry(h, &irq_hosts, link)
 		if (h->ops->match(h, node)) {
 			found = h;
 			break;
 		}
-	spin_unlock_irqrestore(&irq_big_lock, flags);
+	atomic_spin_unlock_irqrestore(&irq_big_lock, flags);
 	return found;
 }
 EXPORT_SYMBOL_GPL(irq_find_host);

@@ -935,7 +935,7 @@ unsigned int irq_alloc_virt(struct irq_host *host,
 	if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
 		return NO_IRQ;
-	spin_lock_irqsave(&irq_big_lock, flags);
+	atomic_spin_lock_irqsave(&irq_big_lock, flags);
 	/* Use hint for 1 interrupt if any */
 	if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&

@@ -959,7 +959,7 @@ unsigned int irq_alloc_virt(struct irq_host *host,
 		}
 	}
 	if (found == NO_IRQ) {
-		spin_unlock_irqrestore(&irq_big_lock, flags);
+		atomic_spin_unlock_irqrestore(&irq_big_lock, flags);
 		return NO_IRQ;
 	}
 hint_found:

@@ -968,7 +968,7 @@ unsigned int irq_alloc_virt(struct irq_host *host,
 		smp_wmb();
 		irq_map[i].host = host;
 	}
-	spin_unlock_irqrestore(&irq_big_lock, flags);
+	atomic_spin_unlock_irqrestore(&irq_big_lock, flags);
 	return found;
 }

@@ -980,7 +980,7 @@ void irq_free_virt(unsigned int virq, unsigned int count)
 	WARN_ON(virq < NUM_ISA_INTERRUPTS);
 	WARN_ON(count == 0 || (virq + count) > irq_virq_count);
-	spin_lock_irqsave(&irq_big_lock, flags);
+	atomic_spin_lock_irqsave(&irq_big_lock, flags);
 	for (i = virq; i < (virq + count); i++) {
 		struct irq_host *host;

@@ -993,7 +993,7 @@ void irq_free_virt(unsigned int virq, unsigned int count)
 		smp_wmb();
 		irq_map[i].host = NULL;
 	}
-	spin_unlock_irqrestore(&irq_big_lock, flags);
+	atomic_spin_unlock_irqrestore(&irq_big_lock, flags);
 }
 void irq_early_init(void)
arch/powerpc/kernel/kprobes.c

@@ -263,7 +263,7 @@ ss_probe:
 		kcb->kprobe_status = KPROBE_HIT_SSDONE;
 		reset_current_kprobe();
-		preempt_enable_no_resched();
+		preempt_enable();
 		return 1;
 	} else if (ret < 0) {
 	/*

@@ -282,7 +282,7 @@ ss_probe:
 	return 1;
 no_kprobe:
-	preempt_enable_no_resched();
+	preempt_enable();
 	return ret;
 }

@@ -412,7 +412,7 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
 	}
 	reset_current_kprobe();
 out:
-	preempt_enable_no_resched();
+	preempt_enable();
 	/*
 	 * if somebody else is singlestepping across a probe point, msr
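The kprobes change is not a lock conversion: preempt_enable_no_resched() becomes preempt_enable() on every exit path. The _no_resched variant drops the preempt count without checking for a pending reschedule, which on RT can delay a runnable higher-priority task. A sketch of the difference (demo function, not from the patch):

static int demo_exit_path(void)
{
	preempt_disable();
	/* ... probe bookkeeping done with preemption off ... */

	/* Before: preempt_enable_no_resched();  -- no resched check */
	preempt_enable();	/* drops the count and reschedules if needed */
	return 1;
}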
arch/powerpc/kernel/pmc.c

@@ -37,7 +37,7 @@ static void dummy_perf(struct pt_regs *regs)
 }
-static DEFINE_SPINLOCK(pmc_owner_lock);
+static DEFINE_ATOMIC_SPINLOCK(pmc_owner_lock);
 static void *pmc_owner_caller; /* mostly for debugging */
 perf_irq_t perf_irq = dummy_perf;

@@ -45,7 +45,7 @@ int reserve_pmc_hardware(perf_irq_t new_perf_irq)
 {
 	int err = 0;
-	spin_lock(&pmc_owner_lock);
+	atomic_spin_lock(&pmc_owner_lock);
 	if (pmc_owner_caller) {
 		printk(KERN_WARNING "reserve_pmc_hardware: "

@@ -59,21 +59,21 @@ int reserve_pmc_hardware(perf_irq_t new_perf_irq)
 	perf_irq = new_perf_irq ? new_perf_irq : dummy_perf;
 out:
-	spin_unlock(&pmc_owner_lock);
+	atomic_spin_unlock(&pmc_owner_lock);
 	return err;
 }
 EXPORT_SYMBOL_GPL(reserve_pmc_hardware);
 void release_pmc_hardware(void)
 {
-	spin_lock(&pmc_owner_lock);
+	atomic_spin_lock(&pmc_owner_lock);
 	WARN_ON(!pmc_owner_caller);
 	pmc_owner_caller = NULL;
 	perf_irq = dummy_perf;
-	spin_unlock(&pmc_owner_lock);
+	atomic_spin_unlock(&pmc_owner_lock);
 }
 EXPORT_SYMBOL_GPL(release_pmc_hardware);
arch/powerpc/kernel/prom.c

@@ -81,7 +81,7 @@ struct boot_param_header *initial_boot_params;
 extern struct device_node *allnodes;	/* temporary while merging */
-extern rwlock_t devtree_lock;	/* temporary while merging */
+extern atomic_spinlock_t devtree_lock;	/* temporary while merging */
 /* export that to outside world */
 struct device_node *of_chosen;

@@ -1275,12 +1275,12 @@ struct device_node *of_find_node_by_phandle(phandle handle)
 {
 	struct device_node *np;
-	read_lock(&devtree_lock);
+	atomic_spin_lock(&devtree_lock);
 	for (np = allnodes; np != 0; np = np->allnext)
 		if (np->linux_phandle == handle)
 			break;
 	of_node_get(np);
-	read_unlock(&devtree_lock);
+	atomic_spin_unlock(&devtree_lock);
 	return np;
 }
 EXPORT_SYMBOL(of_find_node_by_phandle);

@@ -1328,13 +1328,13 @@ struct device_node *of_find_all_nodes(struct device_node *prev)
 {
 	struct device_node *np;
-	read_lock(&devtree_lock);
+	atomic_spin_lock(&devtree_lock);
 	np = prev ? prev->allnext : allnodes;
 	for (; np != 0; np = np->allnext)
 		if (of_node_get(np))
 			break;
 	of_node_put(prev);
-	read_unlock(&devtree_lock);
+	atomic_spin_unlock(&devtree_lock);
 	return np;
 }
 EXPORT_SYMBOL(of_find_all_nodes);

@@ -1419,12 +1419,12 @@ void of_attach_node(struct device_node *np)
 {
 	unsigned long flags;
-	write_lock_irqsave(&devtree_lock, flags);
+	atomic_spin_lock_irqsave(&devtree_lock, flags);
 	np->sibling = np->parent->child;
 	np->allnext = allnodes;
 	np->parent->child = np;
 	allnodes = np;
-	write_unlock_irqrestore(&devtree_lock, flags);
+	atomic_spin_unlock_irqrestore(&devtree_lock, flags);
 }
 /*

@@ -1437,7 +1437,7 @@ void of_detach_node(struct device_node *np)
 	struct device_node *parent;
 	unsigned long flags;
-	write_lock_irqsave(&devtree_lock, flags);
+	atomic_spin_lock_irqsave(&devtree_lock, flags);
 	parent = np->parent;
 	if (!parent)

@@ -1468,7 +1468,7 @@ void of_detach_node(struct device_node *np)
 	of_node_set_flag(np, OF_DETACHED);
 out_unlock:
-	write_unlock_irqrestore(&devtree_lock, flags);
+	atomic_spin_unlock_irqrestore(&devtree_lock, flags);
 }
 #ifdef CONFIG_PPC_PSERIES

@@ -1552,18 +1552,18 @@ int prom_add_property(struct device_node* np, struct property* prop)
 	unsigned long flags;
 	prop->next = NULL;
-	write_lock_irqsave(&devtree_lock, flags);
+	atomic_spin_lock_irqsave(&devtree_lock, flags);
 	next = &np->properties;
 	while (*next) {
 		if (strcmp(prop->name, (*next)->name) == 0) {
 			/* duplicate ! don't insert it */
-			write_unlock_irqrestore(&devtree_lock, flags);
+			atomic_spin_unlock_irqrestore(&devtree_lock, flags);
 			return -1;
 		}
 		next = &(*next)->next;
 	}
 	*next = prop;
-	write_unlock_irqrestore(&devtree_lock, flags);
+	atomic_spin_unlock_irqrestore(&devtree_lock, flags);
 #ifdef CONFIG_PROC_DEVICETREE
 	/* try to add to proc as well if it was initialized */

@@ -1586,7 +1586,7 @@ int prom_remove_property(struct device_node *np, struct property *prop)
 	unsigned long flags;
 	int found = 0;
-	write_lock_irqsave(&devtree_lock, flags);
+	atomic_spin_lock_irqsave(&devtree_lock, flags);
 	next = &np->properties;
 	while (*next) {
 		if (*next == prop) {

@@ -1599,7 +1599,7 @@ int prom_remove_property(struct device_node *np, struct property *prop)
 		}
 		next = &(*next)->next;
 	}
-	write_unlock_irqrestore(&devtree_lock, flags);
+	atomic_spin_unlock_irqrestore(&devtree_lock, flags);
 	if (!found)
 		return -ENODEV;

@@ -1628,7 +1628,7 @@ int prom_update_property(struct device_node *np,
 	unsigned long flags;
 	int found = 0;
-	write_lock_irqsave(&devtree_lock, flags);
+	atomic_spin_lock_irqsave(&devtree_lock, flags);
 	next = &np->properties;
 	while (*next) {
 		if (*next == oldprop) {

@@ -1642,7 +1642,7 @@ int prom_update_property(struct device_node *np,
 		}
 		next = &(*next)->next;
 	}
-	write_unlock_irqrestore(&devtree_lock, flags);
+	atomic_spin_unlock_irqrestore(&devtree_lock, flags);
 	if (!found)
 		return -ENODEV;
arch/powerpc/kernel/traps.c

@@ -102,11 +102,11 @@ static inline void pmac_backlight_unblank(void) { }
 int die(const char *str, struct pt_regs *regs, long err)
 {
 	static struct {
-		spinlock_t lock;
+		atomic_spinlock_t lock;
 		u32 lock_owner;
 		int lock_owner_depth;
 	} die = {
-		.lock = __SPIN_LOCK_UNLOCKED(die.lock),
+		.lock = __ATOMIC_SPIN_LOCK_UNLOCKED(die.lock),
 		.lock_owner = -1,
 		.lock_owner_depth = 0
 	};

@@ -120,7 +120,7 @@ int die(const char *str, struct pt_regs *regs, long err)
 	if (die.lock_owner != raw_smp_processor_id()) {
 		console_verbose();
-		spin_lock_irqsave(&die.lock, flags);
+		atomic_spin_lock_irqsave(&die.lock, flags);
 		die.lock_owner = smp_processor_id();
 		die.lock_owner_depth = 0;
 		bust_spinlocks(1);

@@ -155,7 +155,7 @@ int die(const char *str, struct pt_regs *regs, long err)
 	bust_spinlocks(0);
 	die.lock_owner = -1;
 	add_taint(TAINT_DIE);
-	spin_unlock_irqrestore(&die.lock, flags);
+	atomic_spin_unlock_irqrestore(&die.lock, flags);
 	if (kexec_should_crash(current) ||
 		kexec_sr_activated(smp_processor_id()))

@@ -193,6 +193,11 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
 			addr, regs->nip, regs->link, code);
 	}
+#ifdef CONFIG_PREEMPT_RT
+	local_irq_enable();
+	preempt_check_resched();
+#endif
+
 	memset(&info, 0, sizeof(info));
 	info.si_signo = signr;
 	info.si_code = code;
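traps.c also grows a small RT-only block in _exception(): before signal delivery, interrupts are re-enabled and a pending reschedule is honored. The added guard, with a comment giving my reading of the intent (the comment is not in the patch):

#ifdef CONFIG_PREEMPT_RT
	/* Signal delivery below may block on RT; run it with IRQs on
	 * and let any pending reschedule happen first. */
	local_irq_enable();
	preempt_check_resched();
#endif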
arch/powerpc/lib/locks.c

@@ -86,8 +86,10 @@ void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
 	while (lock->slock) {
 		HMT_low();
+		preempt_disable();
 		if (SHARED_PROCESSOR)
 			__spin_yield(lock);
+		preempt_enable();
 	}
 	HMT_medium();
 }
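locks.c is pure addition: the shared-processor yield is now bracketed by a preempt_disable()/preempt_enable() pair, pinning the waiter to one CPU while it donates cycles to the lock holder. The resulting loop, reassembled from the hunk (comments are my reading, not from the patch):

void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (lock->slock) {
		HMT_low();
		preempt_disable();		/* stay on this CPU across the yield */
		if (SHARED_PROCESSOR)
			__spin_yield(lock);	/* direct cycles to the lock holder */
		preempt_enable();
	}
	HMT_medium();
}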
arch/powerpc/mm/hash_native_64.c

@@ -37,7 +37,7 @@
 #define HPTE_LOCK_BIT 3
-static DEFINE_SPINLOCK(native_tlbie_lock);
+static DEFINE_ATOMIC_SPINLOCK(native_tlbie_lock);
 static inline void __tlbie(unsigned long va, int psize, int ssize)
 {

@@ -104,7 +104,7 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
 	if (use_local)
 		use_local = mmu_psize_defs[psize].tlbiel;
 	if (lock_tlbie && !use_local)
-		spin_lock(&native_tlbie_lock);
+		atomic_spin_lock(&native_tlbie_lock);
 	asm volatile("ptesync" : : : "memory");
 	if (use_local) {
 		__tlbiel(va, psize, ssize);

@@ -114,7 +114,7 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
 		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
 	}
 	if (lock_tlbie && !use_local)
-		spin_unlock(&native_tlbie_lock);
+		atomic_spin_unlock(&native_tlbie_lock);
 }
 static inline void native_lock_hpte(struct hash_pte *hptep)

@@ -434,7 +434,7 @@ static void native_hpte_clear(void)
 	/* we take the tlbie lock and hold it.  Some hardware will
 	 * deadlock if we try to tlbie from two processors at once.
 	 */
-	spin_lock(&native_tlbie_lock);
+	atomic_spin_lock(&native_tlbie_lock);
 	slots = pteg_count * HPTES_PER_GROUP;

@@ -458,7 +458,7 @@ static void native_hpte_clear(void)
 	}
 	asm volatile("eieio; tlbsync; ptesync":::"memory");
-	spin_unlock(&native_tlbie_lock);
+	atomic_spin_unlock(&native_tlbie_lock);
 	local_irq_restore(flags);
 }

@@ -521,7 +521,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 	int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
 	if (lock_tlbie)
-		spin_lock(&native_tlbie_lock);
+		atomic_spin_lock(&native_tlbie_lock);
 	asm volatile("ptesync":::"memory");
 	for (i = 0; i < number; i++) {

@@ -536,7 +536,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 		asm volatile("eieio; tlbsync; ptesync":::"memory");
 		if (lock_tlbie)
-			spin_unlock(&native_tlbie_lock);
+			atomic_spin_unlock(&native_tlbie_lock);
 	}
 	local_irq_restore(flags);
arch/powerpc/mm/mmu_context_nohash.c

@@ -46,7 +46,7 @@ static unsigned int next_context, nr_free_contexts;
 static unsigned long *context_map;
 static unsigned long *stale_map[NR_CPUS];
 static struct mm_struct **context_mm;
-static DEFINE_SPINLOCK(context_lock);
+static DEFINE_ATOMIC_SPINLOCK(context_lock);
 #define CTX_MAP_SIZE	\
 	(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))

@@ -104,9 +104,9 @@ static unsigned int steal_context_smp(unsigned int id)
 	/* This will happen if you have more CPUs than available contexts,
 	 * all we can do here is wait a bit and try again
 	 */
-	spin_unlock(&context_lock);
+	atomic_spin_unlock(&context_lock);
 	cpu_relax();
-	spin_lock(&context_lock);
+	atomic_spin_lock(&context_lock);
 	/* This will cause the caller to try again */
 	return MMU_NO_CONTEXT;

@@ -177,7 +177,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	unsigned long *map;
 	/* No lockless fast path .. yet */
-	spin_lock(&context_lock);
+	atomic_spin_lock(&context_lock);
 #ifndef DEBUG_STEAL_ONLY
 	pr_devel("[%d] activating context for mm @%p, active=%d, id=%d\n",

@@ -257,7 +257,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	/* Flick the MMU and release lock */
 	set_context(id, next->pgd);
-	spin_unlock(&context_lock);
+	atomic_spin_unlock(&context_lock);
 }
 /*

@@ -284,7 +284,7 @@ void destroy_context(struct mm_struct *mm)
 	WARN_ON(mm->context.active != 0);
-	spin_lock_irqsave(&context_lock, flags);
+	atomic_spin_lock_irqsave(&context_lock, flags);
 	id = mm->context.id;
 	if (id != MMU_NO_CONTEXT) {
 		__clear_bit(id, context_map);

@@ -295,7 +295,7 @@ void destroy_context(struct mm_struct *mm)
 		context_mm[id] = NULL;
 		nr_free_contexts++;
 	}
-	spin_unlock_irqrestore(&context_lock, flags);
+	atomic_spin_unlock_irqrestore(&context_lock, flags);
 }
 #ifdef CONFIG_SMP
arch/powerpc/mm/tlb_nohash.c

@@ -85,7 +85,7 @@ EXPORT_SYMBOL(local_flush_tlb_page);
  */
 #ifdef CONFIG_SMP
-static DEFINE_SPINLOCK(tlbivax_lock);
+static DEFINE_ATOMIC_SPINLOCK(tlbivax_lock);
 struct tlb_flush_param {
 	unsigned long addr;

@@ -158,10 +158,10 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 	if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
 		int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
 		if (lock)
-			spin_lock(&tlbivax_lock);
+			atomic_spin_lock(&tlbivax_lock);
 		_tlbivax_bcast(vmaddr, pid);
 		if (lock)
-			spin_unlock(&tlbivax_lock);
+			atomic_spin_unlock(&tlbivax_lock);
 		goto bail;
 	} else {
 		struct tlb_flush_param p = { .pid = pid, .addr = vmaddr };

@@ -189,7 +189,9 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	_tlbil_pid(0);
 	preempt_enable();
 #else
+	preempt_disable();
 	_tlbil_pid(0);
+	preempt_enable();
 #endif
 }
 EXPORT_SYMBOL(flush_tlb_kernel_range);
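The second tlb_nohash.c hunk mirrors the locks.c idea: on non-SMP builds, _tlbil_pid(0) now runs with preemption disabled, so the flush cannot be preempted or migrated mid-operation once the kernel itself is preemptible. Reassembled, the #else branch of flush_tlb_kernel_range() becomes:

#else
	preempt_disable();	/* keep the flush on one CPU, uninterrupted */
	_tlbil_pid(0);
	preempt_enable();
#endif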
arch/powerpc/platforms/cell/beat_htab.c

@@ -40,7 +40,7 @@
 #define DBG_LOW(fmt...) do { } while (0)
 #endif
-static DEFINE_SPINLOCK(beat_htab_lock);
+static DEFINE_ATOMIC_SPINLOCK(beat_htab_lock);
 static inline unsigned int beat_read_mask(unsigned hpte_group)
 {

@@ -114,18 +114,18 @@ static long beat_lpar_hpte_insert(unsigned long hpte_group,
 	if (rflags & _PAGE_NO_CACHE)
 		hpte_r &= ~_PAGE_COHERENT;
-	spin_lock(&beat_htab_lock);
+	atomic_spin_lock(&beat_htab_lock);
 	lpar_rc = beat_read_mask(hpte_group);
 	if (lpar_rc == 0) {
 		if (!(vflags & HPTE_V_BOLTED))
 			DBG_LOW(" full\n");
-		spin_unlock(&beat_htab_lock);
+		atomic_spin_unlock(&beat_htab_lock);
 		return -1;
 	}
 	lpar_rc = beat_insert_htab_entry(0, hpte_group, lpar_rc << 48,
 					 hpte_v, hpte_r, &slot);
-	spin_unlock(&beat_htab_lock);
+	atomic_spin_unlock(&beat_htab_lock);
 	/*
 	 * Since we try and ioremap PHBs we don't own, the pte insert

@@ -198,17 +198,17 @@ static long beat_lpar_hpte_updatepp(unsigned long slot,
 		"avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
 		want_v & HPTE_V_AVPN, slot, psize, newpp);
-	spin_lock(&beat_htab_lock);
+	atomic_spin_lock(&beat_htab_lock);
 	dummy0 = beat_lpar_hpte_getword0(slot);
 	if ((dummy0 & ~0x7FUL) != (want_v & ~0x7FUL)) {
 		DBG_LOW("not found !\n");
-		spin_unlock(&beat_htab_lock);
+		atomic_spin_unlock(&beat_htab_lock);
 		return -1;
 	}
 	lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7,
 					&dummy0, &dummy1);
-	spin_unlock(&beat_htab_lock);
+	atomic_spin_unlock(&beat_htab_lock);
 	if (lpar_rc != 0 || dummy0 == 0) {
 		DBG_LOW("not found !\n");
 		return -1;

@@ -262,13 +262,13 @@ static void beat_lpar_hpte_updateboltedpp(unsigned long newpp,
 	vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
 	va = (vsid << 28) | (ea & 0x0fffffff);
-	spin_lock(&beat_htab_lock);
+	atomic_spin_lock(&beat_htab_lock);
 	slot = beat_lpar_hpte_find(va, psize);
 	BUG_ON(slot == -1);
 	lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7,
 					&dummy0, &dummy1);
-	spin_unlock(&beat_htab_lock);
+	atomic_spin_unlock(&beat_htab_lock);
 	BUG_ON(lpar_rc != 0);
 }

@@ -285,18 +285,18 @@ static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
 		slot, va, psize, local);
 	want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
-	spin_lock_irqsave(&beat_htab_lock, flags);
+	atomic_spin_lock_irqsave(&beat_htab_lock, flags);
 	dummy1 = beat_lpar_hpte_getword0(slot);
 	if ((dummy1 & ~0x7FUL) != (want_v & ~0x7FUL)) {
 		DBG_LOW("not found !\n");
-		spin_unlock_irqrestore(&beat_htab_lock, flags);
+		atomic_spin_unlock_irqrestore(&beat_htab_lock, flags);
 		return;
 	}
 	lpar_rc = beat_write_htab_entry(0, slot, 0, 0, HPTE_V_VALID, 0,
 					&dummy1, &dummy2);
-	spin_unlock_irqrestore(&beat_htab_lock, flags);
+	atomic_spin_unlock_irqrestore(&beat_htab_lock, flags);
 	BUG_ON(lpar_rc != 0);
 }
arch/powerpc/platforms/cell/beat_interrupt.c

@@ -30,7 +30,7 @@
 #include "beat_wrapper.h"
 #define	MAX_IRQS	NR_IRQS
-static DEFINE_SPINLOCK(beatic_irq_mask_lock);
+static DEFINE_ATOMIC_SPINLOCK(beatic_irq_mask_lock);
 static uint64_t	beatic_irq_mask_enable[(MAX_IRQS + 255) / 64];
 static uint64_t	beatic_irq_mask_ack[(MAX_IRQS + 255) / 64];

@@ -65,30 +65,30 @@ static void beatic_mask_irq(unsigned int irq_plug)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&beatic_irq_mask_lock, flags);
+	atomic_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
 	beatic_irq_mask_enable[irq_plug / 64] &= ~(1UL << (63 - (irq_plug % 64)));
 	beatic_update_irq_mask(irq_plug);
-	spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
+	atomic_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
 }
 static void beatic_unmask_irq(unsigned int irq_plug)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&beatic_irq_mask_lock, flags);
+	atomic_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
 	beatic_irq_mask_enable[irq_plug / 64] |= 1UL << (63 - (irq_plug % 64));
 	beatic_update_irq_mask(irq_plug);
-	spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
+	atomic_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
 }
 static void beatic_ack_irq(unsigned int irq_plug)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&beatic_irq_mask_lock, flags);
+	atomic_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
 	beatic_irq_mask_ack[irq_plug / 64] &= ~(1UL << (63 - (irq_plug % 64)));
 	beatic_update_irq_mask(irq_plug);
-	spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
+	atomic_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
 }
 static void beatic_end_irq(unsigned int irq_plug)

@@ -103,10 +103,10 @@ static void beatic_end_irq(unsigned int irq_plug)
 		printk(KERN_ERR "IRQ over-downcounted, plug %d\n", irq_plug);
 	}
-	spin_lock_irqsave(&beatic_irq_mask_lock, flags);
+	atomic_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
 	beatic_irq_mask_ack[irq_plug / 64] |= 1UL << (63 - (irq_plug % 64));
 	beatic_update_irq_mask(irq_plug);
-	spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
+	atomic_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
 }
 static struct irq_chip beatic_pic = {
arch/powerpc/platforms/powermac/feature.c

@@ -59,10 +59,10 @@ extern struct device_node *k2_skiplist[2];
  * We use a single global lock to protect accesses. Each driver has
  * to take care of its own locking
  */
-DEFINE_SPINLOCK(feature_lock);
+DEFINE_ATOMIC_SPINLOCK(feature_lock);
-#define LOCK(flags)	spin_lock_irqsave(&feature_lock, flags);
-#define UNLOCK(flags)	spin_unlock_irqrestore(&feature_lock, flags);
+#define LOCK(flags)	atomic_spin_lock_irqsave(&feature_lock, flags);
+#define UNLOCK(flags)	atomic_spin_unlock_irqrestore(&feature_lock, flags);
 /*
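feature.c shows the payoff of macro indirection: because its callers go through LOCK()/UNLOCK(), only the lock definition and the two macro bodies change (+3/-3), whereas pfunc_base.c, which open-codes the same feature_lock calls, needs twelve edits below. A hypothetical caller, untouched by the conversion:

static void demo_feature_toggle(void)	/* hypothetical, for illustration */
{
	unsigned long flags;

	LOCK(flags);	/* now expands to atomic_spin_lock_irqsave(&feature_lock, flags) */
	/* ... flip a feature-control register ... */
	UNLOCK(flags);	/* now atomic_spin_unlock_irqrestore(&feature_lock, flags) */
}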
arch/powerpc/platforms/powermac/nvram.c

@@ -80,7 +80,7 @@ static int is_core_99;
 static int core99_bank = 0;
 static int nvram_partitions[3];
 // XXX Turn that into a sem
-static DEFINE_SPINLOCK(nv_lock);
+static DEFINE_ATOMIC_SPINLOCK(nv_lock);
 static int (*core99_write_bank)(int bank, u8* datas);
 static int (*core99_erase_bank)(int bank);

@@ -165,10 +165,10 @@ static unsigned char indirect_nvram_read_byte(int addr)
 	unsigned char val;
 	unsigned long flags;
-	spin_lock_irqsave(&nv_lock, flags);
+	atomic_spin_lock_irqsave(&nv_lock, flags);
 	out_8(nvram_addr, addr >> 5);
 	val = in_8(&nvram_data[(addr & 0x1f) << 4]);
-	spin_unlock_irqrestore(&nv_lock, flags);
+	atomic_spin_unlock_irqrestore(&nv_lock, flags);
 	return val;
 }

@@ -177,10 +177,10 @@ static void indirect_nvram_write_byte(int addr, unsigned char val)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&nv_lock, flags);
+	atomic_spin_lock_irqsave(&nv_lock, flags);
 	out_8(nvram_addr, addr >> 5);
 	out_8(&nvram_data[(addr & 0x1f) << 4], val);
-	spin_unlock_irqrestore(&nv_lock, flags);
+	atomic_spin_unlock_irqrestore(&nv_lock, flags);
 }

@@ -481,7 +481,7 @@ static void core99_nvram_sync(void)
 	if (!is_core_99 || !nvram_data || !nvram_image)
 		return;
-	spin_lock_irqsave(&nv_lock, flags);
+	atomic_spin_lock_irqsave(&nv_lock, flags);
 	if (!memcmp(nvram_image, (u8 *)nvram_data + core99_bank * NVRAM_SIZE,
 		    NVRAM_SIZE))
 		goto bail;

@@ -503,7 +503,7 @@ static void core99_nvram_sync(void)
 	if (core99_write_bank(core99_bank, nvram_image))
 		printk("nvram: Error writing bank %d\n", core99_bank);
 bail:
-	spin_unlock_irqrestore(&nv_lock, flags);
+	atomic_spin_unlock_irqrestore(&nv_lock, flags);
 #ifdef DEBUG
 	mdelay(2000);
arch/powerpc/platforms/powermac/pfunc_base.c

@@ -50,13 +50,13 @@ static int macio_do_gpio_write(PMF_STD_ARGS, u8 value, u8 mask)
 		value = ~value;
 	/* Toggle the GPIO */
-	spin_lock_irqsave(&feature_lock, flags);
+	atomic_spin_lock_irqsave(&feature_lock, flags);
 	tmp = readb(addr);
 	tmp = (tmp & ~mask) | (value & mask);
 	DBG("Do write 0x%02x to GPIO %s (%p)\n",
 	    tmp, func->node->full_name, addr);
 	writeb(tmp, addr);
-	spin_unlock_irqrestore(&feature_lock, flags);
+	atomic_spin_unlock_irqrestore(&feature_lock, flags);
 	return 0;
 }

@@ -145,9 +145,9 @@ static int macio_do_write_reg32(PMF_STD_ARGS, u32 offset, u32 value, u32 mask)
 	struct macio_chip *macio = func->driver_data;
 	unsigned long flags;
-	spin_lock_irqsave(&feature_lock, flags);
+	atomic_spin_lock_irqsave(&feature_lock, flags);
 	MACIO_OUT32(offset, (MACIO_IN32(offset) & ~mask) | (value & mask));
-	spin_unlock_irqrestore(&feature_lock, flags);
+	atomic_spin_unlock_irqrestore(&feature_lock, flags);
 	return 0;
 }

@@ -168,9 +168,9 @@ static int macio_do_write_reg8(PMF_STD_ARGS, u32 offset, u8 value, u8 mask)
 	struct macio_chip *macio = func->driver_data;
 	unsigned long flags;
-	spin_lock_irqsave(&feature_lock, flags);
+	atomic_spin_lock_irqsave(&feature_lock, flags);
 	MACIO_OUT8(offset, (MACIO_IN8(offset) & ~mask) | (value & mask));
-	spin_unlock_irqrestore(&feature_lock, flags);
+	atomic_spin_unlock_irqrestore(&feature_lock, flags);
 	return 0;
 }

@@ -223,12 +223,12 @@ static int macio_do_write_reg32_slm(PMF_STD_ARGS, u32 offset, u32 shift,
 	if (args == NULL || args->count == 0)
 		return -EINVAL;
-	spin_lock_irqsave(&feature_lock, flags);
+	atomic_spin_lock_irqsave(&feature_lock, flags);
 	tmp = MACIO_IN32(offset);
 	val = args->u[0].v << shift;
 	tmp = (tmp & ~mask) | (val & mask);
 	MACIO_OUT32(offset, tmp);
-	spin_unlock_irqrestore(&feature_lock, flags);
+	atomic_spin_unlock_irqrestore(&feature_lock, flags);
 	return 0;
 }

@@ -243,12 +243,12 @@ static int macio_do_write_reg8_slm(PMF_STD_ARGS, u32 offset, u32 shift,
 	if (args == NULL || args->count == 0)
 		return -EINVAL;
-	spin_lock_irqsave(&feature_lock, flags);
+	atomic_spin_lock_irqsave(&feature_lock, flags);
 	tmp = MACIO_IN8(offset);
 	val = args->u[0].v << shift;
 	tmp = (tmp & ~mask) | (val & mask);
 	MACIO_OUT8(offset, tmp);
-	spin_unlock_irqrestore(&feature_lock, flags);
+	atomic_spin_unlock_irqrestore(&feature_lock, flags);
 	return 0;
 }

@@ -278,12 +278,12 @@ static int unin_do_write_reg32(PMF_STD_ARGS, u32 offset, u32 value, u32 mask)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&feature_lock, flags);
+	atomic_spin_lock_irqsave(&feature_lock, flags);
 	/* This is fairly bogus in darwin, but it should work for our needs
 	 * implemeted that way:
 	 */
 	UN_OUT(offset, (UN_IN(offset) & ~mask) | (value & mask));
-	spin_unlock_irqrestore(&feature_lock, flags);
+	atomic_spin_unlock_irqrestore(&feature_lock, flags);
 	return 0;
 }
arch/powerpc/platforms/powermac/pic.c

@@ -57,7 +57,7 @@ static int max_irqs;
 static int max_real_irqs;
 static u32 level_mask[4];
-static DEFINE_SPINLOCK(pmac_pic_lock);
+static DEFINE_ATOMIC_SPINLOCK(pmac_pic_lock);
 #define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
 static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];

@@ -85,7 +85,7 @@ static void pmac_mask_and_ack_irq(unsigned int virq)
 	int i = src >> 5;
 	unsigned long flags;
-	spin_lock_irqsave(&pmac_pic_lock, flags);
+	atomic_spin_lock_irqsave(&pmac_pic_lock, flags);
 	__clear_bit(src, ppc_cached_irq_mask);
 	if (__test_and_clear_bit(src, ppc_lost_interrupts))
 		atomic_dec(&ppc_n_lost_interrupts);

@@ -97,7 +97,7 @@ static void pmac_mask_and_ack_irq(unsigned int virq)
 		mb();
 	} while ((in_le32(&pmac_irq_hw[i]->enable) & bit)
 		 != (ppc_cached_irq_mask[i] & bit));
-	spin_unlock_irqrestore(&pmac_pic_lock, flags);
+	atomic_spin_unlock_irqrestore(&pmac_pic_lock, flags);
 }
 static void pmac_ack_irq(unsigned int virq)

@@ -107,12 +107,12 @@ static void pmac_ack_irq(unsigned int virq)
 	int i = src >> 5;
 	unsigned long flags;
-	spin_lock_irqsave(&pmac_pic_lock, flags);
+	atomic_spin_lock_irqsave(&pmac_pic_lock, flags);
 	if (__test_and_clear_bit(src, ppc_lost_interrupts))
 		atomic_dec(&ppc_n_lost_interrupts);
 	out_le32(&pmac_irq_hw[i]->ack, bit);
 	(void)in_le32(&pmac_irq_hw[i]->ack);
-	spin_unlock_irqrestore(&pmac_pic_lock, flags);
+	atomic_spin_unlock_irqrestore(&pmac_pic_lock, flags);
 }
 static void __pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)

@@ -152,12 +152,12 @@ static unsigned int pmac_startup_irq(unsigned int virq)
 	unsigned long bit = 1UL << (src & 0x1f);
 	int i = src >> 5;
-	spin_lock_irqsave(&pmac_pic_lock, flags);
+	atomic_spin_lock_irqsave(&pmac_pic_lock, flags);
 	if ((irq_desc[virq].status & IRQ_LEVEL) == 0)
 		out_le32(&pmac_irq_hw[i]->ack, bit);
 	__set_bit(src, ppc_cached_irq_mask);
 	__pmac_set_irq_mask(src, 0);
-	spin_unlock_irqrestore(&pmac_pic_lock, flags);
+	atomic_spin_unlock_irqrestore(&pmac_pic_lock, flags);
 	return 0;
 }

@@ -167,10 +167,10 @@ static void pmac_mask_irq(unsigned int virq)
 	unsigned long flags;
 	unsigned int src = irq_map[virq].hwirq;
-	spin_lock_irqsave(&pmac_pic_lock, flags);
+	atomic_spin_lock_irqsave(&pmac_pic_lock, flags);
 	__clear_bit(src, ppc_cached_irq_mask);
 	__pmac_set_irq_mask(src, 1);
-	spin_unlock_irqrestore(&pmac_pic_lock, flags);
+	atomic_spin_unlock_irqrestore(&pmac_pic_lock, flags);
 }
 static void pmac_unmask_irq(unsigned int virq)

@@ -178,19 +178,19 @@ static void pmac_unmask_irq(unsigned int virq)
 	unsigned long flags;
 	unsigned int src = irq_map[virq].hwirq;
-	spin_lock_irqsave(&pmac_pic_lock, flags);
+	atomic_spin_lock_irqsave(&pmac_pic_lock, flags);
 	__set_bit(src, ppc_cached_irq_mask);
 	__pmac_set_irq_mask(src, 0);
-	spin_unlock_irqrestore(&pmac_pic_lock, flags);
+	atomic_spin_unlock_irqrestore(&pmac_pic_lock, flags);
 }
 static int pmac_retrigger(unsigned int virq)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&pmac_pic_lock, flags);
+	atomic_spin_lock_irqsave(&pmac_pic_lock, flags);
 	__pmac_retrigger(irq_map[virq].hwirq);
-	spin_unlock_irqrestore(&pmac_pic_lock, flags);
+	atomic_spin_unlock_irqrestore(&pmac_pic_lock, flags);
 	return 1;
 }

@@ -210,7 +210,7 @@ static irqreturn_t gatwick_action(int cpl, void *dev_id)
 	int irq, bits;
 	int rc = IRQ_NONE;
-	spin_lock_irqsave(&pmac_pic_lock, flags);
+	atomic_spin_lock_irqsave(&pmac_pic_lock, flags);
 	for (irq = max_irqs; (irq -= 32) >= max_real_irqs; ) {
 		int i = irq >> 5;
 		bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];

@@ -220,12 +220,12 @@ static irqreturn_t gatwick_action(int cpl, void *dev_id)
 		if (bits == 0)
 			continue;
 		irq += __ilog2(bits);
-		spin_unlock_irqrestore(&pmac_pic_lock, flags);
+		atomic_spin_unlock_irqrestore(&pmac_pic_lock, flags);
 		generic_handle_irq(irq);
-		spin_lock_irqsave(&pmac_pic_lock, flags);
+		atomic_spin_lock_irqsave(&pmac_pic_lock, flags);
 		rc = IRQ_HANDLED;
 	}
-	spin_unlock_irqrestore(&pmac_pic_lock, flags);
+	atomic_spin_unlock_irqrestore(&pmac_pic_lock, flags);
 	return rc;
 }

@@ -244,7 +244,7 @@ static unsigned int pmac_pic_get_irq(void)
 		return NO_IRQ_IGNORE;	/* ignore, already handled */
 	}
 #endif /* CONFIG_SMP */
-	spin_lock_irqsave(&pmac_pic_lock, flags);
+	atomic_spin_lock_irqsave(&pmac_pic_lock, flags);
 	for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
 		int i = irq >> 5;
 		bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];

@@ -256,7 +256,7 @@ static unsigned int pmac_pic_get_irq(void)
 		irq += __ilog2(bits);
 		break;
 	}
-	spin_unlock_irqrestore(&pmac_pic_lock, flags);
+	atomic_spin_unlock_irqrestore(&pmac_pic_lock, flags);
 	if (unlikely(irq < 0))
 		return NO_IRQ;
 	return irq_linear_revmap(pmac_pic_host, irq);
arch/powerpc/platforms/pseries/eeh.c

@@ -100,7 +100,7 @@ int eeh_subsystem_enabled;
 EXPORT_SYMBOL(eeh_subsystem_enabled);
 /* Lock to avoid races due to multiple reports of an error */
-static DEFINE_SPINLOCK(confirm_error_lock);
+static DEFINE_ATOMIC_SPINLOCK(confirm_error_lock);
 /* Buffer for reporting slot-error-detail rtas calls. Its here
  * in BSS, and not dynamically alloced, so that it ends up in

@@ -436,7 +436,7 @@ static void __eeh_clear_slot(struct device_node *parent, int mode_flag)
 void eeh_clear_slot(struct device_node *dn, int mode_flag)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&confirm_error_lock, flags);
+	atomic_spin_lock_irqsave(&confirm_error_lock, flags);
 	dn = find_device_pe(dn);

@@ -447,7 +447,7 @@ void eeh_clear_slot (struct device_node *dn, int mode_flag)
 	PCI_DN(dn)->eeh_mode &= ~mode_flag;
 	PCI_DN(dn)->eeh_check_count = 0;
 	__eeh_clear_slot(dn, mode_flag);
-	spin_unlock_irqrestore(&confirm_error_lock, flags);
+	atomic_spin_unlock_irqrestore(&confirm_error_lock, flags);
 }
 /**

@@ -506,7 +506,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
 	 * in one slot might report errors simultaneously, and we
 	 * only want one error recovery routine running.
 	 */
-	spin_lock_irqsave(&confirm_error_lock, flags);
+	atomic_spin_lock_irqsave(&confirm_error_lock, flags);
 	rc = 1;
 	if (pdn->eeh_mode & EEH_MODE_ISOLATED) {
 		pdn->eeh_check_count++;

@@ -575,7 +575,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
 	 * with other functions on this device, and functions under
 	 * bridges. */
 	eeh_mark_slot(dn, EEH_MODE_ISOLATED);
-	spin_unlock_irqrestore(&confirm_error_lock, flags);
+	atomic_spin_unlock_irqrestore(&confirm_error_lock, flags);
 	eeh_send_failure_event(dn, dev);

@@ -586,7 +586,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
 	return 1;
 dn_unlock:
-	spin_unlock_irqrestore(&confirm_error_lock, flags);
+	atomic_spin_unlock_irqrestore(&confirm_error_lock, flags);
 	return rc;
 }

@@ -1056,7 +1056,7 @@ void __init eeh_init(void)
 	struct device_node *phb, *np;
 	struct eeh_early_enable_info info;
-	spin_lock_init(&confirm_error_lock);
+	atomic_spin_lock_init(&confirm_error_lock);
 	spin_lock_init(&slot_errbuf_lock);
 	np = of_find_node_by_path("/rtas");
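Note the asymmetry left in eeh_init(): only confirm_error_lock, taken on the error-detection path, becomes an atomic spinlock; slot_errbuf_lock keeps its spin_lock_init() and thus remains an ordinary (on RT, sleeping) lock. My reading is that only the former can be reached from contexts that must not sleep:

	atomic_spin_lock_init(&confirm_error_lock);	/* non-sleeping error path */
	spin_lock_init(&slot_errbuf_lock);		/* unchanged: may sleep on RT */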
arch/powerpc/sysdev/i8259.c

@@ -23,7 +23,7 @@ static unsigned char cached_8259[2] = { 0xff, 0xff };
 #define cached_A1 (cached_8259[0])
 #define cached_21 (cached_8259[1])
-static DEFINE_SPINLOCK(i8259_lock);
+static DEFINE_ATOMIC_SPINLOCK(i8259_lock);
 static struct irq_host *i8259_host;

@@ -42,7 +42,7 @@ unsigned int i8259_irq(void)
 	if (pci_intack)
 		irq = readb(pci_intack);
 	else {
-		spin_lock(&i8259_lock);
+		atomic_spin_lock(&i8259_lock);
 		lock = 1;
 		/* Perform an interrupt acknowledge cycle on controller 1. */

@@ -74,7 +74,7 @@ unsigned int i8259_irq(void)
 		irq = NO_IRQ;
 	if (lock)
-		spin_unlock(&i8259_lock);
+		atomic_spin_unlock(&i8259_lock);
 	return irq;
 }

@@ -82,7 +82,7 @@ static void i8259_mask_and_ack_irq(unsigned int irq_nr)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&i8259_lock, flags);
+	atomic_spin_lock_irqsave(&i8259_lock, flags);
 	if (irq_nr > 7) {
 		cached_A1 |= 1 << (irq_nr - 8);
 		inb(0xA1);	/* DUMMY */

@@ -95,7 +95,7 @@ static void i8259_mask_and_ack_irq(unsigned int irq_nr)
 		outb(cached_21, 0x21);
 		outb(0x20, 0x20);	/* Non-specific EOI */
 	}
-	spin_unlock_irqrestore(&i8259_lock, flags);
+	atomic_spin_unlock_irqrestore(&i8259_lock, flags);
 }
 static void i8259_set_irq_mask(int irq_nr)

@@ -110,13 +110,13 @@ static void i8259_mask_irq(unsigned int irq_nr)
 	pr_debug("i8259_mask_irq(%d)\n", irq_nr);
-	spin_lock_irqsave(&i8259_lock, flags);
+	atomic_spin_lock_irqsave(&i8259_lock, flags);
 	if (irq_nr < 8)
 		cached_21 |= 1 << irq_nr;
 	else
 		cached_A1 |= 1 << (irq_nr - 8);
 	i8259_set_irq_mask(irq_nr);
-	spin_unlock_irqrestore(&i8259_lock, flags);
+	atomic_spin_unlock_irqrestore(&i8259_lock, flags);
 }
 static void i8259_unmask_irq(unsigned int irq_nr)

@@ -125,13 +125,13 @@ static void i8259_unmask_irq(unsigned int irq_nr)
 	pr_debug("i8259_unmask_irq(%d)\n", irq_nr);
-	spin_lock_irqsave(&i8259_lock, flags);
+	atomic_spin_lock_irqsave(&i8259_lock, flags);
 	if (irq_nr < 8)
 		cached_21 &= ~(1 << irq_nr);
 	else
 		cached_A1 &= ~(1 << (irq_nr - 8));
 	i8259_set_irq_mask(irq_nr);
-	spin_unlock_irqrestore(&i8259_lock, flags);
+	atomic_spin_unlock_irqrestore(&i8259_lock, flags);
 }
 static struct irq_chip i8259_pic = {

@@ -241,7 +241,7 @@ void i8259_init(struct device_node *node, unsigned long intack_addr)
 	unsigned long flags;
 	/* initialize the controller */
-	spin_lock_irqsave(&i8259_lock, flags);
+	atomic_spin_lock_irqsave(&i8259_lock, flags);
 	/* Mask all first */
 	outb(0xff, 0xA1);

@@ -273,7 +273,7 @@ void i8259_init(struct device_node *node, unsigned long intack_addr)
 	outb(cached_A1, 0xA1);
 	outb(cached_21, 0x21);
-	spin_unlock_irqrestore(&i8259_lock, flags);
+	atomic_spin_unlock_irqrestore(&i8259_lock, flags);
 	/* create a legacy host */
 	i8259_host = irq_alloc_host(node, IRQ_HOST_MAP_LEGACY,
arch/powerpc/sysdev/ipic.c

@@ -32,7 +32,7 @@
 static struct ipic *primary_ipic;
 static struct irq_chip ipic_level_irq_chip, ipic_edge_irq_chip;
-static DEFINE_SPINLOCK(ipic_lock);
+static DEFINE_ATOMIC_SPINLOCK(ipic_lock);
 static struct ipic_info ipic_info[] = {
 	[1] = {

@@ -530,13 +530,13 @@ static void ipic_unmask_irq(unsigned int virq)
 	unsigned long flags;
 	u32 temp;
-	spin_lock_irqsave(&ipic_lock, flags);
+	atomic_spin_lock_irqsave(&ipic_lock, flags);
 	temp = ipic_read(ipic->regs, ipic_info[src].mask);
 	temp |= (1 << (31 - ipic_info[src].bit));
 	ipic_write(ipic->regs, ipic_info[src].mask, temp);
-	spin_unlock_irqrestore(&ipic_lock, flags);
+	atomic_spin_unlock_irqrestore(&ipic_lock, flags);
 }
 static void ipic_mask_irq(unsigned int virq)

@@ -546,7 +546,7 @@ static void ipic_mask_irq(unsigned int virq)
 	unsigned long flags;
 	u32 temp;
-	spin_lock_irqsave(&ipic_lock, flags);
+	atomic_spin_lock_irqsave(&ipic_lock, flags);
 	temp = ipic_read(ipic->regs, ipic_info[src].mask);
 	temp &= ~(1 << (31 - ipic_info[src].bit));

@@ -556,7 +556,7 @@ static void ipic_mask_irq(unsigned int virq)
 	 * for nearly all cases. */
 	mb();
-	spin_unlock_irqrestore(&ipic_lock, flags);
+	atomic_spin_unlock_irqrestore(&ipic_lock, flags);
 }
 static void ipic_ack_irq(unsigned int virq)

@@ -566,7 +566,7 @@ static void ipic_ack_irq(unsigned int virq)
 	unsigned long flags;
 	u32 temp;
-	spin_lock_irqsave(&ipic_lock, flags);
+	atomic_spin_lock_irqsave(&ipic_lock, flags);
 	temp = 1 << (31 - ipic_info[src].bit);
 	ipic_write(ipic->regs, ipic_info[src].ack, temp);

@@ -575,7 +575,7 @@ static void ipic_ack_irq(unsigned int virq)
 	 * for nearly all cases. */
 	mb();
-	spin_unlock_irqrestore(&ipic_lock, flags);
+	atomic_spin_unlock_irqrestore(&ipic_lock, flags);
 }
 static void ipic_mask_irq_and_ack(unsigned int virq)

@@ -585,7 +585,7 @@ static void ipic_mask_irq_and_ack(unsigned int virq)
 	unsigned long flags;
 	u32 temp;
-	spin_lock_irqsave(&ipic_lock, flags);
+	atomic_spin_lock_irqsave(&ipic_lock, flags);
 	temp = ipic_read(ipic->regs, ipic_info[src].mask);
 	temp &= ~(1 << (31 - ipic_info[src].bit));

@@ -598,7 +598,7 @@ static void ipic_mask_irq_and_ack(unsigned int virq)
 	 * for nearly all cases. */
 	mb();
-	spin_unlock_irqrestore(&ipic_lock, flags);
+	atomic_spin_unlock_irqrestore(&ipic_lock, flags);
 }
 static int ipic_set_irq_type(unsigned int virq, unsigned int flow_type)
arch/powerpc/sysdev/mpic.c

@@ -46,7 +46,7 @@
 static struct mpic *mpics;
 static struct mpic *mpic_primary;
-static DEFINE_SPINLOCK(mpic_lock);
+static DEFINE_ATOMIC_SPINLOCK(mpic_lock);
 #ifdef CONFIG_PPC32	/* XXX for now */
 #ifdef CONFIG_IRQ_ALL_CPUS

@@ -344,10 +344,10 @@ static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source)
 		unsigned int mask = 1U << (fixup->index & 0x1f);
 		writel(mask, fixup->applebase + soff);
 	} else {
-		spin_lock(&mpic->fixup_lock);
+		atomic_spin_lock(&mpic->fixup_lock);
 		writeb(0x11 + 2 * fixup->index, fixup->base + 2);
 		writel(fixup->data, fixup->base + 4);
-		spin_unlock(&mpic->fixup_lock);
+		atomic_spin_unlock(&mpic->fixup_lock);
 	}
 }

@@ -363,7 +363,7 @@ static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source,
 	DBG("startup_ht_interrupt(0x%x, 0x%x) index: %d\n",
 	    source, irqflags, fixup->index);
-	spin_lock_irqsave(&mpic->fixup_lock, flags);
+	atomic_spin_lock_irqsave(&mpic->fixup_lock, flags);
 	/* Enable and configure */
 	writeb(0x10 + 2 * fixup->index, fixup->base + 2);
 	tmp = readl(fixup->base + 4);

@@ -371,7 +371,7 @@ static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source,
 	if (irqflags & IRQ_LEVEL)
 		tmp |= 0x22;
 	writel(tmp, fixup->base + 4);
-	spin_unlock_irqrestore(&mpic->fixup_lock, flags);
+	atomic_spin_unlock_irqrestore(&mpic->fixup_lock, flags);
 #ifdef CONFIG_PM
 	/* use the lowest bit inverted to the actual HW,

@@ -393,12 +393,12 @@ static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source,
 	DBG("shutdown_ht_interrupt(0x%x, 0x%x)\n", source, irqflags);
 	/* Disable */
-	spin_lock_irqsave(&mpic->fixup_lock, flags);
+	atomic_spin_lock_irqsave(&mpic->fixup_lock, flags);
 	writeb(0x10 + 2 * fixup->index, fixup->base + 2);
 	tmp = readl(fixup->base + 4);
 	tmp |= 1;
 	writel(tmp, fixup->base + 4);
-	spin_unlock_irqrestore(&mpic->fixup_lock, flags);
+	atomic_spin_unlock_irqrestore(&mpic->fixup_lock, flags);
 #ifdef CONFIG_PM
 	/* use the lowest bit inverted to the actual HW,

@@ -512,7 +512,7 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
 	BUG_ON(mpic->fixups == NULL);
 	/* Init spinlock */
-	spin_lock_init(&mpic->fixup_lock);
+	atomic_spin_lock_init(&mpic->fixup_lock);
 	/* Map U3 config space. We assume all IO-APICs are on the primary bus
 	 * so we only need to map 64kB.

@@ -572,12 +572,12 @@ static int irq_choose_cpu(unsigned int virt_irq)
 	cpumask_copy(&mask, irq_desc[virt_irq].affinity);
 	if (cpus_equal(mask, CPU_MASK_ALL)) {
 		static int irq_rover;
-		static DEFINE_SPINLOCK(irq_rover_lock);
+		static DEFINE_ATOMIC_SPINLOCK(irq_rover_lock);
 		unsigned long flags;
 		/* Round-robin distribution... */
 	do_round_robin:
-		spin_lock_irqsave(&irq_rover_lock, flags);
+		atomic_spin_lock_irqsave(&irq_rover_lock, flags);
 		while (!cpu_online(irq_rover)) {
 			if (++irq_rover >= NR_CPUS)

@@ -589,7 +589,7 @@ static int irq_choose_cpu(unsigned int virt_irq)
 				irq_rover = 0;
 		} while (!cpu_online(irq_rover));
-		spin_unlock_irqrestore(&irq_rover_lock, flags);
+		atomic_spin_unlock_irqrestore(&irq_rover_lock, flags);
 	} else {
 		cpumask_t tmp;

@@ -1372,14 +1372,14 @@ void __init mpic_set_serial_int(struct mpic *mpic, int enable)
 	unsigned long flags;
 	u32 v;
-	spin_lock_irqsave(&mpic_lock, flags);
+	atomic_spin_lock_irqsave(&mpic_lock, flags);
 	v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
 	if (enable)
 		v |= MPIC_GREG_GLOBAL_CONF_1_SIE;
 	else
 		v &= ~MPIC_GREG_GLOBAL_CONF_1_SIE;
 	mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
-	spin_unlock_irqrestore(&mpic_lock, flags);
+	atomic_spin_unlock_irqrestore(&mpic_lock, flags);
 }
 void mpic_irq_set_priority(unsigned int irq, unsigned int pri)

@@ -1392,7 +1392,7 @@ void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
 	if (!mpic)
 		return;
-	spin_lock_irqsave(&mpic_lock, flags);
+	atomic_spin_lock_irqsave(&mpic_lock, flags);
 	if (mpic_is_ipi(mpic, irq)) {
 		reg = mpic_ipi_read(src - mpic->ipi_vecs[0]) &
 			~MPIC_VECPRI_PRIORITY_MASK;

@@ -1404,7 +1404,7 @@ void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
 		mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
 			       reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
 	}
-	spin_unlock_irqrestore(&mpic_lock, flags);
+	atomic_spin_unlock_irqrestore(&mpic_lock, flags);
 }
 void mpic_setup_this_cpu(void)

@@ -1419,7 +1419,7 @@ void mpic_setup_this_cpu(void)
 	DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
-	spin_lock_irqsave(&mpic_lock, flags);
+	atomic_spin_lock_irqsave(&mpic_lock, flags);
 	/* let the mpic know we want intrs. default affinity is 0xffffffff
 	 * until changed via /proc. That's how it's done on x86. If we want

@@ -1435,7 +1435,7 @@ void mpic_setup_this_cpu(void)
 	/* Set current processor priority to 0 */
 	mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0);
-	spin_unlock_irqrestore(&mpic_lock, flags);
+	atomic_spin_unlock_irqrestore(&mpic_lock, flags);
 #endif /* CONFIG_SMP */
 }

@@ -1464,7 +1464,7 @@ void mpic_teardown_this_cpu(int secondary)
 	BUG_ON(mpic == NULL);
 	DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
-	spin_lock_irqsave(&mpic_lock, flags);
+	atomic_spin_lock_irqsave(&mpic_lock, flags);
 	/* let the mpic know we don't want intrs. */
 	for (i = 0; i < mpic->num_sources; i++)

@@ -1478,7 +1478,7 @@ void mpic_teardown_this_cpu(int secondary)
 	 */
 	mpic_eoi(mpic);
-	spin_unlock_irqrestore(&mpic_lock, flags);
+	atomic_spin_unlock_irqrestore(&mpic_lock, flags);
 }
drivers/of/base.c

@@ -25,7 +25,7 @@ struct device_node *allnodes;
 /* use when traversing tree through the allnext, child, sibling,
  * or parent members of struct device_node.
  */
-DEFINE_RWLOCK(devtree_lock);
+DEFINE_ATOMIC_SPINLOCK(devtree_lock);
 int of_n_addr_cells(struct device_node *np)
 {

@@ -68,7 +68,7 @@ struct property *of_find_property(const struct device_node *np,
 	if (!np)
 		return NULL;
-	read_lock(&devtree_lock);
+	atomic_spin_lock(&devtree_lock);
 	for (pp = np->properties; pp != 0; pp = pp->next) {
 		if (of_prop_cmp(pp->name, name) == 0) {
 			if (lenp != 0)

@@ -76,7 +76,7 @@ struct property *of_find_property(const struct device_node *np,
 			break;
 		}
 	}
-	read_unlock(&devtree_lock);
+	atomic_spin_unlock(&devtree_lock);
 	return pp;
 }

@@ -159,9 +159,9 @@ struct device_node *of_get_parent(const struct device_node *node)
 	if (!node)
 		return NULL;
-	read_lock(&devtree_lock);
+	atomic_spin_lock(&devtree_lock);
 	np = of_node_get(node->parent);
-	read_unlock(&devtree_lock);
+	atomic_spin_unlock(&devtree_lock);
 	return np;
 }
 EXPORT_SYMBOL(of_get_parent);

@@ -184,10 +184,10 @@ struct device_node *of_get_next_parent(struct device_node *node)
 	if (!node)
 		return NULL;
-	read_lock(&devtree_lock);
+	atomic_spin_lock(&devtree_lock);
 	parent = of_node_get(node->parent);
 	of_node_put(node);
-	read_unlock(&devtree_lock);
+	atomic_spin_unlock(&devtree_lock);
 	return parent;
 }

@@ -204,13 +204,13 @@ struct device_node *of_get_next_child(const struct device_node *node,
 {
 	struct device_node *next;
-	read_lock(&devtree_lock);
+	atomic_spin_lock(&devtree_lock);
 	next = prev ? prev->sibling : node->child;
 	for (; next; next = next->sibling)
 		if (of_node_get(next))
 			break;
 	of_node_put(prev);
-	read_unlock(&devtree_lock);
+	atomic_spin_unlock(&devtree_lock);
 	return next;
 }
 EXPORT_SYMBOL(of_get_next_child);

@@ -226,13 +226,13 @@ struct device_node *of_find_node_by_path(const char *path)
 {
 	struct device_node *np = allnodes;
-	read_lock(&devtree_lock);
+	atomic_spin_lock(&devtree_lock);
 	for (; np; np = np->allnext) {
 		if (np->full_name && (of_node_cmp(np->full_name, path) == 0)
 		    && of_node_get(np))
			break;
 	}
-	read_unlock(&devtree_lock);
+	atomic_spin_unlock(&devtree_lock);
 	return np;
 }
 EXPORT_SYMBOL(of_find_node_by_path);

@@ -253,14 +253,14 @@ struct device_node *of_find_node_by_name(struct device_node *from,
 {
 	struct device_node *np;
-	read_lock(&devtree_lock);
+	atomic_spin_lock(&devtree_lock);
 	np = from ? from->allnext : allnodes;
 	for (; np; np = np->allnext)
 		if (np->name && (of_node_cmp(np->name, name) == 0)
 		    && of_node_get(np))
 			break;
 	of_node_put(from);
-	read_unlock(&devtree_lock);
+	atomic_spin_unlock(&devtree_lock);
 	return np;
 }
 EXPORT_SYMBOL(of_find_node_by_name);

@@ -282,14 +282,14 @@ struct device_node *of_find_node_by_type(struct device_node *from,
 {
 	struct device_node *np;
-	read_lock(&devtree_lock);
+	atomic_spin_lock(&devtree_lock);
 	np = from ? from->allnext : allnodes;
 	for (; np; np = np->allnext)
 		if (np->type && (of_node_cmp(np->type, type) == 0)
 		    && of_node_get(np))
 			break;
 	of_node_put(from);
-	read_unlock(&devtree_lock);
+	atomic_spin_unlock(&devtree_lock);
 	return np;
 }
 EXPORT_SYMBOL(of_find_node_by_type);

@@ -313,7 +313,7 @@ struct device_node *of_find_compatible_node(struct device_node *from,
 {
 	struct device_node *np;
-	read_lock(&devtree_lock);
+	atomic_spin_lock(&devtree_lock);
 	np = from ? from->allnext : allnodes;
 	for (; np; np = np->allnext) {
 		if (type

@@ -323,7 +323,7 @@ struct device_node *of_find_compatible_node(struct device_node *from,
 			break;
 	}
 	of_node_put(from);
-	read_unlock(&devtree_lock);
+	atomic_spin_unlock(&devtree_lock);
 	return np;
 }
 EXPORT_SYMBOL(of_find_compatible_node);

@@ -346,7 +346,7 @@ struct device_node *of_find_node_with_property(struct device_node *from,
 	struct device_node *np;
 	struct property *pp;
-	read_lock(&devtree_lock);
+	atomic_spin_lock(&devtree_lock);
 	np = from ? from->allnext : allnodes;
 	for (; np; np = np->allnext) {
 		for (pp = np->properties; pp != 0; pp = pp->next) {

@@ -358,7 +358,7 @@ struct device_node *of_find_node_with_property(struct device_node *from,
 	}
 out:
 	of_node_put(from);
-	read_unlock(&devtree_lock);
+	atomic_spin_unlock(&devtree_lock);
 	return np;
 }
 EXPORT_SYMBOL(of_find_node_with_property);

@@ -409,14 +409,14 @@ struct device_node *of_find_matching_node(struct device_node *from,
 {
 	struct device_node *np;
-	read_lock(&devtree_lock);
+	atomic_spin_lock(&devtree_lock);
 	np = from ? from->allnext : allnodes;
 	for (; np; np = np->allnext) {
 		if (of_match_node(matches, np) && of_node_get(np))
 			break;
 	}
 	of_node_put(from);
-	read_unlock(&devtree_lock);
+	atomic_spin_unlock(&devtree_lock);
 	return np;
 }
 EXPORT_SYMBOL(of_find_matching_node);
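drivers/of/base.c is the one file where the conversion changes semantics rather than just the type: devtree_lock was an rwlock_t, so concurrent readers used to proceed in parallel, and every read_lock()/read_unlock() now becomes an exclusive atomic_spin_lock()/atomic_spin_unlock(). Device-tree traversals therefore serialize. A before/after sketch on a hypothetical traversal helper (the real ones are in the hunks above):

struct device_node *demo_first_node(void)	/* hypothetical helper */
{
	struct device_node *np;

	atomic_spin_lock(&devtree_lock);	/* was: read_lock(&devtree_lock) */
	for (np = allnodes; np; np = np->allnext)
		if (of_node_get(np))
			break;
	atomic_spin_unlock(&devtree_lock);	/* was: read_unlock(&devtree_lock) */
	return np;
}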