Commit 944b380e, authored Feb 14, 2007 by Paul Mackerras
    Merge branch 'cell-merge' of
    git+ssh://master.kernel.org/pub/scm/linux/kernel/git/arnd/cell-2.6

Parents: fff5f528, c7eb7347
Showing 10 changed files, with 479 additions and 283 deletions:
    arch/powerpc/oprofile/op_model_cell.c         +94   -55
    arch/powerpc/platforms/cell/pmu.c              +4   -10
    arch/powerpc/platforms/cell/spufs/context.c   +60   -65
    arch/powerpc/platforms/cell/spufs/file.c       +5    -2
    arch/powerpc/platforms/cell/spufs/run.c       +10    -6
    arch/powerpc/platforms/cell/spufs/sched.c    +267  -119
    arch/powerpc/platforms/cell/spufs/spufs.h     +33   -15
    arch/powerpc/xmon/xmon.c                       +0    -1
    include/asm-powerpc/cell-pmu.h                 +6    -9
    include/asm-powerpc/spu.h                      +0    -1
arch/powerpc/oprofile/op_model_cell.c

@@ -39,10 +39,17 @@
 #include "../platforms/cell/interrupt.h"

 #define PPU_CYCLES_EVENT_NUM 1    /* event number for CYCLES */
 #define PPU_CYCLES_GRP_NUM   1    /* special group number for identifying
                                    * PPU_CYCLES event
                                    */
 #define CBE_COUNT_ALL_CYCLES 0x42800000    /* PPU cycle event specifier */

-#define NUM_THREADS 2
-#define VIRT_CNTR_SW_TIME_NS 100000000    // 0.5 seconds
+#define NUM_THREADS 2    /* number of physical threads in
+                          * physical processor
+                          */
+#define NUM_TRACE_BUS_WORDS 4
+#define NUM_INPUT_BUS_WORDS 2

 struct pmc_cntrl_data {
     unsigned long vcntr;

@@ -58,7 +65,7 @@ struct pmc_cntrl_data {
 struct pm_signal {
     u16 cpu;                /* Processor to modify */
     u16 sub_unit;           /* hw subunit this applies to (if applicable) */
-    u16 signal_group;       /* Signal Group to Enable/Disable */
+    short int signal_group; /* Signal Group to Enable/Disable */
     u8 bus_word;            /* Enable/Disable on this Trace/Trigger/Event
                              * Bus Word(s) (bitmask)
                              */

@@ -93,7 +100,6 @@ static struct {
     u32 pm07_cntrl[NR_PHYS_CTRS];
 } pm_regs;

 #define GET_SUB_UNIT(x) ((x & 0x0000f000) >> 12)
 #define GET_BUS_WORD(x) ((x & 0x000000f0) >> 4)
 #define GET_BUS_TYPE(x) ((x & 0x00000300) >> 8)

@@ -101,7 +107,6 @@ static struct {
 #define GET_COUNT_CYCLES(x) (x & 0x00000001)
 #define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2)

 static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values);

 static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS];

@@ -129,8 +134,8 @@ static spinlock_t virt_cntr_lock = SPIN_LOCK_UNLOCKED;
 static u32 ctr_enabled;

-static unsigned char trace_bus[4];
-static unsigned char input_bus[2];
+static unsigned char trace_bus[NUM_TRACE_BUS_WORDS];
+static unsigned char input_bus[NUM_INPUT_BUS_WORDS];

 /*
  * Firmware interface functions

@@ -177,25 +182,40 @@ static void pm_rtas_reset_signals(u32 node)
 static void pm_rtas_activate_signals(u32 node, u32 count)
 {
     int ret;
-    int j;
+    int i, j;
     struct pm_signal pm_signal_local[NR_PHYS_CTRS];

+    /* There is no debug setup required for the cycles event.
+     * Note that only events in the same group can be used.
+     * Otherwise, there will be conflicts in correctly routing
+     * the signals on the debug bus.  It is the responsiblity
+     * of the OProfile user tool to check the events are in
+     * the same group.
+     */
+    i = 0;
     for (j = 0; j < count; j++) {
-        /* fw expects physical cpu # */
-        pm_signal_local[j].cpu = node;
-        pm_signal_local[j].signal_group = pm_signal[j].signal_group;
-        pm_signal_local[j].bus_word = pm_signal[j].bus_word;
-        pm_signal_local[j].sub_unit = pm_signal[j].sub_unit;
-        pm_signal_local[j].bit = pm_signal[j].bit;
+        if (pm_signal[j].signal_group != PPU_CYCLES_GRP_NUM) {
+            /* fw expects physical cpu # */
+            pm_signal_local[i].cpu = node;
+            pm_signal_local[i].signal_group
+                = pm_signal[j].signal_group;
+            pm_signal_local[i].bus_word = pm_signal[j].bus_word;
+            pm_signal_local[i].sub_unit = pm_signal[j].sub_unit;
+            pm_signal_local[i].bit = pm_signal[j].bit;
+            i++;
+        }
     }

-    ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE, PASSTHRU_ENABLE,
-                                 pm_signal_local,
-                                 count * sizeof(struct pm_signal));
-
-    if (ret)
-        printk(KERN_WARNING "%s: rtas returned: %d\n",
-               __FUNCTION__, ret);
+    if (i != 0) {
+        ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE, PASSTHRU_ENABLE,
+                                     pm_signal_local,
+                                     i * sizeof(struct pm_signal));
+
+        if (ret)
+            printk(KERN_WARNING "%s: rtas returned: %d\n",
+                   __FUNCTION__, ret);
+    }
 }

 /*

@@ -212,7 +232,7 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask)
         /* Special Event: Count all cpu cycles */
         pm_regs.pm07_cntrl[ctr] = CBE_COUNT_ALL_CYCLES;
         p = &(pm_signal[ctr]);
-        p->signal_group = 21;
+        p->signal_group = PPU_CYCLES_GRP_NUM;
         p->bus_word = 1;
         p->sub_unit = 0;
         p->bit = 0;
@@ -232,13 +252,21 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask)
         p->signal_group = event / 100;
         p->bus_word = bus_word;
-        p->sub_unit = unit_mask & 0x0000f000;
+        p->sub_unit = (unit_mask & 0x0000f000) >> 12;

         pm_regs.pm07_cntrl[ctr] = 0;
         pm_regs.pm07_cntrl[ctr] |= PM07_CTR_COUNT_CYCLES(count_cycles);
         pm_regs.pm07_cntrl[ctr] |= PM07_CTR_POLARITY(polarity);
         pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_CONTROL(input_control);

+    /* Some of the islands signal selection is based on 64 bit words.
+     * The debug bus words are 32 bits, the input words to the performance
+     * counters are defined as 32 bits.  Need to convert the 64 bit island
+     * specification to the appropriate 32 input bit and bus word for the
+     * performance counter event selection.  See the CELL Performance
+     * monitoring signals manual and the Perf cntr hardware descriptions
+     * for the details.
+     */
     if (input_control == 0) {
         if (signal_bit > 31) {
             signal_bit -= 32;
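The sub_unit change above brings set_pm_event() in line with the existing GET_SUB_UNIT() macro: the subunit id sits in bits 12-15 of unit_mask, so masking without shifting leaves the value scaled by 0x1000. A minimal stand-alone C sketch of the before/after decode (the unit_mask value below is made up for illustration; this is not kernel code):

    #include <stdio.h>

    /* Same bit layout as the GET_SUB_UNIT() macro in op_model_cell.c. */
    #define GET_SUB_UNIT(x) (((x) & 0x0000f000) >> 12)

    int main(void)
    {
        unsigned int unit_mask = 0x00003020;    /* hypothetical unit_mask value */

        unsigned int old_sub_unit = unit_mask & 0x0000f000;    /* pre-patch:  0x3000 */
        unsigned int new_sub_unit = GET_SUB_UNIT(unit_mask);   /* post-patch: 0x3    */

        printf("old=%#x new=%#x\n", old_sub_unit, new_sub_unit);
        return 0;
    }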
@@ -259,12 +287,12 @@ static void set_pm_event(u32 ctr, int event, u32 unit_mask)
         p->bit = signal_bit;
     }

-    for (i = 0; i < 4; i++) {
+    for (i = 0; i < NUM_TRACE_BUS_WORDS; i++) {
         if (bus_word & (1 << i)) {
             pm_regs.debug_bus_control |=
                 (bus_type << (31 - (2 * i) + 1));

-            for (j = 0; j < 2; j++) {
+            for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) {
                 if (input_bus[j] == 0xff) {
                     input_bus[j] = i;
                     pm_regs.group_control |=
@@ -278,52 +306,58 @@ out:
     ;
 }

-static void write_pm_cntrl(int cpu, struct pm_cntrl *pm_cntrl)
+static void write_pm_cntrl(int cpu)
 {
-    /* Oprofile will use 32 bit counters, set bits 7:10 to 0 */
+    /* Oprofile will use 32 bit counters, set bits 7:10 to 0
+     * pmregs.pm_cntrl is a global
+     */
+
     u32 val = 0;
-    if (pm_cntrl->enable == 1)
+    if (pm_regs.pm_cntrl.enable == 1)
         val |= CBE_PM_ENABLE_PERF_MON;

-    if (pm_cntrl->stop_at_max == 1)
+    if (pm_regs.pm_cntrl.stop_at_max == 1)
         val |= CBE_PM_STOP_AT_MAX;

-    if (pm_cntrl->trace_mode == 1)
-        val |= CBE_PM_TRACE_MODE_SET(pm_cntrl->trace_mode);
+    if (pm_regs.pm_cntrl.trace_mode == 1)
+        val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode);

-    if (pm_cntrl->freeze == 1)
+    if (pm_regs.pm_cntrl.freeze == 1)
         val |= CBE_PM_FREEZE_ALL_CTRS;

     /* Routine set_count_mode must be called previously to set
      * the count mode based on the user selection of user and kernel.
      */
-    val |= CBE_PM_COUNT_MODE_SET(pm_cntrl->count_mode);
+    val |= CBE_PM_COUNT_MODE_SET(pm_regs.pm_cntrl.count_mode);
     cbe_write_pm(cpu, pm_control, val);
 }

 static inline void
-set_count_mode(u32 kernel, u32 user, struct pm_cntrl *pm_cntrl)
+set_count_mode(u32 kernel, u32 user)
 {
     /* The user must specify user and kernel if they want them. If
-     *  neither is specified, OProfile will count in hypervisor mode
+     *  neither is specified, OProfile will count in hypervisor mode.
+     *  pm_regs.pm_cntrl is a global
      */
     if (kernel) {
         if (user)
-            pm_cntrl->count_mode = CBE_COUNT_ALL_MODES;
+            pm_regs.pm_cntrl.count_mode = CBE_COUNT_ALL_MODES;
         else
-            pm_cntrl->count_mode = CBE_COUNT_SUPERVISOR_MODE;
+            pm_regs.pm_cntrl.count_mode =
+                CBE_COUNT_SUPERVISOR_MODE;
     } else {
         if (user)
-            pm_cntrl->count_mode = CBE_COUNT_PROBLEM_MODE;
+            pm_regs.pm_cntrl.count_mode = CBE_COUNT_PROBLEM_MODE;
         else
-            pm_cntrl->count_mode = CBE_COUNT_HYPERVISOR_MODE;
+            pm_regs.pm_cntrl.count_mode =
+                CBE_COUNT_HYPERVISOR_MODE;
     }
 }

 static inline void enable_ctr(u32 cpu, u32 ctr, u32 *pm07_cntrl)
 {
-    pm07_cntrl[ctr] |= PM07_CTR_ENABLE(1);
+    pm07_cntrl[ctr] |= CBE_PM_CTR_ENABLE;
     cbe_write_pm07_control(cpu, ctr, pm07_cntrl[ctr]);
 }
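set_count_mode() now writes straight into the global pm_regs.pm_cntrl rather than taking a pm_cntrl pointer, but the decision table itself is unchanged: the kernel/user switches select one of four CBE_COUNT_* modes, which write_pm_cntrl() later folds into the pm_control register with CBE_PM_COUNT_MODE_SET(). A hedged user-space sketch of that table (PROBLEM_MODE and ALL_MODES values appear in cell-pmu.h below; the SUPERVISOR/HYPERVISOR values here are assumed for the demo):

    #include <stdio.h>

    /* Count-mode values; 2 and 3 are from cell-pmu.h, 0 and 1 are assumed. */
    #define CBE_COUNT_SUPERVISOR_MODE 0
    #define CBE_COUNT_HYPERVISOR_MODE 1
    #define CBE_COUNT_PROBLEM_MODE    2
    #define CBE_COUNT_ALL_MODES       3

    /* Same decision table as set_count_mode(): kernel/user flags -> mode. */
    static int count_mode(int kernel, int user)
    {
        if (kernel)
            return user ? CBE_COUNT_ALL_MODES : CBE_COUNT_SUPERVISOR_MODE;
        return user ? CBE_COUNT_PROBLEM_MODE : CBE_COUNT_HYPERVISOR_MODE;
    }

    int main(void)
    {
        printf("kernel+user -> %d\n", count_mode(1, 1));    /* 3: all modes */
        printf("user only   -> %d\n", count_mode(0, 1));    /* 2: problem state */
        return 0;
    }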
@@ -365,6 +399,14 @@ static void cell_virtual_cntr(unsigned long data)
     hdw_thread = 1 ^ hdw_thread;
     next_hdw_thread = hdw_thread;

+    for (i = 0; i < num_counters; i++)
+    /* There are some per thread events.  Must do the
+     * set event, for the thread that is being started
+     */
+        set_pm_event(i,
+              pmc_cntrl[next_hdw_thread][i].evnts,
+              pmc_cntrl[next_hdw_thread][i].masks);
+
     /* The following is done only once per each node, but
      * we need cpu #, not node #, to pass to the cbe_xxx functions.
      */

@@ -385,12 +427,13 @@ static void cell_virtual_cntr(unsigned long data)
                 == 0xFFFFFFFF)
                 /* If the cntr value is 0xffffffff, we must
-                 * reset that to 0xfffffff0 when the current
-                 * thread is restarted. This will generate a new
-                 * interrupt and make sure that we never restore
-                 * the counters to the max value. If the counters
-                 * were restored to the max value, they do not
-                 * increment and no interrupts are generated. Hence
-                 * no more samples will be collected on that cpu.
+                 * reset that to 0xfffffff0 when the current
+                 * thread is restarted.  This will generate a
+                 * new interrupt and make sure that we never
+                 * restore the counters to the max value.  If
+                 * the counters were restored to the max value,
+                 * they do not increment and no interrupts are
+                 * generated.  Hence no more samples will be
+                 * collected on that cpu.
                  */
                 cbe_write_ctr(cpu, i, 0xFFFFFFF0);
             else

@@ -410,9 +453,6 @@ static void cell_virtual_cntr(unsigned long data)
                  * Must do the set event, enable_cntr
                  * for each cpu.
                  */
-                set_pm_event(i,
-                     pmc_cntrl[next_hdw_thread][i].evnts,
-                     pmc_cntrl[next_hdw_thread][i].masks);
-
                 enable_ctr(cpu, i,
                            pm_regs.pm07_cntrl);
             } else {

@@ -465,8 +505,7 @@ cell_reg_setup(struct op_counter_config *ctr,
     pm_regs.pm_cntrl.trace_mode = 0;
     pm_regs.pm_cntrl.freeze = 1;

-    set_count_mode(sys->enable_kernel, sys->enable_user,
-                   &pm_regs.pm_cntrl);
+    set_count_mode(sys->enable_kernel, sys->enable_user);

     /* Setup the thread 0 events */
     for (i = 0; i < num_ctrs; ++i) {

@@ -498,10 +537,10 @@ cell_reg_setup(struct op_counter_config *ctr,
         pmc_cntrl[1][i].vcntr = i;
     }

-    for (i = 0; i < 4; i++)
+    for (i = 0; i < NUM_TRACE_BUS_WORDS; i++)
         trace_bus[i] = 0xff;

-    for (i = 0; i < 2; i++)
+    for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
         input_bus[i] = 0xff;

     /* Our counters count up, and "count" refers to

@@ -560,7 +599,7 @@ static void cell_cpu_setup(struct op_counter_config *cntr)
     cbe_write_pm(cpu, pm_start_stop, 0);
     cbe_write_pm(cpu, group_control, pm_regs.group_control);
     cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control);

-    write_pm_cntrl(cpu, &pm_regs.pm_cntrl);
+    write_pm_cntrl(cpu);

     for (i = 0; i < num_counters; ++i) {
         if (ctr_enabled & (1 << i)) {

@@ -602,7 +641,7 @@ static void cell_global_start(struct op_counter_config *ctr)
         }
     }

-    cbe_clear_pm_interrupts(cpu);
+    cbe_get_and_clear_pm_interrupts(cpu);
     cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
     cbe_enable_pm(cpu);
 }

@@ -672,7 +711,7 @@ cell_handle_interrupt(struct pt_regs *regs, struct op_counter_config *ctr)
     cbe_disable_pm(cpu);

-    interrupt_mask = cbe_clear_pm_interrupts(cpu);
+    interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);

     /* If the interrupt mask has been cleared, then the virt cntr
      * has cleared the interrupt. When the thread that generated
arch/powerpc/platforms/cell/pmu.c

@@ -345,18 +345,12 @@ EXPORT_SYMBOL_GPL(cbe_read_trace_buffer);
  * Enabling/disabling interrupts for the entire performance monitoring unit.
  */

-u32 cbe_query_pm_interrupts(u32 cpu)
-{
-    return cbe_read_pm(cpu, pm_status);
-}
-EXPORT_SYMBOL_GPL(cbe_query_pm_interrupts);
-
-u32 cbe_clear_pm_interrupts(u32 cpu)
+u32 cbe_get_and_clear_pm_interrupts(u32 cpu)
 {
     /* Reading pm_status clears the interrupt bits. */
-    return cbe_query_pm_interrupts(cpu);
+    return cbe_read_pm(cpu, pm_status);
 }
-EXPORT_SYMBOL_GPL(cbe_clear_pm_interrupts);
+EXPORT_SYMBOL_GPL(cbe_get_and_clear_pm_interrupts);

 void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask)
 {

@@ -371,7 +365,7 @@ EXPORT_SYMBOL_GPL(cbe_enable_pm_interrupts);
 void cbe_disable_pm_interrupts(u32 cpu)
 {
-    cbe_clear_pm_interrupts(cpu);
+    cbe_get_and_clear_pm_interrupts(cpu);
     cbe_write_pm(cpu, pm_status, 0);
 }
 EXPORT_SYMBOL_GPL(cbe_disable_pm_interrupts);
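The rename documents that pm_status is read-to-clear: the old cbe_query_pm_interrupts() looked like a side-effect-free query, yet reading the register also acknowledges the pending interrupts, so the caller must keep the returned mask. A small user-space sketch of the same pattern, with the register simulated (none of this is the kernel API):

    #include <stdio.h>

    /* Simulated read-to-clear status register (stands in for pm_status). */
    static unsigned int fake_pm_status = 0x00000005;

    /* Reading the status returns the pending bits and clears them,
     * which is why the kernel helper is named "get_and_clear". */
    static unsigned int get_and_clear_pm_interrupts(void)
    {
        unsigned int val = fake_pm_status;
        fake_pm_status = 0;
        return val;
    }

    int main(void)
    {
        unsigned int mask = get_and_clear_pm_interrupts();    /* must be saved here */
        printf("first read:  %#x\n", mask);                   /* 0x5 */
        printf("second read: %#x\n", get_and_clear_pm_interrupts());    /* 0x0 */
        return 0;
    }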
arch/powerpc/platforms/cell/spufs/context.c

@@ -42,7 +42,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
     }
     spin_lock_init(&ctx->mmio_lock);
     kref_init(&ctx->kref);
-    init_rwsem(&ctx->state_sema);
+    mutex_init(&ctx->state_mutex);
     init_MUTEX(&ctx->run_sema);
     init_waitqueue_head(&ctx->ibox_wq);
     init_waitqueue_head(&ctx->wbox_wq);

@@ -53,6 +53,10 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
     ctx->owner = get_task_mm(current);
     if (gang)
         spu_gang_add_ctx(gang, ctx);
+    ctx->rt_priority = current->rt_priority;
+    ctx->policy = current->policy;
+    ctx->prio = current->prio;
+    INIT_DELAYED_WORK(&ctx->sched_work, spu_sched_tick);
     goto out;
 out_free:
     kfree(ctx);

@@ -65,9 +69,9 @@ void destroy_spu_context(struct kref *kref)
 {
     struct spu_context *ctx;
     ctx = container_of(kref, struct spu_context, kref);
-    down_write(&ctx->state_sema);
+    mutex_lock(&ctx->state_mutex);
     spu_deactivate(ctx);
-    up_write(&ctx->state_sema);
+    mutex_unlock(&ctx->state_mutex);
     spu_fini_csa(&ctx->csa);
     if (ctx->gang)
         spu_gang_remove_ctx(ctx->gang, ctx);

@@ -96,16 +100,6 @@ void spu_forget(struct spu_context *ctx)
     spu_release(ctx);
 }

-void spu_acquire(struct spu_context *ctx)
-{
-    down_read(&ctx->state_sema);
-}
-
-void spu_release(struct spu_context *ctx)
-{
-    up_read(&ctx->state_sema);
-}
-
 void spu_unmap_mappings(struct spu_context *ctx)
 {
     if (ctx->local_store)

@@ -124,83 +118,84 @@ void spu_unmap_mappings(struct spu_context *ctx)
         unmap_mapping_range(ctx->psmap, 0, 0x20000, 1);
 }

+/**
+ * spu_acquire_exclusive - lock spu contex and protect against userspace access
+ * @ctx:    spu contex to lock
+ *
+ * Note:
+ *    Returns 0 and with the context locked on success
+ *    Returns negative error and with the context _unlocked_ on failure.
+ */
 int spu_acquire_exclusive(struct spu_context *ctx)
 {
-    int ret = 0;
+    int ret = -EINVAL;

-    down_write(&ctx->state_sema);
-    /* ctx is about to be freed, can't acquire any more */
-    if (!ctx->owner) {
-        ret = -EINVAL;
-        goto out;
-    }
+    spu_acquire(ctx);
+    /*
+     * Context is about to be freed, so we can't acquire it anymore.
+     */
+    if (!ctx->owner)
+        goto out_unlock;

     if (ctx->state == SPU_STATE_SAVED) {
         ret = spu_activate(ctx, 0);
         if (ret)
-            goto out;
-        ctx->state = SPU_STATE_RUNNABLE;
+            goto out_unlock;
     } else {
-        /* We need to exclude userspace access to the context. */
+        /*
+         * We need to exclude userspace access to the context.
+         *
+         * To protect against memory access we invalidate all ptes
+         * and make sure the pagefault handlers block on the mutex.
+         */
         spu_unmap_mappings(ctx);
     }

-out:
-    if (ret)
-        up_write(&ctx->state_sema);
+    return 0;
+
+out_unlock:
+    spu_release(ctx);
     return ret;
 }

-int spu_acquire_runnable(struct spu_context *ctx)
+/**
+ * spu_acquire_runnable - lock spu contex and make sure it is in runnable state
+ * @ctx:    spu contex to lock
+ *
+ * Note:
+ *    Returns 0 and with the context locked on success
+ *    Returns negative error and with the context _unlocked_ on failure.
+ */
+int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags)
 {
-    int ret = 0;
-
-    down_read(&ctx->state_sema);
-    if (ctx->state == SPU_STATE_RUNNABLE) {
-        ctx->spu->prio = current->prio;
-        return 0;
-    }
-    up_read(&ctx->state_sema);
-
-    down_write(&ctx->state_sema);
-    /* ctx is about to be freed, can't acquire any more */
-    if (!ctx->owner) {
-        ret = -EINVAL;
-        goto out;
-    }
+    int ret = -EINVAL;

+    spu_acquire(ctx);
     if (ctx->state == SPU_STATE_SAVED) {
-        ret = spu_activate(ctx, 0);
+        /*
+         * Context is about to be freed, so we can't acquire it anymore.
+         */
+        if (!ctx->owner)
+            goto out_unlock;
+        ret = spu_activate(ctx, flags);
         if (ret)
-            goto out;
-        ctx->state = SPU_STATE_RUNNABLE;
+            goto out_unlock;
     }

-    downgrade_write(&ctx->state_sema);
-    /* On success, we return holding the lock */
-
-    return ret;
-out:
-    /* Release here, to simplify calling code. */
-    up_write(&ctx->state_sema);
+    return 0;

+out_unlock:
+    spu_release(ctx);
     return ret;
 }

+/**
+ * spu_acquire_saved - lock spu contex and make sure it is in saved state
+ * @ctx:    spu contex to lock
+ */
 void spu_acquire_saved(struct spu_context *ctx)
 {
-    down_read(&ctx->state_sema);
-
-    if (ctx->state == SPU_STATE_SAVED)
-        return;
-
-    up_read(&ctx->state_sema);
-    down_write(&ctx->state_sema);
-
-    if (ctx->state == SPU_STATE_RUNNABLE) {
-        spu_deactivate(ctx);
-        ctx->state = SPU_STATE_SAVED;
-    }
-
-    downgrade_write(&ctx->state_sema);
+    spu_acquire(ctx);
+    if (ctx->state != SPU_STATE_SAVED)
+        spu_deactivate(ctx);
 }
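The context lock changes from the rw_semaphore state_sema to a single mutex, and the spu_acquire_* helpers now share one convention: return 0 with the context locked, or a negative error with the context already unlocked. An illustrative pthread-based sketch of that calling convention (assumed names, not the spufs API):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    struct ctx {
        pthread_mutex_t state_mutex;
        int owner;    /* 0 once the context is being torn down */
    };

    /* Mirrors the convention of spu_acquire_runnable() above, simplified:
     * 0 -> caller holds the lock; negative error -> lock already dropped. */
    static int acquire_runnable(struct ctx *c)
    {
        pthread_mutex_lock(&c->state_mutex);
        if (!c->owner) {
            pthread_mutex_unlock(&c->state_mutex);
            return -EINVAL;
        }
        return 0;
    }

    int main(void)
    {
        struct ctx c = { PTHREAD_MUTEX_INITIALIZER, 1 };

        if (acquire_runnable(&c) == 0) {
            /* ... operate on the context ... */
            pthread_mutex_unlock(&c.state_mutex);    /* i.e. spu_release() */
        }
        return 0;
    }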
arch/powerpc/platforms/cell/spufs/file.c

@@ -103,6 +103,9 @@ static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
     offset += vma->vm_pgoff << PAGE_SHIFT;

+    if (offset >= LS_SIZE)
+        return NOPFN_SIGBUS;
+
     spu_acquire(ctx);

     if (ctx->state == SPU_STATE_SAVED) {

@@ -164,7 +167,7 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
     /* error here usually means a signal.. we might want to test
      * the error code more precisely though
      */
-    ret = spu_acquire_runnable(ctx);
+    ret = spu_acquire_runnable(ctx, 0);
     if (ret)
         return NOPFN_REFAULT;

@@ -1306,7 +1309,7 @@ static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
     if (ret)
         goto out;

-    spu_acquire_runnable(ctx);
+    spu_acquire_runnable(ctx, 0);
     if (file->f_flags & O_NONBLOCK) {
         ret = ctx->ops->send_mfc_command(ctx, &cmd);
     } else {
arch/powerpc/platforms/cell/spufs/run.c

@@ -133,7 +133,7 @@ out_drop_priv:
     spu_mfc_sr1_set(ctx->spu, sr1);

 out_unlock:
-    spu_release_exclusive(ctx);
+    spu_release(ctx);
 out:
     return ret;
 }

@@ -143,7 +143,7 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
     int ret;
     unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;

-    ret = spu_acquire_runnable(ctx);
+    ret = spu_acquire_runnable(ctx, SPU_ACTIVATE_NOWAKE);
     if (ret)
         return ret;

@@ -155,7 +155,7 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
         spu_release(ctx);
         ret = spu_setup_isolated(ctx);
         if (!ret)
-            ret = spu_acquire_runnable(ctx);
+            ret = spu_acquire_runnable(ctx, SPU_ACTIVATE_NOWAKE);
     }

     /* if userspace has set the runcntrl register (eg, to issue an

@@ -164,8 +164,10 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
             (SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
         if (runcntl == 0)
             runcntl = SPU_RUNCNTL_RUNNABLE;
-    } else
+    } else {
+        spu_start_tick(ctx);
         ctx->ops->npc_write(ctx, *npc);
+    }

     ctx->ops->runcntl_write(ctx, runcntl);
     return ret;

@@ -176,6 +178,7 @@ static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
 {
     int ret = 0;

+    spu_stop_tick(ctx);
     *status = ctx->ops->status_read(ctx);
     *npc = ctx->ops->npc_read(ctx);
     spu_release(ctx);

@@ -329,8 +332,10 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
         }
         if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
             ret = spu_reacquire_runnable(ctx, npc, &status);
-            if (ret)
+            if (ret) {
+                spu_stop_tick(ctx);
                 goto out2;
+            }
             continue;
         }
         ret = spu_process_events(ctx);

@@ -361,4 +366,3 @@ out:
     up(&ctx->run_sema);
     return ret;
 }
arch/powerpc/platforms/cell/spufs/sched.c

@@ -44,17 +44,18 @@
 #include <asm/spu_priv1.h>
 #include "spufs.h"

-#define SPU_MIN_TIMESLICE    (100 * HZ / 1000)
+#define SPU_TIMESLICE    (HZ)

-#define SPU_BITMAP_SIZE (((MAX_PRIO+BITS_PER_LONG)/BITS_PER_LONG)+1)
 struct spu_prio_array {
-    unsigned long bitmap[SPU_BITMAP_SIZE];
-    wait_queue_head_t waitq[MAX_PRIO];
+    DECLARE_BITMAP(bitmap, MAX_PRIO);
+    struct list_head runq[MAX_PRIO];
+    spinlock_t runq_lock;
     struct list_head active_list[MAX_NUMNODES];
     struct mutex active_mutex[MAX_NUMNODES];
 };

 static struct spu_prio_array *spu_prio;
+static struct workqueue_struct *spu_sched_wq;

 static inline int node_allowed(int node)
 {
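The new spu_prio_array replaces per-priority wait queues with per-priority run lists plus a bitmap of non-empty priorities, so dispatch becomes find-first-set-bit followed by taking the head of that list. A compact user-space sketch of the same idea (fixed-size arrays, no locking, a linear bit scan instead of sched_find_first_bit(); MAX_PRIO 140 is the scheduler bound of that era and is assumed here):

    #include <stdio.h>

    #define MAX_PRIO 140    /* lower value = higher priority, as in the kernel */

    struct rq_entry { int id; struct rq_entry *next; };

    static struct rq_entry *runq[MAX_PRIO];
    /* Plays the role of DECLARE_BITMAP(bitmap, MAX_PRIO). */
    static unsigned long long bitmap[(MAX_PRIO + 63) / 64];

    static void add_to_rq(struct rq_entry *e, int prio)
    {
        e->next = runq[prio];    /* LIFO here; the kernel adds at the tail */
        runq[prio] = e;
        bitmap[prio / 64] |= 1ULL << (prio % 64);
    }

    /* Equivalent of sched_find_first_bit() + spu_grab_context(). */
    static struct rq_entry *grab_highest(void)
    {
        for (int prio = 0; prio < MAX_PRIO; prio++) {
            if (bitmap[prio / 64] & (1ULL << (prio % 64))) {
                struct rq_entry *e = runq[prio];
                runq[prio] = e->next;
                if (!runq[prio])
                    bitmap[prio / 64] &= ~(1ULL << (prio % 64));
                return e;
            }
        }
        return NULL;
    }

    int main(void)
    {
        struct rq_entry a = { 1 }, b = { 2 };
        add_to_rq(&a, 120);
        add_to_rq(&b, 100);    /* higher priority than a */
        printf("next: %d\n", grab_highest()->id);    /* prints 2 */
        return 0;
    }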
@@ -68,6 +69,64 @@ static inline int node_allowed(int node)
     return 1;
 }

+void spu_start_tick(struct spu_context *ctx)
+{
+    if (ctx->policy == SCHED_RR)
+        queue_delayed_work(spu_sched_wq, &ctx->sched_work,
+                           SPU_TIMESLICE);
+}
+
+void spu_stop_tick(struct spu_context *ctx)
+{
+    if (ctx->policy == SCHED_RR)
+        cancel_delayed_work(&ctx->sched_work);
+}
+
+void spu_sched_tick(struct work_struct *work)
+{
+    struct spu_context *ctx =
+        container_of(work, struct spu_context, sched_work.work);
+    struct spu *spu;
+    int rearm = 1;
+
+    mutex_lock(&ctx->state_mutex);
+    spu = ctx->spu;
+    if (spu) {
+        int best = sched_find_first_bit(spu_prio->bitmap);
+        if (best <= ctx->prio) {
+            spu_deactivate(ctx);
+            rearm = 0;
+        }
+    }
+    mutex_unlock(&ctx->state_mutex);
+
+    if (rearm)
+        spu_start_tick(ctx);
+}
+
+/**
+ * spu_add_to_active_list - add spu to active list
+ * @spu:    spu to add to the active list
+ */
+static void spu_add_to_active_list(struct spu *spu)
+{
+    mutex_lock(&spu_prio->active_mutex[spu->node]);
+    list_add_tail(&spu->list, &spu_prio->active_list[spu->node]);
+    mutex_unlock(&spu_prio->active_mutex[spu->node]);
+}
+
+/**
+ * spu_remove_from_active_list - remove spu from active list
+ * @spu:    spu to remove from the active list
+ */
+static void spu_remove_from_active_list(struct spu *spu)
+{
+    int node = spu->node;
+
+    mutex_lock(&spu_prio->active_mutex[node]);
+    list_del_init(&spu->list);
+    mutex_unlock(&spu_prio->active_mutex[node]);
+}
+
 static inline void mm_needs_global_tlbie(struct mm_struct *mm)
 {
     int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

@@ -94,8 +153,12 @@ int spu_switch_event_unregister(struct notifier_block * n)
     return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
 }

-static inline void bind_context(struct spu *spu, struct spu_context *ctx)
+/**
+ * spu_bind_context - bind spu context to physical spu
+ * @spu:    physical spu to bind to
+ * @ctx:    context to bind
+ */
+static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 {
     pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
              spu->number, spu->node);

@@ -104,7 +167,6 @@ static inline void bind_context(struct spu *spu, struct spu_context *ctx)
     ctx->spu = spu;
     ctx->ops = &spu_hw_ops;
     spu->pid = current->pid;
-    spu->prio = current->prio;
     spu->mm = ctx->owner;
     mm_needs_global_tlbie(spu->mm);
     spu->ibox_callback = spufs_ibox_callback;

@@ -118,12 +180,21 @@ static inline void bind_context(struct spu *spu, struct spu_context *ctx)
     spu->timestamp = jiffies;
     spu_cpu_affinity_set(spu, raw_smp_processor_id());
     spu_switch_notify(spu, ctx);
+    spu_add_to_active_list(spu);
+    ctx->state = SPU_STATE_RUNNABLE;
 }

-static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
+/**
+ * spu_unbind_context - unbind spu context from physical spu
+ * @spu:    physical spu to unbind from
+ * @ctx:    context to unbind
+ */
+static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 {
     pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
              spu->pid, spu->number, spu->node);
+    spu_remove_from_active_list(spu);
     spu_switch_notify(spu, NULL);
     spu_unmap_mappings(ctx);
     spu_save(&ctx->csa, spu);

@@ -136,95 +207,98 @@ static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
     spu->dma_callback = NULL;
     spu->mm = NULL;
     spu->pid = 0;
-    spu->prio = MAX_PRIO;
     ctx->ops = &spu_backing_ops;
     ctx->spu = NULL;
     spu->flags = 0;
     spu->ctx = NULL;
 }

-static inline void spu_add_wq(wait_queue_head_t * wq, wait_queue_t * wait,
-                              int prio)
+/**
+ * spu_add_to_rq - add a context to the runqueue
+ * @ctx:    context to add
+ */
+static void spu_add_to_rq(struct spu_context *ctx)
 {
-    prepare_to_wait_exclusive(wq, wait, TASK_INTERRUPTIBLE);
-    set_bit(prio, spu_prio->bitmap);
+    spin_lock(&spu_prio->runq_lock);
+    list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
+    set_bit(ctx->prio, spu_prio->bitmap);
+    spin_unlock(&spu_prio->runq_lock);
 }

-static inline void spu_del_wq(wait_queue_head_t * wq, wait_queue_t * wait,
-                              int prio)
+/**
+ * spu_del_from_rq - remove a context from the runqueue
+ * @ctx:    context to remove
+ */
+static void spu_del_from_rq(struct spu_context *ctx)
 {
-    u64 flags;
-
-    __set_current_state(TASK_RUNNING);
-    spin_lock_irqsave(&wq->lock, flags);
-
-    remove_wait_queue_locked(wq, wait);
-    if (list_empty(&wq->task_list))
-        clear_bit(prio, spu_prio->bitmap);
-
-    spin_unlock_irqrestore(&wq->lock, flags);
+    spin_lock(&spu_prio->runq_lock);
+    list_del_init(&ctx->rq);
+    if (list_empty(&spu_prio->runq[ctx->prio]))
+        clear_bit(ctx->prio, spu_prio->bitmap);
+    spin_unlock(&spu_prio->runq_lock);
 }

+/**
+ * spu_grab_context - remove one context from the runqueue
+ * @prio:    priority of the context to be removed
+ *
+ * This function removes one context from the runqueue for priority @prio.
+ * If there is more than one context with the given priority the first
+ * task on the runqueue will be taken.
+ *
+ * Returns the spu_context it just removed.
+ *
+ * Must be called with spu_prio->runq_lock held.
+ */
+static struct spu_context *spu_grab_context(int prio)
+{
+    struct list_head *rq = &spu_prio->runq[prio];
+
+    if (list_empty(rq))
+        return NULL;
+    return list_entry(rq->next, struct spu_context, rq);
+}

-static void spu_prio_wait(struct spu_context *ctx, u64 flags)
+static void spu_prio_wait(struct spu_context *ctx)
 {
-    int prio = current->prio;
-    wait_queue_head_t *wq = &spu_prio->waitq[prio];
     DEFINE_WAIT(wait);

-    if (ctx->spu)
-        return;
-
-    spu_add_wq(wq, &wait, prio);
+    set_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
+    prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);

     if (!signal_pending(current)) {
-        up_write(&ctx->state_sema);
-        pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
-                 current->pid, current->prio);
+        mutex_unlock(&ctx->state_mutex);
         schedule();
-        down_write(&ctx->state_sema);
+        mutex_lock(&ctx->state_mutex);
     }
-
-    spu_del_wq(wq, &wait, prio);
+    __set_current_state(TASK_RUNNING);
+    remove_wait_queue(&ctx->stop_wq, &wait);
+    clear_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
 }

-static void spu_prio_wakeup(void)
+/**
+ * spu_reschedule - try to find a runnable context for a spu
+ * @spu:    spu available
+ *
+ * This function is called whenever a spu becomes idle.  It looks for the
+ * most suitable runnable spu context and schedules it for execution.
+ */
+static void spu_reschedule(struct spu *spu)
 {
-    int best = sched_find_first_bit(spu_prio->bitmap);
-    if (best < MAX_PRIO) {
-        wait_queue_head_t *wq = &spu_prio->waitq[best];
-        wake_up_interruptible_nr(wq, 1);
-    }
-}
+    int best;

-static int get_active_spu(struct spu *spu)
-{
-    int node = spu->node;
-    struct spu *tmp;
-    int rc = 0;
+    spu_free(spu);

-    mutex_lock(&spu_prio->active_mutex[node]);
-    list_for_each_entry(tmp, &spu_prio->active_list[node], list) {
-        if (tmp == spu) {
-            list_del_init(&spu->list);
-            rc = 1;
-            break;
-        }
+    spin_lock(&spu_prio->runq_lock);
+    best = sched_find_first_bit(spu_prio->bitmap);
+    if (best < MAX_PRIO) {
+        struct spu_context *ctx = spu_grab_context(best);
+        if (ctx && test_bit(SPU_SCHED_WAKE, &ctx->sched_flags))
+            wake_up(&ctx->stop_wq);
     }
-    mutex_unlock(&spu_prio->active_mutex[node]);
-    return rc;
-}
-
-static void put_active_spu(struct spu *spu)
-{
-    int node = spu->node;
-
-    mutex_lock(&spu_prio->active_mutex[node]);
-    list_add_tail(&spu->list, &spu_prio->active_list[node]);
-    mutex_unlock(&spu_prio->active_mutex[node]);
+    spin_unlock(&spu_prio->runq_lock);
 }

-static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags)
+static struct spu *spu_get_idle(struct spu_context *ctx)
 {
     struct spu *spu = NULL;
     int node = cpu_to_node(raw_smp_processor_id());

@@ -241,87 +315,154 @@ static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags)
     return spu;
 }

-static inline struct spu *spu_get(struct spu_context *ctx, u64 flags)
+/**
+ * find_victim - find a lower priority context to preempt
+ * @ctx:    canidate context for running
+ *
+ * Returns the freed physical spu to run the new context on.
+ */
+static struct spu *find_victim(struct spu_context *ctx)
 {
-    /* Future: spu_get_idle() if possible,
-     * otherwise try to preempt an active
-     * context.
+    struct spu_context *victim = NULL;
+    struct spu *spu;
+    int node, n;
+
+    /*
+     * Look for a possible preemption candidate on the local node first.
+     * If there is no candidate look at the other nodes.  This isn't
+     * exactly fair, but so far the whole spu schedule tries to keep
+     * a strong node affinity.  We might want to fine-tune this in
+     * the future.
      */
-    return spu_get_idle(ctx, flags);
-}
+ restart:
+    node = cpu_to_node(raw_smp_processor_id());
+    for (n = 0; n < MAX_NUMNODES; n++, node++) {
+        node = (node < MAX_NUMNODES) ? node : 0;
+        if (!node_allowed(node))
+            continue;
+
+        mutex_lock(&spu_prio->active_mutex[node]);
+        list_for_each_entry(spu, &spu_prio->active_list[node], list) {
+            struct spu_context *tmp = spu->ctx;
+
+            if (tmp->rt_priority < ctx->rt_priority &&
+                (!victim || tmp->rt_priority < victim->rt_priority))
+                victim = spu->ctx;
+        }
+        mutex_unlock(&spu_prio->active_mutex[node]);
+
+        if (victim) {
+            /*
+             * This nests ctx->state_mutex, but we always lock
+             * higher priority contexts before lower priority
+             * ones, so this is safe until we introduce
+             * priority inheritance schemes.
+             */
+            if (!mutex_trylock(&victim->state_mutex)) {
+                victim = NULL;
+                goto restart;
+            }
+
+            spu = victim->spu;
+            if (!spu) {
+                /*
+                 * This race can happen because we've dropped
+                 * the active list mutex.  No a problem, just
+                 * restart the search.
+                 */
+                mutex_unlock(&victim->state_mutex);
+                victim = NULL;
+                goto restart;
+            }
+            spu_unbind_context(spu, victim);
+            mutex_unlock(&victim->state_mutex);
+            return spu;
+        }
+    }
+
+    return NULL;
+}

-/* The three externally callable interfaces
- * for the scheduler begin here.
+/**
+ * spu_activate - find a free spu for a context and execute it
+ * @ctx:    spu context to schedule
+ * @flags:    flags (currently ignored)
  *
- *    spu_activate    - bind a context to SPU, waiting as needed.
- *    spu_deactivate  - unbind a context from its SPU.
- *    spu_yield       - yield an SPU if others are waiting.
+ * Tries to find a free spu to run @ctx.  If no free spu is availble
+ * add the context to the runqueue so it gets woken up once an spu
+ * is available.
  */
-
-int spu_activate(struct spu_context *ctx, u64 flags)
+int spu_activate(struct spu_context *ctx, unsigned long flags)
 {
-    struct spu *spu;
-    int ret = 0;

-    for (;;) {
-        if (ctx->spu)
-            return 0;
-        spu = spu_get(ctx, flags);
-        if (spu != NULL) {
-            if (ctx->spu != NULL) {
-                spu_free(spu);
-                spu_prio_wakeup();
-                break;
-            }
-            bind_context(spu, ctx);
-            put_active_spu(spu);
-            break;
-        }
-        spu_prio_wait(ctx, flags);
-        if (signal_pending(current)) {
-            ret = -ERESTARTSYS;
-            spu_prio_wakeup();
-            break;
-        }
-    }
-    return ret;
+    if (ctx->spu)
+        return 0;
+
+    do {
+        struct spu *spu;
+
+        spu = spu_get_idle(ctx);
+        /*
+         * If this is a realtime thread we try to get it running by
+         * preempting a lower priority thread.
+         */
+        if (!spu && ctx->rt_priority)
+            spu = find_victim(ctx);
+        if (spu) {
+            spu_bind_context(spu, ctx);
+            return 0;
+        }
+
+        spu_add_to_rq(ctx);
+        if (!(flags & SPU_ACTIVATE_NOWAKE))
+            spu_prio_wait(ctx);
+        spu_del_from_rq(ctx);
+    } while (!signal_pending(current));
+
+    return -ERESTARTSYS;
 }

+/**
+ * spu_deactivate - unbind a context from it's physical spu
+ * @ctx:    spu context to unbind
+ *
+ * Unbind @ctx from the physical spu it is running on and schedule
+ * the highest priority context to run on the freed physical spu.
+ */
 void spu_deactivate(struct spu_context *ctx)
 {
-    struct spu *spu;
-    int needs_idle;
+    struct spu *spu = ctx->spu;

-    spu = ctx->spu;
-    if (!spu)
-        return;
-
-    needs_idle = get_active_spu(spu);
-    unbind_context(spu, ctx);
-    if (needs_idle) {
-        spu_free(spu);
-        spu_prio_wakeup();
+    if (spu) {
+        spu_unbind_context(spu, ctx);
+        spu_reschedule(spu);
     }
 }

+/**
+ * spu_yield - yield a physical spu if others are waiting
+ * @ctx:    spu context to yield
+ *
+ * Check if there is a higher priority context waiting and if yes
+ * unbind @ctx from the physical spu and schedule the highest
+ * priority context to run on the freed physical spu instead.
+ */
 void spu_yield(struct spu_context *ctx)
 {
     struct spu *spu;
     int need_yield = 0;

-    if (down_write_trylock(&ctx->state_sema)) {
+    if (mutex_trylock(&ctx->state_mutex)) {
         if ((spu = ctx->spu) != NULL) {
             int best = sched_find_first_bit(spu_prio->bitmap);
             if (best < MAX_PRIO) {
                 pr_debug("%s: yielding SPU %d NODE %d\n",
                          __FUNCTION__, spu->number, spu->node);
                 spu_deactivate(ctx);
-                ctx->state = SPU_STATE_SAVED;
                 need_yield = 1;
-            } else {
-                spu->prio = MAX_PRIO;
             }
         }
-        up_write(&ctx->state_sema);
+        mutex_unlock(&ctx->state_mutex);
     }
     if (unlikely(need_yield))
         yield();

@@ -331,14 +472,19 @@ int __init spu_sched_init(void)
 {
     int i;

+    spu_sched_wq = create_singlethread_workqueue("spusched");
+    if (!spu_sched_wq)
+        return 1;
+
     spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
     if (!spu_prio) {
         printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
                __FUNCTION__);
+        destroy_workqueue(spu_sched_wq);
         return 1;
     }
     for (i = 0; i < MAX_PRIO; i++) {
-        init_waitqueue_head(&spu_prio->waitq[i]);
+        INIT_LIST_HEAD(&spu_prio->runq[i]);
         __clear_bit(i, spu_prio->bitmap);
     }
     __set_bit(MAX_PRIO, spu_prio->bitmap);

@@ -346,6 +492,7 @@ int __init spu_sched_init(void)
         mutex_init(&spu_prio->active_mutex[i]);
         INIT_LIST_HEAD(&spu_prio->active_list[i]);
     }
+    spin_lock_init(&spu_prio->runq_lock);
     return 0;
 }

@@ -364,4 +511,5 @@ void __exit spu_sched_exit(void)
         mutex_unlock(&spu_prio->active_mutex[node]);
     }
     kfree(spu_prio);
+    destroy_workqueue(spu_sched_wq);
 }
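find_victim() gives realtime contexts a way onto an SPU when none is idle: scan the bound contexts and preempt the one whose rt_priority is lowest while still below the candidate's. A self-contained sketch of just that selection rule over a plain array (no locking, node affinity, or unbinding; purely illustrative):

    #include <stdio.h>

    struct spu_ctx {
        int id;
        int rt_priority;    /* higher value = more urgent, as in the kernel */
    };

    /* Pick the running context with the lowest rt_priority that is still
     * strictly below the candidate's, i.e. the cheapest one to preempt. */
    static struct spu_ctx *find_victim(struct spu_ctx *running, int n, int rt_priority)
    {
        struct spu_ctx *victim = NULL;

        for (int i = 0; i < n; i++) {
            struct spu_ctx *tmp = &running[i];

            if (tmp->rt_priority < rt_priority &&
                (!victim || tmp->rt_priority < victim->rt_priority))
                victim = tmp;
        }
        return victim;
    }

    int main(void)
    {
        struct spu_ctx running[] = { {1, 10}, {2, 0}, {3, 50} };
        struct spu_ctx *v = find_victim(running, 3, 40);

        printf("victim: ctx %d (rt_priority %d)\n", v->id, v->rt_priority);    /* ctx 2 */
        return 0;
    }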
arch/powerpc/platforms/cell/spufs/spufs.h

@@ -23,7 +23,7 @@
 #define SPUFS_H

 #include <linux/kref.h>
-#include <linux/rwsem.h>
+#include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/fs.h>

@@ -37,11 +37,13 @@ enum {
 };

 struct spu_context_ops;
-
-#define SPU_CONTEXT_PREEMPT          0UL
-
 struct spu_gang;

+/* ctx->sched_flags */
+enum {
+    SPU_SCHED_WAKE = 0,
+};
+
 struct spu_context {
     struct spu *spu;          /* pointer to a physical SPU */
     struct spu_state csa;     /* SPU context save area. */

@@ -56,7 +58,7 @@ struct spu_context {
     u64 object_id;            /* user space pointer for oprofile */

     enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;
-    struct rw_semaphore state_sema;
+    struct mutex state_mutex;
     struct semaphore run_sema;

     struct mm_struct *owner;

@@ -77,6 +79,14 @@ struct spu_context {
     struct list_head gang_list;
     struct spu_gang *gang;

+    /* scheduler fields */
+    struct list_head rq;
+    struct delayed_work sched_work;
+    unsigned long sched_flags;
+    unsigned long rt_priority;
+    int policy;
+    int prio;
 };

 struct spu_gang {

@@ -161,6 +171,16 @@ void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx);
 void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx);

 /* context management */
+static inline void spu_acquire(struct spu_context *ctx)
+{
+    mutex_lock(&ctx->state_mutex);
+}
+
+static inline void spu_release(struct spu_context *ctx)
+{
+    mutex_unlock(&ctx->state_mutex);
+}
+
 struct spu_context * alloc_spu_context(struct spu_gang *gang);
 void destroy_spu_context(struct kref *kref);
 struct spu_context * get_spu_context(struct spu_context *ctx);

@@ -168,20 +188,18 @@ int put_spu_context(struct spu_context *ctx);
 void spu_unmap_mappings(struct spu_context *ctx);

 void spu_forget(struct spu_context *ctx);
-void spu_acquire(struct spu_context *ctx);
-void spu_release(struct spu_context *ctx);
-int spu_acquire_runnable(struct spu_context *ctx);
+int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags);
 void spu_acquire_saved(struct spu_context *ctx);
 int spu_acquire_exclusive(struct spu_context *ctx);
-
-static inline void spu_release_exclusive(struct spu_context *ctx)
-{
-    up_write(&ctx->state_sema);
-}
-
-int spu_activate(struct spu_context *ctx, u64 flags);
+enum {
+    SPU_ACTIVATE_NOWAKE = 1,
+};
+int spu_activate(struct spu_context *ctx, unsigned long flags);
 void spu_deactivate(struct spu_context *ctx);
 void spu_yield(struct spu_context *ctx);
+void spu_start_tick(struct spu_context *ctx);
+void spu_stop_tick(struct spu_context *ctx);
+void spu_sched_tick(struct work_struct *work);
 int __init spu_sched_init(void);
 void __exit spu_sched_exit(void);
arch/powerpc/xmon/xmon.c

@@ -2811,7 +2811,6 @@ static void dump_spu_fields(struct spu *spu)
     DUMP_FIELD(spu, "0x%lx", irqs[2]);
     DUMP_FIELD(spu, "0x%x", slb_replace);
     DUMP_FIELD(spu, "%d", pid);
-    DUMP_FIELD(spu, "%d", prio);
     DUMP_FIELD(spu, "0x%p", mm);
     DUMP_FIELD(spu, "0x%p", ctx);
     DUMP_FIELD(spu, "0x%p", rq);
include/asm-powerpc/cell-pmu.h

@@ -53,6 +53,11 @@
 #define CBE_PM_CTR_POLARITY          0x01000000
 #define CBE_PM_CTR_COUNT_CYCLES      0x00800000
 #define CBE_PM_CTR_ENABLE            0x00400000
+#define PM07_CTR_INPUT_MUX(x)        (((x) & 0x3F) << 26)
+#define PM07_CTR_INPUT_CONTROL(x)    (((x) & 1) << 25)
+#define PM07_CTR_POLARITY(x)         (((x) & 1) << 24)
+#define PM07_CTR_COUNT_CYCLES(x)     (((x) & 1) << 23)
+#define PM07_CTR_ENABLE(x)           (((x) & 1) << 22)

 /* Macros for the pm_status register. */
 #define CBE_PM_CTR_OVERFLOW_INTR(ctr)    (1 << (31 - ((ctr) & 7)))

@@ -89,8 +94,7 @@ extern void cbe_read_trace_buffer(u32 cpu, u64 *buf);
 extern void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask);
 extern void cbe_disable_pm_interrupts(u32 cpu);
-extern u32  cbe_query_pm_interrupts(u32 cpu);
-extern u32  cbe_clear_pm_interrupts(u32 cpu);
+extern u32  cbe_get_and_clear_pm_interrupts(u32 cpu);
 extern void cbe_sync_irq(int node);

 /* Utility functions, macros */

@@ -103,11 +107,4 @@ extern u32 cbe_get_hw_thread_id(int cpu);
 #define CBE_COUNT_PROBLEM_MODE      2
 #define CBE_COUNT_ALL_MODES         3

-/* Macros for the pm07_control registers. */
-#define PM07_CTR_INPUT_MUX(x)        (((x) & 0x3F) << 26)
-#define PM07_CTR_INPUT_CONTROL(x)    (((x) & 1) << 25)
-#define PM07_CTR_POLARITY(x)         (((x) & 1) << 24)
-#define PM07_CTR_COUNT_CYCLES(x)     (((x) & 1) << 23)
-#define PM07_CTR_ENABLE(x)           (((x) & 1) << 22)
-
 #endif /* __ASM_CELL_PMU_H__ */
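The PM07_CTR_* helpers that moved up in this header place small fields into a pm07_control word; op_model_cell.c composes pm_regs.pm07_cntrl[ctr] exactly this way in set_pm_event() and enable_ctr(). A quick stand-alone check of the bit layout (hypothetical field values):

    #include <stdio.h>

    /* Same field helpers as include/asm-powerpc/cell-pmu.h. */
    #define PM07_CTR_INPUT_MUX(x)        (((x) & 0x3F) << 26)
    #define PM07_CTR_INPUT_CONTROL(x)    (((x) & 1) << 25)
    #define PM07_CTR_POLARITY(x)         (((x) & 1) << 24)
    #define PM07_CTR_COUNT_CYCLES(x)     (((x) & 1) << 23)
    #define PM07_CTR_ENABLE(x)           (((x) & 1) << 22)

    int main(void)
    {
        /* Compose a control word the way set_pm_event()/enable_ctr() do. */
        unsigned int val = 0;

        val |= PM07_CTR_COUNT_CYCLES(1);     /* bit 23 */
        val |= PM07_CTR_POLARITY(1);         /* bit 24 */
        val |= PM07_CTR_INPUT_CONTROL(0);    /* bit 25 stays clear */
        val |= PM07_CTR_ENABLE(1);           /* bit 22 */

        printf("pm07_control = %#010x\n", val);    /* 0x01c00000 */
        return 0;
    }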
include/asm-powerpc/spu.h

@@ -129,7 +129,6 @@ struct spu {
     struct spu_runqueue *rq;
     unsigned long long timestamp;
     pid_t pid;
-    int prio;
     int class_0_pending;
     spinlock_t register_lock;