Commit 07cc0c9e
Authored Jul 27, 2007 by Ralf Baechle

[MIPS] MT: Enable coexistence of AP/SP with VSMP and SMTC.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

Parent: c3a005f4
Showing 7 changed files with 203 additions and 169 deletions (+203 -169):

arch/mips/Kconfig            +12  -12
arch/mips/kernel/kspd.c       +9  -10
arch/mips/kernel/mips-mt.c   +22   -0
arch/mips/kernel/rtlx.c      +17   -5
arch/mips/kernel/smtc.c       +0  -16
arch/mips/kernel/vpe.c      +137 -126
include/asm-mips/mips_mt.h    +6   -0
arch/mips/Kconfig

@@ -1377,17 +1377,6 @@ config MIPS_MT_SMTC
 	  This is a kernel model which is known a SMTC or lately has been
 	  marketesed into SMVP.
 
-config MIPS_VPE_LOADER
-	bool "VPE loader support."
-	depends on SYS_SUPPORTS_MULTITHREADING
-	select CPU_MIPSR2_IRQ_VI
-	select CPU_MIPSR2_IRQ_EI
-	select CPU_MIPSR2_SRS
-	select MIPS_MT
-	help
-	  Includes a loader for loading an elf relocatable object
-	  onto another VPE and running it.
-
 endchoice
 
 config MIPS_MT
@@ -1398,8 +1387,19 @@ config SYS_SUPPORTS_MULTITHREADING
 config MIPS_MT_FPAFF
 	bool "Dynamic FPU affinity for FP-intensive threads"
-	depends on MIPS_MT
 	default y
+	depends on MIPS_MT_SMP || MIPS_MT_SMTC
+
+config MIPS_VPE_LOADER
+	bool "VPE loader support."
+	depends on SYS_SUPPORTS_MULTITHREADING
+	select CPU_MIPSR2_IRQ_VI
+	select CPU_MIPSR2_IRQ_EI
+	select CPU_MIPSR2_SRS
+	select MIPS_MT
+	help
+	  Includes a loader for loading an elf relocatable object
+	  onto another VPE and running it.
 
 config MIPS_MT_SMTC_INSTANT_REPLAY
 	bool "Low-latency Dispatch of Deferred SMTC IPIs"
arch/mips/kernel/kspd.c

@@ -89,7 +89,7 @@ static int sp_stopping = 0;
 #define MTSP_O_EXCL		0x0800
 #define MTSP_O_BINARY		0x8000
 
-#define SP_VPE 1
+extern int tclimit;
 
 struct apsp_table  {
 	int sp;
@@ -225,8 +225,8 @@ void sp_work_handle_request(void)
 	/* Run the syscall at the priviledge of the user who loaded the
 	   SP program */
 
-	if (vpe_getuid(SP_VPE))
-		sp_setfsuidgid( vpe_getuid(SP_VPE), vpe_getgid(SP_VPE));
+	if (vpe_getuid(tclimit))
+		sp_setfsuidgid(vpe_getuid(tclimit), vpe_getgid(tclimit));
 
 	switch (sc.cmd) {
 	/* needs the flags argument translating from SDE kit to
@@ -245,7 +245,7 @@ void sp_work_handle_request(void)
 	case MTSP_SYSCALL_EXIT:
 		list_for_each_entry(n, &kspd_notifylist, list)
-			n->kspd_sp_exit(SP_VPE);
+			n->kspd_sp_exit(tclimit);
 		sp_stopping = 1;
 
 		printk(KERN_DEBUG "KSPD got exit syscall from SP exitcode %d\n",
@@ -255,7 +255,7 @@ void sp_work_handle_request(void)
 	case MTSP_SYSCALL_OPEN:
 		generic.arg1 = translate_open_flags(generic.arg1);
 
-		vcwd = vpe_getcwd(SP_VPE);
+		vcwd = vpe_getcwd(tclimit);
 
 		/* change to the cwd of the process that loaded the SP program */
 		old_fs = get_fs();
@@ -283,7 +283,7 @@ void sp_work_handle_request(void)
 		break;
 	} /* switch */
 
-	if (vpe_getuid(SP_VPE))
+	if (vpe_getuid(tclimit))
 		sp_setfsuidgid(0, 0);
 
 	old_fs = get_fs();
@@ -364,10 +364,9 @@ static void startwork(int vpe)
 		}
 
 		INIT_WORK(&work, sp_work);
-		queue_work(workqueue, &work);
-	} else
-		queue_work(workqueue, &work);
-
+	}
+
+	queue_work(workqueue, &work);
 }
 
 static void stopwork(int vpe)
@@ -389,7 +388,7 @@ static int kspd_module_init(void)
 
 	notify.start = startwork;
 	notify.stop = stopwork;
-	vpe_notify(SP_VPE, &notify);
+	vpe_notify(tclimit, &notify);
 
 	return 0;
 }
arch/mips/kernel/mips-mt.c

@@ -21,6 +21,28 @@
 #include <asm/r4kcache.h>
 #include <asm/cacheflush.h>
 
+int vpelimit;
+
+static int __init maxvpes(char *str)
+{
+	get_option(&str, &vpelimit);
+
+	return 1;
+}
+
+__setup("maxvpes=", maxvpes);
+
+int tclimit;
+
+static int __init maxtcs(char *str)
+{
+	get_option(&str, &tclimit);
+
+	return 1;
+}
+
+__setup("maxtcs=", maxtcs);
+
 /*
  * Dump new MIPS MT state for the core. Does not leave TCs halted.
  * Takes an argument which taken to be a pre-call MVPControl value.
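The maxvpes=/maxtcs= handlers added above become the single place these limits are parsed; the AP/SP users touched by the rest of the patch (rtlx.c, kspd.c, vpe.c) simply read the exported tclimit/vpelimit values at init time. A minimal sketch of that consumer pattern follows, using a hypothetical module named example_apsp; it is not part of the patch, the real users appear in the hunks below.

/* Hypothetical consumer of the exported limits; a sketch only. */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <asm/mips_mt.h>	/* extern int tclimit, vpelimit; */

static int __init example_apsp_init(void)
{
	/* tclimit == 0 means no TC was reserved with maxtcs=<n>, so bail out. */
	if (tclimit == 0) {
		printk(KERN_WARNING "example_apsp: no TCs reserved for AP/SP, "
		       "pass maxtcs=<n> on the kernel command line\n");
		return -ENODEV;
	}

	/*
	 * TC "tclimit" is the first one not used by Linux; the real drivers
	 * hand it to the SP side, e.g. via vpe_notify(tclimit, &notify).
	 */
	return 0;
}

On the kernel command line this corresponds to booting with maxvpes=<n> and maxtcs=<n>, as the warning messages added to rtlx.c and vpe.c below spell out.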
arch/mips/kernel/rtlx.c

@@ -40,12 +40,11 @@
 #include <asm/atomic.h>
 #include <asm/cpu.h>
 #include <asm/processor.h>
+#include <asm/mips_mt.h>
 #include <asm/system.h>
 #include <asm/vpe.h>
 #include <asm/rtlx.h>
 
-#define RTLX_TARG_VPE 1
-
 static struct rtlx_info *rtlx;
 static int major;
 static char module_name[] = "rtlx";
@@ -165,10 +164,10 @@ int rtlx_open(int index, int can_sleep)
 	}
 
 	if (rtlx == NULL) {
-		if( (p = vpe_get_shared(RTLX_TARG_VPE)) == NULL) {
+		if( (p = vpe_get_shared(tclimit)) == NULL) {
 			if (can_sleep) {
 				__wait_event_interruptible(channel_wqs[index].lx_queue,
-							   (p = vpe_get_shared(RTLX_TARG_VPE)),
+							   (p = vpe_get_shared(tclimit)),
 							   ret);
 				if (ret)
 					goto out_fail;
@@ -477,6 +476,19 @@ static int rtlx_module_init(void)
 	struct device *dev;
 	int i, err;
 
+	if (!cpu_has_mipsmt) {
+		printk("VPE loader: not a MIPS MT capable processor\n");
+		return -ENODEV;
+	}
+
+	if (tclimit == 0) {
+		printk(KERN_WARNING "No TCs reserved for AP/SP, not "
+		       "initializing RTLX.\nPass maxtcs=<n> argument as kernel "
+		       "argument\n");
+
+		return -ENODEV;
+	}
+
 	major = register_chrdev(0, module_name, &rtlx_fops);
 	if (major < 0) {
 		printk(register_chrdev_failed);
@@ -501,7 +513,7 @@ static int rtlx_module_init(void)
 	/* set up notifiers */
 	notify.start = starting;
 	notify.stop = stopping;
-	vpe_notify(RTLX_TARG_VPE, &notify);
+	vpe_notify(tclimit, &notify);
 
 	if (cpu_has_vint)
 		set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch);
arch/mips/kernel/smtc.c

@@ -86,25 +86,11 @@ unsigned int smtc_status = 0;
 
 /* Boot command line configuration overrides */
 
-static int vpelimit = 0;
-static int tclimit = 0;
 static int ipibuffers = 0;
 static int nostlb = 0;
 static int asidmask = 0;
 unsigned long smtc_asid_mask = 0xff;
 
-static int __init maxvpes(char *str)
-{
-	get_option(&str, &vpelimit);
-	return 1;
-}
-
-static int __init maxtcs(char *str)
-{
-	get_option(&str, &tclimit);
-	return 1;
-}
-
 static int __init ipibufs(char *str)
 {
 	get_option(&str, &ipibuffers);
@@ -137,8 +123,6 @@ static int __init asidmask_set(char *str)
 	return 1;
 }
 
-__setup("maxvpes=", maxvpes);
-__setup("maxtcs=", maxtcs);
 __setup("ipibufs=", ipibufs);
 __setup("nostlb", stlb_disable);
 __setup("asidmask=", asidmask_set);
arch/mips/kernel/vpe.c

@@ -27,7 +27,6 @@
  * To load and run, simply cat a SP 'program file' to /dev/vpe1.
  * i.e cat spapp >/dev/vpe1.
  */
-
 #include <linux/kernel.h>
 #include <linux/device.h>
 #include <linux/module.h>
@@ -54,6 +53,7 @@
 #include <asm/system.h>
 #include <asm/vpe.h>
 #include <asm/kspd.h>
+#include <asm/mips_mt.h>
 
 typedef void *vpe_handle;
@@ -132,14 +132,9 @@ struct tc {
 	enum tc_state state;
 	int index;
 
-	/* parent VPE */
-	struct vpe *pvpe;
-
-	/* The list of TC's with this VPE */
-	struct list_head tc;
-
-	/* The global list of tc's */
-	struct list_head list;
+	struct vpe *pvpe;	/* parent VPE */
+	struct list_head tc;	/* The list of TC's with this VPE */
+	struct list_head list;	/* The global list of tc's */
 };
 
 struct {
@@ -217,18 +212,17 @@ struct vpe *alloc_vpe(int minor)
 /* allocate a tc. At startup only tc0 is running, all other can be halted. */
 struct tc *alloc_tc(int index)
 {
-	struct tc *t;
+	struct tc *tc;
 
-	if ((t = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL) {
-		return NULL;
-	}
-
-	INIT_LIST_HEAD(&t->tc);
-	list_add_tail(&t->list, &vpecontrol.tc_list);
+	if ((tc = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL)
+		goto out;
 
-	t->index = index;
+	INIT_LIST_HEAD(&tc->tc);
+	tc->index = index;
+	list_add_tail(&tc->list, &vpecontrol.tc_list);
 
-	return t;
+out:
+	return tc;
 }
 
 /* clean up and free everything */
@@ -663,66 +657,48 @@ static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex,
 }
 #endif
 
-static void dump_tc(struct tc *t)
-{
-	unsigned long val;
-
-	settc(t->index);
-	printk(KERN_DEBUG "VPE loader: TC index %d targtc %ld "
-	       "TCStatus 0x%lx halt 0x%lx\n",
-	       t->index, read_c0_vpecontrol() & VPECONTROL_TARGTC,
-	       read_tc_c0_tcstatus(), read_tc_c0_tchalt());
-	printk(KERN_DEBUG " tcrestart 0x%lx\n", read_tc_c0_tcrestart());
-	printk(KERN_DEBUG " tcbind 0x%lx\n", read_tc_c0_tcbind());
-
-	val = read_c0_vpeconf0();
-	printk(KERN_DEBUG " VPEConf0 0x%lx MVP %ld\n", val,
-	       (val & VPECONF0_MVP) >> VPECONF0_MVP_SHIFT);
-	printk(KERN_DEBUG " c0 status 0x%lx\n", read_vpe_c0_status());
-	printk(KERN_DEBUG " c0 cause 0x%lx\n", read_vpe_c0_cause());
-	printk(KERN_DEBUG " c0 badvaddr 0x%lx\n", read_vpe_c0_badvaddr());
-	printk(KERN_DEBUG " c0 epc 0x%lx\n", read_vpe_c0_epc());
-}
-
-static void dump_tclist(void)
-{
-	struct tc *t;
-
-	list_for_each_entry(t, &vpecontrol.tc_list, list) {
-		dump_tc(t);
-	}
-}
-
 /* We are prepared so configure and start the VPE... */
 static int vpe_run(struct vpe * v)
 {
+	unsigned long flags, val, dmt_flag;
 	struct vpe_notifications *n;
-	unsigned long val, dmt_flag;
+	unsigned int vpeflags;
 	struct tc *t;
 
 	/* check we are the Master VPE */
+	local_irq_save(flags);
 	val = read_c0_vpeconf0();
 	if (!(val & VPECONF0_MVP)) {
 		printk(KERN_WARNING
 		       "VPE loader: only Master VPE's are allowed to configure MT\n");
+		local_irq_restore(flags);
+
 		return -1;
 	}
 
-	/* disable MT (using dvpe) */
-	dvpe();
+	dmt_flag = dmt();
+	vpeflags = dvpe();
 
 	if (!list_empty(&v->tc)) {
 		if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
-			printk(KERN_WARNING "VPE loader: TC %d is already in use.\n",
+			evpe(vpeflags);
+			emt(dmt_flag);
+			local_irq_restore(flags);
+
+			printk(KERN_WARNING
+			       "VPE loader: TC %d is already in use.\n",
 			       t->index);
+
 			return -ENOEXEC;
 		}
 	} else {
-		printk(KERN_WARNING "VPE loader: No TC's associated with VPE %d\n",
+		evpe(vpeflags);
+		emt(dmt_flag);
+		local_irq_restore(flags);
+
+		printk(KERN_WARNING
+		       "VPE loader: No TC's associated with VPE %d\n",
 		       v->minor);
+
 		return -ENOEXEC;
 	}
@@ -733,21 +709,20 @@ static int vpe_run(struct vpe * v)
 	/* should check it is halted, and not activated */
 	if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) {
-		printk(KERN_WARNING "VPE loader: TC %d is already doing something!\n",
+		evpe(vpeflags);
+		emt(dmt_flag);
+		local_irq_restore(flags);
+
+		printk(KERN_WARNING "VPE loader: TC %d is already active!\n",
 		       t->index);
-		dump_tclist();
+
 		return -ENOEXEC;
 	}
 
-	/*
-	 * Disable multi-threaded execution whilst we activate, clear the
-	 * halt bit and bound the tc to the other VPE...
-	 */
-	dmt_flag = dmt();
-
 	/* Write the address we want it to start running from in the TCPC register. */
 	write_tc_c0_tcrestart((unsigned long)v->__start);
 	write_tc_c0_tccontext((unsigned long)0);
+
 	/*
 	 * Mark the TC as activated, not interrupt exempt and not dynamically
 	 * allocatable
@@ -765,13 +740,12 @@ static int vpe_run(struct vpe * v)
 	 */
 	mttgpr(7, physical_memsize);
 
-	/* set up VPE1 */
 	/*
 	 * bind the TC to VPE 1 as late as possible so we only have the final
 	 * VPE registers to set up, and so an EJTAG probe can trigger on it
 	 */
-	write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | v->minor);
+	write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
 
 	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));
@@ -793,15 +767,16 @@ static int vpe_run(struct vpe * v)
 	/* take system out of configuration state */
 	clear_c0_mvpcontrol(MVPCONTROL_VPC);
 
-	/* now safe to re-enable multi-threading */
-	emt(dmt_flag);
-
-	/* set it running */
+#ifdef CONFIG_SMP
 	evpe(EVPE_ENABLE);
+#else
+	evpe(vpeflags);
+#endif
+	emt(dmt_flag);
+	local_irq_restore(flags);
 
-	list_for_each_entry(n, &v->notify, list) {
-		n->start(v->minor);
-	}
+	list_for_each_entry(n, &v->notify, list)
+		n->start(minor);
 
 	return 0;
 }
@@ -1023,23 +998,15 @@ static int vpe_elfload(struct vpe * v)
 	return 0;
 }
 
-void __used dump_vpe(struct vpe * v)
-{
-	struct tc *t;
-
-	settc(v->minor);
-
-	printk(KERN_DEBUG "VPEControl 0x%lx\n", read_vpe_c0_vpecontrol());
-	printk(KERN_DEBUG "VPEConf0 0x%lx\n", read_vpe_c0_vpeconf0());
-
-	list_for_each_entry(t, &vpecontrol.tc_list, list)
-		dump_tc(t);
-}
-
 static void cleanup_tc(struct tc *tc)
 {
+	unsigned long flags;
+	unsigned int mtflags, vpflags;
 	int tmp;
 
+	local_irq_save(flags);
+	mtflags = dmt();
+	vpflags = dvpe();
+
 	/* Put MVPE's into 'configuration state' */
 	set_c0_mvpcontrol(MVPCONTROL_VPC);
@@ -1054,9 +1021,12 @@ static void cleanup_tc(struct tc *tc)
 	write_tc_c0_tchalt(TCHALT_H);
 
 	/* bind it to anything other than VPE1 */
-//	write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE
+	write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE
 
 	clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+	evpe(vpflags);
+	emt(mtflags);
+	local_irq_restore(flags);
 }
 
 static int getcwd(char *buff, int size)
@@ -1077,36 +1047,32 @@ static int getcwd(char *buff, int size)
 /* checks VPE is unused and gets ready to load program  */
 static int vpe_open(struct inode *inode, struct file *filp)
 {
-	int minor, ret;
 	enum vpe_state state;
-	struct vpe *v;
 	struct vpe_notifications *not;
+	struct vpe *v;
+	int ret;
 
-	/* assume only 1 device at the mo. */
-	if ((minor = iminor(inode)) != 1) {
+	if (minor != iminor(inode)) {
+		/* assume only 1 device at the moment. */
 		printk(KERN_WARNING "VPE loader: only vpe1 is supported\n");
 		return -ENODEV;
 	}
 
-	if ((v = get_vpe(minor)) == NULL) {
+	if ((v = get_vpe(tclimit)) == NULL) {
 		printk(KERN_WARNING "VPE loader: unable to get vpe\n");
 		return -ENODEV;
 	}
 
 	state = xchg(&v->state, VPE_STATE_INUSE);
 	if (state != VPE_STATE_UNUSED) {
-		dvpe();
-
 		printk(KERN_DEBUG "VPE loader: tc in use dumping regs\n");
-		dump_tc(get_tc(minor));
 
 		list_for_each_entry(not, &v->notify, list) {
-			not->stop(minor);
+			not->stop(tclimit);
 		}
 
 		release_progmem(v->load_addr);
-		cleanup_tc(get_tc(minor));
+		cleanup_tc(get_tc(tclimit));
 	}
 
 	/* this of-course trashes what was there before... */
@@ -1133,26 +1099,25 @@ static int vpe_open(struct inode *inode, struct file *filp)
 	v->shared_ptr = NULL;
 	v->__start = 0;
 
 	return 0;
 }
 
 static int vpe_release(struct inode *inode, struct file *filp)
 {
-	int minor, ret = 0;
 	struct vpe *v;
 	Elf_Ehdr *hdr;
+	int ret = 0;
 
-	minor = iminor(inode);
-	if ((v = get_vpe(minor)) == NULL)
+	v = get_vpe(tclimit);
+	if (v == NULL)
 		return -ENODEV;
 
-	// simple case of fire and forget, so tell the VPE to run...
-
 	hdr = (Elf_Ehdr *) v->pbuffer;
 	if (memcmp(hdr->e_ident, ELFMAG, 4) == 0) {
-		if (vpe_elfload(v) >= 0)
+		if (vpe_elfload(v) >= 0) {
 			vpe_run(v);
-		else {
+		} else {
 			printk(KERN_WARNING "VPE loader: ELF load failed.\n");
 			ret = -ENOEXEC;
 		}
@@ -1179,12 +1144,14 @@ static int vpe_release(struct inode *inode, struct file *filp)
 static ssize_t vpe_write(struct file *file, const char __user * buffer,
 			 size_t count, loff_t * ppos)
 {
-	int minor;
 	size_t ret = count;
 	struct vpe *v;
 
-	minor = iminor(file->f_path.dentry->d_inode);
-	if ((v = get_vpe(minor)) == NULL)
+	if (iminor(file->f_path.dentry->d_inode) != minor)
+		return -ENODEV;
+
+	v = get_vpe(tclimit);
+	if (v == NULL)
 		return -ENODEV;
 
 	if (v->pbuffer == NULL) {
@@ -1370,17 +1337,34 @@ static struct device *vpe_dev;
 static int __init vpe_module_init(void)
 {
+	unsigned int mtflags, vpflags;
+	int hw_tcs, hw_vpes, tc, err = 0;
+	unsigned long flags, val;
 	struct vpe *v = NULL;
 	struct device *dev;
 	struct tc *t;
-	unsigned long val;
-	int i, err;
 
 	if (!cpu_has_mipsmt) {
 		printk("VPE loader: not a MIPS MT capable processor\n");
 		return -ENODEV;
 	}
 
+	if (vpelimit == 0) {
+		printk(KERN_WARNING "No VPEs reserved for AP/SP, not "
+		       "initializing VPE loader.\nPass maxvpes=<n> argument as "
+		       "kernel argument\n");
+
+		return -ENODEV;
+	}
+
+	if (tclimit == 0) {
+		printk(KERN_WARNING "No TCs reserved for AP/SP, not "
+		       "initializing VPE loader.\nPass maxtcs=<n> argument as "
+		       "kernel argument\n");
+
+		return -ENODEV;
+	}
+
 	major = register_chrdev(0, module_name, &vpe_fops);
 	if (major < 0) {
 		printk("VPE loader: unable to register character device\n");
@@ -1388,40 +1372,61 @@ static int __init vpe_module_init(void)
 	}
 
 	dev = device_create(mt_class, NULL, MKDEV(major, minor),
-			    "tc%d", minor);
+			    "vpe%d", minor);
 	if (IS_ERR(dev)) {
 		err = PTR_ERR(dev);
 		goto out_chrdev;
 	}
 	vpe_dev = dev;
 
-	dmt();
-	dvpe();
+	local_irq_save(flags);
+	mtflags = dmt();
+	vpflags = dvpe();
 
 	/* Put MVPE's into 'configuration state' */
 	set_c0_mvpcontrol(MVPCONTROL_VPC);
 
 	/* dump_mtregs(); */
 
 	val = read_c0_mvpconf0();
-	for (i = 0; i < ((val & MVPCONF0_PTC) + 1); i++) {
-		t = alloc_tc(i);
+	hw_tcs = (val & MVPCONF0_PTC) + 1;
+	hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+
+	for (tc = tclimit; tc < hw_tcs; tc++) {
+		/*
+		 * Must re-enable multithreading temporarily or in case we
+		 * reschedule send IPIs or similar we might hang.
+		 */
+		clear_c0_mvpcontrol(MVPCONTROL_VPC);
+		evpe(vpflags);
+		emt(mtflags);
+		local_irq_restore(flags);
+		t = alloc_tc(tc);
+		if (!t) {
+			err = -ENOMEM;
+			goto out;
+		}
+		local_irq_save(flags);
+		mtflags = dmt();
+		vpflags = dvpe();
+		set_c0_mvpcontrol(MVPCONTROL_VPC);
 
 		/* VPE's */
-		if (i < ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1) {
-			settc(i);
+		if (tc < hw_tcs) {
+			settc(tc);
 
-			if ((v = alloc_vpe(i)) == NULL) {
+			if ((v = alloc_vpe(tc)) == NULL) {
 				printk(KERN_WARNING "VPE: unable to allocate VPE\n");
-				return -ENODEV;
+
+				goto out_reenable;
 			}
 
 			/* add the tc to the list of this vpe's tc's. */
 			list_add(&t->tc, &v->tc);
 
 			/* deactivate all but vpe0 */
-			if (i != 0) {
+			if (tc >= tclimit) {
 				unsigned long tmp = read_vpe_c0_vpeconf0();
 
 				tmp &= ~VPECONF0_VPA;
@@ -1434,7 +1439,7 @@ static int __init vpe_module_init(void)
 			/* disable multi-threading with TC's */
 			write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
 
-			if (i != 0) {
+			if (tc >= vpelimit) {
 				/*
 				 * Set config to be the same as vpe0,
 				 * particularly kseg0 coherency alg
@@ -1446,10 +1451,10 @@ static int __init vpe_module_init(void)
 		/* TC's */
 		t->pvpe = v;	/* set the parent vpe */
 
-		if (i != 0) {
+		if (tc >= tclimit) {
 			unsigned long tmp;
 
-			settc(i);
+			settc(tc);
 
 			/* Any TC that is bound to VPE0 gets left as is - in case
 			   we are running SMTC on VPE0. A TC that is bound to any
@@ -1479,9 +1484,14 @@ static int __init vpe_module_init(void)
 		}
 	}
 
+out_reenable:
 	/* release config state */
 	clear_c0_mvpcontrol(MVPCONTROL_VPC);
 
+	evpe(vpflags);
+	emt(mtflags);
+	local_irq_restore(flags);
+
 #ifdef CONFIG_MIPS_APSP_KSPD
 	kspd_events.kspd_sp_exit = kspd_sp_exit;
 #endif
@@ -1490,6 +1500,7 @@ static int __init vpe_module_init(void)
 out_chrdev:
 	unregister_chrdev(major, module_name);
 
+out:
 	return err;
 }
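The comment kept at the top of vpe.c still documents the user-space side: cat an SP 'program file' to /dev/vpe1. With the reworked open/write/release paths above, writing the ELF image fills v->pbuffer via vpe_write() and closing the device triggers vpe_elfload() plus vpe_run(). A user-space sketch of that same sequence, with file names that are illustrative only:

/* Rough equivalent of "cat spapp > /dev/vpe1"; paths are examples only. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int in = open("spapp", O_RDONLY);	/* SP ELF image to load */
	int out = open("/dev/vpe1", O_WRONLY);	/* VPE loader device node */

	if (in < 0 || out < 0) {
		perror("open");
		return 1;
	}
	while ((n = read(in, buf, sizeof(buf))) > 0)
		if (write(out, buf, n) != n) {	/* buffered by vpe_write() */
			perror("write");
			return 1;
		}
	close(in);
	close(out);	/* release path: vpe_elfload() + vpe_run() */
	return 0;
}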
include/asm-mips/mips_mt.h

@@ -8,6 +8,12 @@
 #include <linux/cpumask.h>
 
+/*
+ * How many VPEs and TCs is Linux allowed to use?  0 means no limit.
+ */
+extern int tclimit;
+extern int vpelimit;
+
 extern cpumask_t mt_fpu_cpumask;
 extern unsigned long mt_fpemul_threshold;