linux / linux-davinci-2.6.23 · Commits

Commit d223a861 authored Jul 10, 2007 by Ralf Baechle
[MIPS] FP affinity: Coding style cleanups
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

parent e7c4782f
Showing 3 changed files with 56 additions and 55 deletions:

    arch/mips/kernel/mips-mt.c    +22 -18
    arch/mips/kernel/traps.c      +29 -30
    include/asm-mips/system.h      +5  -7
arch/mips/kernel/mips-mt.c (view file @ d223a861)

@@ -109,7 +109,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
 	read_unlock(&tasklist_lock);
 
 	/* Compute new global allowed CPU set if necessary */
-	if( (p->thread.mflags & MF_FPUBOUND)
-	&& cpus_intersects(new_mask, mt_fpu_cpumask)) {
+	if ((p->thread.mflags & MF_FPUBOUND)
+	    && cpus_intersects(new_mask, mt_fpu_cpumask)) {
 		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
 		retval = set_cpus_allowed(p, effective_mask);

@@ -195,27 +195,31 @@ void mips_mt_regdump(unsigned long mvpctl)
 	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
 	ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
 	printk("-- per-VPE State --\n");
-	for(i = 0; i < nvpe; i++) {
-	    for(tc = 0; tc < ntc; tc++) {
+	for (i = 0; i < nvpe; i++) {
+		for (tc = 0; tc < ntc; tc++) {
 			settc(tc);
-		if((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
+			if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
 				printk("  VPE %d\n", i);
-			printk("   VPEControl : %08lx\n", read_vpe_c0_vpecontrol());
-			printk("   VPEConf0 : %08lx\n", read_vpe_c0_vpeconf0());
-			printk("   VPE%d.Status : %08lx\n",
-				i, read_vpe_c0_status());
-			printk("   VPE%d.EPC : %08lx\n", i, read_vpe_c0_epc());
-			printk("   VPE%d.Cause : %08lx\n", i, read_vpe_c0_cause());
-			printk("   VPE%d.Config7 : %08lx\n",
-				i, read_vpe_c0_config7());
-			break; /* Next VPE */
+				printk("   VPEControl : %08lx\n",
+				       read_vpe_c0_vpecontrol());
+				printk("   VPEConf0 : %08lx\n",
+				       read_vpe_c0_vpeconf0());
+				printk("   VPE%d.Status : %08lx\n",
+				       i, read_vpe_c0_status());
+				printk("   VPE%d.EPC : %08lx\n",
+				       i, read_vpe_c0_epc());
+				printk("   VPE%d.Cause : %08lx\n",
+				       i, read_vpe_c0_cause());
+				printk("   VPE%d.Config7 : %08lx\n",
+				       i, read_vpe_c0_config7());
+				break; /* Next VPE */
 			}
 		}
 	}
 	printk("-- per-TC State --\n");
-	for(tc = 0; tc < ntc; tc++) {
+	for (tc = 0; tc < ntc; tc++) {
 		settc(tc);
-		if(read_tc_c0_tcbind() == read_c0_tcbind()) {
+		if (read_tc_c0_tcbind() == read_c0_tcbind()) {
 			/* Are we dumping ourself?  */
 			haltval = 0; /* Then we're not halted, and mustn't be */
 			tcstatval = flags; /* And pre-dump TCStatus is flags */

@@ -384,7 +388,7 @@ void mips_mt_set_cpuoptions(void)
 		mt_fpemul_threshold = fpaff_threshold;
 	} else {
-		mt_fpemul_threshold =
-			(FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
+		mt_fpemul_threshold =
+			(FPUSEFACTOR * (loops_per_jiffy / (500000 / HZ))) / HZ;
 	}
 	printk("FPU Affinity set after %ld emulations\n",
 			mt_fpemul_threshold);
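The last hunk keeps the default threshold formula used by mips_mt_set_cpuoptions() when no explicit fpaff_threshold is given. As a quick illustration of the arithmetic only — FPUSEFACTOR, HZ and loops_per_jiffy are kernel-internal values, so the numbers below are assumed for the sketch, not taken from this tree — a standalone C program evaluating the same expression:

#include <stdio.h>

/* Assumed values, for illustration only -- not taken from this kernel tree. */
#define FPUSEFACTOR	2000
#define HZ		100

int main(void)
{
	unsigned long loops_per_jiffy = 1250000;	/* hypothetical calibration result */

	/* Same expression as in mips_mt_set_cpuoptions() above. */
	unsigned long mt_fpemul_threshold =
		(FPUSEFACTOR * (loops_per_jiffy / (500000 / HZ))) / HZ;

	printf("FPU Affinity set after %lu emulations\n", mt_fpemul_threshold);
	return 0;
}

With these assumed numbers the threshold works out to 5000 emulated FP instructions; since the expression scales with loops_per_jiffy, faster CPUs tolerate proportionally more emulation before a task is considered for rebinding.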
arch/mips/kernel/traps.c (view file @ d223a861)

@@ -752,6 +752,33 @@ asmlinkage void do_ri(struct pt_regs *regs)
 	force_sig(SIGILL, current);
 }
 
+/*
+ * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
+ * emulated more than some threshold number of instructions, force migration to
+ * a "CPU" that has FP support.
+ */
+static void mt_ase_fp_affinity(void)
+{
+#ifdef CONFIG_MIPS_MT_FPAFF
+	if (mt_fpemul_threshold > 0 &&
+	     ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
+		/*
+		 * If there's no FPU present, or if the application has already
+		 * restricted the allowed set to exclude any CPUs with FPUs,
+		 * we'll skip the procedure.
+		 */
+		if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
+			cpumask_t tmask;
+
+			cpus_and(tmask, current->thread.user_cpus_allowed,
+				 mt_fpu_cpumask);
+			set_cpus_allowed(current, tmask);
+			current->thread.mflags |= MF_FPUBOUND;
+		}
+	}
+#endif /* CONFIG_MIPS_MT_FPAFF */
+}
+
 asmlinkage void do_cpu(struct pt_regs *regs)
 {
 	unsigned int cpid;

@@ -785,36 +812,8 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 				       &current->thread.fpu, 0);
 		if (sig)
 			force_sig(sig, current);
-#ifdef CONFIG_MIPS_MT_FPAFF
-		else {
-		/*
-		 * MIPS MT processors may have fewer FPU contexts
-		 * than CPU threads. If we've emulated more than
-		 * some threshold number of instructions, force
-		 * migration to a "CPU" that has FP support.
-		 */
-		 if (mt_fpemul_threshold > 0
-		 && ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
-		  /*
-		   * If there's no FPU present, or if the
-		   * application has already restricted
-		   * the allowed set to exclude any CPUs
-		   * with FPUs, we'll skip the procedure.
-		   */
-		  if (cpus_intersects(current->cpus_allowed,
-					mt_fpu_cpumask)) {
-		    cpumask_t tmask;
-
-		    cpus_and(tmask,
-				current->thread.user_cpus_allowed,
-				mt_fpu_cpumask);
-		    set_cpus_allowed(current, tmask);
-		    current->thread.mflags |= MF_FPUBOUND;
-		  }
-		 }
-		}
-#endif /* CONFIG_MIPS_MT_FPAFF */
+		else
+			mt_ase_fp_affinity();
 
 		return;
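The change here only moves code, but the helper's effect is easy to miss in diff form: once more than mt_fpemul_threshold FP instructions have been emulated for a thread, its allowed-CPU mask is narrowed to the CPUs that actually have an FPU, and the thread is flagged MF_FPUBOUND so the context-switch path in system.h can later restore the user's original mask. A userspace sketch of that narrowing step — plain bitmasks standing in for cpumask_t, and every name below hypothetical rather than kernel API:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's task fields and cpumask_t. */
struct task {
	unsigned long cpus_allowed;		/* one bit per CPU */
	unsigned long user_cpus_allowed;	/* mask the user asked for */
	unsigned long emulated_fp;		/* emulated FP instruction count */
	int fpu_bound;				/* stands in for MF_FPUBOUND */
};

static unsigned long mt_fpu_cpumask = 0x3;		/* assume CPUs 0-1 have an FPU */
static unsigned long mt_fpemul_threshold = 5000;	/* assumed threshold */

/* Userspace analogue of mt_ase_fp_affinity(); not kernel code. */
static void fp_affinity(struct task *t)
{
	if (mt_fpemul_threshold > 0 &&
	    t->emulated_fp++ > mt_fpemul_threshold) {
		/* Skip if none of the allowed CPUs has an FPU at all. */
		if (t->cpus_allowed & mt_fpu_cpumask) {
			t->cpus_allowed = t->user_cpus_allowed & mt_fpu_cpumask;
			t->fpu_bound = 1;
		}
	}
}

int main(void)
{
	struct task t = {
		.cpus_allowed = 0xf,		/* CPUs 0-3 allowed */
		.user_cpus_allowed = 0xf,
		.emulated_fp = 5001,		/* already past the threshold */
	};

	fp_affinity(&t);
	printf("cpus_allowed = %#lx, MF_FPUBOUND = %d\n",
	       t.cpus_allowed, t.fpu_bound);	/* prints 0x3, 1 */
	return 0;
}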
include/asm-mips/system.h (view file @ d223a861)

@@ -44,7 +44,7 @@ struct task_struct;
  * different thread.
  */
-#define switch_to(prev,next,last)					\
+#define __mips_mt_fpaff_switch_to(prev)					\
 do {									\
 	if (cpu_has_fpu &&						\
 		(prev->thread.mflags & MF_FPUBOUND) &&			\

@@ -52,24 +52,22 @@ do {									\
 		prev->thread.mflags &= ~MF_FPUBOUND;			\
 		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
 	}								\
-	if (cpu_has_dsp)						\
-		__save_dsp(prev);					\
 	next->thread.emulated_fp = 0;					\
-	(last) = resume(prev, next, task_thread_info(next));		\
-	if (cpu_has_dsp)						\
-		__restore_dsp(current);					\
 } while(0)
 #else
+#define __mips_mt_fpaff_switch_to(prev) do { (prev); } while (0)
+#endif
+
 #define switch_to(prev,next,last)					\
 do {									\
+	__mips_mt_fpaff_switch_to(prev);				\
 	if (cpu_has_dsp)						\
 		__save_dsp(prev);					\
 	(last) = resume(prev, next, task_thread_info(next));		\
 	if (cpu_has_dsp)						\
 		__restore_dsp(current);					\
 } while(0)
-#endif
 
 /*
  * On SMP systems, when the scheduler does migration-cost autodetection,
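The non-FPAFF branch keeps the build working with a one-line fallback, #define __mips_mt_fpaff_switch_to(prev) do { (prev); } while (0): a statement-shaped no-op that still references its argument, so switch_to() expands the same way whether or not CONFIG_MIPS_MT_FPAFF is set. A minimal standalone sketch of that macro pattern, with made-up names purely for illustration:

#include <stdio.h>

/*
 * Same shape as the fallback above: a do { ... } while (0) block behaves as a
 * single statement, so the macro is safe as the unbraced body of an if/else,
 * and (void)(x) evaluates and discards the argument without doing anything.
 */
#define fpaff_switch_stub(x)	do { (void)(x); } while (0)

int main(void)
{
	int prev = 7;

	if (prev)
		fpaff_switch_stub(prev);	/* one statement, no effect */
	else
		puts("unreachable");

	printf("prev is still %d\n", prev);
	return 0;
}

The kernel's variant writes do { (prev); } while (0) directly; the effect is the same — a no-op that still mentions its argument and keeps both configurations syntactically interchangeable.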