Commit 5323180d authored by Atsushi Nemoto, committed by Ralf Baechle

[MIPS] Disallow CpU exception in kernel again.

The commit 4d40bff7110e9e1a97ff8c01bdd6350e9867cc10 ("Allow CpU
exception in kernel partially") was broken.  The commit was to fix
theoretical problem but broke usual case.  Revert it for now.
Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 9a994357
...@@ -49,7 +49,8 @@ LEAF(resume) ...@@ -49,7 +49,8 @@ LEAF(resume)
#ifndef CONFIG_CPU_HAS_LLSC #ifndef CONFIG_CPU_HAS_LLSC
sw zero, ll_bit sw zero, ll_bit
#endif #endif
mfc0 t2, CP0_STATUS mfc0 t1, CP0_STATUS
sw t1, THREAD_STATUS(a0)
cpu_save_nonscratch a0 cpu_save_nonscratch a0
sw ra, THREAD_REG31(a0) sw ra, THREAD_REG31(a0)
...@@ -59,8 +60,8 @@ LEAF(resume) ...@@ -59,8 +60,8 @@ LEAF(resume)
lw t3, TASK_THREAD_INFO(a0) lw t3, TASK_THREAD_INFO(a0)
lw t0, TI_FLAGS(t3) lw t0, TI_FLAGS(t3)
li t1, _TIF_USEDFPU li t1, _TIF_USEDFPU
and t1, t0 and t2, t0, t1
beqz t1, 1f beqz t2, 1f
nor t1, zero, t1 nor t1, zero, t1
and t0, t0, t1 and t0, t0, t1
...@@ -73,13 +74,10 @@ LEAF(resume) ...@@ -73,13 +74,10 @@ LEAF(resume)
li t1, ~ST0_CU1 li t1, ~ST0_CU1
and t0, t0, t1 and t0, t0, t1
sw t0, ST_OFF(t3) sw t0, ST_OFF(t3)
/* clear thread_struct CU1 bit */
and t2, t1
fpu_save_single a0, t0 # clobbers t0 fpu_save_single a0, t0 # clobbers t0
1: 1:
sw t2, THREAD_STATUS(a0)
/* /*
* The order of restoring the registers takes care of the race * The order of restoring the registers takes care of the race
* updating $28, $29 and kernelsp without disabling ints. * updating $28, $29 and kernelsp without disabling ints.
......
...@@ -48,7 +48,8 @@ ...@@ -48,7 +48,8 @@
#ifndef CONFIG_CPU_HAS_LLSC #ifndef CONFIG_CPU_HAS_LLSC
sw zero, ll_bit sw zero, ll_bit
#endif #endif
mfc0 t2, CP0_STATUS mfc0 t1, CP0_STATUS
LONG_S t1, THREAD_STATUS(a0)
cpu_save_nonscratch a0 cpu_save_nonscratch a0
LONG_S ra, THREAD_REG31(a0) LONG_S ra, THREAD_REG31(a0)
...@@ -58,8 +59,8 @@ ...@@ -58,8 +59,8 @@
PTR_L t3, TASK_THREAD_INFO(a0) PTR_L t3, TASK_THREAD_INFO(a0)
LONG_L t0, TI_FLAGS(t3) LONG_L t0, TI_FLAGS(t3)
li t1, _TIF_USEDFPU li t1, _TIF_USEDFPU
and t1, t0 and t2, t0, t1
beqz t1, 1f beqz t2, 1f
nor t1, zero, t1 nor t1, zero, t1
and t0, t0, t1 and t0, t0, t1
...@@ -72,13 +73,10 @@ ...@@ -72,13 +73,10 @@
li t1, ~ST0_CU1 li t1, ~ST0_CU1
and t0, t0, t1 and t0, t0, t1
LONG_S t0, ST_OFF(t3) LONG_S t0, ST_OFF(t3)
/* clear thread_struct CU1 bit */
and t2, t1
fpu_save_double a0 t0 t1 # c0_status passed in t0 fpu_save_double a0 t0 t1 # c0_status passed in t0
# clobbers t1 # clobbers t1
1: 1:
LONG_S t2, THREAD_STATUS(a0)
/* /*
* The order of restoring the registers takes care of the race * The order of restoring the registers takes care of the race
......
...@@ -113,10 +113,10 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) ...@@ -113,10 +113,10 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
* Save FPU state to signal context. Signal handler * Save FPU state to signal context. Signal handler
* will "inherit" current FPU state. * will "inherit" current FPU state.
*/ */
preempt_disable();
own_fpu(1); own_fpu(1);
enable_fp_in_kernel();
err |= save_fp_context(sc); err |= save_fp_context(sc);
disable_fp_in_kernel(); preempt_enable();
} }
return err; return err;
} }
...@@ -148,7 +148,10 @@ check_and_restore_fp_context(struct sigcontext __user *sc) ...@@ -148,7 +148,10 @@ check_and_restore_fp_context(struct sigcontext __user *sc)
err = sig = fpcsr_pending(&sc->sc_fpc_csr); err = sig = fpcsr_pending(&sc->sc_fpc_csr);
if (err > 0) if (err > 0)
err = 0; err = 0;
preempt_disable();
own_fpu(0);
err |= restore_fp_context(sc); err |= restore_fp_context(sc);
preempt_enable();
return err ?: sig; return err ?: sig;
} }
...@@ -187,11 +190,8 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) ...@@ -187,11 +190,8 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
if (used_math) { if (used_math) {
/* restore fpu context if we have used it before */ /* restore fpu context if we have used it before */
own_fpu(0);
enable_fp_in_kernel();
if (!err) if (!err)
err = check_and_restore_fp_context(sc); err = check_and_restore_fp_context(sc);
disable_fp_in_kernel();
} else { } else {
/* signal handler may have used FPU. Give it up. */ /* signal handler may have used FPU. Give it up. */
lose_fpu(0); lose_fpu(0);
......
...@@ -209,10 +209,10 @@ static int setup_sigcontext32(struct pt_regs *regs, ...@@ -209,10 +209,10 @@ static int setup_sigcontext32(struct pt_regs *regs,
* Save FPU state to signal context. Signal handler * Save FPU state to signal context. Signal handler
* will "inherit" current FPU state. * will "inherit" current FPU state.
*/ */
preempt_disable();
own_fpu(1); own_fpu(1);
enable_fp_in_kernel();
err |= save_fp_context32(sc); err |= save_fp_context32(sc);
disable_fp_in_kernel(); preempt_enable();
} }
return err; return err;
} }
...@@ -225,7 +225,10 @@ check_and_restore_fp_context32(struct sigcontext32 __user *sc) ...@@ -225,7 +225,10 @@ check_and_restore_fp_context32(struct sigcontext32 __user *sc)
err = sig = fpcsr_pending(&sc->sc_fpc_csr); err = sig = fpcsr_pending(&sc->sc_fpc_csr);
if (err > 0) if (err > 0)
err = 0; err = 0;
preempt_disable();
own_fpu(0);
err |= restore_fp_context32(sc); err |= restore_fp_context32(sc);
preempt_enable();
return err ?: sig; return err ?: sig;
} }
...@@ -261,11 +264,8 @@ static int restore_sigcontext32(struct pt_regs *regs, ...@@ -261,11 +264,8 @@ static int restore_sigcontext32(struct pt_regs *regs,
if (used_math) { if (used_math) {
/* restore fpu context if we have used it before */ /* restore fpu context if we have used it before */
own_fpu(0);
enable_fp_in_kernel();
if (!err) if (!err)
err = check_and_restore_fp_context32(sc); err = check_and_restore_fp_context32(sc);
disable_fp_in_kernel();
} else { } else {
/* signal handler may have used FPU. Give it up. */ /* signal handler may have used FPU. Give it up. */
lose_fpu(0); lose_fpu(0);
......
...@@ -757,11 +757,12 @@ asmlinkage void do_cpu(struct pt_regs *regs) ...@@ -757,11 +757,12 @@ asmlinkage void do_cpu(struct pt_regs *regs)
{ {
unsigned int cpid; unsigned int cpid;
die_if_kernel("do_cpu invoked from kernel context!", regs);
cpid = (regs->cp0_cause >> CAUSEB_CE) & 3; cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
switch (cpid) { switch (cpid) {
case 0: case 0:
die_if_kernel("do_cpu invoked from kernel context!", regs);
if (!cpu_has_llsc) if (!cpu_has_llsc)
if (!simulate_llsc(regs)) if (!simulate_llsc(regs))
return; return;
...@@ -772,9 +773,6 @@ asmlinkage void do_cpu(struct pt_regs *regs) ...@@ -772,9 +773,6 @@ asmlinkage void do_cpu(struct pt_regs *regs)
break; break;
case 1: case 1:
if (!test_thread_flag(TIF_ALLOW_FP_IN_KERNEL))
die_if_kernel("do_cpu invoked from kernel context!",
regs);
if (used_math()) /* Using the FPU again. */ if (used_math()) /* Using the FPU again. */
own_fpu(1); own_fpu(1);
else { /* First time FPU user. */ else { /* First time FPU user. */
...@@ -782,19 +780,7 @@ asmlinkage void do_cpu(struct pt_regs *regs) ...@@ -782,19 +780,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
set_used_math(); set_used_math();
} }
if (raw_cpu_has_fpu) { if (!raw_cpu_has_fpu) {
if (test_thread_flag(TIF_ALLOW_FP_IN_KERNEL)) {
local_irq_disable();
if (cpu_has_fpu)
regs->cp0_status |= ST0_CU1;
/*
* We must return without enabling
* interrupts to ensure keep FPU
* ownership until resume.
*/
return;
}
} else {
int sig; int sig;
sig = fpu_emulator_cop1Handler(regs, sig = fpu_emulator_cop1Handler(regs,
&current->thread.fpu, 0); &current->thread.fpu, 0);
...@@ -836,7 +822,6 @@ asmlinkage void do_cpu(struct pt_regs *regs) ...@@ -836,7 +822,6 @@ asmlinkage void do_cpu(struct pt_regs *regs)
case 2: case 2:
case 3: case 3:
die_if_kernel("do_cpu invoked from kernel context!", regs);
break; break;
} }
......
...@@ -68,8 +68,6 @@ do { \ ...@@ -68,8 +68,6 @@ do { \
/* We don't care about the c0 hazard here */ \ /* We don't care about the c0 hazard here */ \
} while (0) } while (0)
#define __fpu_enabled() (read_c0_status() & ST0_CU1)
#define enable_fpu() \ #define enable_fpu() \
do { \ do { \
if (cpu_has_fpu) \ if (cpu_has_fpu) \
...@@ -162,18 +160,4 @@ static inline fpureg_t *get_fpu_regs(struct task_struct *tsk) ...@@ -162,18 +160,4 @@ static inline fpureg_t *get_fpu_regs(struct task_struct *tsk)
return tsk->thread.fpu.fpr; return tsk->thread.fpu.fpr;
} }
static inline void enable_fp_in_kernel(void)
{
set_thread_flag(TIF_ALLOW_FP_IN_KERNEL);
/* make sure CU1 and FPU ownership are consistent */
if (!__is_fpu_owner() && __fpu_enabled())
__disable_fpu();
}
static inline void disable_fp_in_kernel(void)
{
BUG_ON(!__is_fpu_owner() && __fpu_enabled());
clear_thread_flag(TIF_ALLOW_FP_IN_KERNEL);
}
#endif /* _ASM_FPU_H */ #endif /* _ASM_FPU_H */
...@@ -119,7 +119,6 @@ register struct thread_info *__current_thread_info __asm__("$28"); ...@@ -119,7 +119,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE 18 #define TIF_MEMDIE 18
#define TIF_FREEZE 19 #define TIF_FREEZE 19
#define TIF_ALLOW_FP_IN_KERNEL 20
#define TIF_SYSCALL_TRACE 31 /* syscall trace active */ #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment