Commit ba8f5bab authored by Linus Torvalds

Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus

* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus:
  [MIPS] Treat R14000 like R10000.
  [MIPS] Remove EXPERIMENTAL from PAGE_SIZE_16KB
  [MIPS] Update/Fix instruction definitions
  [MIPS] DSP and MDMX share the same config flag bit.
  [MIPS] Fix deadlock on MP with cache aliases.
  [MIPS] Use generic STABS_DEBUG macro.
  [MIPS] Create consistency in "system type" selection.
  [MIPS] Use generic DWARF_DEBUG
  [MIPS] Fix kgdb exception handler from user mode.
  [MIPS] Update struct sigcontext member names
  [MIPS] Update/fix futex assembly
  [MIPS] Remove support for sysmips(2) SETNAME and MIPS_RDNVRAM operations.
  [MIPS] Fix detection and handling of the 74K processor.
  [MIPS] Add missing 34K processor IDs
  [MIPS] Fix marking buddy of pte global for MIPS32 w/36-bit physical address
  [MIPS] AU1xxx mips_timer_interrupt() fixes
  [MIPS] Fix typo
parents 5cedae9c 44d921b2
......@@ -68,6 +68,7 @@
extern void set_debug_traps(void);
extern irq_cpustat_t irq_stat [NR_CPUS];
extern void mips_timer_interrupt(struct pt_regs *regs);
static void setup_local_irq(unsigned int irq, int type, int int_req);
static unsigned int startup_irq(unsigned int irq);
......
......@@ -116,6 +116,7 @@ void mips_timer_interrupt(struct pt_regs *regs)
null:
ack_r4ktimer(0);
irq_exit();
}
#ifdef CONFIG_PM
......
......@@ -272,8 +272,8 @@ void output_sc_defines(void)
text("/* Linux sigcontext offsets. */");
offset("#define SC_REGS ", struct sigcontext, sc_regs);
offset("#define SC_FPREGS ", struct sigcontext, sc_fpregs);
offset("#define SC_MDHI ", struct sigcontext, sc_hi);
offset("#define SC_MDLO ", struct sigcontext, sc_lo);
offset("#define SC_MDHI ", struct sigcontext, sc_mdhi);
offset("#define SC_MDLO ", struct sigcontext, sc_mdlo);
offset("#define SC_PC ", struct sigcontext, sc_pc);
offset("#define SC_FPC_CSR ", struct sigcontext, sc_fpc_csr);
linefeed;
......
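The SC_* constants in this file are generated at kernel build time so that assembly code can address struct sigcontext fields without hard-coded numbers; the hunk only retargets them at the renamed sc_mdhi/sc_mdlo members. A minimal sketch of the idea using plain offsetof() instead of the kernel's offset() helper (the struct below is an illustrative stand-in, not the real definition):

    #include <stddef.h>
    #include <stdio.h>

    struct sigcontext_sketch {
        unsigned long sc_regs[32];
        unsigned long sc_fpregs[32];
        unsigned long sc_mdhi;
        unsigned long sc_hi1, sc_hi2, sc_hi3;
        unsigned long sc_mdlo;
        unsigned long sc_lo1, sc_lo2, sc_lo3;
        unsigned long sc_pc;
    };

    int main(void)
    {
        /* The real asm-offsets machinery emits lines like these into a
           generated header that assembly sources then #include. */
        printf("#define SC_MDHI %zu\n", offsetof(struct sigcontext_sketch, sc_mdhi));
        printf("#define SC_MDLO %zu\n", offsetof(struct sigcontext_sketch, sc_mdlo));
        return 0;
    }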
......@@ -121,6 +121,7 @@ static inline void check_wait(void)
case CPU_24K:
case CPU_25KF:
case CPU_34K:
case CPU_74K:
case CPU_PR4450:
cpu_wait = r4k_wait;
printk(" available.\n");
......@@ -432,6 +433,15 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c)
MIPS_CPU_LLSC;
c->tlbsize = 64;
break;
case PRID_IMP_R14000:
c->cputype = CPU_R14000;
c->isa_level = MIPS_CPU_ISA_IV;
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
MIPS_CPU_LLSC;
c->tlbsize = 64;
break;
}
}
......@@ -593,6 +603,9 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c)
case PRID_IMP_34K:
c->cputype = CPU_34K;
break;
case PRID_IMP_74K:
c->cputype = CPU_74K;
break;
}
}
......
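Both hunks extend switch statements that dispatch on the implementation field of the CP0 PRId value read at boot. A hedged sketch of that dispatch, using only the two constants visible in this diff (the real cpu_probe() code also fills in ISA level, option flags and TLB size, as shown above):

    #define PRID_IMP_R12000 0x0e00
    #define PRID_IMP_R14000 0x0f00          /* added by this commit */

    /* Sketch: bits 15:8 of PRId identify the implementation. */
    static const char *probe_name(unsigned int prid)
    {
        switch (prid & 0xff00) {
        case PRID_IMP_R12000:
            return "R12000";
        case PRID_IMP_R14000:
            return "R14000";                /* now handled like the R12000 */
        default:
            return "unknown";
        }
    }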
......@@ -101,7 +101,7 @@ FEXPORT(restore_all) # restore full frame
EMT
1:
mfc0 v1, CP0_TCSTATUS
/* We set IXMT above, XOR should cler it here */
/* We set IXMT above, XOR should clear it here */
xori v1, v1, TCSTATUS_IXMT
or v1, v0, v1
mtc0 v1, CP0_TCSTATUS
......
......@@ -54,9 +54,11 @@
*/
mfc0 k0, CP0_CAUSE
andi k0, k0, 0x7c
add k1, k1, k0
PTR_L k0, saved_vectors(k1)
jr k0
#ifdef CONFIG_64BIT
dsll k0, k0, 1
#endif
PTR_L k1, saved_vectors(k0)
jr k1
nop
1:
move k0, sp
......
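The masked Cause value left in k0 is the exception code already multiplied by four (ExcCode lives in bits 6:2), which is exactly the byte offset needed for a table of 4-byte pointers; on a 64-bit kernel every saved_vectors slot is eight bytes, hence the extra doubling, and the handler address is now loaded into k1 so the possibly user-controlled k1 value is never used as an index. The same arithmetic in C (a sketch; only the saved_vectors name is taken from the hunk):

    typedef void (*trap_handler_t)(void);

    extern trap_handler_t saved_vectors[32];    /* one slot per ExcCode */

    static trap_handler_t pick_handler(unsigned long cp0_cause)
    {
        /* andi k0, k0, 0x7c: ExcCode << 2, i.e. a byte offset into an
           array of 4-byte pointers. */
        unsigned long offset = cp0_cause & 0x7c;

    #ifdef CONFIG_64BIT
        offset <<= 1;                           /* 8-byte pointers: dsll k0, k0, 1 */
    #endif
        return *(trap_handler_t *)((char *)saved_vectors + offset);
    }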
......@@ -42,6 +42,7 @@ static const char *cpu_name[] = {
[CPU_R8000] = "R8000",
[CPU_R10000] = "R10000",
[CPU_R12000] = "R12000",
[CPU_R14000] = "R14000",
[CPU_R4300] = "R4300",
[CPU_R4650] = "R4650",
[CPU_R4700] = "R4700",
......@@ -74,6 +75,7 @@ static const char *cpu_name[] = {
[CPU_24K] = "MIPS 24K",
[CPU_25KF] = "MIPS 25Kf",
[CPU_34K] = "MIPS 34K",
[CPU_74K] = "MIPS 74K",
[CPU_VR4111] = "NEC VR4111",
[CPU_VR4121] = "NEC VR4121",
[CPU_VR4122] = "NEC VR4122",
......
......@@ -31,7 +31,6 @@ setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
save_gp_reg(31);
#undef save_gp_reg
#ifdef CONFIG_32BIT
err |= __put_user(regs->hi, &sc->sc_mdhi);
err |= __put_user(regs->lo, &sc->sc_mdlo);
if (cpu_has_dsp) {
......@@ -43,20 +42,6 @@ setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
err |= __put_user(mflo3(), &sc->sc_lo3);
err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
}
#endif
#ifdef CONFIG_64BIT
err |= __put_user(regs->hi, &sc->sc_hi[0]);
err |= __put_user(regs->lo, &sc->sc_lo[0]);
if (cpu_has_dsp) {
err |= __put_user(mfhi1(), &sc->sc_hi[1]);
err |= __put_user(mflo1(), &sc->sc_lo[1]);
err |= __put_user(mfhi2(), &sc->sc_hi[2]);
err |= __put_user(mflo2(), &sc->sc_lo[2]);
err |= __put_user(mfhi3(), &sc->sc_hi[3]);
err |= __put_user(mflo3(), &sc->sc_lo[3]);
err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
}
#endif
err |= __put_user(!!used_math(), &sc->sc_used_math);
......@@ -92,7 +77,6 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
current_thread_info()->restart_block.fn = do_no_restart_syscall;
err |= __get_user(regs->cp0_epc, &sc->sc_pc);
#ifdef CONFIG_32BIT
err |= __get_user(regs->hi, &sc->sc_mdhi);
err |= __get_user(regs->lo, &sc->sc_mdlo);
if (cpu_has_dsp) {
......@@ -104,20 +88,6 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
}
#endif
#ifdef CONFIG_64BIT
err |= __get_user(regs->hi, &sc->sc_hi[0]);
err |= __get_user(regs->lo, &sc->sc_lo[0]);
if (cpu_has_dsp) {
err |= __get_user(treg, &sc->sc_hi[1]); mthi1(treg);
err |= __get_user(treg, &sc->sc_lo[1]); mthi1(treg);
err |= __get_user(treg, &sc->sc_hi[2]); mthi2(treg);
err |= __get_user(treg, &sc->sc_lo[2]); mthi2(treg);
err |= __get_user(treg, &sc->sc_hi[3]); mthi3(treg);
err |= __get_user(treg, &sc->sc_lo[3]); mthi3(treg);
err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
}
#endif
#define restore_gp_reg(i) do { \
err |= __get_user(regs->regs[i], &sc->sc_regs[i]); \
......
......@@ -280,27 +280,6 @@ asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3)
char __user *name;
switch(cmd) {
case SETNAME: {
char nodename[__NEW_UTS_LEN + 1];
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
name = (char __user *) arg1;
len = strncpy_from_user(nodename, name, __NEW_UTS_LEN);
if (len < 0)
return -EFAULT;
down_write(&uts_sem);
strncpy(system_utsname.nodename, nodename, len);
nodename[__NEW_UTS_LEN] = '\0';
strlcpy(system_utsname.nodename, nodename,
sizeof(system_utsname.nodename));
up_write(&uts_sem);
return 0;
}
case MIPS_ATOMIC_SET:
printk(KERN_CRIT "How did I get here?\n");
return -EINVAL;
......@@ -313,9 +292,6 @@ asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3)
case FLUSH_CACHE:
__flush_cache_all();
return 0;
case MIPS_RDNVRAM:
return -EIO;
}
return -EINVAL;
......
......@@ -902,6 +902,7 @@ static inline void parity_protection_init(void)
{
switch (current_cpu_data.cputype) {
case CPU_24K:
case CPU_34K:
case CPU_5KC:
write_c0_ecc(0x80000000);
back_to_back_c0_hazard();
......
......@@ -151,23 +151,13 @@ SECTIONS
/* This is the MIPS specific mdebug section. */
.mdebug : { *(.mdebug) }
/* These are needed for ELF backends which have not yet been
converted to the new style linker. */
.stab 0 : { *(.stab) }
.stabstr 0 : { *(.stabstr) }
/* DWARF debug sections.
Symbols in the .debug DWARF section are relative to the beginning of the
section so we begin .debug at 0. It's not clear yet what needs to happen
for the others. */
.debug 0 : { *(.debug) }
.debug_srcinfo 0 : { *(.debug_srcinfo) }
.debug_aranges 0 : { *(.debug_aranges) }
.debug_pubnames 0 : { *(.debug_pubnames) }
.debug_sfnames 0 : { *(.debug_sfnames) }
.line 0 : { *(.line) }
STABS_DEBUG
DWARF_DEBUG
/* These must appear regardless of . */
.gptab.sdata : { *(.gptab.data) *(.gptab.sdata) }
.gptab.sbss : { *(.gptab.bss) *(.gptab.sbss) }
.comment : { *(.comment) }
.note : { *(.note) }
}
......@@ -29,6 +29,27 @@
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
/*
* Special Variant of smp_call_function for use by cache functions:
*
* o No return value
* o collapses to normal function call on UP kernels
* o collapses to normal function call on systems with a single shared
* primary cache.
*/
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
int retry, int wait)
{
preempt_disable();
#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
smp_call_function(func, info, retry, wait);
#endif
func(info);
preempt_enable();
}
/*
* Must die.
*/
......@@ -299,7 +320,7 @@ static void r4k_flush_cache_all(void)
if (!cpu_has_dc_aliases)
return;
on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
r4k_on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
}
static inline void local_r4k___flush_cache_all(void * args)
......@@ -314,13 +335,14 @@ static inline void local_r4k___flush_cache_all(void * args)
case CPU_R4400MC:
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
r4k_blast_scache();
}
}
static void r4k___flush_cache_all(void)
{
on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
}
static inline void local_r4k_flush_cache_range(void * args)
......@@ -341,7 +363,7 @@ static inline void local_r4k_flush_cache_range(void * args)
static void r4k_flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}
static inline void local_r4k_flush_cache_mm(void * args)
......@@ -370,7 +392,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
if (!cpu_has_dc_aliases)
return;
on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
}
struct flush_cache_page_args {
......@@ -461,7 +483,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
args.addr = addr;
args.pfn = pfn;
on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}
static inline void local_r4k_flush_data_cache_page(void * addr)
......@@ -471,7 +493,7 @@ static inline void local_r4k_flush_data_cache_page(void * addr)
static void r4k_flush_data_cache_page(unsigned long addr)
{
on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
}
struct flush_icache_range_args {
......@@ -514,7 +536,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
args.start = start;
args.end = end;
on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
instruction_hazard();
}
......@@ -590,7 +612,7 @@ static void r4k_flush_icache_page(struct vm_area_struct *vma,
args.vma = vma;
args.page = page;
on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1);
r4k_on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1);
}
......@@ -689,7 +711,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
static void r4k_flush_cache_sigtramp(unsigned long addr)
{
on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
}
static void r4k_flush_icache_all(void)
......@@ -812,6 +834,7 @@ static void __init probe_pcache(void)
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
c->icache.linesz = 64;
c->icache.ways = 2;
......@@ -965,9 +988,11 @@ static void __init probe_pcache(void)
c->dcache.flags |= MIPS_CACHE_PINDEX;
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
case CPU_SB1:
break;
case CPU_24K:
case CPU_34K:
if (!(read_c0_config7() & (1 << 16)))
default:
if (c->dcache.waysize > PAGE_SIZE)
......@@ -1091,6 +1116,7 @@ static void __init setup_scache(void)
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
c->scache.linesz = 64 << ((config >> 13) & 1);
c->scache.ways = 2;
......
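The early return on !cpu_has_dc_aliases and the CPU_24K/CPU_34K additions near the end both hinge on the same property of the R4000-style data cache: it is virtually indexed and physically tagged, so aliases can only appear when one cache way covers more than a page and virtual address bits above PAGE_SHIFT take part in the index. A small sketch of that condition with example figures (not taken from any particular core):

    #include <stdbool.h>

    enum { DCACHE_SIZE = 32 * 1024, DCACHE_WAYS = 4, EXAMPLE_PAGE_SIZE = 4096 };

    /* Aliases exist when two virtual mappings of one physical page can
       land in different cache sets, i.e. when a single way is larger
       than a page. */
    static bool dcache_has_aliases(void)
    {
        unsigned int waysize = DCACHE_SIZE / DCACHE_WAYS;   /* 8 KB here */

        return waysize > EXAMPLE_PAGE_SIZE;                 /* true: flushes needed */
    }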
......@@ -357,6 +357,7 @@ void __init build_clear_page(void)
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
pref_src_mode = Pref_LoadStreamed;
pref_dst_mode = Pref_StoreStreamed;
break;
......
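Pref_LoadStreamed and Pref_StoreStreamed are prefetch hints that mark the clear/copy-page traffic as use-once data, so the R10000 family keeps it from displacing cache lines that will be reused. A sketch of the instruction the generated page-clearing loop ends up issuing (the hint values follow the standard MIPS prefetch-hint encoding; the inline-asm wrapper is illustrative, not the kernel's code generator):

    #define Pref_LoadStreamed   4   /* read once, do not retain */
    #define Pref_StoreStreamed  5   /* written once, do not retain */

    /* Sketch: issue "pref 5, 0(p)" to prepare a streamed store. */
    static inline void prefetch_store_streamed(void *p)
    {
        __asm__ __volatile__("pref %0, 0(%1)"
                             : /* no outputs */
                             : "i" (Pref_StoreStreamed), "r" (p)
                             : "memory");
    }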
......@@ -875,6 +875,7 @@ static __init void build_tlb_write_entry(u32 **p, struct label **l,
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
case CPU_4KC:
case CPU_SB1:
case CPU_SB1A:
......@@ -906,6 +907,7 @@ static __init void build_tlb_write_entry(u32 **p, struct label **l,
case CPU_4KEC:
case CPU_24K:
case CPU_34K:
case CPU_74K:
i_ehb(p);
tlbw(p);
break;
......
......@@ -80,6 +80,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
case CPU_24K:
case CPU_25KF:
case CPU_34K:
case CPU_74K:
case CPU_SB1:
case CPU_SB1A:
lmodel = &op_model_mipsxx;
......
......@@ -205,6 +205,10 @@ static int __init mipsxx_init(void)
case CPU_34K:
op_model_mipsxx.cpu_type = "mips/34K";
break;
case CPU_74K:
op_model_mipsxx.cpu_type = "mips/74K";
break;
#endif
case CPU_5KC:
......
......@@ -51,6 +51,7 @@
#define PRID_IMP_R4300 0x0b00
#define PRID_IMP_VR41XX 0x0c00
#define PRID_IMP_R12000 0x0e00
#define PRID_IMP_R14000 0x0f00
#define PRID_IMP_R8000 0x1000
#define PRID_IMP_PR4450 0x1200
#define PRID_IMP_R4600 0x2000
......@@ -87,6 +88,7 @@
#define PRID_IMP_24K 0x9300
#define PRID_IMP_34K 0x9500
#define PRID_IMP_24KE 0x9600
#define PRID_IMP_74K 0x9700
/*
* These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
......@@ -196,7 +198,9 @@
#define CPU_34K 60
#define CPU_PR4450 61
#define CPU_SB1A 62
#define CPU_LAST 62
#define CPU_74K 63
#define CPU_R14000 64
#define CPU_LAST 64
/*
* ISA Level encodings
......
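The PRID_IMP_* values in this header keep the implementation field in place (bits 15:8), which is why cpu_probe() can compare them directly against prid & 0xff00; the company ID occupies the byte above and the revision the byte below. A sketch of the layout these constants assume (the accessor macro names here are illustrative, not from this header):

    /* CP0 PRId register layout used by the PRID_* constants:
     *   bits 23:16  company ID  (e.g. the PRID_COMP_SIBYTE range)
     *   bits 15:8   implementation / processor ID  (PRID_IMP_*)
     *   bits  7:0   revision
     */
    #define PRID_COMPANY(prid)  (((prid) >> 16) & 0xff)
    #define PRID_IMP(prid)      ((prid) & 0xff00)   /* kept shifted, matches PRID_IMP_74K etc. */
    #define PRID_REVISION(prid) ((prid) & 0x00ff)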
......@@ -7,6 +7,7 @@
#include <linux/futex.h>
#include <asm/errno.h>
#include <asm/uaccess.h>
#include <asm/war.h>
#ifdef CONFIG_SMP
#define __FUTEX_SMP_SYNC " sync \n"
......@@ -16,30 +17,58 @@
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
{ \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
" .set mips3 \n" \
"1: ll %1, (%3) # __futex_atomic_op1 \n" \
" .set mips0 \n" \
" " insn " \n" \
" .set mips3 \n" \
"2: sc $1, (%3) \n" \
" beqzl $1, 1b \n" \
__FUTEX_SMP_SYNC \
"3: \n" \
" .set pop \n" \
" .set mips0 \n" \
" .section .fixup,\"ax\" \n" \
"4: li %0, %5 \n" \
" j 2b \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
" "__UA_ADDR "\t1b, 4b \n" \
" "__UA_ADDR "\t2b, 4b \n" \
" .previous \n" \
: "=r" (ret), "=r" (oldval) \
: "0" (0), "r" (uaddr), "Jr" (oparg), "i" (-EFAULT)); \
if (cpu_has_llsc && R10000_LLSC_WAR) { \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
" .set mips3 \n" \
"1: ll %1, (%3) # __futex_atomic_op \n" \
" .set mips0 \n" \
" " insn " \n" \
" .set mips3 \n" \
"2: sc $1, (%3) \n" \
" beqzl $1, 1b \n" \
__FUTEX_SMP_SYNC \
"3: \n" \
" .set pop \n" \
" .set mips0 \n" \
" .section .fixup,\"ax\" \n" \
"4: li %0, %5 \n" \
" j 2b \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
" "__UA_ADDR "\t1b, 4b \n" \
" "__UA_ADDR "\t2b, 4b \n" \
" .previous \n" \
: "=r" (ret), "=r" (oldval) \
: "0" (0), "r" (uaddr), "Jr" (oparg), "i" (-EFAULT)); \
} else if (cpu_has_llsc) { \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
" .set mips3 \n" \
"1: ll %1, (%3) # __futex_atomic_op \n" \
" .set mips0 \n" \
" " insn " \n" \
" .set mips3 \n" \
"2: sc $1, (%3) \n" \
" beqz $1, 1b \n" \
__FUTEX_SMP_SYNC \
"3: \n" \
" .set pop \n" \
" .set mips0 \n" \
" .section .fixup,\"ax\" \n" \
"4: li %0, %5 \n" \
" j 2b \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
" "__UA_ADDR "\t1b, 4b \n" \
" "__UA_ADDR "\t2b, 4b \n" \
" .previous \n" \
: "=r" (ret), "=r" (oldval) \
: "0" (0), "r" (uaddr), "Jr" (oparg), "i" (-EFAULT)); \
} else \
ret = -ENOSYS; \
}
static inline int
......@@ -102,7 +131,69 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
static inline int
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
{
return -ENOSYS;
int retval;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
if (cpu_has_llsc && R10000_LLSC_WAR) {
__asm__ __volatile__(
"# futex_atomic_cmpxchg_inatomic \n"
" .set push \n"
" .set noat \n"
" .set mips3 \n"
"1: ll %0, %2 \n"
" bne %0, %z3, 3f \n"
" .set mips0 \n"
" move $1, %z4 \n"
" .set mips3 \n"
"2: sc $1, %1 \n"
" beqzl $1, 1b \n"
__FUTEX_SMP_SYNC
"3: \n"
" .set pop \n"
" .section .fixup,\"ax\" \n"
"4: li %0, %5 \n"
" j 3b \n"
" .previous \n"
" .section __ex_table,\"a\" \n"
" "__UA_ADDR "\t1b, 4b \n"
" "__UA_ADDR "\t2b, 4b \n"
" .previous \n"
: "=&r" (retval), "=R" (*uaddr)
: "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT)
: "memory");
} else if (cpu_has_llsc) {
__asm__ __volatile__(
"# futex_atomic_cmpxchg_inatomic \n"
" .set push \n"
" .set noat \n"
" .set mips3 \n"
"1: ll %0, %2 \n"
" bne %0, %z3, 3f \n"
" .set mips0 \n"
" move $1, %z4 \n"
" .set mips3 \n"
"2: sc $1, %1 \n"
" beqz $1, 1b \n"
__FUTEX_SMP_SYNC
"3: \n"
" .set pop \n"
" .section .fixup,\"ax\" \n"
"4: li %0, %5 \n"
" j 3b \n"
" .previous \n"
" .section __ex_table,\"a\" \n"
" "__UA_ADDR "\t1b, 4b \n"
" "__UA_ADDR "\t2b, 4b \n"
" .previous \n"
: "=&r" (retval), "=R" (*uaddr)
: "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT)
: "memory");
} else
return -ENOSYS;
return retval;
}
#endif
......
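Both the rewritten __futex_atomic_op and the new futex_atomic_cmpxchg_inatomic() follow the usual MIPS three-way split: a branch-likely LL/SC loop for cores that need the R10000_LLSC_WAR workaround, a plain LL/SC loop for every other core with load-linked support, and -ENOSYS when neither applies. Roughly how futex_atomic_op_inuser() instantiates the macro per operation (a sketch from memory, not a verbatim copy of the caller; %1 is the loaded old value, %z4 is oparg, and $1 carries the value the sc instruction stores back):

    static int futex_op_sketch(int op, int oparg, int __user *uaddr)
    {
        int oldval = 0, ret = 0;

        switch (op) {
        case FUTEX_OP_SET:
            __futex_atomic_op("move $1, %z4", ret, oldval, uaddr, oparg);
            break;
        case FUTEX_OP_ADD:
            __futex_atomic_op("addu $1, %1, %z4", ret, oldval, uaddr, oparg);
            break;
        case FUTEX_OP_OR:
            __futex_atomic_op("or $1, %1, %z4", ret, oldval, uaddr, oparg);
            break;
        default:
            ret = -ENOSYS;
        }
        return ret;
    }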
......@@ -6,6 +6,7 @@
* for more details.
*
* Copyright (C) 1996, 2000 by Ralf Baechle
* Copyright (C) 2006 by Thiemo Seufer
*/
#ifndef _ASM_INST_H
#define _ASM_INST_H
......@@ -21,14 +22,14 @@ enum major_op {
cop0_op, cop1_op, cop2_op, cop1x_op,
beql_op, bnel_op, blezl_op, bgtzl_op,
daddi_op, daddiu_op, ldl_op, ldr_op,
major_1c_op, jalx_op, major_1e_op, major_1f_op,
spec2_op, jalx_op, mdmx_op, spec3_op,
lb_op, lh_op, lwl_op, lw_op,
lbu_op, lhu_op, lwr_op, lwu_op,
sb_op, sh_op, swl_op, sw_op,
sdl_op, sdr_op, swr_op, cache_op,
ll_op, lwc1_op, lwc2_op, pref_op,
lld_op, ldc1_op, ldc2_op, ld_op,
sc_op, swc1_op, swc2_op, rdhwr_op,
sc_op, swc1_op, swc2_op, major_3b_op,
scd_op, sdc1_op, sdc2_op, sd_op
};
......@@ -37,7 +38,7 @@ enum major_op {
*/
enum spec_op {
sll_op, movc_op, srl_op, sra_op,
sllv_op, srlv_op, srav_op, spec1_unused_op, /* Opcode 0x07 is unused */
sllv_op, pmon_op, srlv_op, srav_op,
jr_op, jalr_op, movz_op, movn_op,
syscall_op, break_op, spim_op, sync_op,
mfhi_op, mthi_op, mflo_op, mtlo_op,
......@@ -54,6 +55,28 @@ enum spec_op {
dsll32_op, spec8_unused_op, dsrl32_op, dsra32_op
};
/*
* func field of spec2 opcode.
*/
enum spec2_op {
madd_op, maddu_op, mul_op, spec2_3_unused_op,
msub_op, msubu_op, /* more unused ops */
clz_op = 0x20, clo_op,
dclz_op = 0x24, dclo_op,
sdbpp_op = 0x3f
};
/*
* func field of spec3 opcode.
*/
enum spec3_op {
ext_op, dextm_op, dextu_op, dext_op,
ins_op, dinsm_op, dinsu_op, dins_op,
bshfl_op = 0x20,
dbshfl_op = 0x24,
rdhwr_op = 0x3f
};
/*
* rt field of bcond opcodes.
*/
......@@ -151,8 +174,8 @@ enum cop1x_func {
* func field for mad opcodes (MIPS IV).
*/
enum mad_func {
madd_op = 0x08, msub_op = 0x0a,
nmadd_op = 0x0c, nmsub_op = 0x0e
madd_fp_op = 0x08, msub_fp_op = 0x0a,
nmadd_fp_op = 0x0c, nmsub_fp_op = 0x0e
};
/*
......
......@@ -291,7 +291,7 @@
#define ST0_DL (_ULCAST_(1) << 24)
/*
* Enable the MIPS DSP ASE
* Enable the MIPS MDMX and DSP ASEs
*/
#define ST0_MX 0x01000000
......
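Status bit 24 is a single enable that gates access to both the MDMX and the DSP ASE register state, which is why the comment now names both extensions. Turning it on is a read-modify-write of CP0 Status; a sketch using the set_c0_status() accessor this header generates:

    /* Sketch: grant the current CPU access to MDMX/DSP ASE state. */
    static inline void enable_mdmx_dsp(void)
    {
        set_c0_status(ST0_MX);      /* Status.MX, bit 24 (0x01000000) */
    }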
......@@ -82,10 +82,11 @@ extern void paging_init(void);
#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#define pmd_page_kernel(pmd) pmd_val(pmd)
#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
#define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#define pte_present(pte) ((pte).pte_low & _PAGE_PRESENT)
static inline void set_pte(pte_t *ptep, pte_t pte)
{
ptep->pte_high = pte.pte_high;
......@@ -93,27 +94,35 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
ptep->pte_low = pte.pte_low;
//printk("pte_high %x pte_low %x\n", ptep->pte_high, ptep->pte_low);
if (pte_val(pte) & _PAGE_GLOBAL) {
if (pte.pte_low & _PAGE_GLOBAL) {
pte_t *buddy = ptep_buddy(ptep);
/*
* Make sure the buddy is global too (if it's !none,
* it better already be global)
*/
if (pte_none(*buddy))
buddy->pte_low |= _PAGE_GLOBAL;
if (pte_none(*buddy)) {
buddy->pte_low |= _PAGE_GLOBAL;
buddy->pte_high |= _PAGE_GLOBAL;
}
}
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t null = __pte(0);
/* Preserve global status for the pair */
if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
else
set_pte_at(mm, addr, ptep, __pte(0));
if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
null.pte_low = null.pte_high = _PAGE_GLOBAL;
set_pte_at(mm, addr, ptep, null);
}
#else
#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
/*
* Certain architectures need to do special things when pte's
* within a page table are directly modified. Thus, the following
......@@ -174,75 +183,76 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
*/
static inline int pte_user(pte_t pte) { BUG(); return 0; }
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_READ; }
static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; }
static inline int pte_read(pte_t pte) { return pte.pte_low & _PAGE_READ; }
static inline int pte_write(pte_t pte) { return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte) { return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte.pte_low & _PAGE_FILE; }
static inline pte_t pte_wrprotect(pte_t pte)
{
(pte).pte_low &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
(pte).pte_high &= ~_PAGE_SILENT_WRITE;
pte.pte_low &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
pte.pte_high &= ~_PAGE_SILENT_WRITE;
return pte;
}
static inline pte_t pte_rdprotect(pte_t pte)
{
(pte).pte_low &= ~(_PAGE_READ | _PAGE_SILENT_READ);
(pte).pte_high &= ~_PAGE_SILENT_READ;
pte.pte_low &= ~(_PAGE_READ | _PAGE_SILENT_READ);
pte.pte_high &= ~_PAGE_SILENT_READ;
return pte;
}
static inline pte_t pte_mkclean(pte_t pte)
{
(pte).pte_low &= ~(_PAGE_MODIFIED|_PAGE_SILENT_WRITE);
(pte).pte_high &= ~_PAGE_SILENT_WRITE;
pte.pte_low &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
pte.pte_high &= ~_PAGE_SILENT_WRITE;
return pte;
}
static inline pte_t pte_mkold(pte_t pte)
{
(pte).pte_low &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);
(pte).pte_high &= ~_PAGE_SILENT_READ;
pte.pte_low &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
pte.pte_high &= ~_PAGE_SILENT_READ;
return pte;
}
static inline pte_t pte_mkwrite(pte_t pte)
{
(pte).pte_low |= _PAGE_WRITE;
if ((pte).pte_low & _PAGE_MODIFIED) {
(pte).pte_low |= _PAGE_SILENT_WRITE;
(pte).pte_high |= _PAGE_SILENT_WRITE;
pte.pte_low |= _PAGE_WRITE;
if (pte.pte_low & _PAGE_MODIFIED) {
pte.pte_low |= _PAGE_SILENT_WRITE;
pte.pte_high |= _PAGE_SILENT_WRITE;
}
return pte;
}
static inline pte_t pte_mkread(pte_t pte)
{
(pte).pte_low |= _PAGE_READ;
if ((pte).pte_low & _PAGE_ACCESSED) {
(pte).pte_low |= _PAGE_SILENT_READ;
(pte).pte_high |= _PAGE_SILENT_READ;
pte.pte_low |= _PAGE_READ;
if (pte.pte_low & _PAGE_ACCESSED) {
pte.pte_low |= _PAGE_SILENT_READ;
pte.pte_high |= _PAGE_SILENT_READ;
}
return pte;
}
static inline pte_t pte_mkdirty(pte_t pte)
{
(pte).pte_low |= _PAGE_MODIFIED;
if ((pte).pte_low & _PAGE_WRITE) {
(pte).pte_low |= _PAGE_SILENT_WRITE;
(pte).pte_high |= _PAGE_SILENT_WRITE;
pte.pte_low |= _PAGE_MODIFIED;
if (pte.pte_low & _PAGE_WRITE) {
pte.pte_low |= _PAGE_SILENT_WRITE;
pte.pte_high |= _PAGE_SILENT_WRITE;
}
return pte;
}
static inline pte_t pte_mkyoung(pte_t pte)
{
(pte).pte_low |= _PAGE_ACCESSED;
if ((pte).pte_low & _PAGE_READ)
(pte).pte_low |= _PAGE_SILENT_READ;
(pte).pte_high |= _PAGE_SILENT_READ;
pte.pte_low |= _PAGE_ACCESSED;
if (pte.pte_low & _PAGE_READ)
pte.pte_low |= _PAGE_SILENT_READ;
pte.pte_high |= _PAGE_SILENT_READ;
return pte;
}
#else
......
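The buddy entry matters because each MIPS TLB entry maps an even/odd pair of virtual pages and treats the pair as global only when the G bit is set in both EntryLo halves; a global pte next to a not-global empty pte would therefore lose its global status, which is what the pte_high fix above prevents for the 36-bit physical-address layout. The buddy is simply the neighbouring entry in the page table; a sketch of how it is found, mirroring the ptep_buddy() helper used above (the struct here is an illustrative stand-in for the two-word pte):

    struct pte_sketch {
        unsigned int pte_low;       /* flags plus low physical-address bits */
        unsigned int pte_high;      /* high physical-address bits plus mirrored flags */
    };

    /* Toggling the lowest bit of the entry's offset flips between the
       even and odd member of a TLB pair. */
    static struct pte_sketch *pte_buddy(struct pte_sketch *ptep)
    {
        return (struct pte_sketch *)((unsigned long)ptep ^ sizeof(*ptep));
    }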
......@@ -55,8 +55,14 @@ struct sigcontext {
struct sigcontext {
unsigned long sc_regs[32];
unsigned long sc_fpregs[32];
unsigned long sc_hi[4];
unsigned long sc_lo[4];
unsigned long sc_mdhi;
unsigned long sc_hi1;
unsigned long sc_hi2;
unsigned long sc_hi3;
unsigned long sc_mdlo;
unsigned long sc_lo1;
unsigned long sc_lo2;
unsigned long sc_lo3;
unsigned long sc_pc;
unsigned int sc_fpc_csr;
unsigned int sc_used_math;
......
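Replacing the sc_hi[4]/sc_lo[4] arrays with named members moves nothing: element 0 of each array becomes sc_mdhi/sc_mdlo and elements 1-3 become the DSP accumulator slots, so every byte offset, and with it the user-space ABI, is unchanged; the asm-offsets and signal-code hunks earlier in this diff are just the renames that follow from it. A quick illustrative check (stand-in structs, not the kernel definitions):

    #include <assert.h>
    #include <stddef.h>

    struct sc_old {
        unsigned long regs[32], fpregs[32];
        unsigned long hi[4], lo[4];
        unsigned long pc;
    };

    struct sc_new {
        unsigned long regs[32], fpregs[32];
        unsigned long mdhi, hi1, hi2, hi3;
        unsigned long mdlo, lo1, lo2, lo3;
        unsigned long pc;
    };

    int main(void)
    {
        /* Same byte offsets before and after the rename. */
        assert(offsetof(struct sc_old, hi) == offsetof(struct sc_new, mdhi));
        assert(offsetof(struct sc_old, lo) == offsetof(struct sc_new, mdlo));
        assert(offsetof(struct sc_old, pc) == offsetof(struct sc_new, pc));
        return 0;
    }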