Commit 173d6681 authored by Chris Zankel, committed by Linus Torvalds

[PATCH] xtensa: remove extra header files

The Xtensa port contained many header files that were never needed.  This
rather lengthy patch removes all those files.  Unfortunately, there were
many dependencies that needed to be updated, so this patch touches quite a
few source files.

Signed-off-by: Chris Zankel <chris@zankel.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent fd43fe19
......@@ -48,25 +48,10 @@ menu "Processor type and features"
choice
prompt "Xtensa Processor Configuration"
default XTENSA_CPU_LINUX_BE
default XTENSA_VARIANT_FSF
config XTENSA_CPU_LINUX_BE
bool "linux_be"
---help---
The linux_be processor configuration is the baseline Xtensa
configurations included in this kernel and also used by
binutils, gcc, and gdb. It contains no TIE, no coprocessors,
and the following configuration options:
Code Density Option 2 Misc Special Registers
NSA/NSAU Instructions 128-bit Data Bus Width
Processor ID 8K, 2-way I and D Caches
Zero-Overhead Loops 2 Inst Address Break Registers
Big Endian 2 Data Address Break Registers
64 General-Purpose Registers JTAG Interface and Trace Port
17 Interrupts MMU w/ TLBs and Autorefill
3 Interrupt Levels 8 Autorefill Ways (I/D TLBs)
3 Timers Unaligned Exceptions
config XTENSA_VARIANT_FSF
bool "fsf"
endchoice
config MMU
......
......@@ -11,13 +11,13 @@
# this architecture
# Core configuration.
# (Use CPU=<xtensa_config> to use another default compiler.)
# (Use VAR=<xtensa_config> to use another default compiler.)
cpu-$(CONFIG_XTENSA_CPU_LINUX_BE) := linux_be
cpu-$(CONFIG_XTENSA_CPU_LINUX_CUSTOM) := linux_custom
variant-$(CONFIG_XTENSA_VARIANT_FSF) := fsf
variant-$(CONFIG_XTENSA_VARIANT_LINUX_CUSTOM) := custom
CPU = $(cpu-y)
export CPU
VARIANT = $(variant-y)
export VARIANT
# Platform configuration
......@@ -27,8 +27,6 @@ platform-$(CONFIG_XTENSA_PLATFORM_ISS) := iss
PLATFORM = $(platform-y)
export PLATFORM
CPPFLAGS += $(if $(KBUILD_SRC),-I$(srctree)/include/asm-xtensa/)
CPPFLAGS += -Iinclude/asm
CFLAGS += -pipe -mlongcalls
KBUILD_DEFCONFIG := iss_defconfig
......@@ -41,12 +39,12 @@ core-$(CONFIG_EMBEDDED_RAMDISK) += arch/xtensa/boot/ramdisk/
# Test for cross compiling
ifneq ($(CPU),)
ifneq ($(VARIANT),)
COMPILE_ARCH = $(shell uname -m)
ifneq ($(COMPILE_ARCH), xtensa)
ifndef CROSS_COMPILE
CROSS_COMPILE = xtensa_$(CPU)-
CROSS_COMPILE = xtensa_$(VARIANT)-
endif
endif
endif
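# A hypothetical invocation on a non-xtensa host: with no CROSS_COMPILE
# given on the command line, the prefix now defaults to
# xtensa_$(VARIANT)-, e.g. for the fsf variant:
#
#   make ARCH=xtensa iss_defconfig
#   make ARCH=xtensa            # uses xtensa_fsf-gcc, xtensa_fsf-ld, ...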
......@@ -68,14 +66,13 @@ archinc := include/asm-xtensa
archprepare: $(archinc)/.platform
# Update machine cpu and platform symlinks if something which affects
# Update processor variant and platform symlinks if something which affects
# them changed.
$(archinc)/.platform: $(wildcard include/config/arch/*.h) include/config/auto.conf
@echo ' SYMLINK $(archinc)/xtensa/config -> $(archinc)/xtensa/config-$(CPU)'
@echo ' SYMLINK $(archinc)/variant -> $(archinc)/variant-$(VARIANT)'
$(Q)mkdir -p $(archinc)
$(Q)mkdir -p $(archinc)/xtensa
$(Q)ln -fsn $(srctree)/$(archinc)/xtensa/config-$(CPU) $(archinc)/xtensa/config
$(Q)ln -fsn $(srctree)/$(archinc)/variant-$(VARIANT) $(archinc)/variant
@echo ' SYMLINK $(archinc)/platform -> $(archinc)/platform-$(PLATFORM)'
$(Q)ln -fsn $(srctree)/$(archinc)/platform-$(PLATFORM) $(archinc)/platform
@touch $@
......@@ -89,7 +86,7 @@ zImage zImage.initrd: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $@
CLEAN_FILES += arch/xtensa/vmlinux.lds \
$(archinc)/platform $(archinc)/xtensa/config \
$(archinc)/platform $(archinc)/variant \
$(archinc)/.platform
define archhelp
......
#include <xtensa/config/specreg.h>
#include <xtensa/config/core.h>
#include <asm/bootparam.h>
......
#define _ASMLANGUAGE
#include <xtensa/config/specreg.h>
#include <xtensa/config/core.h>
#include <xtensa/cacheasm.h>
#include <asm/variant/core.h>
#include <asm/regs.h>
#include <asm/asmmacro.h>
#include <asm/cacheasm.h>
/*
* RB-Data: RedBoot data/bss
* P: Boot-Parameters
......@@ -77,8 +75,14 @@ _start:
/* Note: The assembler cannot relax "addi a0, a0, ..." to an
l32r, so we load to a4 first. */
addi a4, a0, __start - __start_a0
mov a0, a4
# addi a4, a0, __start - __start_a0
# mov a0, a4
movi a4, __start
movi a5, __start_a0
add a4, a0, a4
sub a0, a4, a5
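	# Net effect of the four instructions above, in C terms:
	#   a0 = a0 + (__start - __start_a0)
	# i.e. the link-time offset between __start and __start_a0 is
	# added to the runtime value of a0.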
movi a4, __start
movi a5, __reloc_end
......@@ -106,9 +110,13 @@ _start:
/* We have to flush and invalidate the caches here before we jump. */
#if XCHAL_DCACHE_IS_WRITEBACK
dcache_writeback_all a5, a6
___flush_dcache_all a5 a6
#endif
icache_invalidate_all a5, a6
___invalidate_icache_all a5 a6
isync
movi a11, _reloc
jx a11
......@@ -209,9 +217,14 @@ _reloc:
/* jump to the kernel */
2:
#if XCHAL_DCACHE_IS_WRITEBACK
dcache_writeback_all a5, a6
___flush_dcache_all a5 a6
#endif
icache_invalidate_all a5, a6
___invalidate_icache_all a5 a6
isync
movi a5, __start
movi a3, boot_initrd_start
......
......@@ -53,11 +53,7 @@ CONFIG_CC_ALIGN_JUMPS=0
#
# Processor type and features
#
CONFIG_XTENSA_ARCH_LINUX_BE=y
# CONFIG_XTENSA_ARCH_LINUX_LE is not set
# CONFIG_XTENSA_ARCH_LINUX_TEST is not set
# CONFIG_XTENSA_ARCH_S5 is not set
# CONFIG_XTENSA_CUSTOM is not set
CONFIG_XTENSA_VARIANT_FSF=y
CONFIG_MMU=y
# CONFIG_XTENSA_UNALIGNED_USER is not set
# CONFIG_PREEMPT is not set
......
......@@ -16,14 +16,9 @@
*/
#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
......@@ -216,7 +211,7 @@ ENTRY(fast_unaligned)
extui a5, a4, INSN_OP0, 4 # get insn.op0 nibble
#if XCHAL_HAVE_NARROW
#if XCHAL_HAVE_DENSITY
_beqi a5, OP0_L32I_N, .Lload # L32I.N, jump
addi a6, a5, -OP0_S32I_N
_beqz a6, .Lstore # S32I.N, do a store
......@@ -251,7 +246,7 @@ ENTRY(fast_unaligned)
#endif
__src_b a3, a5, a6 # a3 has the data word
#if XCHAL_HAVE_NARROW
#if XCHAL_HAVE_DENSITY
addi a7, a7, 2 # increment PC (assume 16-bit insn)
extui a5, a4, INSN_OP0, 4
......@@ -279,14 +274,14 @@ ENTRY(fast_unaligned)
1:
#if XCHAL_HAVE_LOOP
rsr a3, LEND # check if we reached LEND
bne a7, a3, 1f
rsr a3, LCOUNT # and LCOUNT != 0
beqz a3, 1f
addi a3, a3, -1 # decrement LCOUNT and set
#if XCHAL_HAVE_LOOPS
rsr a5, LEND # check if we reached LEND
bne a7, a5, 1f
rsr a5, LCOUNT # and LCOUNT != 0
beqz a5, 1f
addi a5, a5, -1 # decrement LCOUNT and set
rsr a7, LBEG # set PC to LBEGIN
wsr a3, LCOUNT
wsr a5, LCOUNT
#endif
1: wsr a7, EPC_1 # skip load instruction
......@@ -336,7 +331,7 @@ ENTRY(fast_unaligned)
movi a6, 0 # mask: ffffffff:00000000
#if XCHAL_HAVE_NARROW
#if XCHAL_HAVE_DENSITY
addi a7, a7, 2 # incr. PC,assume 16-bit instruction
extui a5, a4, INSN_OP0, 4 # extract OP0
......@@ -359,14 +354,14 @@ ENTRY(fast_unaligned)
/* Get memory address */
1:
#if XCHAL_HAVE_LOOP
rsr a3, LEND # check if we reached LEND
bne a7, a3, 1f
rsr a3, LCOUNT # and LCOUNT != 0
beqz a3, 1f
addi a3, a3, -1 # decrement LCOUNT and set
#if XCHAL_HAVE_LOOPS
rsr a4, LEND # check if we reached LEND
bne a7, a4, 1f
rsr a4, LCOUNT # and LCOUNT != 0
beqz a4, 1f
addi a4, a4, -1 # decrement LCOUNT and set
rsr a7, LBEG # set PC to LBEGIN
wsr a3, LCOUNT
wsr a4, LCOUNT
#endif
1: wsr a7, EPC_1 # skip store instruction
......@@ -416,6 +411,7 @@ ENTRY(fast_unaligned)
/* Restore working register */
l32i a8, a2, PT_AREG8
l32i a7, a2, PT_AREG7
l32i a6, a2, PT_AREG6
l32i a5, a2, PT_AREG5
......@@ -446,7 +442,7 @@ ENTRY(fast_unaligned)
mov a1, a2
rsr a0, PS
bbsi.l a2, PS_UM_SHIFT, 1f # jump if user mode
bbsi.l a2, PS_UM_BIT, 1f # jump if user mode
movi a0, _kernel_exception
jx a0
......
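In C terms, the LEND/LCOUNT fixup that both the load and store paths above perform after emulating the access is (a sketch; next_pc is the already-incremented program counter):

	if (next_pc == LEND && LCOUNT != 0) {
		LCOUNT--;		/* one loop iteration consumed */
		next_pc = LBEG;		/* branch back to the loop body */
	}
	EPC_1 = next_pc;		/* resume past the emulated insn */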
......@@ -90,7 +90,6 @@ ENTRY(enable_coprocessor)
rsync
retw
#endif
ENTRY(save_coprocessor_extra)
entry sp, 16
......@@ -197,4 +196,5 @@ _xtensa_reginfo_tables:
XCHAL_CP7_SA_CONTENTS_LIBDB
.word 0xFC000000 /* invalid register number,marks end of table*/
_xtensa_reginfo_table_end:
#endif
......@@ -24,7 +24,7 @@
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <xtensa/coreasm.h>
#include <asm/tlbflush.h>
/* Unimplemented features. */
......@@ -364,7 +364,7 @@ common_exception:
movi a2, 1
extui a3, a3, 0, 1 # a3 = PS.INTLEVEL[0]
moveqz a3, a2, a0 # a3 = 1 iff interrupt exception
movi a2, PS_WOE_MASK
movi a2, 1 << PS_WOE_BIT
or a3, a3, a2
rsr a0, EXCCAUSE
xsr a3, PS
......@@ -399,7 +399,7 @@ common_exception_return:
/* Jump if we are returning from kernel exceptions. */
1: l32i a3, a1, PT_PS
_bbsi.l a3, PS_UM_SHIFT, 2f
_bbsi.l a3, PS_UM_BIT, 2f
j kernel_exception_exit
/* Specific to a user exception exit:
......@@ -422,7 +422,7 @@ common_exception_return:
* (Hint: There is only one user exception frame on stack)
*/
movi a3, PS_WOE_MASK
movi a3, 1 << PS_WOE_BIT
_bbsi.l a4, TIF_NEED_RESCHED, 3f
_bbci.l a4, TIF_SIGPENDING, 4f
......@@ -694,7 +694,7 @@ common_exception_exit:
ENTRY(debug_exception)
rsr a0, EPS + XCHAL_DEBUGLEVEL
bbsi.l a0, PS_EXCM_SHIFT, 1f # exception mode
bbsi.l a0, PS_EXCM_BIT, 1f # exception mode
/* Set EPC_1 and EXCCAUSE */
......@@ -707,7 +707,7 @@ ENTRY(debug_exception)
/* Restore PS to the value before the debug exc but with PS.EXCM set.*/
movi a2, 1 << PS_EXCM_SHIFT
movi a2, 1 << PS_EXCM_BIT
or a2, a0, a2
movi a0, debug_exception # restore a3, debug jump vector
wsr a2, PS
......@@ -715,7 +715,7 @@ ENTRY(debug_exception)
/* Switch to kernel/user stack, restore jump vector, and save a0 */
bbsi.l a2, PS_UM_SHIFT, 2f # jump if user mode
bbsi.l a2, PS_UM_BIT, 2f # jump if user mode
addi a2, a1, -16-PT_SIZE # assume kernel stack
s32i a0, a2, PT_AREG0
......@@ -778,7 +778,7 @@ ENTRY(unrecoverable_exception)
wsr a1, WINDOWBASE
rsync
movi a1, PS_WOE_MASK | 1
movi a1, (1 << PS_WOE_BIT) | 1
wsr a1, PS
rsync
......@@ -1491,7 +1491,7 @@ ENTRY(_spill_registers)
*/
rsr a0, PS
_bbci.l a0, PS_UM_SHIFT, 1f
_bbci.l a0, PS_UM_BIT, 1f
/* User space: Setup a dummy frame and kill application.
* Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
......@@ -1510,7 +1510,7 @@ ENTRY(_spill_registers)
l32i a1, a3, EXC_TABLE_KSTK
wsr a3, EXCSAVE_1
movi a4, PS_WOE_MASK | 1
movi a4, (1 << PS_WOE_BIT) | 1
wsr a4, PS
rsync
......@@ -1612,7 +1612,7 @@ ENTRY(fast_second_level_miss)
rsr a1, PTEVADDR
srli a1, a1, PAGE_SHIFT
slli a1, a1, PAGE_SHIFT # ptevaddr & PAGE_MASK
addi a1, a1, DTLB_WAY_PGTABLE # ... + way_number
addi a1, a1, DTLB_WAY_PGD # ... + way_number
wdtlb a0, a1
dsync
......@@ -1654,7 +1654,7 @@ ENTRY(fast_second_level_miss)
mov a1, a2
rsr a2, PS
bbsi.l a2, PS_UM_SHIFT, 1f
bbsi.l a2, PS_UM_BIT, 1f
j _kernel_exception
1: j _user_exception
......@@ -1753,7 +1753,7 @@ ENTRY(fast_store_prohibited)
mov a1, a2
rsr a2, PS
bbsi.l a2, PS_UM_SHIFT, 1f
bbsi.l a2, PS_UM_BIT, 1f
j _kernel_exception
1: j _user_exception
......@@ -1924,7 +1924,7 @@ ENTRY(_switch_to)
/* Disable ints while we manipulate the stack pointer; spill regs. */
movi a5, PS_EXCM_MASK | LOCKLEVEL
movi a5, (1 << PS_EXCM_BIT) | LOCKLEVEL
xsr a5, PS
rsr a3, EXCSAVE_1
rsync
......
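Throughout these handlers, the XCHAL_PS_*_MASK/*_SHIFT constants from the removed Xtensa headers give way to plain bit numbers from <asm/regs.h>. A sketch of the correspondence (bit positions per the standard Xtensa PS layout; verify against asm/regs.h):

	PS_WOE_MASK        -> 1 << PS_WOE_BIT     (PS.WOE,  bit 18)
	1 << PS_EXCM_SHIFT -> 1 << PS_EXCM_BIT    (PS.EXCM, bit 4)
	PS_UM_SHIFT        -> PS_UM_BIT           (PS.UM,   bit 5)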
......@@ -15,9 +15,9 @@
* Kevin Chea
*/
#include <xtensa/cacheasm.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cacheasm.h>
/*
* This module contains the entry code for kernel images. It performs the
......@@ -32,13 +32,6 @@
*
*/
.macro iterate from, to , cmd
.ifeq ((\to - \from) & ~0xfff)
\cmd \from
iterate "(\from+1)", \to, \cmd
.endif
.endm
/*
* _start
*
......@@ -64,7 +57,7 @@ _startup:
/* Disable interrupts and exceptions. */
movi a0, XCHAL_PS_EXCM_MASK
movi a0, LOCKLEVEL
wsr a0, PS
/* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
......@@ -91,11 +84,11 @@ _startup:
movi a1, 15
wsr a0, ICOUNTLEVEL
.macro reset_dbreak num
wsr a0, DBREAKC + \num
.endm
iterate 0, XCHAL_NUM_IBREAK-1, reset_dbreak
.set _index, 0
.rept XCHAL_NUM_DBREAK - 1
wsr a0, DBREAKC + _index
.set _index, _index + 1
.endr
#endif
/* Clear CCOUNT (not really necessary, but nice) */
......@@ -110,10 +103,11 @@ _startup:
/* Disable all timers. */
.macro reset_timer num
wsr a0, CCOMPARE_0 + \num
.endm
iterate 0, XCHAL_NUM_TIMERS-1, reset_timer
.set _index, 0
.rept XCHAL_NUM_TIMERS - 1
wsr a0, CCOMPARE + _index
.set _index, _index + 1
.endr
/* Interrupt initialization. */
......@@ -139,12 +133,21 @@ _startup:
rsync
/* Initialize the caches.
* Does not include flushing writeback d-cache.
* a6, a7 are just working registers (clobbered).
* a2, a3 are just working registers (clobbered).
*/
icache_reset a2, a3
dcache_reset a2, a3
#if XCHAL_DCACHE_LINE_LOCKABLE
___unlock_dcache_all a2 a3
#endif
#if XCHAL_ICACHE_LINE_LOCKABLE
___unlock_icache_all a2 a3
#endif
___invalidate_dcache_all a2 a3
___invalidate_icache_all a2 a3
isync
/* Unpack data sections
*
......@@ -181,9 +184,9 @@ _startup:
movi a2, _bss_start # start of BSS
movi a3, _bss_end # end of BSS
1: addi a2, a2, 4
__loopt a2, a3, a4, 2
s32i a0, a2, 0
blt a2, a3, 1b
__endla a2, a4, 4
#if XCHAL_DCACHE_IS_WRITEBACK
......@@ -191,7 +194,7 @@ _startup:
* instructions/data are available.
*/
dcache_writeback_all a2, a3
___flush_dcache_all a2 a3
#endif
/* Setup stack and enable window exceptions (keep irqs disabled) */
......
/*
* arch/xtensa/kernel/pci-dma.c
* arch/xtensa/pci-dma.c
*
* DMA coherent memory allocation.
*
......@@ -29,28 +29,48 @@
*/
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag)
{
void *ret;
unsigned long ret;
unsigned long uncached = 0;
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
if (dev == NULL || (*dev->dma_mask < 0xffffffff))
gfp |= GFP_DMA;
ret = (void *)__get_free_pages(gfp, get_order(size));
flag &= ~(__GFP_DMA | __GFP_HIGHMEM);
if (ret != NULL) {
memset(ret, 0, size);
*handle = virt_to_bus(ret);
if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
flag |= GFP_DMA;
ret = (unsigned long)__get_free_pages(flag, get_order(size));
if (ret == 0)
return NULL;
/* We currently don't support coherent memory outside KSEG */
if (ret < XCHAL_KSEG_CACHED_VADDR
|| ret >= XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE)
BUG();
if (ret != 0) {
memset((void*) ret, 0, size);
uncached = ret+XCHAL_KSEG_BYPASS_VADDR-XCHAL_KSEG_CACHED_VADDR;
*handle = virt_to_bus((void*)ret);
__flush_invalidate_dcache_range(ret, size);
}
return (void*) BYPASS_ADDR((unsigned long)ret);
return (void*)uncached;
}
void dma_free_coherent(struct device *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
free_pages(CACHED_ADDR((unsigned long)vaddr), get_order(size));
long addr=(long)vaddr+XCHAL_KSEG_CACHED_VADDR-XCHAL_KSEG_BYPASS_VADDR;
if (addr < 0 || addr >= XCHAL_KSEG_SIZE)
BUG();
free_pages(addr, get_order(size));
}
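A minimal usage sketch of the rewritten allocator (hypothetical device pointer and size; error handling elided):

	#include <linux/dma-mapping.h>

	dma_addr_t dma_handle;
	void *buf = dma_alloc_coherent(dev, 4096, &dma_handle, GFP_KERNEL);
	if (buf) {
		/* buf is an uncached (KSEG bypass) virtual address;
		 * dma_handle is the matching bus address */
		memset(buf, 0xff, 4096);
		dma_free_coherent(dev, 4096, buf, dma_handle);
	}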
......
// TODO verify coprocessor handling
/*
* arch/xtensa/kernel/process.c
*
......@@ -43,7 +42,7 @@
#include <asm/irq.h>
#include <asm/atomic.h>
#include <asm/asm-offsets.h>
#include <asm/coprocessor.h>
#include <asm/regs.h>
extern void ret_from_fork(void);
......@@ -67,25 +66,6 @@ void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
#if XCHAL_CP_NUM > 0
/*
* Coprocessor ownership.
*/
coprocessor_info_t coprocessor_info[] = {
{ 0, XTENSA_CPE_CP0_OFFSET },
{ 0, XTENSA_CPE_CP1_OFFSET },
{ 0, XTENSA_CPE_CP2_OFFSET },
{ 0, XTENSA_CPE_CP3_OFFSET },
{ 0, XTENSA_CPE_CP4_OFFSET },
{ 0, XTENSA_CPE_CP5_OFFSET },
{ 0, XTENSA_CPE_CP6_OFFSET },
{ 0, XTENSA_CPE_CP7_OFFSET },
};
#endif
/*
* Power management idle function, if any is provided by the platform.
*/
......@@ -110,12 +90,10 @@ void cpu_idle(void)
void exit_thread(void)
{
release_coprocessors(current); /* Empty macro if no CPs are defined */
}
void flush_thread(void)
{
release_coprocessors(current); /* Empty macro if no CPs are defined */
}
/*
......@@ -275,7 +253,7 @@ void do_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
*/
elfregs->pc = regs->pc;
elfregs->ps = (regs->ps & ~XCHAL_PS_EXCM_MASK);
elfregs->ps = (regs->ps & ~(1 << PS_EXCM_BIT));
elfregs->exccause = regs->exccause;
elfregs->excvaddr = regs->excvaddr;
elfregs->windowbase = regs->windowbase;
......@@ -325,7 +303,7 @@ void do_restore_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
*/
regs->pc = elfregs->pc;
regs->ps = (elfregs->ps | XCHAL_PS_EXCM_MASK);
regs->ps = (elfregs->ps | (1 << PS_EXCM_BIT));
regs->exccause = elfregs->exccause;
regs->excvaddr = elfregs->excvaddr;
regs->windowbase = elfregs->windowbase;
......@@ -459,16 +437,7 @@ int do_restore_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
int
dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r)
{
/* see asm/coprocessor.h for this magic number 16 */
#if XTENSA_CP_EXTRA_SIZE > 16
do_save_fpregs (r, regs, task);
/* For now, bit 16 means some extra state may be present: */
// FIXME!! need to track to return more accurate mask
return 0x10000 | XCHAL_CP_MASK;
#else
return 0; /* no coprocessors active on this processor */
#endif
}
/*
......
......@@ -96,7 +96,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* Note: PS.EXCM is not set while user task is running;
* its being set in regs is for exception handling
* convenience. */
tmp = (regs->ps & ~XCHAL_PS_EXCM_MASK);
tmp = (regs->ps & ~(1 << PS_EXCM_BIT));
break;
case REG_WB:
tmp = regs->windowbase;
......
......@@ -42,8 +42,6 @@
#include <asm/page.h>
#include <asm/setup.h>
#include <xtensa/config/system.h>
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = { 0, 24, 0, 0, 0, 80, 0, 0, 0, 24, 1, 16};
#endif
......@@ -336,7 +334,7 @@ c_show(struct seq_file *f, void *slot)
/* high-level stuff */
seq_printf(f,"processor\t: 0\n"
"vendor_id\t: Tensilica\n"
"model\t\t: Xtensa " XCHAL_HW_RELEASE_NAME "\n"
"model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n"
"core ID\t\t: " XCHAL_CORE_ID "\n"
"build ID\t: 0x%x\n"
"byte order\t: %s\n"
......@@ -420,25 +418,6 @@ c_show(struct seq_file *f, void *slot)
XCHAL_NUM_TIMERS,
XCHAL_DEBUGLEVEL);
/* Coprocessors */
#if XCHAL_HAVE_CP
seq_printf(f, "coprocessors\t: %d\n", XCHAL_CP_NUM);
#else
seq_printf(f, "coprocessors\t: none\n");
#endif
/* {I,D}{RAM,ROM} and XLMI */
seq_printf(f,"inst ROMs\t: %d\n"
"inst RAMs\t: %d\n"
"data ROMs\t: %d\n"
"data RAMs\t: %d\n"
"XLMI ports\t: %d\n",
XCHAL_NUM_IROM,
XCHAL_NUM_IRAM,
XCHAL_NUM_DROM,
XCHAL_NUM_DRAM,
XCHAL_NUM_XLMI);
/* Cache */
seq_printf(f,"icache line size: %d\n"
"icache ways\t: %d\n"
......@@ -466,24 +445,6 @@ c_show(struct seq_file *f, void *slot)
XCHAL_DCACHE_WAYS,
XCHAL_DCACHE_SIZE);
/* MMU */
seq_printf(f,"ASID bits\t: %d\n"
"ASID invalid\t: %d\n"
"ASID kernel\t: %d\n"
"rings\t\t: %d\n"
"itlb ways\t: %d\n"
"itlb AR ways\t: %d\n"
"dtlb ways\t: %d\n"
"dtlb AR ways\t: %d\n",
XCHAL_MMU_ASID_BITS,
XCHAL_MMU_ASID_INVALID,
XCHAL_MMU_ASID_KERNEL,
XCHAL_MMU_RINGS,
XCHAL_ITLB_WAYS,
XCHAL_ITLB_ARF_WAYS,
XCHAL_DTLB_WAYS,
XCHAL_DTLB_ARF_WAYS);
return 0;
}
......
......@@ -12,8 +12,8 @@
*
*/
#include <xtensa/config/core.h>
#include <xtensa/hal.h>
#include <asm/variant/core.h>
#include <asm/coprocessor.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
......@@ -216,8 +216,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
* handler, or the user mode value doesn't matter (e.g. PS.OWB).
*/
err |= __get_user(ps, &sc->sc_ps);
regs->ps = (regs->ps & ~XCHAL_PS_CALLINC_MASK)
| (ps & XCHAL_PS_CALLINC_MASK);
regs->ps = (regs->ps & ~PS_CALLINC_MASK)
| (ps & PS_CALLINC_MASK);
/* Additional corruption checks */
......@@ -280,7 +280,7 @@ flush_my_cpstate(struct task_struct *tsk)
static int
save_cpextra (struct _cpstate *buf)
{
#if (XCHAL_EXTRA_SA_SIZE == 0) && (XCHAL_CP_NUM == 0)
#if XCHAL_CP_NUM == 0
return 0;
#else
......@@ -497,8 +497,10 @@ gen_return_code(unsigned char *codemem, unsigned int use_rt_sigreturn)
/* Flush generated code out of the data cache */
if (err == 0)
__flush_invalidate_cache_range((unsigned long)codemem, 6UL);
if (err == 0) {
__invalidate_icache_range((unsigned long)codemem, 6UL);
__flush_invalidate_dcache_range((unsigned long)codemem, 6UL);
}
return err;
}
......
......@@ -175,8 +175,8 @@ void system_call (struct pt_regs *regs)
* interrupts in the first place:
*/
local_save_flags (ps);
local_irq_restore((ps & ~XCHAL_PS_INTLEVEL_MASK) |
(regs->ps & XCHAL_PS_INTLEVEL_MASK) );
local_irq_restore((ps & ~PS_INTLEVEL_MASK) |
(regs->ps & PS_INTLEVEL_MASK) );
if (syscallnr > __NR_Linux_syscalls) {
regs->areg[2] = -ENOSYS;
......
......@@ -75,7 +75,7 @@ extern void system_call (struct pt_regs*);
#define USER 0x02
#define COPROCESSOR(x) \
{ XCHAL_EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER, fast_coprocessor }
{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER, fast_coprocessor }
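/* e.g. COPROCESSOR(0) expands, after token pasting, to:
 *	{ EXCCAUSE_COPROCESSOR0_DISABLED, USER, fast_coprocessor }
 */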
typedef struct {
int cause;
......@@ -85,38 +85,38 @@ typedef struct {
dispatch_init_table_t __init dispatch_init_table[] = {
{ XCHAL_EXCCAUSE_ILLEGAL_INSTRUCTION, 0, do_illegal_instruction},
{ XCHAL_EXCCAUSE_SYSTEM_CALL, KRNL, fast_syscall_kernel },
{ XCHAL_EXCCAUSE_SYSTEM_CALL, USER, fast_syscall_user },
{ XCHAL_EXCCAUSE_SYSTEM_CALL, 0, system_call },
/* XCHAL_EXCCAUSE_INSTRUCTION_FETCH unhandled */
/* XCHAL_EXCCAUSE_LOAD_STORE_ERROR unhandled*/
{ XCHAL_EXCCAUSE_LEVEL1_INTERRUPT, 0, do_interrupt },
{ XCHAL_EXCCAUSE_ALLOCA, USER|KRNL, fast_alloca },
/* XCHAL_EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */
/* XCHAL_EXCCAUSE_PRIVILEGED unhandled */
{ EXCCAUSE_ILLEGAL_INSTRUCTION, 0, do_illegal_instruction},
{ EXCCAUSE_SYSTEM_CALL, KRNL, fast_syscall_kernel },
{ EXCCAUSE_SYSTEM_CALL, USER, fast_syscall_user },
{ EXCCAUSE_SYSTEM_CALL, 0, system_call },
/* EXCCAUSE_INSTRUCTION_FETCH unhandled */
/* EXCCAUSE_LOAD_STORE_ERROR unhandled*/
{ EXCCAUSE_LEVEL1_INTERRUPT, 0, do_interrupt },
{ EXCCAUSE_ALLOCA, USER|KRNL, fast_alloca },
/* EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */
/* EXCCAUSE_PRIVILEGED unhandled */
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
#ifdef CONFIG_UNALIGNED_USER
{ XCHAL_EXCCAUSE_UNALIGNED, USER, fast_unaligned },
{ EXCCAUSE_UNALIGNED, USER, fast_unaligned },
#else
{ XCHAL_EXCCAUSE_UNALIGNED, 0, do_unaligned_user },
{ EXCCAUSE_UNALIGNED, 0, do_unaligned_user },
#endif
{ XCHAL_EXCCAUSE_UNALIGNED, KRNL, fast_unaligned },
{ EXCCAUSE_UNALIGNED, KRNL, fast_unaligned },
#endif
{ XCHAL_EXCCAUSE_ITLB_MISS, 0, do_page_fault },
{ XCHAL_EXCCAUSE_ITLB_MISS, USER|KRNL, fast_second_level_miss},
{ XCHAL_EXCCAUSE_ITLB_MULTIHIT, 0, do_multihit },
{ XCHAL_EXCCAUSE_ITLB_PRIVILEGE, 0, do_page_fault },
/* XCHAL_EXCCAUSE_SIZE_RESTRICTION unhandled */
{ XCHAL_EXCCAUSE_FETCH_CACHE_ATTRIBUTE, 0, do_page_fault },
{ XCHAL_EXCCAUSE_DTLB_MISS, USER|KRNL, fast_second_level_miss},
{ XCHAL_EXCCAUSE_DTLB_MISS, 0, do_page_fault },
{ XCHAL_EXCCAUSE_DTLB_MULTIHIT, 0, do_multihit },
{ XCHAL_EXCCAUSE_DTLB_PRIVILEGE, 0, do_page_fault },
/* XCHAL_EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */
{ XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited },
{ XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault },
{ XCHAL_EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault },
{ EXCCAUSE_ITLB_MISS, 0, do_page_fault },
{ EXCCAUSE_ITLB_MISS, USER|KRNL, fast_second_level_miss},
{ EXCCAUSE_ITLB_MULTIHIT, 0, do_multihit },
{ EXCCAUSE_ITLB_PRIVILEGE, 0, do_page_fault },
/* EXCCAUSE_SIZE_RESTRICTION unhandled */
{ EXCCAUSE_FETCH_CACHE_ATTRIBUTE, 0, do_page_fault },
{ EXCCAUSE_DTLB_MISS, USER|KRNL, fast_second_level_miss},
{ EXCCAUSE_DTLB_MISS, 0, do_page_fault },
{ EXCCAUSE_DTLB_MULTIHIT, 0, do_multihit },
{ EXCCAUSE_DTLB_PRIVILEGE, 0, do_page_fault },
/* EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited },
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault },
{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault },
/* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */
#if (XCHAL_CP_MASK & 1)
COPROCESSOR(0),
......
......@@ -53,6 +53,8 @@
#include <asm/thread_info.h>
#include <asm/processor.h>
#define WINDOW_VECTORS_SIZE 0x180
/*
* User exception vector. (Exceptions with PS.UM == 1, PS.EXCM == 0)
......@@ -210,7 +212,7 @@ ENTRY(_DoubleExceptionVector)
/* Check for kernel double exception (usually fatal). */
rsr a3, PS
_bbci.l a3, PS_UM_SHIFT, .Lksp
_bbci.l a3, PS_UM_BIT, .Lksp
/* Check if we are currently handling a window exception. */
/* Note: We don't need to indicate that we enter a critical section. */
......@@ -219,7 +221,7 @@ ENTRY(_DoubleExceptionVector)
movi a3, XCHAL_WINDOW_VECTORS_VADDR
_bltu a0, a3, .Lfixup
addi a3, a3, XSHAL_WINDOW_VECTORS_SIZE
addi a3, a3, WINDOW_VECTORS_SIZE
_bgeu a0, a3, .Lfixup
/* Window overflow/underflow exception. Get stack pointer. */
......@@ -245,7 +247,7 @@ ENTRY(_DoubleExceptionVector)
wsr a2, DEPC # save stack pointer temporarily
rsr a0, PS
extui a0, a0, XCHAL_PS_OWB_SHIFT, XCHAL_PS_OWB_BITS
extui a0, a0, PS_OWB_SHIFT, 4
wsr a0, WINDOWBASE
rsync
......@@ -312,8 +314,8 @@ ENTRY(_DoubleExceptionVector)
.Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */
rsr a3, EXCCAUSE
beqi a3, XCHAL_EXCCAUSE_ITLB_MISS, 1f
addi a3, a3, -XCHAL_EXCCAUSE_DTLB_MISS
beqi a3, EXCCAUSE_ITLB_MISS, 1f
addi a3, a3, -EXCCAUSE_DTLB_MISS
bnez a3, .Lunrecoverable
1: movi a3, fast_second_level_miss_double_kernel
jx a3
......
......@@ -16,20 +16,17 @@
#include <asm-generic/vmlinux.lds.h>
#define _NOCLANGUAGE
#undef __ASSEMBLER__
#include <xtensa/config/core.h>
#include <xtensa/config/system.h>
#include <asm/variant/core.h>
OUTPUT_ARCH(xtensa)
ENTRY(_start)
#if XCHAL_MEMORY_ORDER == XTHAL_BIGENDIAN
#ifdef __XTENSA_EB__
jiffies = jiffies_64 + 4;
#else
jiffies = jiffies_64;
#endif
#define KERNELOFFSET 0x1000
#define KERNELOFFSET 0xd0001000
/* Note: In the following macros, it would be nice to specify only the
vector name and section kind and construct "sym" and "section" using
......@@ -76,7 +73,7 @@ jiffies = jiffies_64;
SECTIONS
{
. = XCHAL_KSEG_CACHED_VADDR + KERNELOFFSET;
. = KERNELOFFSET;
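/* 0xd0001000 == XCHAL_KSEG_CACHED_VADDR (0xd0000000, see asm/page.h
 * below) + the old KERNELOFFSET of 0x1000, so the link address itself
 * is unchanged; only its spelling moves out of the removed core
 * headers. */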
/* .text section */
_text = .;
......@@ -160,7 +157,7 @@ SECTIONS
/* Initialization code and data: */
. = ALIGN(1<<XCHAL_MMU_MIN_PTE_PAGE_SIZE);
. = ALIGN(1 << 12);
__init_begin = .;
.init.text : {
_sinittext = .;
......@@ -224,32 +221,32 @@ SECTIONS
.dummy)
SECTION_VECTOR (_DebugInterruptVector_literal,
.DebugInterruptVector.literal,
XCHAL_INTLEVEL_VECTOR_VADDR(XCHAL_DEBUGLEVEL) - 4,
XCHAL_DEBUG_VECTOR_VADDR - 4,
SIZEOF(.WindowVectors.text),
.WindowVectors.text)
SECTION_VECTOR (_DebugInterruptVector_text,
.DebugInterruptVector.text,
XCHAL_INTLEVEL_VECTOR_VADDR(XCHAL_DEBUGLEVEL),
XCHAL_DEBUG_VECTOR_VADDR,
4,
.DebugInterruptVector.literal)
SECTION_VECTOR (_KernelExceptionVector_literal,
.KernelExceptionVector.literal,
XCHAL_KERNELEXC_VECTOR_VADDR - 4,
XCHAL_KERNEL_VECTOR_VADDR - 4,
SIZEOF(.DebugInterruptVector.text),
.DebugInterruptVector.text)
SECTION_VECTOR (_KernelExceptionVector_text,
.KernelExceptionVector.text,
XCHAL_KERNELEXC_VECTOR_VADDR,
XCHAL_KERNEL_VECTOR_VADDR,
4,
.KernelExceptionVector.literal)
SECTION_VECTOR (_UserExceptionVector_literal,
.UserExceptionVector.literal,
XCHAL_USEREXC_VECTOR_VADDR - 4,
XCHAL_USER_VECTOR_VADDR - 4,
SIZEOF(.KernelExceptionVector.text),
.KernelExceptionVector.text)
SECTION_VECTOR (_UserExceptionVector_text,
.UserExceptionVector.text,
XCHAL_USEREXC_VECTOR_VADDR,
XCHAL_USER_VECTOR_VADDR,
4,
.UserExceptionVector.literal)
SECTION_VECTOR (_DoubleExceptionVector_literal,
......@@ -264,7 +261,7 @@ SECTIONS
.DoubleExceptionVector.literal)
. = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
. = ALIGN(1<<XCHAL_MMU_MIN_PTE_PAGE_SIZE);
. = ALIGN(1 << 12);
__init_end = .;
......
......@@ -16,8 +16,7 @@
#include <asm/errno.h>
#include <linux/linkage.h>
#define _ASMLANGUAGE
#include <xtensa/config/core.h>
#include <asm/variant/core.h>
/*
* computes a partial checksum, e.g. for TCP/UDP fragments
......
......@@ -9,7 +9,7 @@
* Copyright (C) 2002 - 2005 Tensilica Inc.
*/
#include <xtensa/coreasm.h>
#include <asm/variant/core.h>
.macro src_b r, w0, w1
#ifdef __XTENSA_EB__
......
......@@ -11,7 +11,7 @@
* Copyright (C) 2002 Tensilica Inc.
*/
#include <xtensa/coreasm.h>
#include <asm/variant/core.h>
/*
* void *memset(void *dst, int c, size_t length)
......
......@@ -11,7 +11,7 @@
* Copyright (C) 2002 Tensilica Inc.
*/
#include <xtensa/coreasm.h>
#include <asm/variant/core.h>
#include <linux/errno.h>
/* Load or store instructions that may cause exceptions use the EX macro. */
......
......@@ -11,7 +11,7 @@
* Copyright (C) 2002 Tensilica Inc.
*/
#include <xtensa/coreasm.h>
#include <asm/variant/core.h>
/* Load or store instructions that may cause exceptions use the EX macro. */
......
......@@ -53,7 +53,7 @@
* a11/ original length
*/
#include <xtensa/coreasm.h>
#include <asm/variant/core.h>
#ifdef __XTENSA_EB__
#define ALIGN(R, W0, W1) src R, W0, W1
......
......@@ -21,7 +21,7 @@
#include <asm/system.h>
#include <asm/pgalloc.h>
unsigned long asid_cache = ASID_FIRST_VERSION;
unsigned long asid_cache = ASID_USER_FIRST;
void bad_page_fault(struct pt_regs*, unsigned long, int);
/*
......@@ -58,10 +58,10 @@ void do_page_fault(struct pt_regs *regs)
return;
}
is_write = (exccause == XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
is_exec = (exccause == XCHAL_EXCCAUSE_ITLB_PRIVILEGE ||
exccause == XCHAL_EXCCAUSE_ITLB_MISS ||
exccause == XCHAL_EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
is_exec = (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
exccause == EXCCAUSE_ITLB_MISS ||
exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
#if 0
printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid,
......
......@@ -141,8 +141,8 @@ void __init bootmem_init(void)
if (min_low_pfn > max_pfn)
panic("No memory found!\n");
max_low_pfn = max_pfn < MAX_LOW_MEMORY >> PAGE_SHIFT ?
max_pfn : MAX_LOW_MEMORY >> PAGE_SHIFT;
max_low_pfn = max_pfn < MAX_MEM_PFN >> PAGE_SHIFT ?
max_pfn : MAX_MEM_PFN >> PAGE_SHIFT;
/* Find an area to use for the bootmem bitmap. */
......@@ -215,7 +215,7 @@ void __init init_mmu (void)
/* Set rasid register to a known value. */
set_rasid_register (ASID_ALL_RESERVED);
set_rasid_register (ASID_USER_FIRST);
/* Set PTEVADDR special register to the start of the page
* table, which is in kernel mappable space (ie. not
......
......@@ -19,9 +19,8 @@
#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <xtensa/cacheasm.h>
#include <xtensa/cacheattrasm.h>
#include <asm/asmmacro.h>
#include <asm/cacheasm.h>
/* clear_page (page) */
......@@ -74,104 +73,66 @@ ENTRY(copy_page)
retw
/*
* void __flush_invalidate_cache_all(void)
* void __invalidate_icache_page(ulong start)
*/
ENTRY(__flush_invalidate_cache_all)
ENTRY(__invalidate_icache_page)
entry sp, 16
dcache_writeback_inv_all a2, a3
icache_invalidate_all a2, a3
retw
/*
* void __invalidate_icache_all(void)
*/
___invalidate_icache_page a2 a3
isync
ENTRY(__invalidate_icache_all)
entry sp, 16
icache_invalidate_all a2, a3
retw
/*
* void __flush_invalidate_dcache_all(void)
* void __invalidate_dcache_page(ulong start)
*/
ENTRY(__flush_invalidate_dcache_all)
ENTRY(__invalidate_dcache_page)
entry sp, 16
dcache_writeback_inv_all a2, a3
retw
/*
* void __flush_invalidate_cache_range(ulong start, ulong size)
*/
___invalidate_dcache_page a2 a3
dsync
ENTRY(__flush_invalidate_cache_range)
entry sp, 16
mov a4, a2
mov a5, a3
dcache_writeback_inv_region a4, a5, a6
icache_invalidate_region a2, a3, a4
retw
/*
* void __invalidate_icache_page(ulong start)
* void __flush_invalidate_dcache_page(ulong start)
*/
ENTRY(__invalidate_icache_page)
ENTRY(__flush_invalidate_dcache_page)
entry sp, 16
movi a3, PAGE_SIZE
icache_invalidate_region a2, a3, a4
retw
/*
* void __invalidate_dcache_page(ulong start)
*/
___flush_invalidate_dcache_page a2 a3
ENTRY(__invalidate_dcache_page)
entry sp, 16
movi a3, PAGE_SIZE
dcache_invalidate_region a2, a3, a4
dsync
retw
/*
* void __invalidate_icache_range(ulong start, ulong size)
* void __flush_dcache_page(ulong start)
*/
ENTRY(__invalidate_icache_range)
ENTRY(__flush_dcache_page)
entry sp, 16
icache_invalidate_region a2, a3, a4
retw
/*
* void __invalidate_dcache_range(ulong start, ulong size)
*/
___flush_dcache_page a2 a3
ENTRY(__invalidate_dcache_range)
entry sp, 16
dcache_invalidate_region a2, a3, a4
dsync
retw
/*
* void __flush_dcache_page(ulong start)
*/
ENTRY(__flush_dcache_page)
entry sp, 16
movi a3, PAGE_SIZE
dcache_writeback_region a2, a3, a4
retw
/*
* void __flush_invalidate_dcache_page(ulong start)
* void __invalidate_icache_range(ulong start, ulong size)
*/
ENTRY(__flush_invalidate_dcache_page)
ENTRY(__invalidate_icache_range)
entry sp, 16
movi a3, PAGE_SIZE
dcache_writeback_inv_region a2, a3, a4
___invalidate_icache_range a2 a3 a4
isync
retw
/*
......@@ -180,195 +141,69 @@ ENTRY(__flush_invalidate_dcache_page)
ENTRY(__flush_invalidate_dcache_range)
entry sp, 16
dcache_writeback_inv_region a2, a3, a4
retw
/*
* void __invalidate_dcache_all(void)
*/
___flush_invalidate_dcache_range a2 a3 a4
dsync
ENTRY(__invalidate_dcache_all)
entry sp, 16
dcache_invalidate_all a2, a3
retw
/*
* void __flush_invalidate_dcache_page_phys(ulong start)
* void _flush_dcache_range(ulong start, ulong size)
*/
ENTRY(__flush_invalidate_dcache_page_phys)
ENTRY(__flush_dcache_range)
entry sp, 16
movi a3, XCHAL_DCACHE_SIZE
movi a4, PAGE_MASK | 1
addi a2, a2, 1
1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
ldct a6, a3
___flush_dcache_range a2 a3 a4
dsync
and a6, a6, a4
beq a6, a2, 2f
bgeui a3, 2, 1b
retw
2: diwbi a3, 0
bgeui a3, 2, 1b
retw
ENTRY(check_dcache_low0)
entry sp, 16
movi a3, XCHAL_DCACHE_SIZE / 4
movi a4, PAGE_MASK | 1
addi a2, a2, 1
1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
ldct a6, a3
dsync
and a6, a6, a4
beq a6, a2, 2f
bgeui a3, 2, 1b
retw
2: j 2b
ENTRY(check_dcache_high0)
entry sp, 16
movi a5, XCHAL_DCACHE_SIZE / 4
movi a3, XCHAL_DCACHE_SIZE / 2
movi a4, PAGE_MASK | 1
addi a2, a2, 1
1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
addi a5, a5, -XCHAL_DCACHE_LINESIZE
ldct a6, a3
dsync
and a6, a6, a4
beq a6, a2, 2f
bgeui a5, 2, 1b
retw
2: j 2b
/*
* void _invalidate_dcache_range(ulong start, ulong size)
*/
ENTRY(check_dcache_low1)
ENTRY(__invalidate_dcache_range)
entry sp, 16
movi a5, XCHAL_DCACHE_SIZE / 4
movi a3, XCHAL_DCACHE_SIZE * 3 / 4
movi a4, PAGE_MASK | 1
addi a2, a2, 1
___invalidate_dcache_range a2 a3 a4
1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
addi a5, a5, -XCHAL_DCACHE_LINESIZE
ldct a6, a3
dsync
and a6, a6, a4
beq a6, a2, 2f
bgeui a5, 2, 1b
retw
2: j 2b
/*
* void _invalidate_icache_all(void)
*/
ENTRY(check_dcache_high1)
ENTRY(__invalidate_icache_all)
entry sp, 16
movi a5, XCHAL_DCACHE_SIZE / 4
movi a3, XCHAL_DCACHE_SIZE
movi a4, PAGE_MASK | 1
addi a2, a2, 1
1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
addi a5, a5, -XCHAL_DCACHE_LINESIZE
___invalidate_icache_all a2 a3
isync
ldct a6, a3
dsync
and a6, a6, a4
beq a6, a2, 2f
bgeui a5, 2, 1b
retw
2: j 2b
/*
* void __invalidate_icache_page_phys(ulong start)
* void _flush_invalidate_dcache_all(void)
*/
ENTRY(__invalidate_icache_page_phys)
ENTRY(__flush_invalidate_dcache_all)
entry sp, 16
movi a3, XCHAL_ICACHE_SIZE
movi a4, PAGE_MASK | 1
addi a2, a2, 1
1: addi a3, a3, -XCHAL_ICACHE_LINESIZE
lict a6, a3
isync
and a6, a6, a4
beq a6, a2, 2f
bgeui a3, 2, 1b
retw
___flush_invalidate_dcache_all a2 a3
dsync
2: iii a3, 0
bgeui a3, 2, 1b
retw
/*
* void _invalidate_dcache_all(void)
*/
#if 0
movi a3, XCHAL_DCACHE_WAYS - 1
movi a4, PAGE_SIZE
1: mov a5, a2
add a6, a2, a4
2: diwbi a5, 0
diwbi a5, XCHAL_DCACHE_LINESIZE
diwbi a5, XCHAL_DCACHE_LINESIZE * 2
diwbi a5, XCHAL_DCACHE_LINESIZE * 3
addi a5, a5, XCHAL_DCACHE_LINESIZE * 4
blt a5, a6, 2b
addi a3, a3, -1
addi a2, a2, XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS
bgez a3, 1b
retw
ENTRY(__invalidate_icache_page_index)
ENTRY(__invalidate_dcache_all)
entry sp, 16
movi a3, XCHAL_ICACHE_WAYS - 1
movi a4, PAGE_SIZE
1: mov a5, a2
add a6, a2, a4
2: iii a5, 0
iii a5, XCHAL_ICACHE_LINESIZE
iii a5, XCHAL_ICACHE_LINESIZE * 2
iii a5, XCHAL_ICACHE_LINESIZE * 3
addi a5, a5, XCHAL_ICACHE_LINESIZE * 4
blt a5, a6, 2b
addi a3, a3, -1
addi a2, a2, XCHAL_ICACHE_SIZE / XCHAL_ICACHE_WAYS
bgez a3, 2b
___invalidate_dcache_all a2 a3
dsync
retw
#endif
......@@ -25,11 +25,15 @@
#include <asm/uaccess.h>
#include <asm/irq.h>
#include <xtensa/simcall.h>
#include <asm/platform/simcall.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#ifdef SERIAL_INLINE
#define _INLINE_ inline
#endif
#define SERIAL_MAX_NUM_LINES 1
#define SERIAL_TIMER_VALUE (20 * HZ)
......@@ -191,7 +195,7 @@ static int rs_read_proc(char *page, char **start, off_t off, int count,
}
static const struct tty_operations serial_ops = {
static struct tty_operations serial_ops = {
.open = rs_open,
.close = rs_close,
.write = rs_write,
......
......@@ -34,7 +34,7 @@
#include <linux/timer.h>
#include <linux/platform_device.h>
#include <xtensa/simcall.h>
#include <asm/platform/simcall.h>
#define DRIVER_NAME "iss-netdev"
#define ETH_MAX_PACKET 1500
......
/*
* include/asm-xtensa/asmmacro.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005 Tensilica Inc.
*/
#ifndef _XTENSA_ASMMACRO_H
#define _XTENSA_ASMMACRO_H
#include <asm/variant/core.h>
/*
* Some little helpers for loops. Use zero-overhead-loops
* where applicable and if supported by the processor.
*
* __loopi ar, at, size, inc
* ar register initialized with the start address
* at scratch register used by macro
* size size immediate value
* inc increment
*
* __loops ar, as, at, inc_log2[, mask_log2][, cond][, ncond]
* ar register initialized with the start address
* as register initialized with the size
* at scratch register used by macro
* inc_log2 increment [in log2]
* mask_log2 mask [in log2]
* cond true condition (used in loop'cond')
* ncond false condition (used in b'ncond')
*
* __loop as
* restart loop. 'as' register must not have been modified!
*
* __endla ar, as, incr
* ar start address (modified)
* as scratch register used by macro
* inc increment
*/
/*
* loop for given size as immediate
*/
.macro __loopi ar, at, size, incr
#if XCHAL_HAVE_LOOPS
movi \at, ((\size + \incr - 1) / (\incr))
loop \at, 99f
#else
addi \at, \ar, \size
98:
#endif
.endm
/*
* loop for given size in register
*/
.macro __loops ar, as, at, incr_log2, mask_log2, cond, ncond
#if XCHAL_HAVE_LOOPS
.ifgt \incr_log2 - 1
addi \at, \as, (1 << \incr_log2) - 1
.ifnc \mask_log2,
extui \at, \at, \incr_log2, \mask_log2
.else
srli \at, \at, \incr_log2
.endif
.endif
loop\cond \at, 99f
#else
.ifnc \mask_log2,
extui \at, \as, \incr_log2, \mask_log2
.else
.ifnc \ncond,
srli \at, \as, \incr_log2
.endif
.endif
.ifnc \ncond,
b\ncond \at, 99f
.endif
.ifnc \mask_log2,
slli \at, \at, \incr_log2
add \at, \ar, \at
.else
add \at, \ar, \as
.endif
#endif
98:
.endm
/*
* loop from ar to ax
*/
.macro __loopt ar, as, at, incr_log2
#if XCHAL_HAVE_LOOPS
sub \at, \as, \ar
.ifgt \incr_log2 - 1
addi \at, \at, (1 << \incr_log2) - 1
srli \at, \at, \incr_log2
.endif
loop \at, 99f
#else
98:
#endif
.endm
/*
* restart loop. registers must be unchanged
*/
.macro __loop as
#if XCHAL_HAVE_LOOPS
loop \as, 99f
#else
98:
#endif
.endm
/*
* end of loop with no increment of the address.
*/
.macro __endl ar, as
#if !XCHAL_HAVE_LOOPS
bltu \ar, \as, 98b
#endif
99:
.endm
/*
* end of loop with increment of the address.
*/
.macro __endla ar, as, incr
addi \ar, \ar, \incr
__endl \ar \as
.endm
#endif /* _XTENSA_ASMMACRO_H */
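A usage sketch in the style of the clear_page code that consumes these macros (assuming a2 holds a page-aligned start address):

	movi	a3, 0
	__loopi	a2, a4, PAGE_SIZE, 16
	s32i	a3, a2, 0		# zero 16 bytes per iteration
	s32i	a3, a2, 4
	s32i	a3, a2, 8
	s32i	a3, a2, 12
	__endla	a2, a4, 16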
......@@ -11,7 +11,6 @@
#ifndef _XTENSA_BYTEORDER_H
#define _XTENSA_BYTEORDER_H
#include <asm/processor.h>
#include <asm/types.h>
static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
......
......@@ -4,7 +4,6 @@
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
* 2 of the License, or (at your option) any later version.
*
* (C) 2001 - 2005 Tensilica Inc.
*/
......@@ -12,21 +11,14 @@
#ifndef _XTENSA_CACHE_H
#define _XTENSA_CACHE_H
#include <xtensa/config/core.h>
#include <asm/variant/core.h>
#if XCHAL_ICACHE_SIZE > 0
# if (XCHAL_ICACHE_SIZE % (XCHAL_ICACHE_LINESIZE*XCHAL_ICACHE_WAYS*4)) != 0
# error cache configuration outside expected/supported range!
# endif
#endif
#define L1_CACHE_SHIFT XCHAL_DCACHE_LINEWIDTH
#define L1_CACHE_BYTES XCHAL_DCACHE_LINESIZE
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#if XCHAL_DCACHE_SIZE > 0
# if (XCHAL_DCACHE_SIZE % (XCHAL_DCACHE_LINESIZE*XCHAL_DCACHE_WAYS*4)) != 0
# error cache configuration outside expected/supported range!
# endif
#endif
#define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE/XCHAL_DCACHE_WAYS)
#define ICACHE_WAY_SIZE (XCHAL_ICACHE_SIZE/XCHAL_ICACHE_WAYS)
#define L1_CACHE_SHIFT XCHAL_CACHE_LINEWIDTH_MAX
#define L1_CACHE_BYTES XCHAL_CACHE_LINESIZE_MAX
#endif /* _XTENSA_CACHE_H */
/*
* include/asm-xtensa/cacheasm.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2006 Tensilica Inc.
*/
#include <asm/cache.h>
#include <asm/asmmacro.h>
#include <linux/stringify.h>
/*
* Define cache functions as macros here so that they can be used
* by the kernel and boot loader. We should consider moving them to a
* library that can be linked by both.
*
* Locking
*
* ___unlock_dcache_all
* ___unlock_icache_all
*
* Flushing and invalidating
*
* ___flush_invalidate_dcache_{all|range|page}
* ___flush_dcache_{all|range|page}
* ___invalidate_dcache_{all|range|page}
* ___invalidate_icache_{all|range|page}
*
*/
.macro __loop_cache_all ar at insn size line_width
movi \ar, 0
__loopi \ar, \at, \size, (4 << (\line_width))
\insn \ar, 0 << (\line_width)
\insn \ar, 1 << (\line_width)
\insn \ar, 2 << (\line_width)
\insn \ar, 3 << (\line_width)
__endla \ar, \at, 4 << (\line_width)
.endm
.macro __loop_cache_range ar as at insn line_width
extui \at, \ar, 0, \line_width
add \as, \as, \at
__loops \ar, \as, \at, \line_width
\insn \ar, 0
__endla \ar, \at, (1 << (\line_width))
.endm
.macro __loop_cache_page ar at insn line_width
__loopi \ar, \at, PAGE_SIZE, 4 << (\line_width)
\insn \ar, 0 << (\line_width)
\insn \ar, 1 << (\line_width)
\insn \ar, 2 << (\line_width)
\insn \ar, 3 << (\line_width)
__endla \ar, \at, 4 << (\line_width)
.endm
#if XCHAL_DCACHE_LINE_LOCKABLE
.macro ___unlock_dcache_all ar at
__loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
.endm
#endif
#if XCHAL_ICACHE_LINE_LOCKABLE
.macro ___unlock_icache_all ar at
__loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE XCHAL_ICACHE_LINEWIDTH
.endm
#endif
.macro ___flush_invalidate_dcache_all ar at
__loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___flush_dcache_all ar at
__loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___invalidate_dcache_all ar at
__loop_cache_all \ar \at dii __stringify(DCACHE_WAY_SIZE) \
XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___invalidate_icache_all ar at
__loop_cache_all \ar \at iii __stringify(ICACHE_WAY_SIZE) \
XCHAL_ICACHE_LINEWIDTH
.endm
.macro ___flush_invalidate_dcache_range ar as at
__loop_cache_range \ar \as \at dhwbi XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___flush_dcache_range ar as at
__loop_cache_range \ar \as \at dhwb XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___invalidate_dcache_range ar as at
__loop_cache_range \ar \as \at dhi XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___invalidate_icache_range ar as at
__loop_cache_range \ar \as \at ihi XCHAL_ICACHE_LINEWIDTH
.endm
.macro ___flush_invalidate_dcache_page ar as
__loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___flush_dcache_page ar as
__loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___invalidate_dcache_page ar as
__loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___invalidate_icache_page ar as
__loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH
.endm
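These macros take space-separated register names (no commas), matching their use in head.S and misc.S above, e.g.:

	___flush_invalidate_dcache_range a2 a3 a4	# a2=start, a3=size, a4=scratch
	dsync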
......@@ -5,7 +5,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* (C) 2001 - 2005 Tensilica Inc.
* (C) 2001 - 2006 Tensilica Inc.
*/
#ifndef _XTENSA_CACHEFLUSH_H
......
......@@ -12,7 +12,7 @@
#define _XTENSA_CHECKSUM_H
#include <linux/in6.h>
#include <xtensa/config/core.h>
#include <asm/variant/core.h>
/*
* computes the checksum of a memory block at buff, length len,
......
......@@ -11,7 +11,16 @@
#ifndef _XTENSA_COPROCESSOR_H
#define _XTENSA_COPROCESSOR_H
#include <xtensa/config/core.h>
#include <asm/variant/core.h>
#include <asm/variant/tie.h>
#if !XCHAL_HAVE_CP
#define XTENSA_CP_EXTRA_OFFSET 0
#define XTENSA_CP_EXTRA_ALIGN 1 /* must be a power of 2 */
#define XTENSA_CP_EXTRA_SIZE 0
#else
#define XTOFS(last_start,last_size,align) \
((last_start+last_size+align-1) & -align)
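/* e.g. XTOFS(0, 4, 4) == 4 and XTOFS(4, 2, 4) == 8: the next
 * 'align'-aligned offset past last_start + last_size. */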
......@@ -67,4 +76,6 @@ extern void save_coprocessor_registers(void*, int);
# endif
#endif
#endif
#endif /* _XTENSA_COPROCESSOR_H */
......@@ -12,7 +12,6 @@
#define _XTENSA_DMA_H
#include <asm/io.h> /* need byte IO */
#include <xtensa/config/core.h>
/*
* This is only to be defined if we have PC-like DMA.
......@@ -44,7 +43,9 @@
* enters another area, and virt_to_phys() may not return
* the value desired).
*/
#define MAX_DMA_ADDRESS (PAGE_OFFSET + XCHAL_KSEG_CACHED_SIZE - 1)
#define MAX_DMA_ADDRESS (PAGE_OFFSET + XCHAL_KIO_SIZE - 1)
/* Reserve and release a DMA channel */
extern int request_dma(unsigned int dmanr, const char * device_id);
......
......@@ -13,9 +13,8 @@
#ifndef _XTENSA_ELF_H
#define _XTENSA_ELF_H
#include <asm/variant/core.h>
#include <asm/ptrace.h>
#include <asm/coprocessor.h>
#include <xtensa/config/core.h>
/* Xtensa processor ELF architecture-magic number */
......@@ -118,11 +117,15 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
* using memcpy(). But we do allow space for such alignment,
* to allow optimizations of layout and copying.
*/
#if 0
#define TOTAL_FPREGS_SIZE \
(4 + XTENSA_CPE_LTABLE_SIZE + XTENSA_CP_EXTRA_SIZE)
#define ELF_NFPREG \
((TOTAL_FPREGS_SIZE + sizeof(elf_fpreg_t) - 1) / sizeof(elf_fpreg_t))
#else
#define TOTAL_FPREGS_SIZE 0
#define ELF_NFPREG 0
#endif
typedef unsigned int elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
......
/*
* include/asm-xtensa/fixmap.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_FIXMAP_H
#define _XTENSA_FIXMAP_H
#include <asm/processor.h>
#ifdef CONFIG_MMU
/*
* Here we define all the compile-time virtual addresses.
*/
#if XCHAL_SEG_MAPPABLE_VADDR != 0
# error "Current port requires virtual user space starting at 0"
#endif
#if XCHAL_SEG_MAPPABLE_SIZE < 0x80000000
# error "Current port requires at least 0x8000000 bytes for user space"
#endif
/* Verify instruction/data ram/rom and xlmi don't overlay vmalloc space. */
#define __IN_VMALLOC(addr) \
(((addr) >= VMALLOC_START) && ((addr) < VMALLOC_END))
#define __SPAN_VMALLOC(start,end) \
(((start) < VMALLOC_START) && ((end) >= VMALLOC_END))
#define INSIDE_VMALLOC(start,end) \
(__IN_VMALLOC((start)) || __IN_VMALLOC(end) || __SPAN_VMALLOC((start),(end)))
#if XCHAL_NUM_INSTROM
# if XCHAL_NUM_INSTROM == 1
# if INSIDE_VMALLOC(XCHAL_INSTROM0_VADDR,XCHAL_INSTROM0_VADDR+XCHAL_INSTROM0_SIZE)
# error vmalloc range conflicts with instrom0
# endif
# endif
# if XCHAL_NUM_INSTROM == 2
# if INSIDE_VMALLOC(XCHAL_INSTROM1_VADDR,XCHAL_INSTROM1_VADDR+XCHAL_INSTROM1_SIZE)
# error vmalloc range conflicts with instrom1
# endif
# endif
#endif
#if XCHAL_NUM_INSTRAM
# if XCHAL_NUM_INSTRAM == 1
# if INSIDE_VMALLOC(XCHAL_INSTRAM0_VADDR,XCHAL_INSTRAM0_VADDR+XCHAL_INSTRAM0_SIZE)
# error vmalloc range conflicts with instram0
# endif
# endif
# if XCHAL_NUM_INSTRAM == 2
# if INSIDE_VMALLOC(XCHAL_INSTRAM1_VADDR,XCHAL_INSTRAM1_VADDR+XCHAL_INSTRAM1_SIZE)
# error vmalloc range conflicts with instram1
# endif
# endif
#endif
#if XCHAL_NUM_DATAROM
# if XCHAL_NUM_DATAROM == 1
# if INSIDE_VMALLOC(XCHAL_DATAROM0_VADDR,XCHAL_DATAROM0_VADDR+XCHAL_DATAROM0_SIZE)
# error vmalloc range conflicts with datarom0
# endif
# endif
# if XCHAL_NUM_DATAROM == 2
# if INSIDE_VMALLOC(XCHAL_DATAROM1_VADDR,XCHAL_DATAROM1_VADDR+XCHAL_DATAROM1_SIZE)
# error vmalloc range conflicts with datarom1
# endif
# endif
#endif
#if XCHAL_NUM_DATARAM
# if XCHAL_NUM_DATARAM == 1
# if INSIDE_VMALLOC(XCHAL_DATARAM0_VADDR,XCHAL_DATARAM0_VADDR+XCHAL_DATARAM0_SIZE)
# error vmalloc range conflicts with dataram0
# endif
# endif
# if XCHAL_NUM_DATARAM == 2
# if INSIDE_VMALLOC(XCHAL_DATARAM1_VADDR,XCHAL_DATARAM1_VADDR+XCHAL_DATARAM1_SIZE)
# error vmalloc range conflicts with dataram1
# endif
# endif
#endif
#if XCHAL_NUM_XLMI
# if XCHAL_NUM_XLMI == 1
# if INSIDE_VMALLOC(XCHAL_XLMI0_VADDR,XCHAL_XLMI0_VADDR+XCHAL_XLMI0_SIZE)
# error vmalloc range conflicts with xlmi0
# endif
# endif
# if XCHAL_NUM_XLMI == 2
# if INSIDE_VMALLOC(XCHAL_XLMI1_VADDR,XCHAL_XLMI1_VADDR+XCHAL_XLMI1_SIZE)
# error vmalloc range conflicts with xlmi1
# endif
# endif
#endif
#if (XCHAL_NUM_INSTROM > 2) || \
(XCHAL_NUM_INSTRAM > 2) || \
(XCHAL_NUM_DATARAM > 2) || \
(XCHAL_NUM_DATAROM > 2) || \
(XCHAL_NUM_XLMI > 2)
# error Insufficient checks on vmalloc above for more than 2 devices
#endif
/*
* USER_VM_SIZE does not necessarily equal TASK_SIZE. We bumped
* TASK_SIZE down to 0x4000000 to simplify the handling of windowed
* call instructions (currently limited to a range of 1 GByte). User
* tasks may very well reclaim the VM space from 0x40000000 to
* 0x7fffffff in the future, so we do not want the kernel becoming
* accustomed to having any of its stuff (e.g., page tables) in this
* region. This VM region is no-man's land for now.
*/
#define USER_VM_START XCHAL_SEG_MAPPABLE_VADDR
#define USER_VM_SIZE 0x80000000
/* Size of page table: */
#define PGTABLE_SIZE_BITS (32 - XCHAL_MMU_MIN_PTE_PAGE_SIZE + 2)
#define PGTABLE_SIZE (1L << PGTABLE_SIZE_BITS)
/* All kernel-mappable space: */
#define KERNEL_ALLMAP_START (USER_VM_START + USER_VM_SIZE)
#define KERNEL_ALLMAP_SIZE (XCHAL_SEG_MAPPABLE_SIZE - KERNEL_ALLMAP_START)
/* Carve out page table at start of kernel-mappable area: */
#if KERNEL_ALLMAP_SIZE < PGTABLE_SIZE
#error "Gimme some space for page table!"
#endif
#define PGTABLE_START KERNEL_ALLMAP_START
/* Remaining kernel-mappable space: */
#define KERNEL_MAPPED_START (KERNEL_ALLMAP_START + PGTABLE_SIZE)
#define KERNEL_MAPPED_SIZE (KERNEL_ALLMAP_SIZE - PGTABLE_SIZE)
#if KERNEL_MAPPED_SIZE < 0x01000000 /* 16 MB is arbitrary for now */
# error "Shouldn't the kernel have at least *some* mappable space?"
#endif
#define MAX_LOW_MEMORY XCHAL_KSEG_CACHED_SIZE
#endif
/*
* Some constants used elsewhere, but perhaps only in Xtensa header
* files, so maybe we can get rid of some and access compile-time HAL
* directly...
*
* Note: We assume that system RAM is located at the very start of the
* kernel segments !!
*/
#define KERNEL_VM_LOW XCHAL_KSEG_CACHED_VADDR
#define KERNEL_VM_HIGH XCHAL_KSEG_BYPASS_VADDR
#define KERNEL_SPACE XCHAL_KSEG_CACHED_VADDR
/*
* Returns the physical/virtual addresses of the kernel space
* (works with the cached kernel segment only, which is the
* one normally used for kernel operation).
*/
/* PHYSICAL BYPASS CACHED
*
* bypass vaddr bypass paddr * cached vaddr
* cached vaddr cached paddr bypass vaddr *
* bypass paddr * bypass vaddr cached vaddr
* cached paddr * bypass vaddr cached vaddr
* other * * *
*/
#define PHYSADDR(a) \
(((unsigned)(a) >= XCHAL_KSEG_BYPASS_VADDR \
&& (unsigned)(a) < XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_BYPASS_SIZE) ? \
(unsigned)(a) - XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_BYPASS_PADDR : \
((unsigned)(a) >= XCHAL_KSEG_CACHED_VADDR \
&& (unsigned)(a) < XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_CACHED_SIZE) ? \
(unsigned)(a) - XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_CACHED_PADDR : \
(unsigned)(a))
#define BYPASS_ADDR(a) \
(((unsigned)(a) >= XCHAL_KSEG_BYPASS_PADDR \
&& (unsigned)(a) < XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_BYPASS_SIZE) ? \
(unsigned)(a) - XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_BYPASS_VADDR : \
((unsigned)(a) >= XCHAL_KSEG_CACHED_PADDR \
&& (unsigned)(a) < XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_CACHED_SIZE) ? \
(unsigned)(a) - XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_BYPASS_VADDR : \
((unsigned)(a) >= XCHAL_KSEG_CACHED_VADDR \
&& (unsigned)(a) < XCHAL_KSEG_CACHED_VADDR+XCHAL_KSEG_CACHED_SIZE)? \
(unsigned)(a) - XCHAL_KSEG_CACHED_VADDR+XCHAL_KSEG_BYPASS_VADDR: \
(unsigned)(a))
#define CACHED_ADDR(a) \
(((unsigned)(a) >= XCHAL_KSEG_BYPASS_PADDR \
&& (unsigned)(a) < XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_BYPASS_SIZE) ? \
(unsigned)(a) - XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_CACHED_VADDR : \
((unsigned)(a) >= XCHAL_KSEG_CACHED_PADDR \
&& (unsigned)(a) < XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_CACHED_SIZE) ? \
(unsigned)(a) - XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_CACHED_VADDR : \
((unsigned)(a) >= XCHAL_KSEG_BYPASS_VADDR \
&& (unsigned)(a) < XCHAL_KSEG_BYPASS_VADDR+XCHAL_KSEG_BYPASS_SIZE) ? \
(unsigned)(a) - XCHAL_KSEG_BYPASS_VADDR+XCHAL_KSEG_CACHED_VADDR : \
(unsigned)(a))
#define PHYSADDR_IO(a) \
(((unsigned)(a) >= XCHAL_KIO_BYPASS_VADDR \
&& (unsigned)(a) < XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_BYPASS_SIZE) ? \
(unsigned)(a) - XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_BYPASS_PADDR : \
((unsigned)(a) >= XCHAL_KIO_CACHED_VADDR \
&& (unsigned)(a) < XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_CACHED_SIZE) ? \
(unsigned)(a) - XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_CACHED_PADDR : \
(unsigned)(a))
#define BYPASS_ADDR_IO(a) \
(((unsigned)(a) >= XCHAL_KIO_BYPASS_PADDR \
&& (unsigned)(a) < XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_BYPASS_SIZE) ? \
(unsigned)(a) - XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_BYPASS_VADDR : \
((unsigned)(a) >= XCHAL_KIO_CACHED_PADDR \
&& (unsigned)(a) < XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_CACHED_SIZE) ? \
(unsigned)(a) - XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_BYPASS_VADDR : \
((unsigned)(a) >= XCHAL_KIO_CACHED_VADDR \
&& (unsigned)(a) < XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_CACHED_SIZE) ? \
(unsigned)(a) - XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_BYPASS_VADDR : \
(unsigned)(a))
#define CACHED_ADDR_IO(a) \
(((unsigned)(a) >= XCHAL_KIO_BYPASS_PADDR \
&& (unsigned)(a) < XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_BYPASS_SIZE) ? \
(unsigned)(a) - XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_CACHED_VADDR : \
((unsigned)(a) >= XCHAL_KIO_CACHED_PADDR \
&& (unsigned)(a) < XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_CACHED_SIZE) ? \
(unsigned)(a) - XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_CACHED_VADDR : \
((unsigned)(a) >= XCHAL_KIO_BYPASS_VADDR \
&& (unsigned)(a) < XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_BYPASS_SIZE) ? \
(unsigned)(a) - XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_CACHED_VADDR : \
(unsigned)(a))
#endif /* _XTENSA_ADDRSPACE_H */
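A worked example of the address-translation macros above, assuming the usual KSEG layout (cached vaddr 0xd0000000, bypass vaddr 0xd8000000, both mapping physical address 0):

	PHYSADDR(0xd0001000)    == 0x00001000
	BYPASS_ADDR(0xd0001000) == 0xd8001000
	CACHED_ADDR(0x00001000) == 0xd0001000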
/*
* linux/include/asm-xtensa/io.h
* include/asm-xtensa/io.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
......@@ -15,10 +15,11 @@
#include <asm/byteorder.h>
#include <linux/types.h>
#include <asm/fixmap.h>
#define _IO_BASE 0
#define XCHAL_KIO_CACHED_VADDR 0xf0000000
#define XCHAL_KIO_BYPASS_VADDR 0xf8000000
#define XCHAL_KIO_PADDR 0xf0000000
#define XCHAL_KIO_SIZE 0x08000000
/*
* swap functions to change byte order from little-endian to big-endian and
......@@ -42,40 +43,43 @@ static inline unsigned int _swapl (unsigned int v)
static inline unsigned long virt_to_phys(volatile void * address)
{
return PHYSADDR((unsigned long)address);
return __pa(address);
}
static inline void * phys_to_virt(unsigned long address)
{
return (void*) CACHED_ADDR(address);
return __va(address);
}
/*
* IO bus memory addresses are also 1:1 with the physical address
* virt_to_bus and bus_to_virt are deprecated.
*/
static inline unsigned long virt_to_bus(volatile void * address)
{
return PHYSADDR((unsigned long)address);
}
static inline void * bus_to_virt (unsigned long address)
{
return (void *) CACHED_ADDR(address);
}
#define virt_to_bus(x) virt_to_phys(x)
#define bus_to_virt(x) phys_to_virt(x)
/*
* Change "struct page" to physical address.
* Return the virtual (cached) address for the specified bus memory.
* Note that we currently don't support any address outside the KIO segment.
*/
static inline void *ioremap(unsigned long offset, unsigned long size)
{
return (void *) CACHED_ADDR_IO(offset);
if (offset >= XCHAL_KIO_PADDR
&& offset < XCHAL_KIO_PADDR + XCHAL_KIO_SIZE)
return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR);
else
BUG();
}
static inline void *ioremap_nocache(unsigned long offset, unsigned long size)
{
return (void *) BYPASS_ADDR_IO(offset);
if (offset >= XCHAL_KIO_PADDR
&& offset < XCHAL_KIO_PADDR + XCHAL_KIO_SIZE)
return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR);
else
BUG();
}
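/* A minimal usage sketch (hypothetical register block at physical
 * 0xf0050000, inside the 0xf0000000..0xf7ffffff KIO segment):
 *
 *	void __iomem *regs = ioremap(0xf0050000, 0x100);
 *	unsigned int id = readl(regs);	(uncached bypass access)
 *	iounmap(regs);
 */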
static inline void iounmap(void *addr)
......@@ -121,9 +125,6 @@ static inline void __raw_writel(__u32 b, volatile void __iomem *addr)
*(__force volatile __u32 *)(addr) = b;
}
/* These are the definitions for the x86 IO instructions
* inb/inw/inl/outb/outw/outl, the "string" versions
* insb/insw/insl/outsb/outsw/outsl, and the "pausing" versions
......@@ -131,11 +132,11 @@ static inline void __raw_writel(__u32 b, volatile void __iomem *addr)
* The macros don't do byte-swapping.
*/
#define inb(port) readb((u8 *)((port)+_IO_BASE))
#define outb(val, port) writeb((val),(u8 *)((unsigned long)(port)+_IO_BASE))
#define inw(port) readw((u16 *)((port)+_IO_BASE))
#define outw(val, port) writew((val),(u16 *)((unsigned long)(port)+_IO_BASE))
#define inl(port) readl((u32 *)((port)+_IO_BASE))
#define inb(port) readb((u8 *)((port)))
#define outb(val, port) writeb((val),(u8 *)((unsigned long)(port)))
#define inw(port) readw((u16 *)((port)))
#define outw(val, port) writew((val),(u16 *)((unsigned long)(port)))
#define inl(port) readl((u32 *)((port)))
#define outl(val, port) writel((val),(u32 *)((unsigned long)(port)))
#define inb_p(port) inb((port))
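With _IO_BASE dropped, the port argument is used directly as an MMIO address; a hypothetical use (the address is illustrative only):

/* Hypothetical: poll a byte-wide status register at 0xfd030000. */
while ((inb(0xfd030000) & 0x80) == 0)
	;			/* note: these macros do no byte-swapping */
outb(0x01, 0xfd030000);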
......@@ -180,14 +181,13 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
/*
* * Convert a physical pointer to a virtual kernel pointer for /dev/mem
* * access
* */
* Convert a physical pointer to a virtual kernel pointer for /dev/mem access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* * Convert a virtual cached pointer to an uncached pointer
* */
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
......
......@@ -12,8 +12,7 @@
#define _XTENSA_IRQ_H
#include <asm/platform/hardware.h>
#include <xtensa/config/core.h>
#include <asm/variant/core.h>
#ifndef PLATFORM_NR_IRQS
# define PLATFORM_NR_IRQS 0
......@@ -27,10 +26,5 @@ static __inline__ int irq_canonicalize(int irq)
}
struct irqaction;
#if 0 // FIXME
extern void disable_irq_nosync(unsigned int);
extern void disable_irq(unsigned int);
extern void enable_irq(unsigned int);
#endif
#endif /* _XTENSA_IRQ_H */
......@@ -15,18 +15,24 @@
#include <asm/processor.h>
#define XCHAL_KSEG_CACHED_VADDR 0xd0000000
#define XCHAL_KSEG_BYPASS_VADDR 0xd8000000
#define XCHAL_KSEG_PADDR 0x00000000
#define XCHAL_KSEG_SIZE 0x08000000
/*
* PAGE_SHIFT determines the page size
* PAGE_ALIGN(x) aligns the pointer to the (next) page boundary
*/
#define PAGE_SHIFT XCHAL_MMU_MIN_PTE_PAGE_SIZE
#define PAGE_SHIFT 12
#define PAGE_SIZE (1 << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE - 1) & PAGE_MASK)
#define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS)
#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
#define MAX_MEM_PFN XCHAL_KSEG_SIZE
#define PGTABLE_START 0x80000000
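Worked example on the constants above, with PAGE_SHIFT fixed at 12:

/* Illustration only:
 * PAGE_SIZE  == 0x1000, PAGE_MASK == 0xfffff000
 * PAGE_ALIGN(0xd0001234) == (0xd0001234 + 0xfff) & 0xfffff000 == 0xd0002000 */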
#ifdef __ASSEMBLY__
......
......@@ -11,7 +11,7 @@
#ifndef _XTENSA_PARAM_H
#define _XTENSA_PARAM_H
#include <xtensa/config/core.h>
#include <asm/variant/core.h>
#ifdef __KERNEL__
# define HZ 100 /* internal timer frequency */
......
......@@ -14,45 +14,6 @@
#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>
/* Assertions. */
#ifdef CONFIG_MMU
#if (XCHAL_MMU_RINGS < 2)
# error Linux build assumes at least 2 ring levels.
#endif
#if (XCHAL_MMU_CA_BITS != 4)
# error We assume exactly four bits for CA.
#endif
#if (XCHAL_MMU_SR_BITS != 0)
# error We have no room for SR bits.
#endif
/*
* Use the first min-wired way for mapping page-table pages.
* Page coloring requires a second min-wired way.
*/
#if (XCHAL_DTLB_MINWIRED_SETS == 0)
# error Need a min-wired way for mapping page-table pages
#endif
#define DTLB_WAY_PGTABLE XCHAL_DTLB_SET(XCHAL_DTLB_MINWIRED_SET0, WAY)
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
# if XCHAL_DTLB_SET(XCHAL_DTLB_MINWIRED_SET0, WAYS) >= 2
# define DTLB_WAY_DCACHE_ALIAS0 (DTLB_WAY_PGTABLE + 1)
# define DTLB_WAY_DCACHE_ALIAS1 (DTLB_WAY_PGTABLE + 2)
# else
# error Page coloring requires its own wired dtlb way!
# endif
#endif
#endif /* CONFIG_MMU */
/*
* We only use two ring levels, user and kernel space.
*/
......@@ -97,7 +58,7 @@
#define PGD_ORDER 0
#define PMD_ORDER 0
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS XCHAL_SEG_MAPPABLE_VADDR
#define FIRST_USER_ADDRESS 0
#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
/* virtual memory area. We keep a distance to other memory regions to be
......
......@@ -12,18 +12,18 @@
* This file contains the default configuration of ISS.
*/
#ifndef __ASM_XTENSA_ISS_HARDWARE
#define __ASM_XTENSA_ISS_HARDWARE
#ifndef _XTENSA_PLATFORM_ISS_HARDWARE_H
#define _XTENSA_PLATFORM_ISS_HARDWARE_H
/*
* Memory configuration.
*/
#define PLATFORM_DEFAULT_MEM_START XSHAL_RAM_PADDR
#define PLATFORM_DEFAULT_MEM_SIZE XSHAL_RAM_VSIZE
#define PLATFORM_DEFAULT_MEM_START 0x00000000
#define PLATFORM_DEFAULT_MEM_SIZE 0x08000000
/*
* Interrupt configuration.
*/
#endif /* __ASM_XTENSA_ISS_HARDWARE */
#endif /* _XTENSA_PLATFORM_ISS_HARDWARE_H */
#ifndef SIMCALL_INCLUDED
#define SIMCALL_INCLUDED
/*
* THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
*
* include/asm-xtensa/xtensa/simcall.h - Simulator call numbers
* include/asm-xtensa/platform-iss/simcall.h
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2002 Tensilica Inc.
* Copyright (C) 2001 Tensilica Inc.
*/
#ifndef _XTENSA_PLATFORM_ISS_SIMCALL_H
#define _XTENSA_PLATFORM_ISS_SIMCALL_H
/*
* System-call-like services offered by the simulator host.
* These are modeled after the Linux 2.4 kernel system calls
* for Xtensa processors. However, not all system calls and
* not all functionality of a given system call are implemented,
* or necessarily have well-defined or equivalent semantics in
* the context of a simulation (as opposed to a Unix kernel).
*
* These services behave largely as if they had been invoked
* as a task in the simulator host's operating system
* (eg. files accessed are those of the simulator host).
* However, these SIMCALLs model a virtual operating system
* so that various definitions, bit assignments, etc.
* (e.g. open mode bits, errno values) are independent
* of the host operating system used to run the simulation.
* Rather, these definitions are specific to the Xtensa ISS.
* This way Xtensa ISA code written to use these SIMCALLs
* can (in principle) be simulated on any host.
*
* Up to 6 parameters are passed in registers a3 to a8
* (note the 6th parameter isn't passed on the stack,
* unlike windowed function calling conventions).
* The return value is in a2. A negative value in the
* range -4096 to -1 indicates a negated error code to be
* reported in errno with a return value of -1, otherwise
* the value in a2 is returned as is.
*/
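A minimal sketch of issuing a SIMCALL from C under this convention (hypothetical helper, not part of this header; three arguments shown, call number in a2, result back in a2):

static inline int simc(int nr, int arg0, int arg1, int arg2)
{
	register int a2 __asm__ ("a2") = nr;	/* call number in, result out */
	register int a3 __asm__ ("a3") = arg0;	/* up to six args in a3..a8 */
	register int a4 __asm__ ("a4") = arg1;
	register int a5 __asm__ ("a5") = arg2;

	__asm__ __volatile__ ("simcall"
			      : "+a" (a2)
			      : "a" (a3), "a" (a4), "a" (a5)
			      : "memory");
	return a2;	/* -4096..-1 means a negated errno, per the note above */
}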
/* These #defines need to match what's in Xtensa/OS/vxworks/xtiss/simcalls.c */
#define SYS_nop 0 /* n/a - setup; used to flush register windows */
#define SYS_nop 0 /* unused */
#define SYS_exit 1 /*x*/
#define SYS_fork 2
#define SYS_read 3 /*x*/
......@@ -77,54 +49,14 @@
#define SYS_bind 30
#define SYS_ioctl 31
/*
* Other...
*/
#define SYS_iss_argc 1000 /* returns value of argc */
#define SYS_iss_argv_size 1001 /* bytes needed for argv & arg strings */
#define SYS_iss_set_argv 1002 /* saves argv & arg strings at given addr */
/*
* SIMCALLs for the ferret memory debugger. All are invoked by
* libferret.a ... ( Xtensa/Target-Libs/ferret )
*/
#define SYS_ferret 1010
#define SYS_malloc 1011
#define SYS_free 1012
#define SYS_more_heap 1013
#define SYS_no_heap 1014
/*
* Extra SIMCALLs for GDB:
*/
#define SYS_gdb_break -1 /* invoked by XTOS on user exceptions if EPC points
to a break.n/break, regardless of cause! */
#define SYS_xmon_out -2 /* invoked by XMON: ... */
#define SYS_xmon_in -3 /* invoked by XMON: ... */
#define SYS_xmon_flush -4 /* invoked by XMON: ... */
#define SYS_gdb_abort -5 /* invoked by XTOS in _xtos_panic() */
#define SYS_gdb_illegal_inst -6 /* invoked by XTOS for illegal instructions (too deeply) */
#define SYS_xmon_init -7 /* invoked by XMON: ... */
#define SYS_gdb_enter_sktloop -8 /* invoked by XTOS on debug exceptions */
/*
* SIMCALLs for vxWorks xtiss BSP:
*/
#define SYS_setup_ppp_pipes -83
#define SYS_log_msg -84
/*
* Test SIMCALLs:
*/
#define SYS_test_write_state -100
#define SYS_test_read_state -101
/*
* SYS_select_one specifiers
*/
#define XTISS_SELECT_ONE_READ 1
#define XTISS_SELECT_ONE_WRITE 2
#define XTISS_SELECT_ONE_EXCEPT 3
#endif /* !SIMCALL_INCLUDED */
#endif /* _XTENSA_PLATFORM_ISS_SIMCALL_H */
......@@ -11,24 +11,18 @@
#ifndef _XTENSA_PROCESSOR_H
#define _XTENSA_PROCESSOR_H
#ifdef __ASSEMBLY__
#define _ASMLANGUAGE
#endif
#include <xtensa/config/core.h>
#include <xtensa/config/specreg.h>
#include <xtensa/config/tie.h>
#include <xtensa/config/system.h>
#include <asm/variant/core.h>
#include <asm/coprocessor.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/coprocessor.h>
#include <asm/regs.h>
/* Assertions. */
#if (XCHAL_HAVE_WINDOWED != 1)
#error Linux requires the Xtensa Windowed Registers Option.
# error Linux requires the Xtensa Windowed Registers Option.
#endif
/*
......@@ -145,11 +139,11 @@ struct thread_struct {
* Note: We set-up ps as if we did a call4 to the new pc.
* set_thread_state in signal.c depends on it.
*/
#define USER_PS_VALUE ( (1 << XCHAL_PS_WOE_SHIFT) + \
(1 << XCHAL_PS_CALLINC_SHIFT) + \
(USER_RING << XCHAL_PS_RING_SHIFT) + \
(1 << XCHAL_PS_PROGSTACK_SHIFT) + \
(1 << XCHAL_PS_EXCM_SHIFT) )
#define USER_PS_VALUE ((1 << PS_WOE_BIT) | \
(1 << PS_CALLINC_SHIFT) | \
(USER_RING << PS_RING_SHIFT) | \
(1 << PS_UM_BIT) | \
(1 << PS_EXCM_BIT))
/* Clearing a0 terminates the backtrace. */
#define start_thread(regs, new_pc, new_sp) \
......
......@@ -11,7 +11,7 @@
#ifndef _XTENSA_PTRACE_H
#define _XTENSA_PTRACE_H
#include <xtensa/config/core.h>
#include <asm/variant/core.h>
/*
* Kernel stack
......
/*
* Xtensa Special Register symbolic names
*/
/* $Id: specreg.h,v 1.2 2003/03/07 19:15:18 joetaylor Exp $ */
/*
* Copyright (c) 2003 Tensilica, Inc. All Rights Reserved.
* Copyright (c) 2006 Tensilica, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2.1 of the GNU Lesser General Public
......@@ -28,18 +22,20 @@
* USA.
*/
#ifndef XTENSA_SPECREG_H
#define XTENSA_SPECREG_H
#ifndef _XTENSA_REGS_H
#define _XTENSA_REGS_H
/* Include these special register bitfield definitions, for historical reasons: */
#include <xtensa/corebits.h>
/* Special registers. */
/* Special registers: */
#define LBEG 0
#define LEND 1
#define LCOUNT 2
#define SAR 3
#define BR 4
#define SCOMPARE1 12
#define ACCHI 16
#define ACCLO 17
#define MR 32
#define WINDOWBASE 72
#define WINDOWSTART 73
#define PTEVADDR 83
......@@ -48,52 +44,95 @@
#define DTLBCFG 92
#define IBREAKENABLE 96
#define DDR 104
#define IBREAKA_0 128
#define IBREAKA_1 129
#define DBREAKA_0 144
#define DBREAKA_1 145
#define DBREAKC_0 160
#define DBREAKC_1 161
#define IBREAKA 128
#define DBREAKA 144
#define DBREAKC 160
#define EPC 176
#define EPC_1 177
#define EPC_2 178
#define EPC_3 179
#define EPC_4 180
#define DEPC 192
#define EPS_2 194
#define EPS_3 195
#define EPS_4 196
#define EPS 192
#define EPS_1 193
#define EXCSAVE 208
#define EXCSAVE_1 209
#define EXCSAVE_2 210
#define EXCSAVE_3 211
#define EXCSAVE_4 212
#define INTERRUPT 226
#define INTENABLE 228
#define PS 230
#define THREADPTR 231
#define EXCCAUSE 232
#define DEBUGCAUSE 233
#define CCOUNT 234
#define PRID 235
#define ICOUNT 236
#define ICOUNTLEVEL 237
#define EXCVADDR 238
#define CCOMPARE_0 240
#define CCOMPARE_1 241
#define CCOMPARE_2 242
#define MISC_REG_0 244
#define MISC_REG_1 245
/* Special cases (bases of special register series): */
#define IBREAKA 128
#define DBREAKA 144
#define DBREAKC 160
#define EPC 176
#define EPS 192
#define EXCSAVE 208
#define CCOMPARE 240
#define MISC 244
/* Special names for read-only and write-only interrupt registers. */
/* Special names for read-only and write-only interrupt registers: */
#define INTREAD 226
#define INTSET 226
#define INTCLEAR 227
#endif /* XTENSA_SPECREG_H */
/* EXCCAUSE register fields */
#define EXCCAUSE_EXCCAUSE_SHIFT 0
#define EXCCAUSE_EXCCAUSE_MASK 0x3F
#define EXCCAUSE_ILLEGAL_INSTRUCTION 0
#define EXCCAUSE_SYSTEM_CALL 1
#define EXCCAUSE_INSTRUCTION_FETCH_ERROR 2
#define EXCCAUSE_LOAD_STORE_ERROR 3
#define EXCCAUSE_LEVEL1_INTERRUPT 4
#define EXCCAUSE_ALLOCA 5
#define EXCCAUSE_INTEGER_DIVIDE_BY_ZERO 6
#define EXCCAUSE_SPECULATION 7
#define EXCCAUSE_PRIVILEGED 8
#define EXCCAUSE_UNALIGNED 9
#define EXCCAUSE_ITLB_MISS 16
#define EXCCAUSE_ITLB_MULTIHIT 17
#define EXCCAUSE_ITLB_PRIVILEGE 18
#define EXCCAUSE_ITLB_SIZE_RESTRICTION 19
#define EXCCAUSE_FETCH_CACHE_ATTRIBUTE 20
#define EXCCAUSE_DTLB_MISS 24
#define EXCCAUSE_DTLB_MULTIHIT 25
#define EXCCAUSE_DTLB_PRIVILEGE 26
#define EXCCAUSE_DTLB_SIZE_RESTRICTION 27
#define EXCCAUSE_LOAD_CACHE_ATTRIBUTE 28
#define EXCCAUSE_STORE_CACHE_ATTRIBUTE 29
#define EXCCAUSE_FLOATING_POINT 40
/* PS register fields. */
#define PS_WOE_BIT 18
#define PS_CALLINC_SHIFT 16
#define PS_CALLINC_MASK 0x00030000
#define PS_OWB_SHIFT 8
#define PS_OWB_MASK 0x00000F00
#define PS_RING_SHIFT 6
#define PS_RING_MASK 0x000000C0
#define PS_UM_BIT 5
#define PS_EXCM_BIT 4
#define PS_INTLEVEL_SHIFT 0
#define PS_INTLEVEL_MASK 0x0000000F
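Tying these fields back to USER_PS_VALUE in processor.h, a worked expansion (assuming USER_RING is 1):

/* Illustration only, assuming USER_RING == 1:
 *   (1 << PS_WOE_BIT)            = 0x00040000  window overflow detection on
 * | (1 << PS_CALLINC_SHIFT)      = 0x00010000  call increment 1 (as if call4)
 * | (USER_RING << PS_RING_SHIFT) = 0x00000040  user ring
 * | (1 << PS_UM_BIT)             = 0x00000020  user vector mode
 * | (1 << PS_EXCM_BIT)           = 0x00000010  exceptions masked until rfe
 *                                = 0x00050070 */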
/* DBREAKCn register fields. */
#define DBREAKC_MASK_BIT 0
#define DBREAKC_MASK_MASK 0x0000003F
#define DBREAKC_LOAD_BIT 30
#define DBREAKC_LOAD_MASK 0x40000000
#define DBREAKC_STOR_BIT 31
#define DBREAKC_STOR_MASK 0x80000000
/* DEBUGCAUSE register fields. */
#define DEBUGCAUSE_DEBUGINT_BIT 5 /* External debug interrupt */
#define DEBUGCAUSE_BREAKN_BIT 4 /* BREAK.N instruction */
#define DEBUGCAUSE_BREAK_BIT 3 /* BREAK instruction */
#define DEBUGCAUSE_DBREAK_BIT 2 /* DBREAK match */
#define DEBUGCAUSE_IBREAK_BIT 1 /* IBREAK match */
#define DEBUGCAUSE_ICOUNT_BIT 0 /* ICOUNT would incr. to zero */
#endif /* _XTENSA_REGS_H */
......@@ -25,7 +25,7 @@
struct semid64_ds {
struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
#if XCHAL_HAVE_LE
#ifdef __XTENSA_EL__
__kernel_time_t sem_otime; /* last semop time */
unsigned long __unused1;
__kernel_time_t sem_ctime; /* last change time */
......
......@@ -213,7 +213,7 @@ static inline void spill_registers(void)
unsigned int a0, ps;
__asm__ __volatile__ (
"movi a14," __stringify (PS_EXCM_MASK) " | 1\n\t"
"movi a14," __stringify (PS_EXCM_BIT) " | 1\n\t"
"mov a12, a0\n\t"
"rsr a13," __stringify(SAR) "\n\t"
"xsr a14," __stringify(PS) "\n\t"
......
......@@ -16,17 +16,22 @@
#include <asm/processor.h>
#include <linux/stringify.h>
#if XCHAL_INT_LEVEL(XCHAL_TIMER0_INTERRUPT) == 1
#define _INTLEVEL(x) XCHAL_INT ## x ## _LEVEL
#define INTLEVEL(x) _INTLEVEL(x)
#if INTLEVEL(XCHAL_TIMER0_INTERRUPT) == 1
# define LINUX_TIMER 0
#elif XCHAL_INT_LEVEL(XCHAL_TIMER1_INTERRUPT) == 1
# define LINUX_TIMER_INT XCHAL_TIMER0_INTERRUPT
#elif INTLEVEL(XCHAL_TIMER1_INTERRUPT) == 1
# define LINUX_TIMER 1
#elif XCHAL_INT_LEVEL(XCHAL_TIMER2_INTERRUPT) == 1
# define LINUX_TIMER_INT XCHAL_TIMER1_INTERRUPT
#elif INTLEVEL(XCHAL_TIMER2_INTERRUPT) == 1
# define LINUX_TIMER 2
# define LINUX_TIMER_INT XCHAL_TIMER2_INTERRUPT
#else
# error "Bad timer number for Linux configurations!"
#endif
#define LINUX_TIMER_INT XCHAL_TIMER_INTERRUPT(LINUX_TIMER)
#define LINUX_TIMER_MASK (1L << LINUX_TIMER_INT)
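The _INTLEVEL/INTLEVEL pair is the standard token-pasting indirection; a self-contained sketch with stand-in values (any real core configuration supplies its own):

#define XCHAL_TIMER0_INTERRUPT 10	/* stand-in value for illustration */
#define XCHAL_INT10_LEVEL 1		/* stand-in value for illustration */
#define _INTLEVEL(x) XCHAL_INT ## x ## _LEVEL
#define INTLEVEL(x) _INTLEVEL(x)
/* INTLEVEL(XCHAL_TIMER0_INTERRUPT) -> _INTLEVEL(10) -> XCHAL_INT10_LEVEL -> 1.
 * Pasting directly would glue the unexpanded token and produce the
 * undefined name XCHAL_INTXCHAL_TIMER0_INTERRUPT_LEVEL. */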
#define CLOCK_TICK_RATE 1193180 /* (everyone is using this value) */
......@@ -60,8 +65,8 @@ extern cycles_t cacheflush_time;
#define WSR_CCOUNT(r) __asm__("wsr %0,"__stringify(CCOUNT) :: "a" (r))
#define RSR_CCOUNT(r) __asm__("rsr %0,"__stringify(CCOUNT) : "=a" (r))
#define WSR_CCOMPARE(x,r) __asm__("wsr %0,"__stringify(CCOMPARE_0)"+"__stringify(x) :: "a"(r))
#define RSR_CCOMPARE(x,r) __asm__("rsr %0,"__stringify(CCOMPARE_0)"+"__stringify(x) : "=a"(r))
#define WSR_CCOMPARE(x,r) __asm__("wsr %0,"__stringify(CCOMPARE)"+"__stringify(x) :: "a"(r))
#define RSR_CCOMPARE(x,r) __asm__("rsr %0,"__stringify(CCOMPARE)"+"__stringify(x) : "=a"(r))
static inline unsigned long get_ccount (void)
{
......
......@@ -11,12 +11,20 @@
#ifndef _XTENSA_TLBFLUSH_H
#define _XTENSA_TLBFLUSH_H
#define DEBUG_TLB
#ifdef __KERNEL__
#include <asm/processor.h>
#include <linux/stringify.h>
#include <asm/processor.h>
#define DTLB_WAY_PGD 7
#define ITLB_ARF_WAYS 4
#define DTLB_ARF_WAYS 4
#define ITLB_HIT_BIT 3
#define DTLB_HIT_BIT 4
#ifndef __ASSEMBLY__
/* TLB flushing:
*
......@@ -46,11 +54,6 @@ static inline void flush_tlb_pgtables(struct mm_struct *mm,
/* TLB operations. */
#define ITLB_WAYS_LOG2 XCHAL_ITLB_WAY_BITS
#define DTLB_WAYS_LOG2 XCHAL_DTLB_WAY_BITS
#define ITLB_PROBE_SUCCESS (1 << ITLB_WAYS_LOG2)
#define DTLB_PROBE_SUCCESS (1 << DTLB_WAYS_LOG2)
static inline unsigned long itlb_probe(unsigned long addr)
{
unsigned long tmp;
......@@ -131,29 +134,30 @@ static inline void write_itlb_entry (pte_t entry, int way)
static inline void invalidate_page_directory (void)
{
invalidate_dtlb_entry (DTLB_WAY_PGTABLE);
invalidate_dtlb_entry (DTLB_WAY_PGD);
invalidate_dtlb_entry (DTLB_WAY_PGD+1);
invalidate_dtlb_entry (DTLB_WAY_PGD+2);
}
static inline void invalidate_itlb_mapping (unsigned address)
{
unsigned long tlb_entry;
while ((tlb_entry = itlb_probe (address)) & ITLB_PROBE_SUCCESS)
invalidate_itlb_entry (tlb_entry);
if (((tlb_entry = itlb_probe(address)) & (1 << ITLB_HIT_BIT)) != 0)
invalidate_itlb_entry(tlb_entry);
}
static inline void invalidate_dtlb_mapping (unsigned address)
{
unsigned long tlb_entry;
while ((tlb_entry = dtlb_probe (address)) & DTLB_PROBE_SUCCESS)
invalidate_dtlb_entry (tlb_entry);
if (((tlb_entry = dtlb_probe(address)) & (1 << DTLB_HIT_BIT)) != 0)
invalidate_dtlb_entry(tlb_entry);
}
#define check_pgt_cache() do { } while (0)
#ifdef DEBUG_TLB
/* DO NOT USE THESE FUNCTIONS. These instructions aren't part of the Xtensa
/*
* DO NOT USE THESE FUNCTIONS. These instructions aren't part of the Xtensa
* ISA and exist only for test purposes.
* You may find them helpful for MMU debugging, however.
*
......@@ -193,8 +197,6 @@ static inline unsigned long read_itlb_translation (int way)
return tmp;
}
#endif /* DEBUG_TLB */
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _XTENSA_PGALLOC_H */
#endif /* _XTENSA_TLBFLUSH_H */
/*
* Xtensa processor core configuration information.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999-2006 Tensilica Inc.
*/
#ifndef XTENSA_TIE_H
#define XTENSA_TIE_H
/*----------------------------------------------------------------------
COPROCESSORS and EXTRA STATE
----------------------------------------------------------------------*/
#define XCHAL_CP_NUM 0 /* number of coprocessors */
#define XCHAL_CP_MASK 0x00
#endif /* XTENSA_TIE_H */
/*
* xtensa/config/system.h -- HAL definitions that are dependent on SYSTEM configuration
*
* NOTE: The location and contents of this file are highly subject to change.
*
* Source for configuration-independent binaries (which link in a
* configuration-specific HAL library) must NEVER include this file.
* The HAL itself has historically included this file in some instances,
* but this is not appropriate either, because the HAL is meant to be
* core-specific but system-independent.
*/
/*
* Copyright (c) 2003 Tensilica, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2.1 of the GNU Lesser General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
* USA.
*/
#ifndef XTENSA_CONFIG_SYSTEM_H
#define XTENSA_CONFIG_SYSTEM_H
/*#include <xtensa/hal.h>*/
/*----------------------------------------------------------------------
DEVICE ADDRESSES
----------------------------------------------------------------------*/
/*
* Strange place to find these, but the configuration GUI
* allows moving these around to account for various core
* configurations. Specific boards (and their BSP software)
* will have specific meanings for these components.
*/
/* I/O Block areas: */
#define XSHAL_IOBLOCK_CACHED_VADDR 0xE0000000
#define XSHAL_IOBLOCK_CACHED_PADDR 0xF0000000
#define XSHAL_IOBLOCK_CACHED_SIZE 0x0E000000
#define XSHAL_IOBLOCK_BYPASS_VADDR 0xF0000000
#define XSHAL_IOBLOCK_BYPASS_PADDR 0xF0000000
#define XSHAL_IOBLOCK_BYPASS_SIZE 0x0E000000
/* System ROM: */
#define XSHAL_ROM_VADDR 0xEE000000
#define XSHAL_ROM_PADDR 0xFE000000
#define XSHAL_ROM_SIZE 0x00400000
/* Largest available area (free of vectors): */
#define XSHAL_ROM_AVAIL_VADDR 0xEE00052C
#define XSHAL_ROM_AVAIL_VSIZE 0x003FFAD4
/* System RAM: */
#define XSHAL_RAM_VADDR 0xD0000000
#define XSHAL_RAM_PADDR 0x00000000
#define XSHAL_RAM_VSIZE 0x08000000
#define XSHAL_RAM_PSIZE 0x10000000
#define XSHAL_RAM_SIZE XSHAL_RAM_PSIZE
/* Largest available area (free of vectors): */
#define XSHAL_RAM_AVAIL_VADDR 0xD0000370
#define XSHAL_RAM_AVAIL_VSIZE 0x07FFFC90
/*
* Shadow system RAM (same device as system RAM, at different address).
* (Emulation boards need this for the SONIC Ethernet driver
* when data caches are configured for writeback mode.)
* NOTE: on full MMU configs, this points to the BYPASS virtual address
* of system RAM, ie. is the same as XSHAL_RAM_* except that virtual
* addresses are viewed through the BYPASS static map rather than
* the CACHED static map.
*/
#define XSHAL_RAM_BYPASS_VADDR 0xD8000000
#define XSHAL_RAM_BYPASS_PADDR 0x00000000
#define XSHAL_RAM_BYPASS_PSIZE 0x08000000
/* Alternate system RAM (different device than system RAM): */
#define XSHAL_ALTRAM_VADDR 0xCEE00000
#define XSHAL_ALTRAM_PADDR 0xC0000000
#define XSHAL_ALTRAM_SIZE 0x00200000
/*----------------------------------------------------------------------
* DEVICE-ADDRESS DEPENDENT...
*
* Values written to CACHEATTR special register (or its equivalent)
* to enable and disable caches in various modes.
*----------------------------------------------------------------------*/
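A sketch of how these values are read (standard CACHEATTR layout; the attribute encodings below are inferred from the constants themselves):

/* CACHEATTR holds one nibble per 512 MB region of the virtual address
 * space, nibble 0 covering 0x00000000..0x1FFFFFFF and nibble 7 covering
 * 0xE0000000..0xFFFFFFFF. In XSHAL_ISS_CACHEATTR_BYPASS (0x2222222F),
 * for example, region 0 is marked illegal (0xF) and all remaining
 * regions are set to bypass (0x2). */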
/*----------------------------------------------------------------------
BACKWARD COMPATIBILITY ...
----------------------------------------------------------------------*/
/*
* NOTE: the following two macros are DEPRECATED. Use the later
* board-specific macros instead, which are specially tuned for the
* particular target environments' memory maps.
*/
#define XSHAL_CACHEATTR_BYPASS XSHAL_XT2000_CACHEATTR_BYPASS /* disable caches in bypass mode */
#define XSHAL_CACHEATTR_DEFAULT XSHAL_XT2000_CACHEATTR_DEFAULT /* default setting to enable caches (no writeback!) */
/*----------------------------------------------------------------------
ISS (Instruction Set Simulator) SPECIFIC ...
----------------------------------------------------------------------*/
#define XSHAL_ISS_CACHEATTR_WRITEBACK 0x1122222F /* enable caches in write-back mode */
#define XSHAL_ISS_CACHEATTR_WRITEALLOC 0x1122222F /* enable caches in write-allocate mode */
#define XSHAL_ISS_CACHEATTR_WRITETHRU 0x1122222F /* enable caches in write-through mode */
#define XSHAL_ISS_CACHEATTR_BYPASS 0x2222222F /* disable caches in bypass mode */
#define XSHAL_ISS_CACHEATTR_DEFAULT XSHAL_ISS_CACHEATTR_WRITEBACK /* default setting to enable caches */
/* For Coware only: */
#define XSHAL_COWARE_CACHEATTR_WRITEBACK 0x11222222 /* enable caches in write-back mode */
#define XSHAL_COWARE_CACHEATTR_WRITEALLOC 0x11222222 /* enable caches in write-allocate mode */
#define XSHAL_COWARE_CACHEATTR_WRITETHRU 0x11222222 /* enable caches in write-through mode */
#define XSHAL_COWARE_CACHEATTR_BYPASS 0x22222222 /* disable caches in bypass mode */
#define XSHAL_COWARE_CACHEATTR_DEFAULT XSHAL_COWARE_CACHEATTR_WRITEBACK /* default setting to enable caches */
/* For BFM and other purposes: */
#define XSHAL_ALLVALID_CACHEATTR_WRITEBACK 0x11222222 /* enable caches without any invalid regions */
#define XSHAL_ALLVALID_CACHEATTR_DEFAULT XSHAL_ALLVALID_CACHEATTR_WRITEBACK /* default setting for caches without any invalid regions */
#define XSHAL_ISS_PIPE_REGIONS 0
#define XSHAL_ISS_SDRAM_REGIONS 0
/*----------------------------------------------------------------------
XT2000 BOARD SPECIFIC ...
----------------------------------------------------------------------*/
#define XSHAL_XT2000_CACHEATTR_WRITEBACK 0x22FFFFFF /* enable caches in write-back mode */
#define XSHAL_XT2000_CACHEATTR_WRITEALLOC 0x22FFFFFF /* enable caches in write-allocate mode */
#define XSHAL_XT2000_CACHEATTR_WRITETHRU 0x22FFFFFF /* enable caches in write-through mode */
#define XSHAL_XT2000_CACHEATTR_BYPASS 0x22FFFFFF /* disable caches in bypass mode */
#define XSHAL_XT2000_CACHEATTR_DEFAULT XSHAL_XT2000_CACHEATTR_WRITEBACK /* default setting to enable caches */
#define XSHAL_XT2000_PIPE_REGIONS 0x00001000 /* BusInt pipeline regions */
#define XSHAL_XT2000_SDRAM_REGIONS 0x00000005 /* BusInt SDRAM regions */
/*----------------------------------------------------------------------
VECTOR SIZES
----------------------------------------------------------------------*/
/*
* Sizes allocated to vectors by the system (memory map) configuration.
* These sizes are constrained by core configuration (eg. one vector's
* code cannot overflow into another vector) but are dependent on the
* system or board (or LSP) memory map configuration.
*
* Whether or not each vector happens to be in a system ROM is also
* a system configuration matter; that information, sometimes useful,
* is included here as well:
*/
#define XSHAL_RESET_VECTOR_SIZE 0x000004E0
#define XSHAL_RESET_VECTOR_ISROM 1
#define XSHAL_USER_VECTOR_SIZE 0x0000001C
#define XSHAL_USER_VECTOR_ISROM 0
#define XSHAL_PROGRAMEXC_VECTOR_SIZE XSHAL_USER_VECTOR_SIZE /* for backward compatibility */
#define XSHAL_USEREXC_VECTOR_SIZE XSHAL_USER_VECTOR_SIZE /* for backward compatibility */
#define XSHAL_KERNEL_VECTOR_SIZE 0x0000001C
#define XSHAL_KERNEL_VECTOR_ISROM 0
#define XSHAL_STACKEDEXC_VECTOR_SIZE XSHAL_KERNEL_VECTOR_SIZE /* for backward compatibility */
#define XSHAL_KERNELEXC_VECTOR_SIZE XSHAL_KERNEL_VECTOR_SIZE /* for backward compatibility */
#define XSHAL_DOUBLEEXC_VECTOR_SIZE 0x000000E0
#define XSHAL_DOUBLEEXC_VECTOR_ISROM 0
#define XSHAL_WINDOW_VECTORS_SIZE 0x00000180
#define XSHAL_WINDOW_VECTORS_ISROM 0
#define XSHAL_INTLEVEL2_VECTOR_SIZE 0x0000000C
#define XSHAL_INTLEVEL2_VECTOR_ISROM 0
#define XSHAL_INTLEVEL3_VECTOR_SIZE 0x0000000C
#define XSHAL_INTLEVEL3_VECTOR_ISROM 0
#define XSHAL_INTLEVEL4_VECTOR_SIZE 0x0000000C
#define XSHAL_INTLEVEL4_VECTOR_ISROM 1
#define XSHAL_DEBUG_VECTOR_SIZE XSHAL_INTLEVEL4_VECTOR_SIZE
#define XSHAL_DEBUG_VECTOR_ISROM XSHAL_INTLEVEL4_VECTOR_ISROM
#endif /*XTENSA_CONFIG_SYSTEM_H*/