Commit 2605a103 authored by Linus Torvalds's avatar Linus Torvalds

Merge git://git.linux-xtensa.org/kernel/xtensa-feed

* git://git.linux-xtensa.org/kernel/xtensa-feed:
  [patch 1/2] Xtensa: enable arbitrary tty speed setting ioctls
  [patch 2/2] xtensa console.c: remove duplicate #include
  [XTENSA] Add support for cache-aliasing
  [XTENSA] Add kernel module support
  [XTENSA] Add support for executable/non-executable feature in the mmu
  [XTENSA] Use the generic version of get_order
  [XTENSA] Initialize semaphore_wake_lock
  [XTENSA] Add typecast macro for constants
  [XTENSA] Fix timer instabilities.
  [XTENSA] Fix fadvise64_64
  [XTENSA] Remove extraneous include statement
  [XTENSA] Move string-io functions to io.c from pci.c
  [XTENSA] Move pre-initialized structures to init_task.c
  [XTENSA] Add freestanding option to CFLAGS
  [XTENSA] Add getpgrp system-call to unistd.h
  [XTENSA] add missing system calls
  [XTENSA] fix wrong usage of __init and __initdata in traps.c
parents 53a3f308 ebb2a97b
......@@ -27,7 +27,12 @@ platform-$(CONFIG_XTENSA_PLATFORM_ISS) := iss
PLATFORM = $(platform-y)
export PLATFORM
CFLAGS += -pipe -mlongcalls
# temporarily until string.h is fixed
cflags-y += -ffreestanding
cflags-y += -pipe -mlongcalls
CFLAGS += $(cflags-y)
KBUILD_DEFCONFIG := iss_defconfig
......
......@@ -7,7 +7,7 @@ extra-y := head.o vmlinux.lds
obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o semaphore.o \
setup.o signal.o syscall.o time.o traps.o vectors.o platform.o \
pci-dma.o
pci-dma.o init_task.o io.o
## windowspill.o
......
......@@ -18,12 +18,13 @@
#include <linux/stddef.h>
#include <linux/thread_info.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#define DEFINE(sym, val) asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define BLANK() asm volatile("\n->" : : )
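/*
 * Editorial sketch (not part of the original file): DEFINE() exploits the
 * "i" (immediate) asm constraint so the compiler prints each constant into
 * the generated assembly. For example,
 *
 *     DEFINE(PT_SIZE, sizeof(struct pt_regs));
 *
 * emits a marker line such as "->PT_SIZE 96 sizeof(struct pt_regs)", which
 * Kbuild's sed rule rewrites into "#define PT_SIZE 96" in asm-offsets.h for
 * use by assembly sources like entry.S. (The value 96 is hypothetical.)
 */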
int main(void)
{
......@@ -63,7 +64,6 @@ int main(void)
DEFINE(PT_SIZE, sizeof(struct pt_regs));
DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS]));
DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS]));
BLANK();
/* struct task_struct */
DEFINE(TASK_PTRACE, offsetof (struct task_struct, ptrace));
......@@ -73,27 +73,26 @@ int main(void)
DEFINE(TASK_THREAD, offsetof (struct task_struct, thread));
DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, stack));
DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct));
BLANK();
/* struct thread_info (offset from start_struct) */
DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
DEFINE(THREAD_CP_SAVE, offsetof (struct task_struct, thread.cp_save));
DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds));
BLANK();
/* struct mm_struct */
DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users));
DEFINE(MM_PGD, offsetof (struct mm_struct, pgd));
DEFINE(MM_CONTEXT, offsetof (struct mm_struct, context));
BLANK();
DEFINE(PT_SINGLESTEP_BIT, PT_SINGLESTEP_BIT);
/* struct page */
DEFINE(PAGE_FLAGS, offsetof(struct page, flags));
/* constants */
DEFINE(_CLONE_VM, CLONE_VM);
DEFINE(_CLONE_UNTRACED, CLONE_UNTRACED);
DEFINE(PG_ARCH_1, PG_arch_1);
return 0;
}
......@@ -7,7 +7,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004-2005 by Tensilica Inc.
* Copyright (C) 2004-2007 by Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
*
......@@ -169,7 +169,7 @@ _user_exception:
* We have to save all registers up to the first '1' from
* the right, except the current frame (bit 0).
* Assume a2 is: 001001000110001
* All regiser frames starting from the top fiel to the marked '1'
* All register frames starting from the top field to the marked '1'
* must be saved.
*/
......@@ -1572,10 +1572,12 @@ ENTRY(fast_second_level_miss)
l32i a0, a1, TASK_MM # tsk->mm
beqz a0, 9f
8: rsr a1, EXCVADDR # fault address
_PGD_OFFSET(a0, a1, a1)
/* We deliberately destroy a3 that holds the exception table. */
8: rsr a3, EXCVADDR # fault address
_PGD_OFFSET(a0, a3, a1)
l32i a0, a0, 0 # read pmdval
//beqi a0, _PAGE_USER, 2f
beqz a0, 2f
/* Read ptevaddr and convert to top of page-table page.
......@@ -1588,7 +1590,7 @@ ENTRY(fast_second_level_miss)
* The messy computation for 'pteval' above really simplifies
* into the following:
*
* pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_KERNEL
* pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY
*/
movi a1, -PAGE_OFFSET
......@@ -1596,20 +1598,34 @@ ENTRY(fast_second_level_miss)
extui a1, a0, 0, PAGE_SHIFT # ... & PAGE_MASK
xor a0, a0, a1
movi a1, PAGE_DIRECTORY
movi a1, _PAGE_DIRECTORY
or a0, a0, a1 # ... | PAGE_DIRECTORY
/*
* We utilize all three wired ways (7-9) to hold pmd translations.
* Memory regions are mapped to the DTLBs according to bits 28 and 29.
* This allows mapping the three most common regions to three different
* DTLBs:
* 0,1 -> way 7 program (0040.0000) and virtual (c000.0000)
* 2 -> way 8 shared libraries (2000.0000)
* 3 -> way 9 stack (3000.0000)
*/
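/* Illustrative walk-through (editorial, not from the original source):
* for a stack address such as 0x3fff0000, bits 28..29 yield 3; addx2
* maps 3 -> 9, the extui(...,2,2) below keeps 9 >> 2 = 2, and adding
* DTLB_WAY_PGD (assumed to be 7) selects wired way 9, matching the
* table above.
*/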
extui a3, a3, 28, 2 # addr. bit 28 and 29 0,1,2,3
rsr a1, PTEVADDR
addx2 a3, a3, a3 # -> 0,3,6,9
srli a1, a1, PAGE_SHIFT
extui a3, a3, 2, 2 # -> 0,0,1,2
slli a1, a1, PAGE_SHIFT # ptevaddr & PAGE_MASK
addi a1, a1, DTLB_WAY_PGD # ... + way_number
addi a3, a3, DTLB_WAY_PGD
add a1, a1, a3 # ... + way_number
wdtlb a0, a1
3: wdtlb a0, a1
dsync
/* Exit critical section. */
4: movi a3, exc_table # restore a3
movi a0, 0
s32i a0, a3, EXC_TABLE_FIXUP
......@@ -1636,8 +1652,76 @@ ENTRY(fast_second_level_miss)
9: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0
j 8b
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
2: /* Special case for cache aliasing.
* We (should) only get here if clear_user_page, copy_user_page, or one
* of the aliased cache flush functions was preempted by another task.
* Re-establish the temporary mapping to the TLBTEMP_BASE areas.
*/
/* We shouldn't be in a double exception */
l32i a0, a2, PT_DEPC
bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f
/* Make sure the exception originated in the special functions */
movi a0, __tlbtemp_mapping_start
rsr a3, EPC_1
bltu a3, a0, 2f
movi a0, __tlbtemp_mapping_end
bgeu a3, a0, 2f
/* Check if excvaddr was in one of the TLBTEMP_BASE areas. */
movi a3, TLBTEMP_BASE_1
rsr a0, EXCVADDR
bltu a0, a3, 2f
addi a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT))
bgeu a1, a3, 2f
/* Check if we have to restore an ITLB mapping. */
movi a1, __tlbtemp_mapping_itlb
rsr a3, EPC_1
sub a3, a3, a1
/* Calculate VPN */
movi a1, PAGE_MASK
and a1, a1, a0
/* Jump for ITLB entry */
bgez a3, 1f
/* We can use up to two TLBTEMP areas, one for src and one for dst. */
extui a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
add a1, a3, a1
/* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */
mov a0, a6
movnez a0, a7, a3
j 3b
/* ITLB entry. We only use dst in a6. */
1: witlb a6, a1
isync
j 4b
#endif // DCACHE_WAY_SIZE > PAGE_SIZE
2: /* Invalid PGD, default exception handling */
movi a3, exc_table
rsr a1, DEPC
xsr a3, EXCSAVE_1
s32i a1, a2, PT_AREG2
......@@ -1682,15 +1766,15 @@ ENTRY(fast_store_prohibited)
8: rsr a1, EXCVADDR # fault address
_PGD_OFFSET(a0, a1, a4)
l32i a0, a0, 0
//beqi a0, _PAGE_USER, 2f # FIXME use _PAGE_INVALID
beqz a0, 2f
/* Note that we assume _PAGE_WRITABLE_BIT is only set if pte is valid.*/
_PTE_OFFSET(a0, a1, a4)
l32i a4, a0, 0 # read pteval
movi a1, _PAGE_VALID | _PAGE_RW
bnall a4, a1, 2f
bbci.l a4, _PAGE_WRITABLE_BIT, 2f
movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_WRENABLE
movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
or a4, a4, a1
rsr a1, EXCVADDR
s32i a4, a0, 0
......@@ -1700,10 +1784,7 @@ ENTRY(fast_store_prohibited)
dhwb a0, 0
#endif
pdtlb a0, a1
beqz a0, 1f
idtlb a0 // FIXME do we need this?
wdtlb a4, a0
1:
/* Exit critical section. */
......
/*
* arch/xtensa/kernel/init_task.c
*
* Xtensa Processor version.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
*/
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
EXPORT_SYMBOL(init_mm);
union thread_union init_thread_union
__attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
/*
* arch/xtensa/io.c
*
* IO primitives
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* Copied from sparc.
*
* Chris Zankel <chris@zankel.net>
*
*/
#include <asm/io.h>
#include <asm/byteorder.h>
void outsb(unsigned long addr, const void *src, unsigned long count) {
while (count) {
count -= 1;
writeb(*(const char *)src, addr);
src += 1;
addr += 1;
}
}
void outsw(unsigned long addr, const void *src, unsigned long count) {
while (count) {
count -= 2;
writew(*(const short *)src, addr);
src += 2;
addr += 2;
}
}
void outsl(unsigned long addr, const void *src, unsigned long count) {
while (count) {
count -= 4;
writel(*(const long *)src, addr);
src += 4;
addr += 4;
}
}
void insb(unsigned long addr, void *dst, unsigned long count) {
while (count) {
count -= 1;
*(unsigned char *)dst = readb(addr);
dst += 1;
addr += 1;
}
}
void insw(unsigned long addr, void *dst, unsigned long count) {
while (count) {
count -= 2;
*(unsigned short *)dst = readw(addr);
dst += 2;
addr += 2;
}
}
void insl(unsigned long addr, void *dst, unsigned long count) {
while (count) {
count -= 4;
/*
* XXX I am sure we are in for an unaligned trap here.
*/
*(unsigned long *)dst = readl(addr);
dst += 4;
addr += 4;
}
}
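The XXX comment above flags a real hazard: nothing guarantees that dst is
4-byte aligned. A hedged sketch of how insl() could sidestep the trap, using
the generic helpers from <asm/unaligned.h> (an assumption that they are
suitable for this port):

#include <asm/io.h>
#include <asm/unaligned.h>

void insl(unsigned long addr, void *dst, unsigned long count)
{
	while (count) {
		count -= 4;
		/* put_unaligned() falls back to byte-wise stores when the
		 * destination is not naturally aligned. */
		put_unaligned(readl(addr), (unsigned long *)dst);
		dst += 4;
		addr += 4;
	}
}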
......@@ -7,7 +7,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
* Copyright (C) 2001 - 2006 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
*
......@@ -22,57 +22,216 @@
#include <linux/kernel.h>
#include <linux/cache.h>
LIST_HEAD(module_buf_list);
#undef DEBUG_RELOCATE
void *module_alloc(unsigned long size)
{
panic("module_alloc not implemented");
if (size == 0)
return NULL;
return vmalloc(size);
}
void module_free(struct module *mod, void *module_region)
{
panic("module_free not implemented");
vfree(module_region);
/* FIXME: If module_region == mod->init_region, trim exception
table entries. */
}
int module_frob_arch_sections(Elf32_Ehdr *hdr,
Elf32_Shdr *sechdrs,
char *secstrings,
struct module *me)
struct module *mod)
{
panic("module_frob_arch_sections not implemented");
return 0;
}
static int
decode_calln_opcode (unsigned char *location)
{
#ifdef __XTENSA_EB__
return (location[0] & 0xf0) == 0x50;
#endif
#ifdef __XTENSA_EL__
return (location[0] & 0xf) == 0x5;
#endif
}
static int
decode_l32r_opcode (unsigned char *location)
{
#ifdef __XTENSA_EB__
return (location[0] & 0xf0) == 0x10;
#endif
#ifdef __XTENSA_EL__
return (location[0] & 0xf) == 0x1;
#endif
}
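/*
 * Editorial note on the arithmetic in apply_relocate_add() below (inferred
 * from the Xtensa ISA, not stated in the original): a CALLn at address p
 * targets (p & ~3) + 4 + (offset << 2), so the relocation subtracts
 * ((unsigned long)location & -4) + 4 and shifts right by two before
 * re-encoding the 18-bit offset field; the range check accepts 20-bit
 * signed byte offsets. L32R loads from ((p + 3) & ~3) + (offset << 2)
 * with a negative, word-aligned offset, hence the
 * (signed int)value >> 18 != -1 test.
 */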
int apply_relocate(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *module)
struct module *mod)
{
panic ("apply_relocate not implemented");
printk(KERN_ERR "module %s: REL RELOCATION unsupported\n",
mod->name);
return -ENOEXEC;
}
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *module)
struct module *mod)
{
panic("apply_relocate_add not implemented");
unsigned int i;
Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
unsigned char *location;
uint32_t value;
#ifdef DEBUG_RELOCATE
printk("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
#endif
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
location = (char *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rela[i].r_offset;
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rela[i].r_info);
value = sym->st_value + rela[i].r_addend;
switch (ELF32_R_TYPE(rela[i].r_info)) {
case R_XTENSA_NONE:
case R_XTENSA_DIFF8:
case R_XTENSA_DIFF16:
case R_XTENSA_DIFF32:
case R_XTENSA_ASM_EXPAND:
break;
case R_XTENSA_32:
case R_XTENSA_PLT:
*(uint32_t *)location += value;
break;
case R_XTENSA_SLOT0_OP:
if (decode_calln_opcode(location)) {
value -= ((unsigned long)location & -4) + 4;
if ((value & 3) != 0 ||
((value + (1 << 19)) >> 20) != 0) {
printk("%s: relocation out of range, "
"section %d reloc %d "
"sym '%s'\n",
mod->name, relsec, i,
strtab + sym->st_name);
return -ENOEXEC;
}
value = (signed int)value >> 2;
#ifdef __XTENSA_EB__
location[0] = ((location[0] & ~0x3) |
((value >> 16) & 0x3));
location[1] = (value >> 8) & 0xff;
location[2] = value & 0xff;
#endif
#ifdef __XTENSA_EL__
location[0] = ((location[0] & ~0xc0) |
((value << 6) & 0xc0));
location[1] = (value >> 2) & 0xff;
location[2] = (value >> 10) & 0xff;
#endif
} else if (decode_l32r_opcode(location)) {
value -= (((unsigned long)location + 3) & -4);
if ((value & 3) != 0 ||
(signed int)value >> 18 != -1) {
printk("%s: relocation out of range, "
"section %d reloc %d "
"sym '%s'\n",
mod->name, relsec, i,
strtab + sym->st_name);
return -ENOEXEC;
}
value = (signed int)value >> 2;
#ifdef __XTENSA_EB__
location[1] = (value >> 8) & 0xff;
location[2] = value & 0xff;
#endif
#ifdef __XTENSA_EL__
location[1] = value & 0xff;
location[2] = (value >> 8) & 0xff;
#endif
}
/* FIXME: Ignore any other opcodes. The Xtensa
assembler currently assumes that the linker will
always do relaxation and so all PC-relative
operands need relocations. (The assembler also
writes out the tentative PC-relative values,
assuming no link-time relaxation, so it is usually
safe to ignore the relocations.) If the
assembler's "--no-link-relax" flag can be made to
work, and if all kernel modules can be assembled
with that flag, then unexpected relocations could
be detected here. */
break;
case R_XTENSA_SLOT1_OP:
case R_XTENSA_SLOT2_OP:
case R_XTENSA_SLOT3_OP:
case R_XTENSA_SLOT4_OP:
case R_XTENSA_SLOT5_OP:
case R_XTENSA_SLOT6_OP:
case R_XTENSA_SLOT7_OP:
case R_XTENSA_SLOT8_OP:
case R_XTENSA_SLOT9_OP:
case R_XTENSA_SLOT10_OP:
case R_XTENSA_SLOT11_OP:
case R_XTENSA_SLOT12_OP:
case R_XTENSA_SLOT13_OP:
case R_XTENSA_SLOT14_OP:
printk("%s: unexpected FLIX relocation: %u\n",
mod->name,
ELF32_R_TYPE(rela[i].r_info));
return -ENOEXEC;
case R_XTENSA_SLOT0_ALT:
case R_XTENSA_SLOT1_ALT:
case R_XTENSA_SLOT2_ALT:
case R_XTENSA_SLOT3_ALT:
case R_XTENSA_SLOT4_ALT:
case R_XTENSA_SLOT5_ALT:
case R_XTENSA_SLOT6_ALT:
case R_XTENSA_SLOT7_ALT:
case R_XTENSA_SLOT8_ALT:
case R_XTENSA_SLOT9_ALT:
case R_XTENSA_SLOT10_ALT:
case R_XTENSA_SLOT11_ALT:
case R_XTENSA_SLOT12_ALT:
case R_XTENSA_SLOT13_ALT:
case R_XTENSA_SLOT14_ALT:
printk("%s: unexpected ALT relocation: %u\n",
mod->name,
ELF32_R_TYPE(rela[i].r_info));
return -ENOEXEC;
default:
printk("%s: unexpected relocation: %u\n",
mod->name,
ELF32_R_TYPE(rela[i].r_info));
return -ENOEXEC;
}
}
return 0;
}
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
struct module *mod)
{
panic ("module_finalize not implemented");
return 0;
}
void module_arch_cleanup(struct module *mod)
{
panic("module_arch_cleanup not implemented");
}
struct bug_entry *module_find_bug(unsigned long bugaddr)
{
panic("module_find_bug not implemented");
}
......@@ -394,72 +394,3 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
return ret;
}
/*
* This probably belongs here rather than ioport.c because
* we do not want this crud linked into SBus kernels.
* Also, think for a moment about likes of floppy.c that
* include architecture specific parts. They may want to redefine ins/outs.
*
* We do not use horrible macros here because we want to
* advance pointer by sizeof(size).
*/
void outsb(unsigned long addr, const void *src, unsigned long count) {
while (count) {
count -= 1;
writeb(*(const char *)src, addr);
src += 1;
addr += 1;
}
}
void outsw(unsigned long addr, const void *src, unsigned long count) {
while (count) {
count -= 2;
writew(*(const short *)src, addr);
src += 2;
addr += 2;
}
}
void outsl(unsigned long addr, const void *src, unsigned long count) {
while (count) {
count -= 4;
writel(*(const long *)src, addr);
src += 4;
addr += 4;
}
}
void insb(unsigned long addr, void *dst, unsigned long count) {
while (count) {
count -= 1;
*(unsigned char *)dst = readb(addr);
dst += 1;
addr += 1;
}
}
void insw(unsigned long addr, void *dst, unsigned long count) {
while (count) {
count -= 2;
*(unsigned short *)dst = readw(addr);
dst += 2;
addr += 2;
}
}
void insl(unsigned long addr, void *dst, unsigned long count) {
while (count) {
count -= 4;
/*
* XXX I am sure we are in for an unaligned trap here.
*/
*(unsigned long *)dst = readl(addr);
dst += 4;
addr += 4;
}
}
......@@ -46,20 +46,6 @@
extern void ret_from_fork(void);
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
EXPORT_SYMBOL(init_mm);
union thread_union init_thread_union
__attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
struct task_struct *current_set[NR_CPUS] = {&init_task, };
void (*pm_power_off)(void) = NULL;
......
......@@ -100,7 +100,7 @@ static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
return ret;
}
spinlock_t semaphore_wake_lock;
DEFINE_SPINLOCK(semaphore_wake_lock);
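/* Editorial note (inferred from the change): the bare declaration above
 * left the lock uninitialized, with no spin_lock_init() call anywhere;
 * DEFINE_SPINLOCK expands to the declaration plus an unlocked initializer,
 * which is what the "Initialize semaphore_wake_lock" commit fixes. */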
/*
* Semaphores are implemented using a two-way counter:
......
......@@ -93,3 +93,8 @@ asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
return (long)ret;
}
asmlinkage long xtensa_fadvise64_64(int fd, int advice, unsigned long long offset, unsigned long long len)
{
return sys_fadvise64_64(fd, offset, len, advice);
}
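/* Editorial note (inferred from the ABI, not stated in the original):
 * 64-bit syscall arguments occupy aligned register pairs on Xtensa, so
 * the arch wrapper takes (fd, advice, offset, len) and reorders them into
 * the generic sys_fadvise64_64(fd, offset, len, advice) prototype. */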
......@@ -32,12 +32,20 @@ EXPORT_SYMBOL(rtc_lock);
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
unsigned long ccount_per_jiffy; /* per 1/HZ */
unsigned long ccount_nsec; /* nsec per ccount increment */
unsigned long nsec_per_ccount; /* nsec per ccount increment */
#endif
unsigned int last_ccount_stamp;
static long last_rtc_update = 0;
/*
* Scheduler clock - returns current time in nanoseconds.
*/
unsigned long long sched_clock(void)
{
return (unsigned long long)jiffies * (1000000000 / HZ);
}
static irqreturn_t timer_interrupt(int irq, void *dev_id);
static struct irqaction timer_irqaction = {
.handler = timer_interrupt,
......@@ -69,7 +77,6 @@ void __init time_init(void)
xtime.tv_nsec = 0;
last_rtc_update = xtime.tv_sec = sec_n;
last_ccount_stamp = get_ccount();
set_normalized_timespec(&wall_to_monotonic,
-xtime.tv_sec, -xtime.tv_nsec);
......@@ -85,7 +92,7 @@ int do_settimeofday(struct timespec *tv)
{
time_t wtm_sec, sec = tv->tv_sec;
long wtm_nsec, nsec = tv->tv_nsec;
unsigned long ccount;
unsigned long delta;
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
......@@ -97,8 +104,10 @@ int do_settimeofday(struct timespec *tv)
* wall time. Discover what correction gettimeofday() would have
* made, and then undo it!
*/
ccount = get_ccount();
nsec -= (ccount - last_ccount_stamp) * CCOUNT_NSEC;
delta = CCOUNT_PER_JIFFY;
delta += get_ccount() - get_linux_timer();
nsec -= delta * NSEC_PER_CCOUNT;
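/*
 * Editorial note (inferred from the surrounding code): CCOMPARE, read via
 * get_linux_timer(), holds the ccount of the *next* tick, so
 * get_ccount() - get_linux_timer() is negative until that tick fires;
 * adding CCOUNT_PER_JIFFY turns the sum into the number of cycles elapsed
 * since the last tick, which is then scaled by NSEC_PER_CCOUNT.
 */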
wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
......@@ -117,17 +126,21 @@ EXPORT_SYMBOL(do_settimeofday);
void do_gettimeofday(struct timeval *tv)
{
unsigned long flags;
unsigned long sec, usec, delta, seq;
unsigned long volatile sec, usec, delta, seq;
do {
seq = read_seqbegin_irqsave(&xtime_lock, flags);
delta = get_ccount() - last_ccount_stamp;
sec = xtime.tv_sec;
usec = (xtime.tv_nsec / NSEC_PER_USEC);
delta = get_linux_timer() - get_ccount();
} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
usec += (delta * CCOUNT_NSEC) / NSEC_PER_USEC;
usec += (((unsigned long) CCOUNT_PER_JIFFY - delta)
* (unsigned long) NSEC_PER_CCOUNT) / NSEC_PER_USEC;
for (; usec >= 1000000; sec++, usec -= 1000000)
;
......@@ -158,9 +171,12 @@ again:
write_seqlock(&xtime_lock);
last_ccount_stamp = next;
do_timer(1); /* Linux handler in kernel/timer.c */
/* Note that writing CCOMPARE clears the interrupt. */
next += CCOUNT_PER_JIFFY;
do_timer (1); /* Linux handler in kernel/timer.c */
set_linux_timer(next);
if (ntp_synced() &&
xtime.tv_sec - last_rtc_update >= 659 &&
......@@ -175,19 +191,15 @@ again:
write_sequnlock(&xtime_lock);
}
/* NOTE: writing CCOMPAREn clears the interrupt. */
/* Allow platform to do something useful (Wdog). */
set_linux_timer (next);
platform_heartbeat();
/* Make sure we didn't miss any tick... */
if ((signed long)(get_ccount() - next) > 0)
goto again;
/* Allow platform to do something useful (Wdog). */
platform_heartbeat();
return IRQ_HANDLED;
}
......
......@@ -83,7 +83,7 @@ typedef struct {
void* handler;
} dispatch_init_table_t;
dispatch_init_table_t __init dispatch_init_table[] = {
static dispatch_init_table_t __initdata dispatch_init_table[] = {
{ EXCCAUSE_ILLEGAL_INSTRUCTION, 0, do_illegal_instruction},
{ EXCCAUSE_SYSTEM_CALL, KRNL, fast_syscall_kernel },
......@@ -305,7 +305,7 @@ do_debug(struct pt_regs *regs)
#define set_handler(idx,handler) (exc_table[idx] = (unsigned long) (handler))
void trap_init(void)
void __init trap_init(void)
{
int i;
......
......@@ -5,9 +5,5 @@
# removes any old dependencies. DON'T put your own dependencies here
# unless it's something special (ie not a .c file).
#
# Note 2! The CFLAGS definition is now in the main makefile...
obj-y := init.o fault.o tlb.o misc.o
obj-m :=
obj-n :=
obj- :=
obj-y := init.o fault.o tlb.o misc.o cache.o
/*
* arch/xtensa/mm/cache.c
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001-2006 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
* Joe Taylor
* Marc Gauthier
*
*/
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/bootmem.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <asm/pgtable.h>
#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
//#define printd(x...) printk(x)
#define printd(x...) do { } while(0)
/*
* Note:
* The kernel provides one architecture bit PG_arch_1 in the page flags that
* can be used for cache coherency.
*
* I$-D$ coherency.
*
* The Xtensa architecture doesn't keep the instruction cache coherent with
* the data cache. We use the architecture bit to indicate if the caches
* are coherent. The kernel clears this bit whenever a page is added to the
* page cache. At that time, the caches might not be in sync. We, therefore,
* define this flag as 'clean' if set.
*
* D-cache aliasing.
*
* With cache aliasing, we always have to flush the cache when pages are
* unmapped (see tlb_start_vma()). So, we use this flag to indicate a
* dirty page.
*/
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
/*
* Any time the kernel writes to a user page cache page, or is about to
* read from a page cache page, this routine is called.
*
*/
void flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
/*
* If we have a mapping but the page is not mapped to user-space
* yet, we simply mark this page dirty and defer flushing the
* caches until update_mmu_cache().
*/
if (mapping && !mapping_mapped(mapping)) {
if (!test_bit(PG_arch_1, &page->flags))
set_bit(PG_arch_1, &page->flags);
return;
} else {
unsigned long phys = page_to_phys(page);
unsigned long temp = page->index << PAGE_SHIFT;
unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
unsigned long virt;
/*
* Flush the page in kernel space and user space.
* Note that we can omit that step if aliasing is not
* an issue, but we do have to synchronize I$ and D$
* if we have a mapping.
*/
if (!alias && !mapping)
return;
__flush_invalidate_dcache_page((long)page_address(page));
virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);
if (alias)
__flush_invalidate_dcache_page_alias(virt, phys);
if (mapping)
__invalidate_icache_page_alias(virt, phys);
}
/* There shouldn't be an entry in the cache for this page anymore. */
}
/*
* For now, flush the whole cache. FIXME??
*/
void flush_cache_range(struct vm_area_struct* vma,
unsigned long start, unsigned long end)
{
__flush_invalidate_dcache_all();
__invalidate_icache_all();
}
/*
* Remove any entry in the cache for this page.
*
* Note that this function is only called for user pages, so use the
* alias versions of the cache flush functions.
*/
void flush_cache_page(struct vm_area_struct* vma, unsigned long address,
unsigned long pfn)
{
/* Note that we have to use the 'alias' address to avoid multi-hit */
unsigned long phys = page_to_phys(pfn_to_page(pfn));
unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);
__flush_invalidate_dcache_page_alias(virt, phys);
__invalidate_icache_page_alias(virt, phys);
}
#endif
void
update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t pte)
{
unsigned long pfn = pte_pfn(pte);
struct page *page;
if (!pfn_valid(pfn))
return;
page = pfn_to_page(pfn);
/* Invalidate old entry in TLBs */
invalidate_itlb_mapping(addr);
invalidate_dtlb_mapping(addr);
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
unsigned long vaddr = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
unsigned long paddr = (unsigned long) page_address(page);
unsigned long phys = page_to_phys(page);
__flush_invalidate_dcache_page(paddr);
__flush_invalidate_dcache_page_alias(vaddr, phys);
__invalidate_icache_page_alias(vaddr, phys);
clear_bit(PG_arch_1, &page->flags);
}
#else
if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
&& (vma->vm_flags & VM_EXEC) != 0) {
unsigned long vaddr = addr & PAGE_MASK;
__flush_dcache_page(vaddr);
__invalidate_icache_page(vaddr);
set_bit(PG_arch_1, &page->flags);
}
#endif
}
/*
* access_process_vm() has called get_user_pages(), which has done a
* flush_dcache_page() on the page.
*/
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, const void *src,
unsigned long len)
{
unsigned long phys = page_to_phys(page);
unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));
/* Flush and invalidate user page if aliased. */
if (alias) {
unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
__flush_invalidate_dcache_page_alias(temp, phys);
}
/* Copy data */
memcpy(dst, src, len);
/*
* Flush and invalidate kernel page if aliased and synchronize
* data and instruction caches for executable pages.
*/
if (alias) {
unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
__flush_invalidate_dcache_range((unsigned long) dst, len);
if ((vma->vm_flags & VM_EXEC) != 0) {
__invalidate_icache_page_alias(temp, phys);
}
} else if ((vma->vm_flags & VM_EXEC) != 0) {
__flush_dcache_range((unsigned long)dst,len);
__invalidate_icache_range((unsigned long) dst, len);
}
}
extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, const void *src,
unsigned long len)
{
unsigned long phys = page_to_phys(page);
unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));
/*
* Flush user page if aliased.
* (Note: a simple flush would be sufficient)
*/
if (alias) {
unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
__flush_invalidate_dcache_page_alias(temp, phys);
}
memcpy(dst, src, len);
}
#endif
......@@ -24,6 +24,8 @@
unsigned long asid_cache = ASID_USER_FIRST;
void bad_page_fault(struct pt_regs*, unsigned long, int);
#undef DEBUG_PAGE_FAULT
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
......@@ -64,7 +66,7 @@ void do_page_fault(struct pt_regs *regs)
exccause == EXCCAUSE_ITLB_MISS ||
exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
#if 0
#ifdef DEBUG_PAGE_FAULT
printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid,
address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
#endif
......@@ -219,7 +221,7 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
/* Are we prepared to handle this kernel fault? */
if ((entry = search_exception_tables(regs->pc)) != NULL) {
#if 1
#ifdef DEBUG_PAGE_FAULT
printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n",
current->comm, regs->pc, entry->fixup);
#endif
......
......@@ -15,40 +15,24 @@
* Kevin Chea
*/
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/bootmem.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/pgtable.h>
#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#define DEBUG 0
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
//static DEFINE_SPINLOCK(tlb_lock);
/*
* This flag is used to indicate that the page was mapped and modified in
* kernel space, so the cache is probably dirty at that address.
* If cache aliasing is enabled and the page color mismatches, update_mmu_cache
* synchronizes the caches if this bit is set.
*/
#define PG_cache_clean PG_arch_1
/* References to section boundaries */
......@@ -323,228 +307,22 @@ void show_mem(void)
printk("%d free pages\n", free);
}
/* ------------------------------------------------------------------------- */
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
/*
* With cache aliasing, the page color of the page in kernel space and user
* space might mismatch. We temporarily map the page to a different virtual
* address with the same color and clear the page there.
*/
void clear_user_page(void *kaddr, unsigned long vaddr, struct page* page)
{
/* There shouldn't be any entries for this page. */
__flush_invalidate_dcache_page_phys(__pa(page_address(page)));
if (!PAGE_COLOR_EQ(vaddr, kaddr)) {
unsigned long v, p;
/* Temporarily map page to DTLB_WAY_DCACHE_ALIAS0. */
spin_lock(&tlb_lock);
p = (unsigned long)pte_val((mk_pte(page,PAGE_KERNEL)));
kaddr = (void*)PAGE_COLOR_MAP0(vaddr);
v = (unsigned long)kaddr | DTLB_WAY_DCACHE_ALIAS0;
__asm__ __volatile__("wdtlb %0,%1; dsync" : :"a" (p), "a" (v));
clear_page(kaddr);
spin_unlock(&tlb_lock);
} else {
clear_page(kaddr);
}
/* We need to make sure that i$ and d$ are coherent. */
clear_bit(PG_cache_clean, &page->flags);
}
/*
* With cache aliasing, we have to make sure that the page color of the page
* in kernel space matches that of the virtual user address before we read
* the page. If the page color differ, we create a temporary DTLB entry with
* the corrent page color and use this 'temporary' address as the source.
* We then use the same approach as in clear_user_page and copy the data
* to the kernel space and clear the PG_cache_clean bit to synchronize caches
* later.
*
* Note:
* Instead of using another 'way' for the temporary DTLB entry, we could
* probably use the same entry that points to the kernel address (after
* saving the original value and restoring it when we are done).
*/
struct kmem_cache *pgtable_cache __read_mostly;
void copy_user_page(void* to, void* from, unsigned long vaddr,
struct page* to_page)
static void pgd_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
{
/* There shouldn't be any entries for the new page. */
__flush_invalidate_dcache_page_phys(__pa(page_address(to_page)));
spin_lock(&tlb_lock);
if (!PAGE_COLOR_EQ(vaddr, from)) {
unsigned long v, p, t;
__asm__ __volatile__ ("pdtlb %1,%2; rdtlb1 %0,%1"
: "=a"(p), "=a"(t) : "a"(from));
from = (void*)PAGE_COLOR_MAP0(vaddr);
v = (unsigned long)from | DTLB_WAY_DCACHE_ALIAS0;
__asm__ __volatile__ ("wdtlb %0,%1; dsync" ::"a" (p), "a" (v));
}
if (!PAGE_COLOR_EQ(vaddr, to)) {
unsigned long v, p;
p = (unsigned long)pte_val((mk_pte(to_page,PAGE_KERNEL)));
to = (void*)PAGE_COLOR_MAP1(vaddr);
v = (unsigned long)to | DTLB_WAY_DCACHE_ALIAS1;
__asm__ __volatile__ ("wdtlb %0,%1; dsync" ::"a" (p), "a" (v));
}
copy_page(to, from);
spin_unlock(&tlb_lock);
/* We need to make sure that i$ and d$ are coherent. */
clear_bit(PG_cache_clean, &to_page->flags);
}
/*
* Any time the kernel writes to a user page cache page, or it is about to
* read from a page cache page this routine is called.
*
* Note:
* The kernel currently only provides one architecture bit in the page
* flags that we use for I$/D$ coherency. Maybe, in future, we can
* use a sepearte bit for deferred dcache aliasing:
* If the page is not mapped yet, we only need to set a flag,
* if mapped, we need to invalidate the page.
*/
// FIXME: we probably need this for WB caches not only for Page Coloring..
void flush_dcache_page(struct page *page)
{
unsigned long addr = __pa(page_address(page));
struct address_space *mapping = page_mapping(page);
__flush_invalidate_dcache_page_phys(addr);
if (!test_bit(PG_cache_clean, &page->flags))
return;
/* If this page hasn't been mapped, yet, handle I$/D$ coherency later.*/
#if 0
if (mapping && !mapping_mapped(mapping))
clear_bit(PG_cache_clean, &page->flags);
else
#endif
__invalidate_icache_page_phys(addr);
}
void flush_cache_range(struct vm_area_struct* vma, unsigned long s,
unsigned long e)
{
__flush_invalidate_cache_all();
}
void flush_cache_page(struct vm_area_struct* vma, unsigned long address,
unsigned long pfn)
{
struct page *page = pfn_to_page(pfn);
/* Remove any entry for the old mapping. */
if (current->active_mm == vma->vm_mm) {
unsigned long addr = __pa(page_address(page));
__flush_invalidate_dcache_page_phys(addr);
if ((vma->vm_flags & VM_EXEC) != 0)
__invalidate_icache_page_phys(addr);
} else {
BUG();
}
}
#endif /* (DCACHE_WAY_SIZE > PAGE_SIZE) */
pte_t* pte_alloc_one_kernel (struct mm_struct* mm, unsigned long addr)
{
pte_t* pte = (pte_t*)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 0);
if (likely(pte)) {
pte_t* ptep = (pte_t*)(pte_val(*pte) + PAGE_OFFSET);
int i;
for (i = 0; i < 1024; i++, ptep++)
pte_clear(mm, addr, ptep);
}
return pte;
}
struct page* pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
struct page *page;
page = alloc_pages(GFP_KERNEL | __GFP_REPEAT, 0);
if (likely(page)) {
pte_t* ptep = kmap_atomic(page, KM_USER0);
int i;
pte_t* ptep = (pte_t*)addr;
int i;
for (i = 0; i < 1024; i++, ptep++)
pte_clear(mm, addr, ptep);
for (i = 0; i < 1024; i++, ptep++)
pte_clear(NULL, 0, ptep);
kunmap_atomic(ptep, KM_USER0);
}
return page;
}
/*
* Handle D$/I$ coherency.
*
* Note:
* We only have one architecture bit for the page flags, so we cannot handle
* cache aliasing, yet.
*/
void
update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t pte)
void __init pgtable_cache_init(void)
{
unsigned long pfn = pte_pfn(pte);
struct page *page;
unsigned long vaddr = addr & PAGE_MASK;
if (!pfn_valid(pfn))
return;
page = pfn_to_page(pfn);
invalidate_itlb_mapping(addr);
invalidate_dtlb_mapping(addr);
/* We have a new mapping. Use it. */
write_dtlb_entry(pte, dtlb_probe(addr));
/* If the processor can execute from this page, synchronize D$/I$. */
if ((vma->vm_flags & VM_EXEC) != 0) {
write_itlb_entry(pte, itlb_probe(addr));
/* Synchronize caches, if not clean. */
if (!test_and_set_bit(PG_cache_clean, &page->flags)) {
__flush_dcache_page(vaddr);
__invalidate_icache_page(vaddr);
}
}
pgtable_cache = kmem_cache_create("pgd",
PAGE_SIZE, PAGE_SIZE,
SLAB_HWCACHE_ALIGN,
pgd_ctor);
}
......@@ -7,29 +7,33 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
* Copyright (C) 2001 - 2007 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
*/
/* Note: we might want to implement some of the loops as zero-overhead-loops,
* where applicable and if supported by the processor.
*/
#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/cacheasm.h>
#include <asm/tlbflush.h>
/* clear_page (page) */
/*
* clear_page and clear_user_page are the same for non-cache-aliased configs.
*
* clear_page (unsigned long page)
* a2
*/
ENTRY(clear_page)
entry a1, 16
addi a4, a2, PAGE_SIZE
movi a3, 0
1: s32i a3, a2, 0
movi a3, 0
__loopi a2, a7, PAGE_SIZE, 32
s32i a3, a2, 0
s32i a3, a2, 4
s32i a3, a2, 8
s32i a3, a2, 12
......@@ -37,42 +41,277 @@ ENTRY(clear_page)
s32i a3, a2, 20
s32i a3, a2, 24
s32i a3, a2, 28
addi a2, a2, 32
blt a2, a4, 1b
__endla a2, a7, 32
retw
/*
* copy_page and copy_user_page are the same for non-cache-aliased configs.
*
* copy_page (void *to, void *from)
* a2 a3
* a2 a3
*/
ENTRY(copy_page)
entry a1, 16
addi a4, a2, PAGE_SIZE
1: l32i a5, a3, 0
l32i a6, a3, 4
l32i a7, a3, 8
s32i a5, a2, 0
s32i a6, a2, 4
s32i a7, a2, 8
l32i a5, a3, 12
l32i a6, a3, 16
l32i a7, a3, 20
s32i a5, a2, 12
s32i a6, a2, 16
s32i a7, a2, 20
l32i a5, a3, 24
l32i a6, a3, 28
s32i a5, a2, 24
s32i a6, a2, 28
addi a2, a2, 32
addi a3, a3, 32
blt a2, a4, 1b
__loopi a2, a4, PAGE_SIZE, 32
l32i a8, a3, 0
l32i a9, a3, 4
s32i a8, a2, 0
s32i a9, a2, 4
l32i a8, a3, 8
l32i a9, a3, 12
s32i a8, a2, 8
s32i a9, a2, 12
l32i a8, a3, 16
l32i a9, a3, 20
s32i a8, a2, 16
s32i a9, a2, 20
l32i a8, a3, 24
l32i a9, a3, 28
s32i a8, a2, 24
s32i a9, a2, 28
addi a2, a2, 32
addi a3, a3, 32
__endl a2, a4
retw
/*
* If we have to deal with cache aliasing, we use temporary memory mappings
* to ensure that the source and destination pages have the same color as
* the virtual address. We use way 0 and 1 for temporary mappings in such cases.
*
* The temporary DTLB entries shouldn't be flushed by interrupts, but are
* flushed by preemptive task switches. Special code in the
* fast_second_level_miss handler re-establishes the temporary mapping.
* It requires that the PPNs for the destination and source addresses are
* in a6 and a7, respectively.
*/
/* TLB miss exceptions are treated specially in the following region */
ENTRY(__tlbtemp_mapping_start)
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
/*
* clear_user_page (void *addr, unsigned long vaddr, struct page *page)
* a2 a3 a4
*/
ENTRY(clear_user_page)
entry a1, 32
/* Mark page dirty and determine alias. */
movi a7, (1 << PG_ARCH_1)
l32i a5, a4, PAGE_FLAGS
xor a6, a2, a3
extui a3, a3, PAGE_SHIFT, DCACHE_ALIAS_ORDER
extui a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER
or a5, a5, a7
slli a3, a3, PAGE_SHIFT
s32i a5, a4, PAGE_FLAGS
/* Skip setting up a temporary DTLB if not aliased. */
beqz a6, 1f
/* Invalidate kernel page. */
mov a10, a2
call8 __invalidate_dcache_page
/* Setup a temporary DTLB with the color of the VPN */
movi a4, -PAGE_OFFSET + (PAGE_KERNEL | _PAGE_HW_WRITE)
movi a5, TLBTEMP_BASE_1 # virt
add a6, a2, a4 # ppn
add a2, a5, a3 # add 'color'
wdtlb a6, a2
dsync
1: movi a3, 0
__loopi a2, a7, PAGE_SIZE, 32
s32i a3, a2, 0
s32i a3, a2, 4
s32i a3, a2, 8
s32i a3, a2, 12
s32i a3, a2, 16
s32i a3, a2, 20
s32i a3, a2, 24
s32i a3, a2, 28
__endla a2, a7, 32
bnez a6, 1f
retw
/* We need to invalidate the temporary idtlb entry, if any. */
1: addi a2, a2, -PAGE_SIZE
idtlb a2
dsync
retw
/*
* copy_user_page (void *to, void *from, unsigned long vaddr, struct page *page)
* a2 a3 a4 a5
*/
ENTRY(copy_user_page)
entry a1, 32
/* Mark page dirty and determine alias for destination. */
movi a8, (1 << PG_ARCH_1)
l32i a9, a5, PAGE_FLAGS
xor a6, a2, a4
xor a7, a3, a4
extui a4, a4, PAGE_SHIFT, DCACHE_ALIAS_ORDER
extui a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER
extui a7, a7, PAGE_SHIFT, DCACHE_ALIAS_ORDER
or a9, a9, a8
slli a4, a4, PAGE_SHIFT
s32i a9, a5, PAGE_FLAGS
movi a5, -PAGE_OFFSET + (PAGE_KERNEL | _PAGE_HW_WRITE)
beqz a6, 1f
/* Invalidate dcache */
mov a10, a2
call8 __invalidate_dcache_page
/* Setup a temporary DTLB with a matching color. */
movi a8, TLBTEMP_BASE_1 # base
add a6, a2, a5 # ppn
add a2, a8, a4 # add 'color'
wdtlb a6, a2
dsync
/* Skip setting up a temporary DTLB for destination if not aliased. */
1: beqz a7, 1f
/* Setup a temporary DTLB with a matching color. */
movi a8, TLBTEMP_BASE_2 # base
add a7, a3, a5 # ppn
add a3, a8, a4
addi a8, a3, 1 # way1
wdtlb a7, a8
dsync
1: __loopi a2, a4, PAGE_SIZE, 32
l32i a8, a3, 0
l32i a9, a3, 4
s32i a8, a2, 0
s32i a9, a2, 4
l32i a8, a3, 8
l32i a9, a3, 12
s32i a8, a2, 8
s32i a9, a2, 12
l32i a8, a3, 16
l32i a9, a3, 20
s32i a8, a2, 16
s32i a9, a2, 20
l32i a8, a3, 24
l32i a9, a3, 28
s32i a8, a2, 24
s32i a9, a2, 28
addi a2, a2, 32
addi a3, a3, 32
__endl a2, a4
/* We need to invalidate any temporary mapping! */
bnez a6, 1f
bnez a7, 2f
retw
1: addi a2, a2, -PAGE_SIZE
idtlb a2
dsync
bnez a7, 2f
retw
2: addi a3, a3, -PAGE_SIZE+1
idtlb a3
dsync
retw
#endif
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
/*
* void __flush_invalidate_dcache_page_alias (addr, phys)
* a2 a3
*/
ENTRY(__flush_invalidate_dcache_page_alias)
entry sp, 16
movi a7, 0 # required for exception handler
addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
mov a4, a2
wdtlb a6, a2
dsync
___flush_invalidate_dcache_page a2 a3
idtlb a4
dsync
retw
#endif
ENTRY(__tlbtemp_mapping_itlb)
#if (ICACHE_WAY_SIZE > PAGE_SIZE)
ENTRY(__invalidate_icache_page_alias)
entry sp, 16
addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
mov a4, a2
witlb a6, a2
isync
___invalidate_icache_page a2 a3
iitlb a4
isync
retw
#endif
/* End of special treatment in tlb miss exception */
ENTRY(__tlbtemp_mapping_end)
/*
* void __invalidate_icache_page(ulong start)
*/
......@@ -121,8 +360,6 @@ ENTRY(__flush_dcache_page)
dsync
retw
/*
* void __invalidate_icache_range(ulong start, ulong size)
*/
......@@ -168,7 +405,6 @@ ENTRY(__invalidate_dcache_range)
___invalidate_dcache_range a2 a3 a4
retw
/*
......
......@@ -20,7 +20,6 @@
#include <linux/param.h>
#include <linux/serial.h>
#include <linux/serialP.h>
#include <linux/console.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
......
......@@ -13,10 +13,6 @@
#ifndef _XTENSA_BUGS_H
#define _XTENSA_BUGS_H
#include <asm/processor.h>
static void __init check_bugs(void)
{
}
static void check_bugs(void) { }
#endif /* _XTENSA_BUGS_H */
......@@ -19,6 +19,15 @@
#define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE/XCHAL_DCACHE_WAYS)
#define ICACHE_WAY_SIZE (XCHAL_ICACHE_SIZE/XCHAL_ICACHE_WAYS)
#define DCACHE_WAY_SHIFT (XCHAL_DCACHE_SETWIDTH + XCHAL_DCACHE_LINEWIDTH)
#define ICACHE_WAY_SHIFT (XCHAL_ICACHE_SETWIDTH + XCHAL_ICACHE_LINEWIDTH)
/* Maximum cache size per way. */
#if DCACHE_WAY_SIZE >= ICACHE_WAY_SIZE
# define CACHE_WAY_SIZE DCACHE_WAY_SIZE
#else
# define CACHE_WAY_SIZE ICACHE_WAY_SIZE
#endif
#endif /* _XTENSA_CACHE_H */
......@@ -5,7 +5,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* (C) 2001 - 2006 Tensilica Inc.
* (C) 2001 - 2007 Tensilica Inc.
*/
#ifndef _XTENSA_CACHEFLUSH_H
......@@ -18,10 +18,7 @@
#include <asm/page.h>
/*
* flush and invalidate data cache, invalidate instruction cache:
*
* __flush_invalidate_cache_all()
* __flush_invalidate_cache_range(from,sze)
* Low-level routines for cache flushing.
*
* invalidate data or instruction cache:
*
......@@ -40,26 +37,39 @@
* __flush_invalidate_dcache_all()
* __flush_invalidate_dcache_page(adr)
* __flush_invalidate_dcache_range(from,size)
*
* specials for cache aliasing:
*
* __flush_invalidate_dcache_page_alias(vaddr,paddr)
* __invalidate_icache_page_alias(vaddr,paddr)
*/
extern void __flush_invalidate_cache_all(void);
extern void __flush_invalidate_cache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_all(void);
extern void __invalidate_dcache_all(void);
extern void __invalidate_icache_all(void);
extern void __invalidate_dcache_page(unsigned long);
extern void __invalidate_icache_page(unsigned long);
extern void __invalidate_icache_range(unsigned long, unsigned long);
extern void __invalidate_dcache_range(unsigned long, unsigned long);
#if XCHAL_DCACHE_IS_WRITEBACK
extern void __flush_invalidate_dcache_all(void);
extern void __flush_dcache_page(unsigned long);
extern void __flush_dcache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else
# define __flush_dcache_page(p) do { } while(0)
# define __flush_invalidate_dcache_page(p) do { } while(0)
# define __flush_invalidate_dcache_range(p,s) do { } while(0)
# define __flush_dcache_range(p,s) do { } while(0)
# define __flush_dcache_page(p) do { } while(0)
# define __flush_invalidate_dcache_page(p) __invalidate_dcache_page(p)
# define __flush_invalidate_dcache_range(p,s) __invalidate_dcache_range(p,s)
#endif
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
#endif
#if (ICACHE_WAY_SIZE > PAGE_SIZE)
extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
#endif
/*
......@@ -71,17 +81,21 @@ extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
* (see also Documentation/cachetlb.txt)
*/
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
#define flush_cache_all() __flush_invalidate_cache_all();
#define flush_cache_mm(mm) __flush_invalidate_cache_all();
#define flush_cache_dup_mm(mm) __flush_invalidate_cache_all();
#define flush_cache_all() \
do { \
__flush_invalidate_dcache_all(); \
__invalidate_icache_all(); \
} while (0)
#define flush_cache_vmap(start,end) __flush_invalidate_cache_all();
#define flush_cache_vunmap(start,end) __flush_invalidate_cache_all();
#define flush_cache_mm(mm) flush_cache_all()
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
extern void flush_dcache_page(struct page*);
#define flush_cache_vmap(start,end) flush_cache_all()
#define flush_cache_vunmap(start,end) flush_cache_all()
extern void flush_dcache_page(struct page*);
extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long);
......@@ -101,24 +115,39 @@ extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned lon
#endif
/* Ensure consistency between data and instruction cache. */
#define flush_icache_range(start,end) \
__invalidate_icache_range(start,(end)-(start))
do { \
__flush_dcache_range(start, (end) - (start)); \
__invalidate_icache_range(start,(end) - (start)); \
} while (0)
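/* Editorial note (inferred from the change, not original text): with a
 * writeback D-cache, freshly written instructions must be written back to
 * memory before the I-cache re-fetches them, which is why the D-cache
 * flush now precedes the I-cache invalidate. */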
/* This is not required, see Documentation/cachetlb.txt */
#define flush_icache_page(vma,page) do { } while(0)
#define flush_icache_page(vma,page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
extern void copy_to_user_page(struct vm_area_struct*, struct page*,
unsigned long, void*, const void*, unsigned long);
extern void copy_from_user_page(struct vm_area_struct*, struct page*,
unsigned long, void*, const void*, unsigned long);
#else
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
__flush_dcache_range((unsigned long) dst, len); \
__invalidate_icache_range((unsigned long) dst, len); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
#endif /* __KERNEL__ */
#endif
#endif /* __KERNEL__ */
#endif /* _XTENSA_CACHEFLUSH_H */
......@@ -20,6 +20,56 @@
#define EM_XTENSA 94
#define EM_XTENSA_OLD 0xABC7
/* Xtensa relocations defined by the ABIs */
#define R_XTENSA_NONE 0
#define R_XTENSA_32 1
#define R_XTENSA_RTLD 2
#define R_XTENSA_GLOB_DAT 3
#define R_XTENSA_JMP_SLOT 4
#define R_XTENSA_RELATIVE 5
#define R_XTENSA_PLT 6
#define R_XTENSA_OP0 8
#define R_XTENSA_OP1 9
#define R_XTENSA_OP2 10
#define R_XTENSA_ASM_EXPAND 11
#define R_XTENSA_ASM_SIMPLIFY 12
#define R_XTENSA_GNU_VTINHERIT 15
#define R_XTENSA_GNU_VTENTRY 16
#define R_XTENSA_DIFF8 17
#define R_XTENSA_DIFF16 18
#define R_XTENSA_DIFF32 19
#define R_XTENSA_SLOT0_OP 20
#define R_XTENSA_SLOT1_OP 21
#define R_XTENSA_SLOT2_OP 22
#define R_XTENSA_SLOT3_OP 23
#define R_XTENSA_SLOT4_OP 24
#define R_XTENSA_SLOT5_OP 25
#define R_XTENSA_SLOT6_OP 26
#define R_XTENSA_SLOT7_OP 27
#define R_XTENSA_SLOT8_OP 28
#define R_XTENSA_SLOT9_OP 29
#define R_XTENSA_SLOT10_OP 30
#define R_XTENSA_SLOT11_OP 31
#define R_XTENSA_SLOT12_OP 32
#define R_XTENSA_SLOT13_OP 33
#define R_XTENSA_SLOT14_OP 34
#define R_XTENSA_SLOT0_ALT 35
#define R_XTENSA_SLOT1_ALT 36
#define R_XTENSA_SLOT2_ALT 37
#define R_XTENSA_SLOT3_ALT 38
#define R_XTENSA_SLOT4_ALT 39
#define R_XTENSA_SLOT5_ALT 40
#define R_XTENSA_SLOT6_ALT 41
#define R_XTENSA_SLOT7_ALT 42
#define R_XTENSA_SLOT8_ALT 43
#define R_XTENSA_SLOT9_ALT 44
#define R_XTENSA_SLOT10_ALT 45
#define R_XTENSA_SLOT11_ALT 46
#define R_XTENSA_SLOT12_ALT 47
#define R_XTENSA_SLOT13_ALT 48
#define R_XTENSA_SLOT14_ALT 49
/* ELF register definitions. This is needed for core dump support. */
/*
......
......@@ -14,6 +14,7 @@
#ifdef __KERNEL__
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/kernel.h>
#include <linux/types.h>
......
......@@ -91,6 +91,10 @@
#define TIOCSBRK _IO('T', 39) /* BSD compatibility */
#define TIOCCBRK _IO('T', 40) /* BSD compatibility */
#define TIOCGSID _IOR('T', 41, pid_t) /* Return the session ID of FD*/
#define TCGETS2 _IOR('T', 42, struct termios2)
#define TCSETS2 _IOW('T', 43, struct termios2)
#define TCSETSW2 _IOW('T', 44, struct termios2)
#define TCSETSF2 _IOW('T', 45, struct termios2)
#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
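The TCGETS2/TCSETS2* ioctls above are what the "arbitrary tty speed" commit
in this merge exposes on Xtensa. A hypothetical user-space sketch (BOTHER
and struct termios2 come from the same termios rework; error handling
omitted):

#include <asm/termbits.h>
#include <sys/ioctl.h>

void set_custom_baud(int fd, int rate)
{
	struct termios2 tio;

	ioctl(fd, TCGETS2, &tio);           /* fetch current settings */
	tio.c_cflag &= ~CBAUD;
	tio.c_cflag |= BOTHER;              /* use c_ispeed/c_ospeed verbatim */
	tio.c_ispeed = tio.c_ospeed = rate; /* e.g. 74880, not a POSIX Bxxx */
	ioctl(fd, TCSETS2, &tio);           /* apply the new rate */
}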
......
/*
* linux/include/asm-xtensa/page.h
* include/asm-xtensa/page.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
* Copyright (C) 2001 - 2007 Tensilica Inc.
*/
#ifndef _XTENSA_PAGE_H
......@@ -14,6 +14,12 @@
#ifdef __KERNEL__
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/cache.h>
/*
* Fixed TLB translations in the processor.
*/
#define XCHAL_KSEG_CACHED_VADDR 0xd0000000
#define XCHAL_KSEG_BYPASS_VADDR 0xd8000000
......@@ -26,13 +32,60 @@
*/
#define PAGE_SHIFT 12
#define PAGE_SIZE (1 << PAGE_SHIFT)
#define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE - 1) & PAGE_MASK)
#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
#define MAX_MEM_PFN XCHAL_KSEG_SIZE
#define PGTABLE_START 0x80000000
#define MAX_MEM_PFN XCHAL_KSEG_SIZE
#define PGTABLE_START 0x80000000
/*
* Cache aliasing:
*
* If the cache size for one way is greater than the page size, we have to
* deal with cache aliasing. The cache index is wider than the page size:
*
* |    |cache| cache index
* | pfn  |off|   virtual address
* |xxxx:X|zzz|
* |    : |   |
* | \  / |   |
* |trans.|   |
* | /  \ |   |
* |yyyy:Y|zzz|   physical address
*
* When the page number is translated to the physical page address, the lowest
* bit(s) (X) that are part of the cache index are also translated (Y).
* If this translation changes bit(s) (X), the cache index is also affected,
* thus resulting in a different cache line than before.
* The kernel does not provide a mechanism to ensure that the page color
* (represented by this bit) remains the same when allocated or when pages
* are remapped. When user pages are mapped into kernel space, the color of
* the page might also change.
*
* We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
* to temporarily map a page so we can match the color.
*/
#if DCACHE_WAY_SIZE > PAGE_SIZE
# define DCACHE_ALIAS_ORDER (DCACHE_WAY_SHIFT - PAGE_SHIFT)
# define DCACHE_ALIAS_MASK (PAGE_MASK & (DCACHE_WAY_SIZE - 1))
# define DCACHE_ALIAS(a) (((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define DCACHE_ALIAS_EQ(a,b) ((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
#else
# define DCACHE_ALIAS_ORDER 0
#endif
#if ICACHE_WAY_SIZE > PAGE_SIZE
# define ICACHE_ALIAS_ORDER (ICACHE_WAY_SHIFT - PAGE_SHIFT)
# define ICACHE_ALIAS_MASK (PAGE_MASK & (ICACHE_WAY_SIZE - 1))
# define ICACHE_ALIAS(a) (((a) & ICACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define ICACHE_ALIAS_EQ(a,b) ((((a) ^ (b)) & ICACHE_ALIAS_MASK) == 0)
#else
# define ICACHE_ALIAS_ORDER 0
#endif
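/*
 * Worked example (editorial, with illustrative values): for a 16 KiB cache
 * way and 4 KiB pages, DCACHE_WAY_SHIFT - PAGE_SHIFT = 2, so
 * DCACHE_ALIAS_ORDER is 2 and DCACHE_ALIAS_MASK is 0x3000. Two virtual
 * mappings of the same physical page share cache lines only if bits 12..13
 * of their addresses agree, i.e. only if DCACHE_ALIAS_EQ(va1, va2) holds.
 */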
#ifdef __ASSEMBLY__
......@@ -58,34 +111,23 @@ typedef struct { unsigned long pgprot; } pgprot_t;
/*
* Pure 2^n version of get_order
* Use the 'nsau' instruction if supported by the processor, or the generic
* version otherwise.
*/
static inline int get_order(unsigned long size)
#if XCHAL_HAVE_NSA
static inline __attribute_const__ int get_order(unsigned long size)
{
int order;
#ifndef XCHAL_HAVE_NSU
unsigned long x1, x2, x4, x8, x16;
size = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
x1 = size & 0xAAAAAAAA;
x2 = size & 0xCCCCCCCC;
x4 = size & 0xF0F0F0F0;
x8 = size & 0xFF00FF00;
x16 = size & 0xFFFF0000;
order = x2 ? 2 : 0;
order += (x16 != 0) * 16;
order += (x8 != 0) * 8;
order += (x4 != 0) * 4;
order += (x1 != 0);
return order;
#else
size = (size - 1) >> PAGE_SHIFT;
asm ("nsau %0, %1" : "=r" (order) : "r" (size));
return 32 - order;
#endif
int lz;
asm ("nsau %0, %1" : "=r" (lz) : "r" ((size - 1) >> PAGE_SHIFT));
return 32 - lz;
}
#else
# include <asm-generic/page.h>
#endif
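/*
 * Worked example (editorial): get_order(0x5000) computes
 * (0x5000 - 1) >> 12 = 4; nsau counts 29 leading zeros in 4, and
 * 32 - 29 = 3, i.e. an order-3 (8-page, 32 KiB) block, the smallest
 * power of two that covers 0x5000 bytes.
 */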
struct page;
extern void clear_page(void *page);
......@@ -96,11 +138,11 @@ extern void copy_page(void *to, void *from);
* some extra work
*/
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
void clear_user_page(void *addr, unsigned long vaddr, struct page* page);
void copy_user_page(void *to,void* from,unsigned long vaddr,struct page* page);
#if DCACHE_WAY_SIZE > PAGE_SIZE
extern void clear_user_page(void*, unsigned long, struct page*);
extern void copy_user_page(void*, void*, unsigned long, struct page*);
#else
# define clear_user_page(page,vaddr,pg) clear_page(page)
# define clear_user_page(page, vaddr, pg) clear_page(page)
# define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
#endif
......
/*
* linux/include/asm-xtensa/pgalloc.h
* include/asm-xtensa/pgalloc.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Copyright (C) 2001-2005 Tensilica Inc.
* Copyright (C) 2001-2007 Tensilica Inc.
*/
#ifndef _XTENSA_PGALLOC_H
......@@ -13,103 +13,54 @@
#ifdef __KERNEL__
#include <linux/threads.h>
#include <linux/highmem.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
/* Cache aliasing:
*
* If the cache size for one way is greater than the page size, we have to
* deal with cache aliasing. The cache index is wider than the page size:
*
*   |cache |
*   |pgnum |page|   virtual address
*   |xxxxxX|zzzz|
*   |      |    |
*     \  / |    |
*    trans.|    |
*     /  \ |    |
*   |yyyyyY|zzzz|   physical address
*
* When the page number is translated to the physical page address, the lowest
* bit(s) (X) that are also part of the cache index are also translated (Y).
* If this translation changes this bit (X), the cache index is also afected,
* thus resulting in a different cache line than before.
* The kernel does not provide a mechanism to ensure that the page color
* (represented by this bit) remains the same when allocated or when pages
* are remapped. When user pages are mapped into kernel space, the color of
* the page might also change.
*
* We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
* to temporarily map a patch so we can match the color.
*/
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
# define PAGE_COLOR_MASK (PAGE_MASK & (DCACHE_WAY_SIZE-1))
# define PAGE_COLOR(a) \
(((unsigned long)(a)&PAGE_COLOR_MASK) >> PAGE_SHIFT)
# define PAGE_COLOR_EQ(a,b) \
((((unsigned long)(a) ^ (unsigned long)(b)) & PAGE_COLOR_MASK) == 0)
# define PAGE_COLOR_MAP0(v) \
(VMALLOC_END + ((unsigned long)(v) & PAGE_COLOR_MASK))
# define PAGE_COLOR_MAP1(v) \
(VMALLOC_END + ((unsigned long)(v) & PAGE_COLOR_MASK) + DCACHE_WAY_SIZE)
#endif
/*
* Allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pgd, so has no extra memory associated with it.
*/
#define pgd_free(pgd) free_page((unsigned long)(pgd))
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
#define pmd_populate_kernel(mm, pmdp, ptep) \
(pmd_val(*(pmdp)) = ((unsigned long)ptep))
#define pmd_populate(mm, pmdp, page) \
(pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *pte)
static inline pgd_t*
pgd_alloc(struct mm_struct *mm)
{
pmd_val(*(pmdp)) = (unsigned long)(pte);
__asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp));
return (pgd_t*) __get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD_ORDER);
}
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *page)
static inline void pgd_free(pgd_t *pgd)
{
pmd_val(*(pmdp)) = (unsigned long)page_to_virt(page);
__asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp));
free_page((unsigned long)pgd);
}
/* Use a slab cache for the pte pages (see also sparc64 implementation) */
extern struct kmem_cache *pgtable_cache;
#else
# define pmd_populate_kernel(mm, pmdp, pte) \
(pmd_val(*(pmdp)) = (unsigned long)(pte))
# define pmd_populate(mm, pmdp, page) \
(pmd_val(*(pmdp)) = (unsigned long)page_to_virt(page))
#endif
static inline pgd_t*
pgd_alloc(struct mm_struct *mm)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
pgd_t *pgd;
pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGD_ORDER);
if (likely(pgd != NULL))
__flush_dcache_page((unsigned long)pgd);
return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT);
}
return pgd;
static inline struct page *pte_alloc_one(struct mm_struct *mm,
unsigned long addr)
{
return virt_to_page(pte_alloc_one_kernel(mm, addr));
}
extern pte_t* pte_alloc_one_kernel(struct mm_struct* mm, unsigned long addr);
extern struct page* pte_alloc_one(struct mm_struct* mm, unsigned long addr);
static inline void pte_free_kernel(pte_t *pte)
{
kmem_cache_free(pgtable_cache, pte);
}
#define pte_free_kernel(pte) free_page((unsigned long)pte)
#define pte_free(pte) __free_page(pte)
static inline void pte_free(struct page *page)
{
kmem_cache_free(pgtable_cache, page_address(page));
}
#endif /* __KERNEL__ */
#endif /* _XTENSA_PGALLOC_H */
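For illustration, a hypothetical caller of the interface above; on the writeback-aliasing configuration the PTE page transparently comes from pgtable_cache, and the caller does not need to care:

/* Hypothetical caller, not part of this commit. */
static int install_pte_page(struct mm_struct *mm, pmd_t *pmdp,
			    unsigned long addr)
{
	pte_t *pte = pte_alloc_one_kernel(mm, addr);

	if (!pte)
		return -ENOMEM;
	pmd_populate_kernel(mm, pmdp, pte);
	return 0;
}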
......@@ -33,7 +33,7 @@
* the 1 GB requirement applies to the stack as well.
*/
#define TASK_SIZE 0x40000000
#define TASK_SIZE __XTENSA_UL_CONST(0x40000000)
/*
* General exception cause assigned to debug exceptions. Debug exceptions go
......
/*
* include/asm-xtensa/syscall.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2007 Tensilica Inc.
*/
struct pt_regs;
struct sigaction;
asmlinkage long xtensa_execve(char*, char**, char**, struct pt_regs*);
......@@ -17,4 +27,16 @@ asmlinkage long sys_rt_sigaction(int,
const struct sigaction __user *,
struct sigaction __user *,
size_t);
asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg);
asmlinkage long xtensa_shmat(int, char __user *, int);
asmlinkage long xtensa_fadvise64_64(int, int,
unsigned long long, unsigned long long);
/* Should probably move to linux/syscalls.h */
struct pollfd;
asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
fd_set __user *exp, struct timespec __user *tsp, void __user *sig);
asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
struct timespec __user *tsp, const sigset_t __user *sigmask,
size_t sigsetsize);
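The matching definition of xtensa_fadvise64_64() is presumably along the following lines (a sketch consistent with the declaration above): Xtensa passes 64-bit syscall arguments in aligned register pairs, so advice is moved ahead of the two 64-bit values and the wrapper reorders them for the generic implementation.

asmlinkage long xtensa_fadvise64_64(int fd, int advice,
				    unsigned long long offset,
				    unsigned long long len)
{
	return sys_fadvise64_64(fd, offset, len, advice);
}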
......@@ -157,6 +157,7 @@ struct ktermios {
#define HUPCL 0002000
#define CLOCAL 0004000
#define CBAUDEX 0010000
#define BOTHER 0010000
#define B57600 0010001
#define B115200 0010002
#define B230400 0010003
......@@ -172,10 +173,12 @@ struct ktermios {
#define B3000000 0010015
#define B3500000 0010016
#define B4000000 0010017
#define CIBAUD 002003600000 /* input baud rate (not used) */
#define CIBAUD 002003600000 /* input baud rate */
#define CMSPAR 010000000000 /* mark or space (stick) parity */
#define CRTSCTS 020000000000 /* flow control */
#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
/* c_lflag bits */
#define ISIG 0000001
......
......@@ -95,8 +95,10 @@ struct termio {
copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
})
#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
#endif /* __KERNEL__ */
......
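Together with BOTHER above, the termios2 conversions are what make arbitrary baud rates reachable from user space. A hedged user-space sketch, assuming the kernel headers export struct termios2 and the TCGETS2/TCSETS2 ioctls:

#include <asm/ioctls.h>
#include <asm/termbits.h>
#include <sys/ioctl.h>

/* Request a non-standard rate, e.g. 74880 baud. */
static int set_custom_baud(int fd, int baud)
{
	struct termios2 tio;

	if (ioctl(fd, TCGETS2, &tio) < 0)
		return -1;
	tio.c_cflag &= ~CBAUD;
	tio.c_cflag |= BOTHER;		/* rate taken from c_?speed fields */
	tio.c_ospeed = baud;
	tio.c_ispeed = baud;
	return ioctl(fd, TCSETS2, &tio);
}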
......@@ -41,10 +41,10 @@
extern unsigned long ccount_per_jiffy;
extern unsigned long ccount_nsec;
#define CCOUNT_PER_JIFFY ccount_per_jiffy
#define CCOUNT_NSEC ccount_nsec
#define NSEC_PER_CCOUNT ccount_nsec
#else
#define CCOUNT_PER_JIFFY (CONFIG_XTENSA_CPU_CLOCK*(1000000UL/HZ))
#define CCOUNT_NSEC (1000000000UL / CONFIG_XTENSA_CPU_CLOCK)
#define NSEC_PER_CCOUNT (1000UL / CONFIG_XTENSA_CPU_CLOCK)
#endif
......
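The rename makes the unit explicit: with CONFIG_XTENSA_CPU_CLOCK apparently given in MHz, NSEC_PER_CCOUNT is the cycle time in nanoseconds (a 50 MHz core gives 20 ns). A small conversion helper, with a name of our choosing:

/* Sketch: convert a cycle-counter delta to nanoseconds. */
static inline unsigned long ccount_delta_to_ns(unsigned long delta)
{
	return delta * NSEC_PER_CCOUNT;
}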
......@@ -11,14 +11,36 @@
#ifndef _XTENSA_TLB_H
#define _XTENSA_TLB_H
#define tlb_start_vma(tlb,vma) do { } while (0)
#define tlb_end_vma(tlb,vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb,pte,addr) do { } while (0)
#include <asm/cache.h>
#include <asm/page.h>
#if (DCACHE_WAY_SIZE <= PAGE_SIZE)
/* Note, read http://lkml.org/lkml/2004/1/15/6 */
# define tlb_start_vma(tlb,vma) do { } while (0)
# define tlb_end_vma(tlb,vma) do { } while (0)
#else
# define tlb_start_vma(tlb, vma) \
do { \
if (!tlb->fullmm) \
flush_cache_range(vma, vma->vm_start, vma->vm_end); \
} while(0)
# define tlb_end_vma(tlb, vma) \
do { \
if (!tlb->fullmm) \
flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
} while(0)
#endif
#define __tlb_remove_tlb_entry(tlb,pte,addr) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#include <asm-generic/tlb.h>
#include <asm/page.h>
#define __pte_free_tlb(tlb,pte) pte_free(pte)
......
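For context, the generic unmap path of this era drives the hooks roughly as sketched below, which is why the aliasing configuration can hang its cache and TLB flushes on them; this is a simplified illustration, not verbatim mm/memory.c:

/* Simplified sketch of the generic flow around these hooks. */
static void unmap_vma_sketch(struct mmu_gather *tlb,
			     struct vm_area_struct *vma)
{
	tlb_start_vma(tlb, vma);  /* aliasing config: flush_cache_range() */
	/* ... PTEs are cleared and pages gathered for freeing ... */
	tlb_end_vma(tlb, vma);    /* aliasing config: flush_tlb_range() */
}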
......@@ -11,6 +11,15 @@
#ifndef _XTENSA_TYPES_H
#define _XTENSA_TYPES_H
#ifdef __ASSEMBLY__
# define __XTENSA_UL(x) (x)
# define __XTENSA_UL_CONST(x) x
#else
# define __XTENSA_UL(x) ((unsigned long)(x))
# define __XTENSA_UL_CONST(x) x##UL
#endif
#ifndef __ASSEMBLY__
typedef unsigned short umode_t;
......
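Both flavors exist because the header is included from C and from assembly sources, and a UL suffix would break the assembler. Illustrative use, mirroring the TASK_SIZE change above (EXAMPLE_BASE is hypothetical):

#define EXAMPLE_BASE __XTENSA_UL_CONST(0x40000000)	/* hypothetical */
/*
 * C:        unsigned long base = EXAMPLE_BASE;	-> 0x40000000UL
 * assembly: movi a2, EXAMPLE_BASE		-> plain 0x40000000
 */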
......@@ -151,7 +151,7 @@ __SYSCALL( 61, sys_fcntl64, 3)
#define __NR_available62 62
__SYSCALL( 62, sys_ni_syscall, 0)
#define __NR_fadvise64_64 63
__SYSCALL( 63, sys_fadvise64_64, 6)
__SYSCALL( 63, xtensa_fadvise64_64, 6)
#define __NR_utime 64 /* glibc 2.3.3 ?? */
__SYSCALL( 64, sys_utime, 2)
#define __NR_utimes 65
......@@ -339,8 +339,8 @@ __SYSCALL(148, sys_setpgid, 2)
__SYSCALL(149, sys_getpgid, 1)
#define __NR_getppid 150
__SYSCALL(150, sys_getppid, 0)
#define __NR_available151 151
__SYSCALL(151, sys_ni_syscall, 0)
#define __NR_getpgrp 151
__SYSCALL(151, sys_getpgrp, 0)
#define __NR_reserved152 152 /* set_thread_area */
__SYSCALL(152, sys_ni_syscall, 0)
......@@ -577,7 +577,112 @@ __SYSCALL(258, sys_keyctl, 5)
#define __NR_available259 259
__SYSCALL(259, sys_ni_syscall, 0)
#define __NR_syscall_count 261
#define __NR_readahead 260
__SYSCALL(260, sys_readahead, 5)
#define __NR_remap_file_pages 261
__SYSCALL(261, sys_remap_file_pages, 5)
#define __NR_migrate_pages 262
__SYSCALL(262, sys_migrate_pages, 0)
#define __NR_mbind 263
__SYSCALL(263, sys_mbind, 6)
#define __NR_get_mempolicy 264
__SYSCALL(264, sys_get_mempolicy, 5)
#define __NR_set_mempolicy 265
__SYSCALL(265, sys_set_mempolicy, 3)
#define __NR_unshare 266
__SYSCALL(266, sys_unshare, 1)
#define __NR_move_pages 267
__SYSCALL(267, sys_move_pages, 0)
#define __NR_splice 268
__SYSCALL(268, sys_splice, 0)
#define __NR_tee 269
__SYSCALL(269, sys_tee, 0)
#define __NR_vmsplice 270
__SYSCALL(270, sys_vmsplice, 0)
#define __NR_available271 271
__SYSCALL(271, sys_ni_syscall, 0)
#define __NR_pselect6 272
__SYSCALL(272, sys_pselect6, 0)
#define __NR_ppoll 273
__SYSCALL(273, sys_ppoll, 0)
#define __NR_epoll_pwait 274
__SYSCALL(274, sys_epoll_pwait, 0)
#define __NR_available275 275
__SYSCALL(275, sys_ni_syscall, 0)
#define __NR_inotify_init 276
__SYSCALL(276, sys_inotify_init, 0)
#define __NR_inotify_add_watch 277
__SYSCALL(277, sys_inotify_add_watch, 3)
#define __NR_inotify_rm_watch 278
__SYSCALL(278, sys_inotify_rm_watch, 2)
#define __NR_available279 279
__SYSCALL(279, sys_ni_syscall, 0)
#define __NR_getcpu 280
__SYSCALL(280, sys_getcpu, 0)
#define __NR_kexec_load 281
__SYSCALL(281, sys_ni_syscall, 0)
#define __NR_ioprio_set 282
__SYSCALL(282, sys_ioprio_set, 2)
#define __NR_ioprio_get 283
__SYSCALL(283, sys_ioprio_get, 3)
#define __NR_set_robust_list 284
__SYSCALL(284, sys_set_robust_list, 3)
#define __NR_get_robust_list 285
__SYSCALL(285, sys_get_robust_list, 3)
#define __NR_reserved286 286 /* sync_file_rangeX */
__SYSCALL(286, sys_ni_syscall, 3)
#define __NR_available287 287
__SYSCALL(287, sys_faccessat, 0)
/* Relative File Operations */
#define __NR_openat 288
__SYSCALL(288, sys_openat, 4)
#define __NR_mkdirat 289
__SYSCALL(289, sys_mkdirat, 3)
#define __NR_mknodat 290
__SYSCALL(290, sys_mknodat, 4)
#define __NR_unlinkat 291
__SYSCALL(291, sys_unlinkat, 3)
#define __NR_renameat 292
__SYSCALL(292, sys_renameat, 4)
#define __NR_linkat 293
__SYSCALL(293, sys_linkat, 5)
#define __NR_symlinkat 294
__SYSCALL(294, sys_symlinkat, 3)
#define __NR_readlinkat 295
__SYSCALL(295, sys_readlinkat, 4)
#define __NR_utimensat 296
__SYSCALL(296, sys_utimensat, 0)
#define __NR_fchownat 297
__SYSCALL(297, sys_fchownat, 5)
#define __NR_futimesat 298
__SYSCALL(298, sys_futimesat, 4)
#define __NR_fstatat64 299
__SYSCALL(299, sys_fstatat64, 0)
#define __NR_fchmodat 300
__SYSCALL(300, sys_fchmodat, 4)
#define __NR_faccessat 301
__SYSCALL(301, sys_faccessat, 4)
#define __NR_available302 302
__SYSCALL(302, sys_ni_syscall, 0)
#define __NR_available303 303
__SYSCALL(303, sys_ni_syscall, 0)
#define __NR_signalfd 304
__SYSCALL(304, sys_signalfd, 3)
#define __NR_timerfd 305
__SYSCALL(305, sys_timerfd, 4)
#define __NR_eventfd 306
__SYSCALL(306, sys_eventfd, 1)
#define __NR_syscall_count 307
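These __SYSCALL() entries both number the calls and, via macro redefinition, generate the dispatch table. A sketch of the usual pattern, assumed to match arch/xtensa/kernel/syscall.c of this era:

/* Sketch: build sys_call_table[] by re-including this header. */
typedef void (*syscall_t)(void);

syscall_t sys_call_table[__NR_syscall_count] = {
	[0 ... __NR_syscall_count - 1] = (syscall_t)&sys_ni_syscall,
#undef __SYSCALL
#define __SYSCALL(nr, entry, nargs)	[nr] = (syscall_t)&entry,
#include <asm/unistd.h>
};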
/*
* sysxtensa syscall handler
......@@ -612,8 +717,19 @@ __SYSCALL(259, sys_ni_syscall, 0)
#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_SYS_GETPGRP
#endif /* __KERNEL__ */
/*
* Ignore legacy system calls in the checksyscalls.sh script
*/
#endif /* _XTENSA_UNISTD_H */
#define __IGNORE_fork /* use clone */
#define __IGNORE_time
#define __IGNORE_alarm /* use setitimer */
#define __IGNORE_pause
#define __IGNORE_mmap /* use mmap2 */
#define __IGNORE_vfork /* use clone */
#define __IGNORE_fadvise64 /* use fadvise64_64 */
#endif /* __KERNEL__ */
#endif /* _XTENSA_UNISTD_H */