Commit dbdf20db authored by Bernd Schmidt, committed by Bryan Wu

Blackfin arch: Faster C implementation of no-MPU CPLB handler

This is a mixture of Michael McTernan's patch and the existing cplb-mpu code.

We ditch the old cplb-nompu implementation, which is a good example of
why a good algorithm in an HLL is preferable to a bad algorithm written
in assembly.  Rather than try to construct a table of all possible CPLBs and
search it, we just create a (smaller) table of memory regions and
their attributes.  Some of the data structures are now unified for both
the mpu and nompu cases.  A lot of needless complexity in cplbinit.c is
removed.
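
As a rough sketch (illustrative names only; the real tables and miss
handlers are in the diff below), a miss handler walks a small table of
region end addresses and installs a CPLB with that region's attributes:

  struct cplb_boundary {
          unsigned long eaddr;    /* end of this region */
          unsigned long data;     /* CPLB data value, 0 for a hole */
  };

  static unsigned long lookup_region(const struct cplb_boundary *bounds,
                                     int nr_bounds, unsigned long addr)
  {
          int idx;
          for (idx = 0; idx < nr_bounds; idx++)
                  if (addr < bounds[idx].eaddr)
                          return bounds[idx].data;  /* 0 => no valid mapping */
          return 0;                                 /* past the last region */
  }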

Further optimizations:
  * compile cplbmgr.c with a lot of -ffixed-reg options, and omit saving
    these registers on the stack when entering a CPLB exception.
  * lose cli/nop/nop/sti sequences for some workarounds - these don't make
    sense in an exception context

Additional code unification should be possible after this.

[Mike Frysinger <vapier.adi@gmail.com>:
 - convert CPP if statements to C if statements
 - remove redundant statements
 - use a do...while loop rather than a for loop to get slightly better
   optimization and to avoid gcc "may be used uninitialized" warnings ...
   we know that the [id]cplb_nr_bounds variables will never be 0, so this
   is OK (see the sketch after this list)
 - the no-mpu code was the last user of MAX_MEM_SIZE and with that rewritten,
   we can punt it
 - add some BUG_ON() checks to make sure we don't overflow the small
   cplb_bounds array
 - add i/d cplb entries for the bootrom because there are functions/data in
   there that we want to access
 - we do not need a NULL trailing entry as any time we access the bounds
   arrays, we use the nr_bounds variable
]
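
A generic sketch of that do...while change (hypothetical names; the
bound is known to be at least 1):

  unsigned long eaddr;
  int idx = 0;

  /* With a for loop, gcc cannot prove the body executes, so reading
   * eaddr afterwards warns "may be used uninitialized".  Since
   * nr_bounds >= 1, a do...while is guaranteed one iteration: */
  do {
          eaddr = bounds[idx].eaddr;
  } while (++idx < nr_bounds);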
Signed-off-by: Michael McTernan <mmcternan@airvana.com>
Signed-off-by: Mike Frysinger <vapier.adi@gmail.com>
Signed-off-by: Bernd Schmidt <bernds_cb1@t-online.de>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
parent 6651ece9
@@ -524,14 +524,6 @@ config MEM_SDGCTL
default 0x0
endmenu
config MAX_MEM_SIZE
int "Max SDRAM Memory Size in MBytes"
depends on !MPU
default 512
help
This is the max memory size that the kernel will create CPLB
tables for. Your system will not be able to handle any more.
#
# Max & Min Speeds for various Chips
#
......
@@ -357,3 +357,42 @@
SYSCFG = [sp++];
csync;
.endm
.macro save_context_cplb
[--sp] = (R7:0, P5:0);
[--sp] = fp;
[--sp] = a0.x;
[--sp] = a0.w;
[--sp] = a1.x;
[--sp] = a1.w;
[--sp] = LC0;
[--sp] = LC1;
[--sp] = LT0;
[--sp] = LT1;
[--sp] = LB0;
[--sp] = LB1;
[--sp] = RETS;
.endm
.macro restore_context_cplb
RETS = [sp++];
LB1 = [sp++];
LB0 = [sp++];
LT1 = [sp++];
LT0 = [sp++];
LC1 = [sp++];
LC0 = [sp++];
a1.w = [sp++];
a1.x = [sp++];
a0.w = [sp++];
a0.x = [sp++];
fp = [sp++];
(R7:0, P5:0) = [SP++];
.endm
/*
* File: include/asm-blackfin/cplbinit.h
* Based on:
* Author:
*
* Created:
* Description:
*
* Modified:
* Copyright 2004-2006 Analog Devices Inc.
*
* Bugs: Enter bugs at http://blackfin.uclinux.org/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __ASM_BFIN_CPLB_MPU_H
#define __ASM_BFIN_CPLB_MPU_H
#include <linux/threads.h>
struct cplb_entry {
unsigned long data, addr;
};
struct mem_region {
unsigned long start, end;
unsigned long dcplb_data;
unsigned long icplb_data;
};
extern struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS];
extern struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS];
extern int first_switched_icplb;
extern int first_mask_dcplb;
extern int first_switched_dcplb;
extern int nr_dcplb_miss[], nr_icplb_miss[], nr_icplb_supv_miss[];
extern int nr_dcplb_prot[], nr_cplb_flush[];
extern int page_mask_order;
extern int page_mask_nelts;
extern unsigned long *current_rwx_mask[NR_CPUS];
extern void flush_switched_cplbs(unsigned int);
extern void set_mask_dcplbs(unsigned long *, unsigned int);
extern void __noreturn panic_cplb_error(int seqstat, struct pt_regs *);
#endif /* __ASM_BFIN_CPLB_MPU_H */
@@ -116,4 +116,8 @@
#define CPLB_INOCACHE CPLB_USER_RD | CPLB_VALID
#define CPLB_IDOCACHE CPLB_INOCACHE | CPLB_L1_CHBL
#define FAULT_RW (1 << 16)
#define FAULT_USERSUPV (1 << 17)
#define FAULT_CPLBBITS 0x0000ffff
#endif /* _CPLB_H */
@@ -32,96 +32,56 @@
#include <asm/blackfin.h>
#include <asm/cplb.h>
#include <linux/threads.h>
#ifdef CONFIG_MPU
#include <asm/cplb-mpu.h>
extern void bfin_icache_init(struct cplb_entry *icplb_tbl);
extern void bfin_dcache_init(struct cplb_entry *dcplb_tbl);
#ifdef CONFIG_CPLB_SWITCH_TAB_L1
# define PDT_ATTR __attribute__((l1_data))
#else
# define PDT_ATTR
#endif
#define INITIAL_T 0x1
#define SWITCH_T 0x2
#define I_CPLB 0x4
#define D_CPLB 0x8
struct cplb_entry {
unsigned long data, addr;
};
#define ASYNC_MEMORY_CPLB_COVERAGE ((ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \
ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE) / SIZE_4M)
struct cplb_boundary {
unsigned long eaddr; /* End of this region. */
unsigned long data; /* CPLB data value. */
};
#define CPLB_MEM CONFIG_MAX_MEM_SIZE
extern struct cplb_boundary dcplb_bounds[];
extern struct cplb_boundary icplb_bounds[];
extern int dcplb_nr_bounds, icplb_nr_bounds;
/*
* Number of required data CPLB switchtable entries
* MEMSIZE / 4 (we mostly install 4M page size CPLBs)
* approx 16 for smaller 1MB page size CPLBs for alignment purposes
* 1 for L1 Data Memory
* possibly 1 for L2 Data Memory
* 1 for CONFIG_DEBUG_HUNT_FOR_ZERO
* 1 for ASYNC Memory
*/
#define MAX_SWITCH_D_CPLBS (((CPLB_MEM / 4) + 16 + 1 + 1 + 1 \
+ ASYNC_MEMORY_CPLB_COVERAGE) * 2)
extern struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS];
extern struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS];
extern int first_switched_icplb;
extern int first_switched_dcplb;
/*
* Number of required instruction CPLB switchtable entries
* MEMSIZE / 4 (we mostly install 4M page size CPLBs)
* approx 12 for smaller 1MB page size CPLBs for alignment purposes
* 1 for L1 Instruction Memory
* possibly 1 for L2 Instruction Memory
* 1 for CONFIG_DEBUG_HUNT_FOR_ZERO
*/
#define MAX_SWITCH_I_CPLBS (((CPLB_MEM / 4) + 12 + 1 + 1 + 1) * 2)
/* Number of CPLB table entries, used for cplb-nompu. */
#define CPLB_TBL_ENTRIES (16 * 4)
enum {
ZERO_P, L1I_MEM, L1D_MEM, L2_MEM, SDRAM_KERN, SDRAM_RAM_MTD, SDRAM_DMAZ,
RES_MEM, ASYNC_MEM, OCB_ROM
};
extern int nr_dcplb_miss[], nr_icplb_miss[], nr_icplb_supv_miss[];
extern int nr_dcplb_prot[], nr_cplb_flush[];
struct cplb_desc {
u32 start; /* start address */
u32 end; /* end address */
u32 psize; /* preferred size if any, otherwise 1MB or 4MB */
u16 attr;/* attributes */
u16 i_conf;/* I-CPLB DATA */
u16 d_conf;/* D-CPLB DATA */
u16 valid;/* valid */
const s8 name[30];/* name */
};
#ifdef CONFIG_MPU
struct cplb_tab {
u_long *tab;
u16 pos;
u16 size;
};
extern int first_mask_dcplb;
extern u_long icplb_tables[NR_CPUS][CPLB_TBL_ENTRIES+1];
extern u_long dcplb_tables[NR_CPUS][CPLB_TBL_ENTRIES+1];
extern int page_mask_order;
extern int page_mask_nelts;
/* Up to this point we have been discussing the static memory management
* model. However, operating environments commonly define more CPLB
* descriptors to cover the entire addressable memory than will fit into
* the 16 CPLB MMRs available on chip. When this happens, the table below
* is used to hold all the potentially required CPLB descriptors.
*
* This is how the Page Descriptor Table is implemented in uClinux/Blackfin.
*/
extern unsigned long *current_rwx_mask[NR_CPUS];
extern u_long ipdt_tables[NR_CPUS][MAX_SWITCH_I_CPLBS+1];
extern u_long dpdt_tables[NR_CPUS][MAX_SWITCH_D_CPLBS+1];
#ifdef CONFIG_CPLB_INFO
extern u_long ipdt_swapcount_tables[NR_CPUS][MAX_SWITCH_I_CPLBS];
extern u_long dpdt_swapcount_tables[NR_CPUS][MAX_SWITCH_D_CPLBS];
#endif
extern void bfin_icache_init(u_long icplbs[]);
extern void bfin_dcache_init(u_long dcplbs[]);
extern void flush_switched_cplbs(unsigned int);
extern void set_mask_dcplbs(unsigned long *, unsigned int);
extern void __noreturn panic_cplb_error(int seqstat, struct pt_regs *);
#endif /* CONFIG_MPU */
extern void bfin_icache_init(struct cplb_entry *icplb_tbl);
extern void bfin_dcache_init(struct cplb_entry *dcplb_tbl);
#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
extern void generate_cplb_tables_all(void);
extern void generate_cplb_tables_cpu(unsigned int cpu);
#endif
#endif
@@ -53,9 +53,11 @@
/* This one pushes RETI without using CLI. Interrupts are enabled. */
#define SAVE_CONTEXT_SYSCALL save_context_syscall
#define SAVE_CONTEXT save_context_with_interrupts
#define SAVE_CONTEXT_CPLB save_context_cplb
#define RESTORE_ALL_SYS restore_context_no_interrupts
#define RESTORE_CONTEXT restore_context_with_interrupts
#define RESTORE_CONTEXT_CPLB restore_context_cplb
#endif /* __ASSEMBLY__ */
#endif /* __BFIN_ENTRY_H */
@@ -3,3 +3,8 @@
#
obj-y := cplbinit.o cacheinit.o cplbmgr.o
CFLAGS_cplbmgr.o := -ffixed-I0 -ffixed-I1 -ffixed-I2 -ffixed-I3 \
-ffixed-L0 -ffixed-L1 -ffixed-L2 -ffixed-L3 \
-ffixed-M0 -ffixed-M1 -ffixed-M2 -ffixed-M3 \
-ffixed-B0 -ffixed-B1 -ffixed-B2 -ffixed-B3
@@ -107,3 +107,7 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
while (i_i < MAX_CPLBS)
icplb_tbl[cpu][i_i++].data = 0;
}
void generate_cplb_tables_all(void)
{
}
@@ -25,8 +25,13 @@
#include <asm/cplbinit.h>
#include <asm/mmu_context.h>
#define FAULT_RW (1 << 16)
#define FAULT_USERSUPV (1 << 17)
/*
* WARNING
*
* This file is compiled with certain -ffixed-reg options. We have to
* make sure not to call any functions here that could clobber these
* registers.
*/
int page_mask_nelts;
int page_mask_order;
......
@@ -2,4 +2,9 @@
# arch/blackfin/kernel/cplb-nompu/Makefile
#
obj-y := cplbinit.o cacheinit.o cplbhdlr.o cplbmgr.o
obj-y := cplbinit.o cacheinit.o cplbmgr.o
CFLAGS_cplbmgr.o := -ffixed-I0 -ffixed-I1 -ffixed-I2 -ffixed-I3 \
-ffixed-L0 -ffixed-L1 -ffixed-L2 -ffixed-L3 \
-ffixed-M0 -ffixed-M1 -ffixed-M2 -ffixed-M3 \
-ffixed-B0 -ffixed-B1 -ffixed-B2 -ffixed-B3
@@ -25,19 +25,15 @@
#include <asm/cplbinit.h>
#if defined(CONFIG_BFIN_ICACHE)
void __cpuinit bfin_icache_init(u_long icplb[])
void __cpuinit bfin_icache_init(struct cplb_entry *icplb_tbl)
{
unsigned long *table = icplb;
unsigned long ctrl;
int i;
SSYNC();
for (i = 0; i < MAX_CPLBS; i++) {
unsigned long addr = *table++;
unsigned long data = *table++;
if (addr == (unsigned long)-1)
break;
bfin_write32(ICPLB_ADDR0 + i * 4, addr);
bfin_write32(ICPLB_DATA0 + i * 4, data);
bfin_write32(ICPLB_ADDR0 + i * 4, icplb_tbl[i].addr);
bfin_write32(ICPLB_DATA0 + i * 4, icplb_tbl[i].data);
}
ctrl = bfin_read_IMEM_CONTROL();
ctrl |= IMC | ENICPLB;
@@ -47,24 +43,20 @@ void __cpuinit bfin_icache_init(u_long icplb[])
#endif
#if defined(CONFIG_BFIN_DCACHE)
void __cpuinit bfin_dcache_init(u_long dcplb[])
void __cpuinit bfin_dcache_init(struct cplb_entry *dcplb_tbl)
{
unsigned long *table = dcplb;
unsigned long ctrl;
int i;
SSYNC();
for (i = 0; i < MAX_CPLBS; i++) {
unsigned long addr = *table++;
unsigned long data = *table++;
if (addr == (unsigned long)-1)
break;
bfin_write32(DCPLB_ADDR0 + i * 4, addr);
bfin_write32(DCPLB_DATA0 + i * 4, data);
bfin_write32(DCPLB_ADDR0 + i * 4, dcplb_tbl[i].addr);
bfin_write32(DCPLB_DATA0 + i * 4, dcplb_tbl[i].data);
}
ctrl = bfin_read_DMEM_CONTROL();
ctrl |= DMEM_CNTR;
bfin_write_DMEM_CONTROL(ctrl);
SSYNC();
}
#endif
/*
* File: arch/blackfin/mach-common/cplbhdlr.S
* Based on:
* Author: LG Soft India
*
* Created: ?
* Description: CPLB exception handler
*
* Modified:
* Copyright 2004-2006 Analog Devices Inc.
*
* Bugs: Enter bugs at http://blackfin.uclinux.org/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/linkage.h>
#include <asm/cplb.h>
#include <asm/entry.h>
#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
.section .l1.text
#else
.text
#endif
.type _cplb_mgr, STT_FUNC;
.type _panic_cplb_error, STT_FUNC;
.align 2
ENTRY(__cplb_hdr)
R2 = SEQSTAT;
/* Mask the contents of SEQSTAT and leave only EXCAUSE in R2 */
R2 <<= 26;
R2 >>= 26;
R1 = 0x23; /* Data access CPLB protection violation */
CC = R2 == R1;
IF !CC JUMP .Lnot_data_write;
R0 = 2; /* is a write to data space*/
JUMP .Lis_icplb_miss;
.Lnot_data_write:
R1 = 0x2C; /* CPLB miss on an instruction fetch */
CC = R2 == R1;
R0 = 0; /* is_data_miss == False*/
IF CC JUMP .Lis_icplb_miss;
R1 = 0x26;
CC = R2 == R1;
IF !CC JUMP .Lunknown;
R0 = 1; /* is_data_miss == True*/
.Lis_icplb_miss:
#if defined(CONFIG_BFIN_ICACHE) || defined(CONFIG_BFIN_DCACHE)
# if defined(CONFIG_BFIN_ICACHE) && !defined(CONFIG_BFIN_DCACHE)
R1 = CPLB_ENABLE_ICACHE;
# endif
# if !defined(CONFIG_BFIN_ICACHE) && defined(CONFIG_BFIN_DCACHE)
R1 = CPLB_ENABLE_DCACHE;
# endif
# if defined(CONFIG_BFIN_ICACHE) && defined(CONFIG_BFIN_DCACHE)
R1 = CPLB_ENABLE_DCACHE | CPLB_ENABLE_ICACHE;
# endif
#else
R1 = 0;
#endif
[--SP] = RETS;
CALL _cplb_mgr;
RETS = [SP++];
CC = R0 == 0;
IF !CC JUMP .Lnot_replaced;
RTS;
/*
* Diagnostic exception handlers
*/
.Lunknown:
R0 = CPLB_UNKNOWN_ERR;
JUMP .Lcplb_error;
.Lnot_replaced:
CC = R0 == CPLB_NO_UNLOCKED;
IF !CC JUMP .Lnext_check;
R0 = CPLB_NO_UNLOCKED;
JUMP .Lcplb_error;
.Lnext_check:
CC = R0 == CPLB_NO_ADDR_MATCH;
IF !CC JUMP .Lnext_check2;
R0 = CPLB_NO_ADDR_MATCH;
JUMP .Lcplb_error;
.Lnext_check2:
CC = R0 == CPLB_PROT_VIOL;
IF !CC JUMP .Lstrange_return_from_cplb_mgr;
R0 = CPLB_PROT_VIOL;
JUMP .Lcplb_error;
.Lstrange_return_from_cplb_mgr:
IDLE;
CSYNC;
JUMP .Lstrange_return_from_cplb_mgr;
.Lcplb_error:
R1 = sp;
SP += -12;
call _panic_cplb_error;
SP += 12;
JUMP.L _handle_bad_cplb;
ENDPROC(__cplb_hdr)
@@ -29,417 +29,143 @@
#include <asm/cplbinit.h>
#include <asm/mem_map.h>
u_long icplb_tables[NR_CPUS][CPLB_TBL_ENTRIES+1];
u_long dcplb_tables[NR_CPUS][CPLB_TBL_ENTRIES+1];
struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;
struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;
#ifdef CONFIG_CPLB_SWITCH_TAB_L1
#define PDT_ATTR __attribute__((l1_data))
#else
#define PDT_ATTR
#endif
u_long ipdt_tables[NR_CPUS][MAX_SWITCH_I_CPLBS+1] PDT_ATTR;
u_long dpdt_tables[NR_CPUS][MAX_SWITCH_D_CPLBS+1] PDT_ATTR;
#ifdef CONFIG_CPLB_INFO
u_long ipdt_swapcount_tables[NR_CPUS][MAX_SWITCH_I_CPLBS] PDT_ATTR;
u_long dpdt_swapcount_tables[NR_CPUS][MAX_SWITCH_D_CPLBS] PDT_ATTR;
#endif
int first_switched_icplb PDT_ATTR;
int first_switched_dcplb PDT_ATTR;
struct s_cplb {
struct cplb_tab init_i;
struct cplb_tab init_d;
struct cplb_tab switch_i;
struct cplb_tab switch_d;
};
struct cplb_boundary dcplb_bounds[9] PDT_ATTR;
struct cplb_boundary icplb_bounds[7] PDT_ATTR;
#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
static struct cplb_desc cplb_data[] = {
{
.start = 0,
.end = SIZE_1K,
.psize = SIZE_1K,
.attr = INITIAL_T | SWITCH_T | I_CPLB | D_CPLB,
.i_conf = SDRAM_OOPS,
.d_conf = SDRAM_OOPS,
#if defined(CONFIG_DEBUG_HUNT_FOR_ZERO)
.valid = 1,
#else
.valid = 0,
#endif
.name = "Zero Pointer Guard Page",
},
{
.start = 0, /* dynamic */
.end = 0, /* dynamic */
.psize = SIZE_4M,
.attr = INITIAL_T | SWITCH_T | I_CPLB,
.i_conf = L1_IMEMORY,
.d_conf = 0,
.valid = 1,
.name = "L1 I-Memory",
},
{
.start = 0, /* dynamic */
.end = 0, /* dynamic */
.psize = SIZE_4M,
.attr = INITIAL_T | SWITCH_T | D_CPLB,
.i_conf = 0,
.d_conf = L1_DMEMORY,
#if ((L1_DATA_A_LENGTH > 0) || (L1_DATA_B_LENGTH > 0))
.valid = 1,
#else
.valid = 0,
#endif
.name = "L1 D-Memory",
},
{
.start = L2_START,
.end = L2_START + L2_LENGTH,
.psize = SIZE_1M,
.attr = L2_ATTR,
.i_conf = L2_IMEMORY,
.d_conf = L2_DMEMORY,
.valid = (L2_LENGTH > 0),
.name = "L2 Memory",
},
{
.start = 0,
.end = 0, /* dynamic */
.psize = 0,
.attr = INITIAL_T | SWITCH_T | I_CPLB | D_CPLB,
.i_conf = SDRAM_IGENERIC,
.d_conf = SDRAM_DGENERIC,
.valid = 1,
.name = "Kernel Memory",
},
{
.start = 0, /* dynamic */
.end = 0, /* dynamic */
.psize = 0,
.attr = INITIAL_T | SWITCH_T | D_CPLB,
.i_conf = SDRAM_IGENERIC,
.d_conf = SDRAM_DNON_CHBL,
.valid = 1,
.name = "uClinux MTD Memory",
},
{
.start = 0, /* dynamic */
.end = 0, /* dynamic */
.psize = SIZE_1M,
.attr = INITIAL_T | SWITCH_T | D_CPLB,
.d_conf = SDRAM_DNON_CHBL,
.valid = 1,
.name = "Uncached DMA Zone",
},
{
.start = 0, /* dynamic */
.end = 0, /* dynamic */
.psize = 0,
.attr = SWITCH_T | D_CPLB,
.i_conf = 0, /* dynamic */
.d_conf = 0, /* dynamic */
.valid = 1,
.name = "Reserved Memory",
},
{
.start = ASYNC_BANK0_BASE,
.end = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE,
.psize = 0,
.attr = SWITCH_T | D_CPLB,
.d_conf = SDRAM_EBIU,
.valid = 1,
.name = "Asynchronous Memory Banks",
},
{
.start = BOOT_ROM_START,
.end = BOOT_ROM_START + BOOT_ROM_LENGTH,
.psize = SIZE_1M,
.attr = SWITCH_T | I_CPLB | D_CPLB,
.i_conf = SDRAM_IGENERIC,
.d_conf = SDRAM_DGENERIC,
.valid = 1,
.name = "On-Chip BootROM",
},
};
int icplb_nr_bounds PDT_ATTR;
int dcplb_nr_bounds PDT_ATTR;
static bool __init lock_kernel_check(u32 start, u32 end)
void __init generate_cplb_tables_cpu(unsigned int cpu)
{
if (start >= (u32)__init_begin || end <= (u32)_stext)
return false;
/* This cplb block overlapped with kernel area. */
return true;
}
int i_d, i_i;
unsigned long addr;
static void __init
fill_cplbtab(struct cplb_tab *table,
unsigned long start, unsigned long end,
unsigned long block_size, unsigned long cplb_data)
{
int i;
struct cplb_entry *d_tbl = dcplb_tbl[cpu];
struct cplb_entry *i_tbl = icplb_tbl[cpu];
switch (block_size) {
case SIZE_4M:
i = 3;
break;
case SIZE_1M:
i = 2;
break;
case SIZE_4K:
i = 1;
break;
case SIZE_1K:
default:
i = 0;
break;
}
printk(KERN_INFO "NOMPU: setting up cplb tables\n");
cplb_data = (cplb_data & ~(3 << 16)) | (i << 16);
i_d = i_i = 0;
while ((start < end) && (table->pos < table->size)) {
/* Set up the zero page. */
d_tbl[i_d].addr = 0;
d_tbl[i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
table->tab[table->pos++] = start;
/* Cover kernel memory with 4M pages. */
addr = 0;
if (lock_kernel_check(start, start + block_size))
table->tab[table->pos++] =
cplb_data | CPLB_LOCK | CPLB_DIRTY;
else
table->tab[table->pos++] = cplb_data;
for (; addr < memory_start; addr += 4 * 1024 * 1024) {
d_tbl[i_d].addr = addr;
d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
i_tbl[i_i].addr = addr;
i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
}
start += block_size;
/* Cover L1 memory. One 4M area for code and data each is enough. */
if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
d_tbl[i_d].addr = L1_DATA_A_START;
d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
}
}
i_tbl[i_i].addr = L1_CODE_START;
i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
static void __init close_cplbtab(struct cplb_tab *table)
{
while (table->pos < table->size)
table->tab[table->pos++] = 0;
}
first_switched_dcplb = i_d;
first_switched_icplb = i_i;
/* helper function */
static void __init
__fill_code_cplbtab(struct cplb_tab *t, int i, u32 a_start, u32 a_end)
{
if (cplb_data[i].psize) {
fill_cplbtab(t,
cplb_data[i].start,
cplb_data[i].end,
cplb_data[i].psize,
cplb_data[i].i_conf);
} else {
#if defined(CONFIG_BFIN_ICACHE)
if (ANOMALY_05000263 && i == SDRAM_KERN) {
fill_cplbtab(t,
cplb_data[i].start,
cplb_data[i].end,
SIZE_4M,
cplb_data[i].i_conf);
} else
#endif
{
fill_cplbtab(t,
cplb_data[i].start,
a_start,
SIZE_1M,
cplb_data[i].i_conf);
fill_cplbtab(t,
a_start,
a_end,
SIZE_4M,
cplb_data[i].i_conf);
fill_cplbtab(t, a_end,
cplb_data[i].end,
SIZE_1M,
cplb_data[i].i_conf);
}
}
}
BUG_ON(first_switched_dcplb > MAX_CPLBS);
BUG_ON(first_switched_icplb > MAX_CPLBS);
static void __init
__fill_data_cplbtab(struct cplb_tab *t, int i, u32 a_start, u32 a_end)
{
if (cplb_data[i].psize) {
fill_cplbtab(t,
cplb_data[i].start,
cplb_data[i].end,
cplb_data[i].psize,
cplb_data[i].d_conf);
} else {
fill_cplbtab(t,
cplb_data[i].start,
a_start, SIZE_1M,
cplb_data[i].d_conf);
fill_cplbtab(t, a_start,
a_end, SIZE_4M,
cplb_data[i].d_conf);
fill_cplbtab(t, a_end,
cplb_data[i].end,
SIZE_1M,
cplb_data[i].d_conf);
}
while (i_d < MAX_CPLBS)
d_tbl[i_d++].data = 0;
while (i_i < MAX_CPLBS)
i_tbl[i_i++].data = 0;
}
void __init generate_cplb_tables_cpu(unsigned int cpu)
void __init generate_cplb_tables_all(void)
{
int i_d, i_i;
u16 i, j, process;
u32 a_start, a_end, as, ae, as_1m;
struct cplb_tab *t_i = NULL;
struct cplb_tab *t_d = NULL;
struct s_cplb cplb;
printk(KERN_INFO "NOMPU: setting up cplb tables for global access\n");
cplb.init_i.size = CPLB_TBL_ENTRIES;
cplb.init_d.size = CPLB_TBL_ENTRIES;
cplb.switch_i.size = MAX_SWITCH_I_CPLBS;
cplb.switch_d.size = MAX_SWITCH_D_CPLBS;
cplb.init_i.pos = 0;
cplb.init_d.pos = 0;
cplb.switch_i.pos = 0;
cplb.switch_d.pos = 0;
cplb.init_i.tab = icplb_tables[cpu];
cplb.init_d.tab = dcplb_tables[cpu];
cplb.switch_i.tab = ipdt_tables[cpu];
cplb.switch_d.tab = dpdt_tables[cpu];
cplb_data[L1I_MEM].start = get_l1_code_start_cpu(cpu);
cplb_data[L1I_MEM].end = cplb_data[L1I_MEM].start + L1_CODE_LENGTH;
cplb_data[L1D_MEM].start = get_l1_data_a_start_cpu(cpu);
cplb_data[L1D_MEM].end = get_l1_data_b_start_cpu(cpu) + L1_DATA_B_LENGTH;
cplb_data[SDRAM_KERN].end = memory_end;
i_d = 0;
/* Normal RAM, including MTD FS. */
#ifdef CONFIG_MTD_UCLINUX
cplb_data[SDRAM_RAM_MTD].start = memory_mtd_start;
cplb_data[SDRAM_RAM_MTD].end = memory_mtd_start + mtd_size;
cplb_data[SDRAM_RAM_MTD].valid = mtd_size > 0;
# if defined(CONFIG_ROMFS_FS)
cplb_data[SDRAM_RAM_MTD].attr |= I_CPLB;
/*
* The ROMFS_FS size is often not a multiple of 1MB.
* This can cause multiple CPLB sets covering the same memory area.
* This will then cause multiple CPLB hit exceptions.
* Workaround: We ensure a contiguous memory area by extending the kernel
* memory section over the mtd section.
* For ROMFS_FS, memory must be covered with ICPLBs anyway.
* So there is no difference between kernel and mtd memory setup.
*/
cplb_data[SDRAM_KERN].end = memory_mtd_start + mtd_size;
cplb_data[SDRAM_RAM_MTD].valid = 0;
# endif
dcplb_bounds[i_d].eaddr = memory_mtd_start + mtd_size;
#else
cplb_data[SDRAM_RAM_MTD].valid = 0;
dcplb_bounds[i_d].eaddr = memory_end;
#endif
dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
/* DMA uncached region. */
if (DMA_UNCACHED_REGION) {
dcplb_bounds[i_d].eaddr = _ramend;
dcplb_bounds[i_d++].data = SDRAM_DNON_CHBL;
}
if (_ramend != physical_mem_end) {
/* Reserved memory. */
dcplb_bounds[i_d].eaddr = physical_mem_end;
dcplb_bounds[i_d++].data = (reserved_mem_dcache_on ?
SDRAM_DGENERIC : SDRAM_DNON_CHBL);
}
/* Addressing hole up to the async bank. */
dcplb_bounds[i_d].eaddr = ASYNC_BANK0_BASE;
dcplb_bounds[i_d++].data = 0;
/* ASYNC banks. */
dcplb_bounds[i_d].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;
dcplb_bounds[i_d++].data = SDRAM_EBIU;
/* Addressing hole up to BootROM. */
dcplb_bounds[i_d].eaddr = BOOT_ROM_START;
dcplb_bounds[i_d++].data = 0;
/* BootROM -- largest one should be less than 1 meg. */
dcplb_bounds[i_d].eaddr = BOOT_ROM_START + (1 * 1024 * 1024);
dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
if (L2_LENGTH) {
/* Addressing hole up to L2 SRAM. */
dcplb_bounds[i_d].eaddr = L2_START;
dcplb_bounds[i_d++].data = 0;
/* L2 SRAM. */
dcplb_bounds[i_d].eaddr = L2_START + L2_LENGTH;
dcplb_bounds[i_d++].data = L2_DMEMORY;
}
dcplb_nr_bounds = i_d;
BUG_ON(dcplb_nr_bounds > ARRAY_SIZE(dcplb_bounds));
cplb_data[SDRAM_DMAZ].start = _ramend - DMA_UNCACHED_REGION;
cplb_data[SDRAM_DMAZ].end = _ramend;
cplb_data[RES_MEM].start = _ramend;
cplb_data[RES_MEM].end = physical_mem_end;
if (reserved_mem_dcache_on)
cplb_data[RES_MEM].d_conf = SDRAM_DGENERIC;
else
cplb_data[RES_MEM].d_conf = SDRAM_DNON_CHBL;
if (reserved_mem_icache_on)
cplb_data[RES_MEM].i_conf = SDRAM_IGENERIC;
else
cplb_data[RES_MEM].i_conf = SDRAM_INON_CHBL;
for (i = ZERO_P; i < ARRAY_SIZE(cplb_data); ++i) {
if (!cplb_data[i].valid)
continue;
as_1m = cplb_data[i].start % SIZE_1M;
/* We need to make sure all sections are properly 1M aligned.
* However, between Kernel Memory and the Kernel mtd section, depending on the
* rootfs size, there can be overlapping memory areas.
*/
if (as_1m && i != L1I_MEM && i != L1D_MEM) {
i_i = 0;
/* Normal RAM, including MTD FS. */
#ifdef CONFIG_MTD_UCLINUX
if (i == SDRAM_RAM_MTD) {
if ((cplb_data[SDRAM_KERN].end + 1) > cplb_data[SDRAM_RAM_MTD].start)
cplb_data[SDRAM_RAM_MTD].start = (cplb_data[i].start & (-2*SIZE_1M)) + SIZE_1M;
else
cplb_data[SDRAM_RAM_MTD].start = (cplb_data[i].start & (-2*SIZE_1M));
} else
icplb_bounds[i_i].eaddr = memory_mtd_start + mtd_size;
#else
icplb_bounds[i_i].eaddr = memory_end;
#endif
printk(KERN_WARNING "Unaligned Start of %s at 0x%X\n",
cplb_data[i].name, cplb_data[i].start);
}
as = cplb_data[i].start % SIZE_4M;
ae = cplb_data[i].end % SIZE_4M;
if (as)
a_start = cplb_data[i].start + (SIZE_4M - (as));
else
a_start = cplb_data[i].start;
a_end = cplb_data[i].end - ae;
for (j = INITIAL_T; j <= SWITCH_T; j++) {
switch (j) {
case INITIAL_T:
if (cplb_data[i].attr & INITIAL_T) {
t_i = &cplb.init_i;
t_d = &cplb.init_d;
process = 1;
} else
process = 0;
break;
case SWITCH_T:
if (cplb_data[i].attr & SWITCH_T) {
t_i = &cplb.switch_i;
t_d = &cplb.switch_d;
process = 1;
} else
process = 0;
break;
default:
process = 0;
break;
}
if (!process)
continue;
if (cplb_data[i].attr & I_CPLB)
__fill_code_cplbtab(t_i, i, a_start, a_end);
if (cplb_data[i].attr & D_CPLB)
__fill_data_cplbtab(t_d, i, a_start, a_end);
}
icplb_bounds[i_i++].data = SDRAM_IGENERIC;
/* DMA uncached region. */
if (DMA_UNCACHED_REGION) {
icplb_bounds[i_i].eaddr = _ramend;
icplb_bounds[i_i++].data = 0;
}
/* make sure we locked the kernel start */
BUG_ON(cplb.init_i.pos < 2 + cplb_data[ZERO_P].valid);
BUG_ON(cplb.init_d.pos < 1 + cplb_data[ZERO_P].valid + cplb_data[L1D_MEM].valid);
/* make sure we didn't overflow the table */
BUG_ON(cplb.init_i.size < cplb.init_i.pos);
BUG_ON(cplb.init_d.size < cplb.init_d.pos);
BUG_ON(cplb.switch_i.size < cplb.switch_i.pos);
BUG_ON(cplb.switch_d.size < cplb.switch_d.pos);
/* close tables */
close_cplbtab(&cplb.init_i);
close_cplbtab(&cplb.init_d);
cplb.init_i.tab[cplb.init_i.pos] = -1;
cplb.init_d.tab[cplb.init_d.pos] = -1;
cplb.switch_i.tab[cplb.switch_i.pos] = -1;
cplb.switch_d.tab[cplb.switch_d.pos] = -1;
if (_ramend != physical_mem_end) {
/* Reserved memory. */
icplb_bounds[i_i].eaddr = physical_mem_end;
icplb_bounds[i_i++].data = (reserved_mem_icache_on ?
SDRAM_IGENERIC : SDRAM_INON_CHBL);
}
/* Addressing hole up to BootROM. */
icplb_bounds[i_i].eaddr = BOOT_ROM_START;
icplb_bounds[i_i++].data = 0;
/* BootROM -- largest one should be less than 1 meg. */
icplb_bounds[i_i].eaddr = BOOT_ROM_START + (1 * 1024 * 1024);
icplb_bounds[i_i++].data = SDRAM_IGENERIC;
if (L2_LENGTH) {
/* Addressing hole up to L2 SRAM, including the async bank. */
icplb_bounds[i_i].eaddr = L2_START;
icplb_bounds[i_i++].data = 0;
/* L2 SRAM. */
icplb_bounds[i_i].eaddr = L2_START + L2_LENGTH;
icplb_bounds[i_i++].data = L2_IMEMORY;
}
icplb_nr_bounds = i_i;
BUG_ON(icplb_nr_bounds > ARRAY_SIZE(icplb_bounds));
}
#endif
/*
* File: arch/blackfin/mach-common/cplbmgr.S
* Based on:
* Author: LG Soft India
*
* Created: ?
* Description: CPLB replacement routine for CPLB mismatch
*
* Modified:
* Copyright 2004-2006 Analog Devices Inc.
*
* Bugs: Enter bugs at http://blackfin.uclinux.org/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
/* Usage: int _cplb_mgr(int is_data_miss, int enable_cache)
* is_data_miss==2 => Mark as Dirty, write to the clean data page
* is_data_miss==1 => Replace a data CPLB.
* is_data_miss==0 => Replace an instruction CPLB.
*
* Returns:
* CPLB_RELOADED => Successfully updated CPLB table.
* CPLB_NO_UNLOCKED => All CPLBs are locked, so cannot be evicted.
* This indicates that the CPLBs in the configuration
table are badly configured, as this should never
* occur.
* CPLB_NO_ADDR_MATCH => The address being accessed, that triggered the
* exception, is not covered by any of the CPLBs in
* the configuration table. The application is
* presumably misbehaving.
* CPLB_PROT_VIOL => The address being accessed, that triggered the
* exception, was not a first-write to a clean Write
* Back Data page, and so presumably is a genuine
* violation of the page's protection attributes.
* The application is misbehaving.
*/
#include <linux/linkage.h>
#include <asm/blackfin.h>
#include <asm/cplb.h>
#include <asm/asm-offsets.h>
#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
.section .l1.text
#else
.text
#endif
.align 2;
ENTRY(_cplb_mgr)
[--SP]=( R7:4,P5:3 );
CC = R0 == 2;
IF CC JUMP .Ldcplb_write;
CC = R0 == 0;
IF !CC JUMP .Ldcplb_miss_compare;
/* ICPLB Miss Exception. We need to choose one of the
* currently-installed CPLBs, and replace it with one
* from the configuration table.
*/
/* A multi-word instruction can cross a page boundary. This means the
* first part of the instruction can be in a valid page, but the
* second part is not, and hence generates the instruction miss.
* However, the fault address is for the start of the instruction,
* not the part that's in the bad page. Therefore, we have to check
* whether the fault address applies to a page that is already present
* in the table.
*/
P4.L = LO(ICPLB_FAULT_ADDR);
P4.H = HI(ICPLB_FAULT_ADDR);
P1 = 16;
P5.L = _page_size_table;
P5.H = _page_size_table;
P0.L = LO(ICPLB_DATA0);
P0.H = HI(ICPLB_DATA0);
R4 = [P4]; /* Get faulting address*/
R6 = 64; /* Advance past the fault address, which*/
R6 = R6 + R4; /* we'll use if we find a match*/
R3 = ((16 << 8) | 2); /* Extract mask, two bits at posn 16 */
R5 = 0;
.Lisearch:
R1 = [P0-0x100]; /* Address for this CPLB */
R0 = [P0++]; /* Info for this CPLB*/
CC = BITTST(R0,0); /* Is the CPLB valid?*/
IF !CC JUMP .Lnomatch; /* Skip it, if not.*/
CC = R4 < R1(IU); /* If fault address less than page start*/
IF CC JUMP .Lnomatch; /* then skip this one.*/
R2 = EXTRACT(R0,R3.L) (Z); /* Get page size*/
P1 = R2;
P1 = P5 + (P1<<2); /* index into page-size table*/
R2 = [P1]; /* Get the page size*/
R1 = R1 + R2; /* and add to page start, to get page end*/
CC = R4 < R1(IU); /* and see whether fault addr is in page.*/
IF !CC R4 = R6; /* If so, advance the address and finish loop.*/
IF !CC JUMP .Lisearch_done;
.Lnomatch:
/* Go around again*/
R5 += 1;
CC = BITTST(R5, 4); /* i.e. CC = R5 >= 16 */
IF !CC JUMP .Lisearch;
.Lisearch_done:
I0 = R4; /* Fault address we'll search for*/
/* set up pointers */
P0.L = LO(ICPLB_DATA0);
P0.H = HI(ICPLB_DATA0);
/* The replacement procedure for ICPLBs */
P4.L = LO(IMEM_CONTROL);
P4.H = HI(IMEM_CONTROL);
/* Turn off CPLBs while we work, necessary according to HRM before
* modifying CPLB descriptors
*/
R5 = [P4]; /* Control Register*/
BITCLR(R5,ENICPLB_P);
CLI R1;
SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */
.align 8;
[P4] = R5;
SSYNC;
STI R1;
R1 = -1; /* end point comparison */
R3 = 16; /* counter */
/* Search through CPLBs for first non-locked entry */
/* Overwrite it by moving everyone else up by 1 */
.Licheck_lock:
R0 = [P0++];
R3 = R3 + R1;
CC = R3 == R1;
IF CC JUMP .Lall_locked;
CC = BITTST(R0, 0); /* an invalid entry is good */
IF !CC JUMP .Lifound_victim;
CC = BITTST(R0,1); /* but a locked entry isn't */
IF CC JUMP .Licheck_lock;
.Lifound_victim:
#ifdef CONFIG_CPLB_INFO
R7 = [P0 - 0x104];
GET_PDA(P2, R2);
P3 = [P2 + PDA_IPDT_SWAPCOUNT];
P2 = [P2 + PDA_IPDT];
P3 += -4;
.Licount:
R2 = [P2]; /* address from config table */
P2 += 8;
P3 += 8;
CC = R2==-1;
IF CC JUMP .Licount_done;
CC = R7==R2;
IF !CC JUMP .Licount;
R7 = [P3];
R7 += 1;
[P3] = R7;
CSYNC;
.Licount_done:
#endif
LC0=R3;
LSETUP(.Lis_move,.Lie_move) LC0;
.Lis_move:
R0 = [P0];
[P0 - 4] = R0;
R0 = [P0 - 0x100];
[P0-0x104] = R0;
.Lie_move:
P0+=4;
/* Clear ICPLB_DATA15, in case we don't find a replacement;
* otherwise we would have a duplicate entry and will crash.
*/
R0 = 0;
[P0 - 4] = R0;
/* We've made space in the ICPLB table, so that ICPLB15
* is now free to be overwritten. Next, we have to determine
* which CPLB we need to install, from the configuration
* table. This is a matter of getting the start-of-page
* addresses and page-lengths from the config table, and
* determining whether the fault address falls within that
* range.
*/
GET_PDA(P3, R0);
P2 = [P3 + PDA_IPDT];
#ifdef CONFIG_CPLB_INFO
P3 = [P3 + PDA_IPDT_SWAPCOUNT];
P3 += -8;
#endif
P0.L = _page_size_table;
P0.H = _page_size_table;
/* Retrieve our fault address (which may have been advanced
* because the faulting instruction crossed a page boundary).
*/
R0 = I0;
/* An extraction pattern, to get the page-size bits from
* the CPLB data entry. Bits 16-17, so two bits at posn 16.
*/
R1 = ((16<<8)|2);
.Linext: R4 = [P2++]; /* address from config table */
R2 = [P2++]; /* data from config table */
#ifdef CONFIG_CPLB_INFO
P3 += 8;
#endif
CC = R4 == -1; /* End of config table*/
IF CC JUMP .Lno_page_in_table;
/* See if failed address > start address */
CC = R4 <= R0(IU);
IF !CC JUMP .Linext;
/* extract page size (17:16)*/
R3 = EXTRACT(R2, R1.L) (Z);
/* add page size to addr to get range */
P5 = R3;
P5 = P0 + (P5 << 2); /* scaled, for int access*/
R3 = [P5];
R3 = R3 + R4;
/* See if failed address < (start address + page size) */
CC = R0 < R3(IU);
IF !CC JUMP .Linext;
/* We've found a CPLB in the config table that covers
* the faulting address, so install this CPLB into the
* last entry of the table.
*/
P1.L = LO(ICPLB_DATA15); /* ICPLB_DATA15 */
P1.H = HI(ICPLB_DATA15);
[P1] = R2;
[P1-0x100] = R4;
#ifdef CONFIG_CPLB_INFO
R3 = [P3];
R3 += 1;
[P3] = R3;
#endif
/* P4 points to IMEM_CONTROL, and R5 contains its old
* value, after we disabled ICPLBS. Re-enable them.
*/
BITSET(R5,ENICPLB_P);
CLI R2;
SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */
.align 8;
[P4] = R5;
SSYNC;
STI R2;
( R7:4,P5:3 ) = [SP++];
R0 = CPLB_RELOADED;
RTS;
/* FAILED CASES*/
.Lno_page_in_table:
R0 = CPLB_NO_ADDR_MATCH;
JUMP .Lfail_ret;
.Lall_locked:
R0 = CPLB_NO_UNLOCKED;
JUMP .Lfail_ret;
.Lprot_violation:
R0 = CPLB_PROT_VIOL;
.Lfail_ret:
/* Make sure we turn protection/cache back on, even in the failing case */
BITSET(R5,ENICPLB_P);
CLI R2;
SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */
.align 8;
[P4] = R5;
SSYNC;
STI R2;
( R7:4,P5:3 ) = [SP++];
RTS;
.Ldcplb_write:
/* if a DCPLB is marked as write-back (CPLB_WT==0), and
* it is clean (CPLB_DIRTY==0), then a write to the
* CPLB's page triggers a protection violation. We have to
* mark the CPLB as dirty, to indicate that there are
* pending writes associated with the CPLB.
*/
P4.L = LO(DCPLB_STATUS);
P4.H = HI(DCPLB_STATUS);
P3.L = LO(DCPLB_DATA0);
P3.H = HI(DCPLB_DATA0);
R5 = [P4];
/* A protection violation can be caused by more than just writes
* to a clean WB page, so we have to ensure that:
* - It's a write
* - to a clean WB page
* - and is allowed in the mode the access occurred.
*/
CC = BITTST(R5, 16); /* ensure it was a write*/
IF !CC JUMP .Lprot_violation;
/* to check the rest, we have to retrieve the DCPLB.*/
/* The low half of DCPLB_STATUS is a bit mask*/
R2 = R5.L (Z); /* indicating which CPLB triggered the event.*/
R3 = 30; /* so we can use this to determine the offset*/
R2.L = SIGNBITS R2;
R2 = R2.L (Z); /* into the DCPLB table.*/
R3 = R3 - R2;
P4 = R3;
P3 = P3 + (P4<<2);
R3 = [P3]; /* Retrieve the CPLB*/
/* Now we can check whether it's a clean WB page*/
CC = BITTST(R3, 14); /* 0==WB, 1==WT*/
IF CC JUMP .Lprot_violation;
CC = BITTST(R3, 7); /* 0 == clean, 1 == dirty*/
IF CC JUMP .Lprot_violation;
/* Check whether the write is allowed in the mode that was active.*/
R2 = 1<<3; /* checking write in user mode*/
CC = BITTST(R5, 17); /* 0==was user, 1==was super*/
R5 = CC;
R2 <<= R5; /* if was super, check write in super mode*/
R2 = R3 & R2;
CC = R2 == 0;
IF CC JUMP .Lprot_violation;
/* It's a genuine write-to-clean-page.*/
BITSET(R3, 7); /* mark as dirty*/
[P3] = R3; /* and write back.*/
NOP;
CSYNC;
( R7:4,P5:3 ) = [SP++];
R0 = CPLB_RELOADED;
RTS;
.Ldcplb_miss_compare:
/* Data CPLB Miss event. We need to choose a CPLB to
* evict, and then locate a new CPLB to install from the
* config table, that covers the faulting address.
*/
P1.L = LO(DCPLB_DATA15);
P1.H = HI(DCPLB_DATA15);
P4.L = LO(DCPLB_FAULT_ADDR);
P4.H = HI(DCPLB_FAULT_ADDR);
R4 = [P4];
I0 = R4;
/* The replacement procedure for DCPLBs*/
R6 = R1; /* Save for later*/
/* Turn off CPLBs while we work.*/
P4.L = LO(DMEM_CONTROL);
P4.H = HI(DMEM_CONTROL);
R5 = [P4];
BITCLR(R5,ENDCPLB_P);
CLI R0;
SSYNC; /* SSYNC required before writing to DMEM_CONTROL. */
.align 8;
[P4] = R5;
SSYNC;
STI R0;
/* Start looking for a CPLB to evict. Our order of preference
* is: invalid CPLBs, clean CPLBs, dirty CPLBs. Locked CPLBs
* are no good.
*/
I1.L = LO(DCPLB_DATA0);
I1.H = HI(DCPLB_DATA0);
P1 = 2;
P2 = 16;
I2.L = _dcplb_preference;
I2.H = _dcplb_preference;
LSETUP(.Lsdsearch1, .Ledsearch1) LC0 = P1;
.Lsdsearch1:
R0 = [I2++]; /* Get the bits we're interested in*/
P0 = I1; /* Go back to start of table*/
LSETUP (.Lsdsearch2, .Ledsearch2) LC1 = P2;
.Lsdsearch2:
R1 = [P0++]; /* Fetch each installed CPLB in turn*/
R2 = R1 & R0; /* and test for interesting bits.*/
CC = R2 == 0; /* If none are set, it'll do.*/
IF !CC JUMP .Lskip_stack_check;
R2 = [P0 - 0x104]; /* R2 - PageStart */
P3.L = _page_size_table; /* retrieve end address */
P3.H = _page_size_table; /* retrieve end address */
R3 = 0x1002; /* 16th - position, 2 bits -length */
#if ANOMALY_05000209
nop; /* Anomaly 05000209 */
#endif
R7 = EXTRACT(R1,R3.l);
R7 = R7 << 2; /* Page size index offset */
P5 = R7;
P3 = P3 + P5;
R7 = [P3]; /* page size in bytes */
R7 = R2 + R7; /* R7 - PageEnd */
R4 = SP; /* Test SP is in range */
CC = R7 < R4; /* if PageEnd < SP */
IF CC JUMP .Ldfound_victim;
R3 = 0x284; /* stack length from start of trap till this
* point, plus 20 stack locations for future
* modifications
*/
R4 = R4 + R3;
CC = R4 < R2; /* if SP + stacklen < PageStart */
IF CC JUMP .Ldfound_victim;
.Lskip_stack_check:
.Ledsearch2: NOP;
.Ledsearch1: NOP;
/* If we got here, we didn't find a DCPLB we considered
* replacable, which means all of them were locked.
*/
JUMP .Lall_locked;
.Ldfound_victim:
#ifdef CONFIG_CPLB_INFO
R7 = [P0 - 0x104];
GET_PDA(P2, R2);
P3 = [P2 + PDA_DPDT_SWAPCOUNT];
P2 = [P2 + PDA_DPDT];
P3 += -4;
.Ldicount:
R2 = [P2];
P2 += 8;
P3 += 8;
CC = R2==-1;
IF CC JUMP .Ldicount_done;
CC = R7==R2;
IF !CC JUMP .Ldicount;
R7 = [P3];
R7 += 1;
[P3] = R7;
.Ldicount_done:
#endif
/* Clean down the hardware loops*/
R2 = 0;
LC1 = R2;
LC0 = R2;
/* There's a suitable victim in [P0-4] (because we've
* advanced already).
*/
.LDdoverwrite:
/* [P0-4] is a suitable victim CPLB, so we want to
* overwrite it by moving all the following CPLBs
* one space closer to the start.
*/
R1.L = LO(DCPLB_DATA16); /* DCPLB_DATA15 + 4 */
R1.H = HI(DCPLB_DATA16);
R0 = P0;
/* If the victim happens to be in DCPLB15,
* we don't need to move anything.
*/
CC = R1 == R0;
IF CC JUMP .Lde_moved;
R1 = R1 - R0;
R1 >>= 2;
P1 = R1;
LSETUP(.Lds_move, .Lde_move) LC0=P1;
.Lds_move:
R0 = [P0++]; /* move data */
[P0 - 8] = R0;
R0 = [P0-0x104]; /* move address */
.Lde_move:
[P0-0x108] = R0;
.Lde_moved:
NOP;
/* Clear DCPLB_DATA15, in case we don't find a replacement;
* otherwise we would have a duplicate entry and will crash.
*/
R0 = 0;
[P0 - 0x4] = R0;
/* We've now made space in DCPLB15 for the new CPLB to be
* installed. The next stage is to locate a CPLB in the
* config table that covers the faulting address.
*/
R0 = I0; /* Our faulting address */
GET_PDA(P3, R1);
P2 = [P3 + PDA_DPDT];
#ifdef CONFIG_CPLB_INFO
P3 = [P3 + PDA_DPDT_SWAPCOUNT];
P3 += -8;
#endif
P1.L = _page_size_table;
P1.H = _page_size_table;
/* An extraction pattern, to retrieve bits 17:16.*/
R1 = (16<<8)|2;
.Ldnext: R4 = [P2++]; /* address */
R2 = [P2++]; /* data */
#ifdef CONFIG_CPLB_INFO
P3 += 8;
#endif
CC = R4 == -1;
IF CC JUMP .Lno_page_in_table;
/* See if failed address > start address */
CC = R4 <= R0(IU);
IF !CC JUMP .Ldnext;
/* extract page size (17:16)*/
R3 = EXTRACT(R2, R1.L) (Z);
/* add page size to addr to get range */
P5 = R3;
P5 = P1 + (P5 << 2);
R3 = [P5];
R3 = R3 + R4;
/* See if failed address < (start address + page size) */
CC = R0 < R3(IU);
IF !CC JUMP .Ldnext;
/* We've found the CPLB that should be installed, so
* write it into CPLB15, masking off any caching bits
* if necessary.
*/
P1.L = LO(DCPLB_DATA15);
P1.H = HI(DCPLB_DATA15);
/* If the DCPLB has cache bits set, but caching hasn't
* been enabled, then we want to mask off the cache-in-L1
* bit before installing. Moreover, if caching is off, we
* also want to ensure that the DCPLB has WT mode set, rather
* than WB, since WB pages still trigger first-write exceptions
* even when caching is off, and the page isn't marked as
* cachable. Finally, we could mark the page as clean, not dirty,
* but we choose to leave that decision to the user; if the user
* chooses to have a CPLB pre-defined as dirty, then they always
* pay the cost of flushing during eviction, but don't pay the
* cost of first-write exceptions to mark the page as dirty.
*/
#ifdef CONFIG_BFIN_WT
BITSET(R6, 14); /* Set WT*/
#endif
[P1] = R2;
[P1-0x100] = R4;
#ifdef CONFIG_CPLB_INFO
R3 = [P3];
R3 += 1;
[P3] = R3;
#endif
/* We've installed the CPLB, so re-enable CPLBs. P4
* points to DMEM_CONTROL, and R5 is the value we
* last wrote to it, when we were disabling CPLBs.
*/
BITSET(R5,ENDCPLB_P);
CLI R2;
.align 8;
[P4] = R5;
SSYNC;
STI R2;
( R7:4,P5:3 ) = [SP++];
R0 = CPLB_RELOADED;
RTS;
ENDPROC(_cplb_mgr)
#ifdef CONFIG_CPLB_SWITCH_TAB_L1
.section .l1.data
#else
.data
#endif
ENTRY(_page_size_table)
.byte4 0x00000400; /* 1K */
.byte4 0x00001000; /* 4K */
.byte4 0x00100000; /* 1M */
.byte4 0x00400000; /* 4M */
END(_page_size_table)
ENTRY(_dcplb_preference)
.byte4 0x00000001; /* valid bit */
.byte4 0x00000002; /* lock bit */
END(_dcplb_preference)
/*
* File: arch/blackfin/kernel/cplb-nompu-c/cplbmgr.c
* Based on: arch/blackfin/kernel/cplb-mpu/cplbmgr.c
* Author: Michael McTernan <mmcternan@airvana.com>
*
* Created: 01Nov2008
* Description: CPLB miss handler.
*
* Modified:
* Copyright 2008 Airvana Inc.
* Copyright 2004-2007 Analog Devices Inc.
*
* Bugs: Enter bugs at http://blackfin.uclinux.org/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <asm/blackfin.h>
#include <asm/cplbinit.h>
#include <asm/cplb.h>
#include <asm/mmu_context.h>
/*
* WARNING
*
* This file is compiled with certain -ffixed-reg options. We have to
* make sure not to call any functions here that could clobber these
* registers.
*/
int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
int nr_dcplb_supv_miss[NR_CPUS], nr_icplb_supv_miss[NR_CPUS];
int nr_cplb_flush[NR_CPUS], nr_dcplb_prot[NR_CPUS];
#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
#define MGR_ATTR __attribute__((l1_text))
#else
#define MGR_ATTR
#endif
/*
* We're in an exception handler. The normal cli nop nop workaround
* isn't going to do very much, as the only thing that can interrupt
* us is an NMI, and the cli isn't going to stop that.
*/
#define NOWA_SSYNC __asm__ __volatile__ ("ssync;")
/* Anomaly handlers provide SSYNCs, so avoid extra if anomaly is present */
#if ANOMALY_05000125
#define bfin_write_DMEM_CONTROL_SSYNC(v) bfin_write_DMEM_CONTROL(v)
#define bfin_write_IMEM_CONTROL_SSYNC(v) bfin_write_IMEM_CONTROL(v)
#else
#define bfin_write_DMEM_CONTROL_SSYNC(v) \
do { NOWA_SSYNC; bfin_write_DMEM_CONTROL(v); NOWA_SSYNC; } while (0)
#define bfin_write_IMEM_CONTROL_SSYNC(v) \
do { NOWA_SSYNC; bfin_write_IMEM_CONTROL(v); NOWA_SSYNC; } while (0)
#endif
static inline void write_dcplb_data(int cpu, int idx, unsigned long data,
unsigned long addr)
{
unsigned long ctrl = bfin_read_DMEM_CONTROL();
bfin_write_DMEM_CONTROL_SSYNC(ctrl & ~ENDCPLB);
bfin_write32(DCPLB_DATA0 + idx * 4, data);
bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
bfin_write_DMEM_CONTROL_SSYNC(ctrl);
#ifdef CONFIG_CPLB_INFO
dcplb_tbl[cpu][idx].addr = addr;
dcplb_tbl[cpu][idx].data = data;
#endif
}
static inline void write_icplb_data(int cpu, int idx, unsigned long data,
unsigned long addr)
{
unsigned long ctrl = bfin_read_IMEM_CONTROL();
bfin_write_IMEM_CONTROL_SSYNC(ctrl & ~ENICPLB);
bfin_write32(ICPLB_DATA0 + idx * 4, data);
bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
bfin_write_IMEM_CONTROL_SSYNC(ctrl);
#ifdef CONFIG_CPLB_INFO
icplb_tbl[cpu][idx].addr = addr;
icplb_tbl[cpu][idx].data = data;
#endif
}
/*
* Given the contents of the status register, return the index of the
* CPLB that caused the fault.
*/
static inline int faulting_cplb_index(int status)
{
int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
return 30 - signbits;
}
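/*
 * Editorial aside, not part of the patch: the low half of the status
 * register is a one-hot mask identifying the faulting CPLB (see the
 * old assembly's handling of DCPLB_STATUS above), so an equivalent
 * portable sketch would be:
 *
 *	static inline int faulting_cplb_index_portable(int status)
 *	{
 *		return __builtin_ctz(status & 0xFFFF);
 *	}
 */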
/*
* Given the contents of the status register and the DCPLB_DATA contents,
* return true if a write access should be permitted.
*/
static inline int write_permitted(int status, unsigned long data)
{
if (status & FAULT_USERSUPV)
return !!(data & CPLB_SUPV_WR);
else
return !!(data & CPLB_USER_WR);
}
/* Counters to implement round-robin replacement. */
static int icplb_rr_index[NR_CPUS] PDT_ATTR;
static int dcplb_rr_index[NR_CPUS] PDT_ATTR;
/*
* Find an ICPLB entry to be evicted and return its index.
*/
static int evict_one_icplb(int cpu)
{
int i = first_switched_icplb + icplb_rr_index[cpu];
if (i >= MAX_CPLBS) {
i -= MAX_CPLBS - first_switched_icplb;
icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
}
icplb_rr_index[cpu]++;
return i;
}
static int evict_one_dcplb(int cpu)
{
int i = first_switched_dcplb + dcplb_rr_index[cpu];
if (i >= MAX_CPLBS) {
i -= MAX_CPLBS - first_switched_dcplb;
dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
}
dcplb_rr_index[cpu]++;
return i;
}
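/*
 * For example, with MAX_CPLBS == 16 and first_switched_dcplb == 3,
 * successive calls return 3, 4, ..., 15 and then wrap back to 3, so the
 * boot-time entries below first_switched_dcplb (zero page, kernel, L1)
 * are never evicted.
 */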
MGR_ATTR static int icplb_miss(int cpu)
{
unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
int status = bfin_read_ICPLB_STATUS();
int idx;
unsigned long i_data, base, addr1, eaddr;
nr_icplb_miss[cpu]++;
if (unlikely(status & FAULT_USERSUPV))
nr_icplb_supv_miss[cpu]++;
base = 0;
for (idx = 0; idx < icplb_nr_bounds; idx++) {
eaddr = icplb_bounds[idx].eaddr;
if (addr < eaddr)
break;
base = eaddr;
}
if (unlikely(idx == icplb_nr_bounds))
return CPLB_NO_ADDR_MATCH;
i_data = icplb_bounds[idx].data;
if (unlikely(i_data == 0))
return CPLB_NO_ADDR_MATCH;
addr1 = addr & ~(SIZE_4M - 1);
addr &= ~(SIZE_1M - 1);
i_data |= PAGE_SIZE_1MB;
if (addr1 >= base && (addr1 + SIZE_4M) <= eaddr) {
/*
* This works because
* (PAGE_SIZE_4MB & PAGE_SIZE_1MB) == PAGE_SIZE_1MB.
*/
i_data |= PAGE_SIZE_4MB;
addr = addr1;
}
/* Pick entry to evict */
idx = evict_one_icplb(cpu);
write_icplb_data(cpu, idx, i_data, addr);
return CPLB_RELOADED;
}
MGR_ATTR static int dcplb_miss(int cpu)
{
unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
int status = bfin_read_DCPLB_STATUS();
int idx;
unsigned long d_data, base, addr1, eaddr;
nr_dcplb_miss[cpu]++;
if (unlikely(status & FAULT_USERSUPV))
nr_dcplb_supv_miss[cpu]++;
base = 0;
for (idx = 0; idx < dcplb_nr_bounds; idx++) {
eaddr = dcplb_bounds[idx].eaddr;
if (addr < eaddr)
break;
base = eaddr;
}
if (unlikely(idx == dcplb_nr_bounds))
return CPLB_NO_ADDR_MATCH;
d_data = dcplb_bounds[idx].data;
if (unlikely(d_data == 0))
return CPLB_NO_ADDR_MATCH;
addr1 = addr & ~(SIZE_4M - 1);
addr &= ~(SIZE_1M - 1);
d_data |= PAGE_SIZE_1MB;
if (addr1 >= base && (addr1 + SIZE_4M) <= eaddr) {
/*
* This works because
* (PAGE_SIZE_4MB & PAGE_SIZE_1MB) == PAGE_SIZE_1MB.
*/
d_data |= PAGE_SIZE_4MB;
addr = addr1;
}
/* Pick entry to evict */
idx = evict_one_dcplb(cpu);
write_dcplb_data(cpu, idx, d_data, addr);
return CPLB_RELOADED;
}
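/*
 * Worked example: a miss at 0x01234567 gives addr1 == 0x01000000
 * (4MB-aligned) and addr == 0x01200000 (1MB-aligned).  If the window
 * [0x01000000, 0x01400000) lies wholly inside the region, the 4MB page
 * at 0x01000000 is installed; otherwise the 1MB page at 0x01200000.
 */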
MGR_ATTR static noinline int dcplb_protection_fault(int cpu)
{
int status = bfin_read_DCPLB_STATUS();
nr_dcplb_prot[cpu]++;
if (likely(status & FAULT_RW)) {
int idx = faulting_cplb_index(status);
unsigned long regaddr = DCPLB_DATA0 + idx * 4;
unsigned long data = bfin_read32(regaddr);
/* Check if fault is to dirty a clean page */
if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
write_permitted(status, data)) {
/* Mark the page dirty before writing the entry back, as the
* old assembly handler did; writing back unchanged data would
* leave the entry clean and re-fault on the next write. */
data |= CPLB_DIRTY;
dcplb_tbl[cpu][idx].data = data;
bfin_write32(regaddr, data);
return CPLB_RELOADED;
}
}
return CPLB_PROT_VIOL;
}
MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs)
{
int cause = seqstat & 0x3f;
unsigned int cpu = smp_processor_id();
switch (cause) {
case 0x2C: /* CPLB miss on an instruction fetch */
return icplb_miss(cpu);
case 0x26: /* data access CPLB miss */
return dcplb_miss(cpu);
default:
if (unlikely(cause == 0x23)) /* data access CPLB protection violation */
return dcplb_protection_fault(cpu);
return CPLB_UNKNOWN_ERR;
}
}
@@ -20,8 +20,6 @@ static char const page_strtbl[][3] = { "1K", "4K", "1M", "4M" };
#define page(flags) (((flags) & 0x30000) >> 16)
#define strpage(flags) page_strtbl[page(flags)]
#ifdef CONFIG_MPU
struct cplbinfo_data {
loff_t pos;
char cplb_type;
@@ -75,88 +73,6 @@ static void cplbinfo_seq_init(struct cplbinfo_data *cdata, unsigned int cpu)
}
}
#else
struct cplbinfo_data {
loff_t pos;
char cplb_type;
u32 mem_control;
unsigned long *pdt_tables, *pdt_swapcount;
unsigned long cplb_addr, cplb_data;
};
extern int page_size_table[];
static int cplb_find_entry(unsigned long addr_tbl, unsigned long data_tbl,
unsigned long addr_find, unsigned long data_find)
{
int i;
for (i = 0; i < 16; ++i) {
unsigned long cplb_addr = bfin_read32(addr_tbl + i * 4);
unsigned long cplb_data = bfin_read32(data_tbl + i * 4);
if (addr_find >= cplb_addr &&
addr_find < cplb_addr + page_size_table[page(cplb_data)] &&
cplb_data == data_find)
return i;
}
return -1;
}
static void cplbinfo_print_header(struct seq_file *m)
{
seq_printf(m, "Address\t\tData\tSize\tValid\tLocked\tSwapin\tiCount\toCount\n");
}
static int cplbinfo_nomore(struct cplbinfo_data *cdata)
{
return cdata->pdt_tables[cdata->pos * 2] == 0xffffffff;
}
static int cplbinfo_show(struct seq_file *m, void *p)
{
struct cplbinfo_data *cdata;
unsigned long data, addr;
int entry;
loff_t pos;
cdata = p;
pos = cdata->pos * 2;
addr = cdata->pdt_tables[pos];
data = cdata->pdt_tables[pos + 1];
entry = cplb_find_entry(cdata->cplb_addr, cdata->cplb_data, addr, data);
seq_printf(m,
"0x%08lx\t0x%05lx\t%s\t%c\t%c\t%2d\t%ld\t%ld\n",
addr, data, strpage(data),
(data & CPLB_VALID) ? 'Y' : 'N',
(data & CPLB_LOCK) ? 'Y' : 'N', entry,
cdata->pdt_swapcount[pos],
cdata->pdt_swapcount[pos + 1]);
return 0;
}
static void cplbinfo_seq_init(struct cplbinfo_data *cdata, unsigned int cpu)
{
if (cdata->cplb_type == 'I') {
cdata->mem_control = bfin_read_IMEM_CONTROL();
cdata->pdt_tables = ipdt_tables[cpu];
cdata->pdt_swapcount = ipdt_swapcount_tables[cpu];
cdata->cplb_addr = ICPLB_ADDR0;
cdata->cplb_data = ICPLB_DATA0;
} else {
cdata->mem_control = bfin_read_DMEM_CONTROL();
cdata->pdt_tables = dpdt_tables[cpu];
cdata->pdt_swapcount = dpdt_swapcount_tables[cpu];
cdata->cplb_addr = DCPLB_ADDR0;
cdata->cplb_data = DCPLB_DATA0;
}
}
#endif
static void *cplbinfo_start(struct seq_file *m, loff_t *pos)
{
struct cplbinfo_data *cdata = m->private;
......
@@ -88,6 +88,7 @@ void __init generate_cplb_tables(void)
{
unsigned int cpu;
generate_cplb_tables_all();
/* Generate per-CPU I&D CPLB tables */
for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
generate_cplb_tables_cpu(cpu);
@@ -97,19 +98,11 @@ void __init generate_cplb_tables(void)
void __cpuinit bfin_setup_caches(unsigned int cpu)
{
#ifdef CONFIG_BFIN_ICACHE
#ifdef CONFIG_MPU
bfin_icache_init(icplb_tbl[cpu]);
#else
bfin_icache_init(icplb_tables[cpu]);
#endif
#endif
#ifdef CONFIG_BFIN_DCACHE
#ifdef CONFIG_MPU
bfin_dcache_init(dcplb_tbl[cpu]);
#else
bfin_dcache_init(dcplb_tables[cpu]);
#endif
#endif
/*
......
@@ -112,24 +112,21 @@ ENTRY(_ex_dcplb_viol)
ENTRY(_ex_dcplb_miss)
ENTRY(_ex_icplb_miss)
(R7:6,P5:4) = [sp++];
ASTAT = [sp++];
SAVE_ALL_SYS
#ifdef CONFIG_MPU
/* We leave the previously pushed ASTAT on the stack. */
SAVE_CONTEXT_CPLB
/* We must load R1 here, _before_ DEBUG_HWTRACE_SAVE, since that
* will change the stack pointer. */
R0 = SEQSTAT;
R1 = SP;
#endif
DEBUG_HWTRACE_SAVE(p5, r7)
#ifdef CONFIG_MPU
sp += -12;
call _cplb_hdr;
sp += 12;
CC = R0 == 0;
IF !CC JUMP _handle_bad_cplb;
#else
call __cplb_hdr;
#endif
#ifdef CONFIG_DEBUG_DOUBLEFAULT
/* While we were processing this, did we double fault? */
@@ -143,7 +140,8 @@ ENTRY(_ex_icplb_miss)
#endif
DEBUG_HWTRACE_RESTORE(p5, r7)
RESTORE_ALL_SYS
RESTORE_CONTEXT_CPLB
ASTAT = [SP++];
SP = EX_SCRATCH_REG;
rtx;
ENDPROC(_ex_icplb_miss)
@@ -298,9 +296,8 @@ ENTRY(_handle_bad_cplb)
* the stack to get ready so, we can fall through - we
* need to make a CPLB exception look like a normal exception
*/
RESTORE_ALL_SYS
[--sp] = ASTAT;
RESTORE_CONTEXT_CPLB
/* ASTAT is still on the stack, where it is needed. */
[--sp] = (R7:6,P5:4);
ENTRY(_ex_replaceable)
......
@@ -119,16 +119,6 @@ asmlinkage void init_pda(void)
cpu_pda[cpu].ex_stack = exception_stack[cpu + 1];
#ifdef CONFIG_MPU
#else
cpu_pda[cpu].ipdt = ipdt_tables[cpu];
cpu_pda[cpu].dpdt = dpdt_tables[cpu];
#ifdef CONFIG_CPLB_INFO
cpu_pda[cpu].ipdt_swapcount = ipdt_swapcount_tables[cpu];
cpu_pda[cpu].dpdt_swapcount = dpdt_swapcount_tables[cpu];
#endif
#endif
#ifdef CONFIG_SMP
cpu_pda[cpu].imask = 0x1f;
#endif
......