Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
L
linux-davinci
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Redmine
Redmine
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Operations
Operations
Metrics
Environments
Analytics
Analytics
CI / CD
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
linux
linux-davinci
Commits
811d50cb
Commit
811d50cb
authored
Nov 20, 2007
by
Paul Mundt
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
sh: Move in the
SH-5
TLB miss.
Signed-off-by:
Paul Mundt
<
lethal@linux-sh.org
>
parent
379a95d1
Changes
4
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
75 additions
and
75 deletions
+75
-75
arch/sh/mm/Makefile_32
arch/sh/mm/Makefile_32
+1
-1
arch/sh/mm/Makefile_64
arch/sh/mm/Makefile_64
+1
-1
arch/sh/mm/fault_32.c
arch/sh/mm/fault_32.c
+0
-0
arch/sh/mm/fault_64.c
arch/sh/mm/fault_64.c
+73
-73
No files found.
arch/sh/mm/Makefile_32
View file @
811d50cb
...
@@ -12,7 +12,7 @@ obj-$(CONFIG_SH7705_CACHE_32KB) += cache-sh7705.o
...
@@ -12,7 +12,7 @@ obj-$(CONFIG_SH7705_CACHE_32KB) += cache-sh7705.o
endif
endif
mmu-y := tlb-nommu.o pg-nommu.o
mmu-y := tlb-nommu.o pg-nommu.o
mmu-$(CONFIG_MMU) := fault.o clear_page.o copy_page.o tlb-flush_32.o \
mmu-$(CONFIG_MMU)	:= fault_32.o clear_page.o copy_page.o tlb-flush_32.o \
ioremap_32.o
ioremap_32.o
obj-y += $(mmu-y)
obj-y += $(mmu-y)
...
...
arch/sh/mm/Makefile_64
View file @
811d50cb
...
@@ -5,7 +5,7 @@
...
@@ -5,7 +5,7 @@
obj-y := init.o extable_64.o consistent.o
obj-y := init.o extable_64.o consistent.o
mmu-y := tlb-nommu.o pg-nommu.o
mmu-y := tlb-nommu.o pg-nommu.o
mmu-$(CONFIG_MMU) := ioremap_64.o tlb-flush_64.o
mmu-$(CONFIG_MMU)	:= fault_64.o ioremap_64.o tlb-flush_64.o
obj-y += $(mmu-y)
obj-y += $(mmu-y)
...
...
arch/sh/mm/fault.c → arch/sh/mm/fault_32.c
View file @
811d50cb
File moved
arch/sh64/mm/tlbmiss.c → arch/sh/mm/fault_64.c
View file @
811d50cb
/*
/*
* This file is subject to the terms and conditions of the GNU General Public
* The SH64 TLB miss.
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* arch/sh64/mm/tlbmiss.c
*
*
* Original code from fault.c
* Original code from fault.c
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2000, 2001 Paolo Alberelli
...
@@ -12,16 +8,20 @@
...
@@ -12,16 +8,20 @@
* Copyright (C) 2003 Richard.Curnow@superh.com
* Copyright (C) 2003 Richard.Curnow@superh.com
*
*
* IMPORTANT NOTES :
* IMPORTANT NOTES :
* The do_fast_page_fault function is called from a context in entry.S where very few registers
* The do_fast_page_fault function is called from a context in entry.S
* have been saved. In particular, the code in this file must be compiled not to use ANY
* where very few registers have been saved. In particular, the code in
* caller-save registers that are not part of the restricted save set. Also, it means that
* this file must be compiled not to use ANY caller-save registers that
* code in this file must not make calls to functions elsewhere in the kernel, or else the
* are not part of the restricted save set. Also, it means that code in
* excepting context will see corruption in its caller-save registers. Plus, the entry.S save
* this file must not make calls to functions elsewhere in the kernel, or
* area is non-reentrant, so this code has to run with SR.BL==1, i.e. no interrupts taken inside
* else the excepting context will see corruption in its caller-save
* it and panic on any exception.
* registers. Plus, the entry.S save area is non-reentrant, so this code
* has to run with SR.BL==1, i.e. no interrupts taken inside it and panic
* on any exception.
*
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
*/
#include <linux/signal.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel.h>
...
@@ -33,14 +33,13 @@
...
@@ -33,14 +33,13 @@
#include <linux/mm.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/system.h>
#include <asm/tlb.h>
#include <asm/tlb.h>
#include <asm/io.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/mmu_context.h>
#include <asm/registers.h>		/* required by inline asm statements */
#include <asm/cpu/registers.h>
/* Callable from fault.c, so not static */
/* Callable from fault.c, so not static */
inline
void
__do_tlb_refill
(
unsigned
long
address
,
inline
void
__do_tlb_refill
(
unsigned
long
address
,
...
@@ -88,48 +87,47 @@ inline void __do_tlb_refill(unsigned long address,
...
@@ -88,48 +87,47 @@ inline void __do_tlb_refill(unsigned long address,
}
}
static
int
handle_vmalloc_fault
(
struct
mm_struct
*
mm
,
unsigned
long
protection_flags
,
static
int
handle_vmalloc_fault
(
struct
mm_struct
*
mm
,
unsigned
long
protection_flags
,
unsigned
long
long
textaccess
,
unsigned
long
long
textaccess
,
unsigned
long
address
)
unsigned
long
address
)
{
{
pgd_t
*
dir
;
pgd_t
*
dir
;
pud_t
*
pud
;
pmd_t
*
pmd
;
pmd_t
*
pmd
;
static
pte_t
*
pte
;
static
pte_t
*
pte
;
pte_t
entry
;
pte_t
entry
;
dir
=
pgd_offset_k
(
address
);
dir
=
pgd_offset_k
(
address
);
pmd
=
pmd_offset
(
dir
,
address
);
if
(
pmd_none
(
*
pmd
))
{
pud
=
pud_offset
(
dir
,
address
);
if
(
pud_none_or_clear_bad
(
pud
))
return
0
;
return
0
;
}
if
(
pmd_bad
(
*
pmd
))
{
pmd
=
pmd_offset
(
pud
,
address
);
pmd_clear
(
pmd
);
if
(
pmd_none_or_clear_bad
(
pmd
))
return
0
;
return
0
;
}
pte
=
pte_offset_kernel
(
pmd
,
address
);
pte
=
pte_offset_kernel
(
pmd
,
address
);
entry
=
*
pte
;
entry
=
*
pte
;
if
(
pte_none
(
entry
)
||
!
pte_present
(
entry
))
{
if
(
pte_none
(
entry
)
||
!
pte_present
(
entry
))
return
0
;
return
0
;
}
if
((
pte_val
(
entry
)
&
protection_flags
)
!=
protection_flags
)
if
((
pte_val
(
entry
)
&
protection_flags
)
!=
protection_flags
)
{
return
0
;
return
0
;
}
__do_tlb_refill
(
address
,
textaccess
,
pte
);
__do_tlb_refill
(
address
,
textaccess
,
pte
);
return
1
;
return
1
;
}
}
static
int
handle_tlbmiss
(
struct
mm_struct
*
mm
,
unsigned
long
long
protection_flags
,
static
int
handle_tlbmiss
(
struct
mm_struct
*
mm
,
unsigned
long
long
textaccess
,
unsigned
long
long
protection_flags
,
unsigned
long
address
)
unsigned
long
long
textaccess
,
unsigned
long
address
)
{
{
pgd_t
*
dir
;
pgd_t
*
dir
;
pud_t
*
pud
;
pmd_t
*
pmd
;
pmd_t
*
pmd
;
pte_t
*
pte
;
pte_t
*
pte
;
pte_t
entry
;
pte_t
entry
;
...
@@ -144,49 +142,49 @@ static int handle_tlbmiss(struct mm_struct *mm, unsigned long long protection_fl
...
@@ -144,49 +142,49 @@ static int handle_tlbmiss(struct mm_struct *mm, unsigned long long protection_fl
See how mm->pgd is allocated and initialised in pgd_alloc to see why
See how mm->pgd is allocated and initialised in pgd_alloc to see why
the next test is necessary. - RPC */
the next test is necessary. - RPC */
if
(
address
>=
(
unsigned
long
)
TASK_SIZE
)
{
if
(
address
>=
(
unsigned
long
)
TASK_SIZE
)
/* upper half - never has page table entries. */
/* upper half - never has page table entries. */
return
0
;
return
0
;
}
dir
=
pgd_offset
(
mm
,
address
);
dir
=
pgd_offset
(
mm
,
address
);
if
(
pgd_none
(
*
dir
)
)
{
if
(
pgd_none
(
*
dir
)
||
!
pgd_present
(
*
dir
))
return
0
;
return
0
;
}
if
(
!
pgd_present
(
*
dir
))
if
(
!
pgd_present
(
*
dir
))
{
return
0
;
return
0
;
}
p
md
=
pm
d_offset
(
dir
,
address
);
p
ud
=
pu
d_offset
(
dir
,
address
);
if
(
p
md_none
(
*
pmd
))
{
if
(
p
ud_none
(
*
pud
)
||
!
pud_present
(
*
pud
))
return
0
;
return
0
;
}
if
(
!
pmd_present
(
*
pmd
))
{
pmd
=
pmd_offset
(
pud
,
address
);
if
(
pmd_none
(
*
pmd
)
||
!
pmd_present
(
*
pmd
))
return
0
;
return
0
;
}
pte
=
pte_offset_kernel
(
pmd
,
address
);
pte
=
pte_offset_kernel
(
pmd
,
address
);
entry
=
*
pte
;
entry
=
*
pte
;
if
(
pte_none
(
entry
))
{
return
0
;
if
(
pte_none
(
entry
)
||
!
pte_present
(
entry
))
}
if
(
!
pte_present
(
entry
))
{
return
0
;
return
0
;
}
/* If the page doesn't have sufficient protection bits set to service the
/*
kind of fault being handled, there's not much point doing the TLB refill.
* If the page doesn't have sufficient protection bits set to
Punt the fault to the general handler. */
* service the kind of fault being handled, there's not much
if
((
pte_val
(
entry
)
&
protection_flags
)
!=
protection_flags
)
{
* point doing the TLB refill. Punt the fault to the general
* handler.
*/
if
((
pte_val
(
entry
)
&
protection_flags
)
!=
protection_flags
)
return
0
;
return
0
;
}
__do_tlb_refill
(
address
,
textaccess
,
pte
);
__do_tlb_refill
(
address
,
textaccess
,
pte
);
return
1
;
return
1
;
}
}
/* Put all this information into one structure so that everything is just arithmetic
/*
relative to a single base address. This reduces the number of movi/shori pairs needed
* Put all this information into one structure so that everything is just
just to load addresses of static data. */
* arithmetic relative to a single base address. This reduces the number
* of movi/shori pairs needed just to load addresses of static data.
*/
struct
expevt_lookup
{
struct
expevt_lookup
{
unsigned
short
protection_flags
[
8
];
unsigned
short
protection_flags
[
8
];
unsigned
char
is_text_access
[
8
];
unsigned
char
is_text_access
[
8
];
...
@@ -216,7 +214,8 @@ static struct expevt_lookup expevt_lookup_table = {
...
@@ -216,7 +214,8 @@ static struct expevt_lookup expevt_lookup_table = {
general fault handling in fault.c which deals with mapping file-backed
general fault handling in fault.c which deals with mapping file-backed
pages, stack growth, segmentation faults, swapping etc etc)
pages, stack growth, segmentation faults, swapping etc etc)
*/
*/
asmlinkage
int
do_fast_page_fault
(
unsigned
long
long
ssr_md
,
unsigned
long
long
expevt
,
asmlinkage
int
do_fast_page_fault
(
unsigned
long
long
ssr_md
,
unsigned
long
long
expevt
,
unsigned
long
address
)
unsigned
long
address
)
{
{
struct
task_struct
*
tsk
;
struct
task_struct
*
tsk
;
...
@@ -226,17 +225,18 @@ asmlinkage int do_fast_page_fault(unsigned long long ssr_md, unsigned long long
...
@@ -226,17 +225,18 @@ asmlinkage int do_fast_page_fault(unsigned long long ssr_md, unsigned long long
unsigned
long
long
index
;
unsigned
long
long
index
;
unsigned
long
long
expevt4
;
unsigned
long
long
expevt4
;
/* The next few lines implement a way of hashing EXPEVT into a small array index
/* The next few lines implement a way of hashing EXPEVT into a
which can be used to lookup parameters specific to the type of TLBMISS being
* small array index which can be used to lookup parameters
handled. Note:
* specific to the type of TLBMISS being handled.
ITLBMISS has EXPEVT==0xa40
*
RTLBMISS has EXPEVT==0x040
* Note:
WTLBMISS has EXPEVT==0x060
* ITLBMISS has EXPEVT==0xa40
*/
* RTLBMISS has EXPEVT==0x040
* WTLBMISS has EXPEVT==0x060
*/
expevt4
=
(
expevt
>>
4
);
expevt4
=
(
expevt
>>
4
);
/* TODO : xor ssr_md into this expression too.
Then we can check that PRU is set
/* TODO : xor ssr_md into this expression too.
Then we can check
when it needs to be. */
* that PRU is set
when it needs to be. */
index
=
expevt4
^
(
expevt4
>>
5
);
index
=
expevt4
^
(
expevt4
>>
5
);
index
&=
7
;
index
&=
7
;
protection_flags
=
expevt_lookup_table
.
protection_flags
[
index
];
protection_flags
=
expevt_lookup_table
.
protection_flags
[
index
];
...
@@ -262,18 +262,18 @@ asmlinkage int do_fast_page_fault(unsigned long long ssr_md, unsigned long long
...
@@ -262,18 +262,18 @@ asmlinkage int do_fast_page_fault(unsigned long long ssr_md, unsigned long long
if
((
address
>=
VMALLOC_START
&&
address
<
VMALLOC_END
)
||
if
((
address
>=
VMALLOC_START
&&
address
<
VMALLOC_END
)
||
(
address
>=
IOBASE_VADDR
&&
address
<
IOBASE_END
))
{
(
address
>=
IOBASE_VADDR
&&
address
<
IOBASE_END
))
{
if
(
ssr_md
)
{
if
(
ssr_md
)
/* Process-contexts can never have this address range mapped */
/*
if
(
handle_vmalloc_fault
(
mm
,
protection_flags
,
textaccess
,
address
))
{
* Process-contexts can never have this address
* range mapped
*/
if
(
handle_vmalloc_fault
(
mm
,
protection_flags
,
textaccess
,
address
))
return
1
;
return
1
;
}
}
}
else
if
(
!
in_interrupt
()
&&
mm
)
{
}
else
if
(
!
in_interrupt
()
&&
mm
)
{
if
(
handle_tlbmiss
(
mm
,
protection_flags
,
textaccess
,
address
))
{
if
(
handle_tlbmiss
(
mm
,
protection_flags
,
textaccess
,
address
))
return
1
;
return
1
;
}
}
}
return
0
;
return
0
;
}
}
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment