Commit f50b153b authored by Kumar Gala, committed by Linus Torvalds

[PATCH] ppc32: Support 36-bit physical addressing on e500

To add support for 36-bit physical addressing on e500 the following changes
have been made.  The changes are generalized to support any physical address
size larger than 32-bits:

* Allow FSL Book-E parts to use a 64-bit PTE, it is 44-bits of pfn, 20-bits
  of flags.

* Introduced new CPU feature (CPU_FTR_BIG_PHYS) to allow runtime handling of
  updating hardware register (SPRN_MAS7) which holds the upper 32-bits of
  physical address that will be written into the TLB.  This is useful since
  not all e500 cores support 36-bit physical addressing.

* Currently have a pass through implementation of fixup_bigphys_addr

* Moved _PAGE_DIRTY in the 64-bit PTE case to free room for three additional
  storage attributes that may exist in future FSL Book-E cores and updated
  fault handler to copy these bits into the hardware TLBs.
Signed-off-by: Kumar Gala <kumar.gala@freescale.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent b464fce5
...@@ -98,13 +98,19 @@ config FSL_BOOKE ...@@ -98,13 +98,19 @@ config FSL_BOOKE
config PTE_64BIT config PTE_64BIT
bool bool
depends on 44x depends on 44x || E500
default y default y if 44x
default y if E500 && PHYS_64BIT
config PHYS_64BIT config PHYS_64BIT
bool bool 'Large physical address support' if E500
depends on 44x depends on 44x || E500
default y default y if 44x
---help---
This option enables kernel support for larger than 32-bit physical
addresses. This feature is not available on all e500 cores.
If in doubt, say N here.
config ALTIVEC config ALTIVEC
bool "AltiVec Support" bool "AltiVec Support"
......
...@@ -347,6 +347,38 @@ skpinv: addi r6,r6,1 /* Increment */ ...@@ -347,6 +347,38 @@ skpinv: addi r6,r6,1 /* Increment */
mtspr SPRN_SRR1,r3 mtspr SPRN_SRR1,r3
rfi /* change context and jump to start_kernel */ rfi /* change context and jump to start_kernel */
/* Macros to hide the PTE size differences between the 32-bit and
 * 64-bit (CONFIG_PTE_64BIT) page table entry layouts.
 *
 * PTE_FLAGS_OFFSET -- byte offset of the flags word within a PTE.
 *   For 64-bit PTEs the flags are in the low word at offset 4
 *   (NOTE(review): assumes big-endian layout, as on these Book-E
 *   cores -- confirm if ever reused elsewhere).
 *
 * FIND_PTE -- walks the page tables given EA & pgdir pointer
 * r10 -- EA of fault
 * r11 -- PGDIR pointer
 * r12 -- free
 * label 2: is the bailout case
 *
 * if we find the pte (fall through):
 * r11 is low pte word
 * r12 is pointer to the pte
 */
#ifdef CONFIG_PTE_64BIT
#define PTE_FLAGS_OFFSET 4
#define FIND_PTE \
rlwinm r12, r10, 13, 19, 29; /* Compute pgdir/pmd offset */ \
lwzx r11, r12, r11; /* Get pgd/pmd entry */ \
rlwinm. r12, r11, 0, 0, 20; /* Extract pt base address */ \
beq 2f; /* Bail if no table (cr0.eq set by rlwinm.) */ \
rlwimi r12, r10, 23, 20, 28; /* Compute pte address (8-byte entries) */ \
lwz r11, 4(r12); /* Get pte entry -- low word of the 64-bit PTE */
#else
#define PTE_FLAGS_OFFSET 0
#define FIND_PTE \
rlwimi r11, r10, 12, 20, 29; /* Create L1 (pgdir/pmd) address */ \
lwz r11, 0(r11); /* Get L1 entry */ \
rlwinm. r12, r11, 0, 0, 19; /* Extract L2 (pte) base address */ \
beq 2f; /* Bail if no table (cr0.eq set by rlwinm.) */ \
rlwimi r12, r10, 22, 20, 29; /* Compute PTE address (4-byte entries) */ \
lwz r11, 0(r12); /* Get Linux PTE */
#endif
/* /*
* Interrupt vector entry code * Interrupt vector entry code
* *
...@@ -405,13 +437,7 @@ interrupt_base: ...@@ -405,13 +437,7 @@ interrupt_base:
mfspr r11,SPRN_SPRG3 mfspr r11,SPRN_SPRG3
lwz r11,PGDIR(r11) lwz r11,PGDIR(r11)
4: 4:
rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ FIND_PTE
lwz r11, 0(r11) /* Get L1 entry */
rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */
beq 2f /* Bail if no table */
rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
lwz r11, 0(r12) /* Get Linux PTE */
/* Are _PAGE_USER & _PAGE_RW set & _PAGE_HWWRITE not? */ /* Are _PAGE_USER & _PAGE_RW set & _PAGE_HWWRITE not? */
andi. r13, r11, _PAGE_RW|_PAGE_USER|_PAGE_HWWRITE andi. r13, r11, _PAGE_RW|_PAGE_USER|_PAGE_HWWRITE
...@@ -420,14 +446,12 @@ interrupt_base: ...@@ -420,14 +446,12 @@ interrupt_base:
/* Update 'changed'. */ /* Update 'changed'. */
ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
stw r11, 0(r12) /* Update Linux page table */ stw r11, PTE_FLAGS_OFFSET(r12) /* Update Linux page table */
/* MAS2 not updated as the entry does exist in the tlb, this /* MAS2 not updated as the entry does exist in the tlb, this
fault taken to detect state transition (eg: COW -> DIRTY) fault taken to detect state transition (eg: COW -> DIRTY)
*/ */
lis r12, MAS3_RPN@h andi. r11, r11, _PAGE_HWEXEC
ori r12, r12, _PAGE_HWEXEC | MAS3_RPN@l
and r11, r11, r12
rlwimi r11, r11, 31, 27, 27 /* SX <- _PAGE_HWEXEC */ rlwimi r11, r11, 31, 27, 27 /* SX <- _PAGE_HWEXEC */
ori r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */ ori r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */
...@@ -439,7 +463,10 @@ interrupt_base: ...@@ -439,7 +463,10 @@ interrupt_base:
/* find the TLB index that caused the fault. It has to be here. */ /* find the TLB index that caused the fault. It has to be here. */
tlbsx 0, r10 tlbsx 0, r10
mtspr SPRN_MAS3,r11 /* only update the perm bits, assume the RPN is fine */
mfspr r12, SPRN_MAS3
rlwimi r12, r11, 0, 20, 31
mtspr SPRN_MAS3,r12
tlbwe tlbwe
/* Done...restore registers and get out of here. */ /* Done...restore registers and get out of here. */
...@@ -530,18 +557,15 @@ interrupt_base: ...@@ -530,18 +557,15 @@ interrupt_base:
lwz r11,PGDIR(r11) lwz r11,PGDIR(r11)
4: 4:
rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ FIND_PTE
lwz r11, 0(r11) /* Get L1 entry */ andi. r13, r11, _PAGE_PRESENT /* Is the page present? */
rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */ beq 2f /* Bail if not present */
beq 2f /* Bail if no table */
rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
lwz r11, 0(r12) /* Get Linux PTE */
andi. r13, r11, _PAGE_PRESENT
beq 2f
#ifdef CONFIG_PTE_64BIT
lwz r13, 0(r12)
#endif
ori r11, r11, _PAGE_ACCESSED ori r11, r11, _PAGE_ACCESSED
stw r11, 0(r12) stw r11, PTE_FLAGS_OFFSET(r12)
/* Jump to common tlb load */ /* Jump to common tlb load */
b finish_tlb_load b finish_tlb_load
...@@ -594,18 +618,15 @@ interrupt_base: ...@@ -594,18 +618,15 @@ interrupt_base:
lwz r11,PGDIR(r11) lwz r11,PGDIR(r11)
4: 4:
rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ FIND_PTE
lwz r11, 0(r11) /* Get L1 entry */ andi. r13, r11, _PAGE_PRESENT /* Is the page present? */
rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */ beq 2f /* Bail if not present */
beq 2f /* Bail if no table */
rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
lwz r11, 0(r12) /* Get Linux PTE */
andi. r13, r11, _PAGE_PRESENT
beq 2f
#ifdef CONFIG_PTE_64BIT
lwz r13, 0(r12)
#endif
ori r11, r11, _PAGE_ACCESSED ori r11, r11, _PAGE_ACCESSED
stw r11, 0(r12) stw r11, PTE_FLAGS_OFFSET(r12)
/* Jump to common TLB load point */ /* Jump to common TLB load point */
b finish_tlb_load b finish_tlb_load
...@@ -690,27 +711,39 @@ finish_tlb_load: ...@@ -690,27 +711,39 @@ finish_tlb_load:
*/ */
mfspr r12, SPRN_MAS2 mfspr r12, SPRN_MAS2
#ifdef CONFIG_PTE_64BIT
rlwimi r12, r11, 26, 24, 31 /* extract ...WIMGE from pte */
#else
rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */ rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */
#endif
mtspr SPRN_MAS2, r12 mtspr SPRN_MAS2, r12
bge 5, 1f bge 5, 1f
/* addr > TASK_SIZE */ /* is user addr */
li r10, (MAS3_UX | MAS3_UW | MAS3_UR) andi. r12, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC)
andi. r13, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC) andi. r10, r11, _PAGE_USER /* Test for _PAGE_USER */
andi. r12, r11, _PAGE_USER /* Test for _PAGE_USER */ srwi r10, r12, 1
iseleq r12, 0, r10
and r10, r12, r13
srwi r12, r10, 1
or r12, r12, r10 /* Copy user perms into supervisor */ or r12, r12, r10 /* Copy user perms into supervisor */
iseleq r12, 0, r12
b 2f b 2f
/* addr <= TASK_SIZE */ /* is kernel addr */
1: rlwinm r12, r11, 31, 29, 29 /* Extract _PAGE_HWWRITE into SW */ 1: rlwinm r12, r11, 31, 29, 29 /* Extract _PAGE_HWWRITE into SW */
ori r12, r12, (MAS3_SX | MAS3_SR) ori r12, r12, (MAS3_SX | MAS3_SR)
#ifdef CONFIG_PTE_64BIT
2: rlwimi r12, r13, 24, 0, 7 /* grab RPN[32:39] */
rlwimi r12, r11, 24, 8, 19 /* grab RPN[40:51] */
mtspr SPRN_MAS3, r12
BEGIN_FTR_SECTION
srwi r10, r13, 8 /* grab RPN[8:31] */
mtspr SPRN_MAS7, r10
END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS)
#else
2: rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */ 2: rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */
mtspr SPRN_MAS3, r11 mtspr SPRN_MAS3, r11
#endif
tlbwe tlbwe
/* Done...restore registers and get out of here. */ /* Done...restore registers and get out of here. */
......
...@@ -31,3 +31,11 @@ get_ccsrbar(void) ...@@ -31,3 +31,11 @@ get_ccsrbar(void)
} }
EXPORT_SYMBOL(get_ccsrbar); EXPORT_SYMBOL(get_ccsrbar);
/*
 * fixup_bigphys_addr() - hook to adjust a large physical address range
 * before it is mapped.
 *
 * @addr: physical address of the region
 * @size: size of the region (currently unused)
 *
 * For now this is a pass-through: @addr is returned unchanged.  Platforms
 * that need to relocate high physical addresses can add logic here.
 */
phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size)
{
	return addr;
}
EXPORT_SYMBOL(fixup_bigphys_addr);
...@@ -86,8 +86,9 @@ static inline unsigned int cpu_has_feature(unsigned int feature) ...@@ -86,8 +86,9 @@ static inline unsigned int cpu_has_feature(unsigned int feature)
#define CPU_FTR_DUAL_PLL_750FX 0x00004000 #define CPU_FTR_DUAL_PLL_750FX 0x00004000
#define CPU_FTR_NO_DPM 0x00008000 #define CPU_FTR_NO_DPM 0x00008000
#define CPU_FTR_HAS_HIGH_BATS 0x00010000 #define CPU_FTR_HAS_HIGH_BATS 0x00010000
#define CPU_FTR_NEED_COHERENT 0x00020000 #define CPU_FTR_NEED_COHERENT 0x00020000
#define CPU_FTR_NO_BTIC 0x00040000 #define CPU_FTR_NO_BTIC 0x00040000
#define CPU_FTR_BIG_PHYS 0x00080000
#ifdef __ASSEMBLY__ #ifdef __ASSEMBLY__
......
...@@ -225,8 +225,7 @@ extern unsigned long ioremap_bot, ioremap_base; ...@@ -225,8 +225,7 @@ extern unsigned long ioremap_bot, ioremap_base;
/* ERPN in a PTE never gets cleared, ignore it */ /* ERPN in a PTE never gets cleared, ignore it */
#define _PTE_NONE_MASK 0xffffffff00000000ULL #define _PTE_NONE_MASK 0xffffffff00000000ULL
#elif defined(CONFIG_E500) #elif defined(CONFIG_FSL_BOOKE)
/* /*
MMU Assist Register 3: MMU Assist Register 3:
...@@ -240,21 +239,29 @@ extern unsigned long ioremap_bot, ioremap_base; ...@@ -240,21 +239,29 @@ extern unsigned long ioremap_bot, ioremap_base;
entries use the top 29 bits. entries use the top 29 bits.
*/ */
/* Definitions for e500 core */ /* Definitions for FSL Book-E Cores */
#define _PAGE_PRESENT 0x001 /* S: PTE contains a translation */ #define _PAGE_PRESENT 0x00001 /* S: PTE contains a translation */
#define _PAGE_USER 0x002 /* S: User page (maps to UR) */ #define _PAGE_USER 0x00002 /* S: User page (maps to UR) */
#define _PAGE_FILE 0x002 /* S: when !present: nonlinear file mapping */ #define _PAGE_FILE 0x00002 /* S: when !present: nonlinear file mapping */
#define _PAGE_ACCESSED 0x004 /* S: Page referenced */ #define _PAGE_ACCESSED 0x00004 /* S: Page referenced */
#define _PAGE_HWWRITE 0x008 /* H: Dirty & RW, set in exception */ #define _PAGE_HWWRITE 0x00008 /* H: Dirty & RW, set in exception */
#define _PAGE_RW 0x010 /* S: Write permission */ #define _PAGE_RW 0x00010 /* S: Write permission */
#define _PAGE_HWEXEC 0x020 /* H: UX permission */ #define _PAGE_HWEXEC 0x00020 /* H: UX permission */
#define _PAGE_ENDIAN 0x040 /* H: E bit */ #define _PAGE_ENDIAN 0x00040 /* H: E bit */
#define _PAGE_GUARDED 0x080 /* H: G bit */ #define _PAGE_GUARDED 0x00080 /* H: G bit */
#define _PAGE_COHERENT 0x100 /* H: M bit */ #define _PAGE_COHERENT 0x00100 /* H: M bit */
#define _PAGE_NO_CACHE 0x200 /* H: I bit */ #define _PAGE_NO_CACHE 0x00200 /* H: I bit */
#define _PAGE_WRITETHRU 0x400 /* H: W bit */ #define _PAGE_WRITETHRU 0x00400 /* H: W bit */
#define _PAGE_DIRTY 0x800 /* S: Page dirty */
#ifdef CONFIG_PTE_64BIT
#define _PAGE_DIRTY 0x08000 /* S: Page dirty */
/* ERPN in a PTE never gets cleared, ignore it */
#define _PTE_NONE_MASK 0xffffffffffff0000ULL
#else
#define _PAGE_DIRTY 0x00800 /* S: Page dirty */
#endif
#define _PMD_PRESENT 0 #define _PMD_PRESENT 0
#define _PMD_PRESENT_MASK (PAGE_MASK) #define _PMD_PRESENT_MASK (PAGE_MASK)
...@@ -433,7 +440,11 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void); ...@@ -433,7 +440,11 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
/* in some case we want to additionaly adjust where the pfn is in the pte to /* in some case we want to additionaly adjust where the pfn is in the pte to
* allow room for more flags */ * allow room for more flags */
#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#define PFN_SHIFT_OFFSET (PAGE_SHIFT + 8)
#else
#define PFN_SHIFT_OFFSET (PAGE_SHIFT) #define PFN_SHIFT_OFFSET (PAGE_SHIFT)
#endif
#define pte_pfn(x) (pte_val(x) >> PFN_SHIFT_OFFSET) #define pte_pfn(x) (pte_val(x) >> PFN_SHIFT_OFFSET)
#define pte_page(x) pfn_to_page(pte_pfn(x)) #define pte_page(x) pfn_to_page(pte_pfn(x))
......
...@@ -172,6 +172,7 @@ do { \ ...@@ -172,6 +172,7 @@ do { \
#define SPRN_MAS4 0x274 /* MMU Assist Register 4 */ #define SPRN_MAS4 0x274 /* MMU Assist Register 4 */
#define SPRN_MAS5 0x275 /* MMU Assist Register 5 */ #define SPRN_MAS5 0x275 /* MMU Assist Register 5 */
#define SPRN_MAS6 0x276 /* MMU Assist Register 6 */ #define SPRN_MAS6 0x276 /* MMU Assist Register 6 */
#define SPRN_MAS7 0x3b0 /* MMU Assist Register 7 */
#define SPRN_PID1 0x279 /* Process ID Register 1 */ #define SPRN_PID1 0x279 /* Process ID Register 1 */
#define SPRN_PID2 0x27A /* Process ID Register 2 */ #define SPRN_PID2 0x27A /* Process ID Register 2 */
#define SPRN_TLB0CFG 0x2B0 /* TLB 0 Config Register */ #define SPRN_TLB0CFG 0x2B0 /* TLB 0 Config Register */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment