Commit 41743a4e authored by Benjamin Herrenschmidt, committed by Paul Mackerras

powerpc: Free a PTE bit on ppc64 with 64K pages

This frees a PTE bit when using 64K pages on ppc64.  This is done
by getting rid of the separate _PAGE_HASHPTE bit.  Instead, we just test
if any of the 16 sub-page bits is set.  For non-combo pages (i.e. real
64K pages), we set SUB0 and the location encoding in that field.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent ff1f4ee9
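
In C terms, the change looks roughly like the sketch below. This is illustrative only, not kernel code: the EX_* constants are stand-ins whose exact values are assumed here (the authoritative _PAGE_* definitions are in the pgtable header hunks further down), and the two helpers contrast the old single-bit test with the new "any sub-page bit set" test.

#include <stdbool.h>
#include <stdint.h>

/* Assumed example values, NOT the authoritative kernel definitions. */
#define EX_PAGE_HASHPTE_4K   0x0400UL      /* old scheme: one software bit */
#define EX_PAGE_HPTE_SUB     0x0ffff000UL  /* new scheme: 16 sub-page HPTE-valid bits */
#define EX_PAGE_HPTE_SUB0    0x08000000UL  /* first sub-page bit, set for real 64K pages */

/* Old scheme: a dedicated software bit says "this PTE has a hash PTE". */
static bool pte_hashed_old(uint64_t pte)
{
	return (pte & EX_PAGE_HASHPTE_4K) != 0;
}

/* New scheme on 64K configs: _PAGE_HASHPTE becomes the whole sub-page
 * mask, so "any sub bit set" means the PTE is hashed.  Combo pages set
 * one bit per hashed 4K sub-page; real 64K pages set SUB0 plus the
 * slot encoding that overlaps the same field.
 */
static bool pte_hashed_new(uint64_t pte)
{
	return (pte & EX_PAGE_HPTE_SUB) != 0;
}

For combo pages nothing changes in practice, since the old _PAGE_HASHPTE was effectively redundant with the OR of the sub-page bits; that reasoning is spelled out in the comment added to the 64K pgtable header below.
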
@@ -388,7 +388,7 @@ _GLOBAL(__hash_page_4K)
 	 */
 	rlwinm	r30,r4,32-9+7,31-7,31-7	/* _PAGE_RW -> _PAGE_DIRTY */
 	or	r30,r30,r31
-	ori	r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE
+	ori	r30,r30,_PAGE_BUSY | _PAGE_ACCESSED
 	oris	r30,r30,_PAGE_COMBO@h
 	/* Write the linux PTE atomically (setting busy) */
 	stdcx.	r30,0,r6
@@ -468,7 +468,7 @@ END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
 	 * go to out-of-line code to try to modify the HPTE. We look for
 	 * the bit at (1 >> (index + 32))
 	 */
-	andi.	r0,r31,_PAGE_HASHPTE
+	rldicl.	r0,r31,64-12,48
 	li	r26,0			/* Default hidx */
 	beq	htab_insert_pte
@@ -726,11 +726,11 @@ BEGIN_FTR_SECTION
 	bne-	ht64_bail_ok
 END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
 	/* Prepare new PTE value (turn access RW into DIRTY, then
-	 * add BUSY,HASHPTE and ACCESSED)
+	 * add BUSY and ACCESSED)
 	 */
 	rlwinm	r30,r4,32-9+7,31-7,31-7	/* _PAGE_RW -> _PAGE_DIRTY */
 	or	r30,r30,r31
-	ori	r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE
+	ori	r30,r30,_PAGE_BUSY | _PAGE_ACCESSED
 	/* Write the linux PTE atomically (setting busy) */
 	stdcx.	r30,0,r6
 	bne-	1b
@@ -798,18 +798,21 @@ END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
 	/* Check if we may already be in the hashtable, in this case, we
 	 * go to out-of-line code to try to modify the HPTE
 	 */
-	andi.	r0,r31,_PAGE_HASHPTE
+	rldicl.	r0,r31,64-12,48
 	bne	ht64_modify_pte
 ht64_insert_pte:
 	/* Clear hpte bits in new pte (we also clear BUSY btw) and
-	 * add _PAGE_HASHPTE
+	 * add _PAGE_HPTE_SUB0
 	 */
 	lis	r0,_PAGE_HPTEFLAGS@h
 	ori	r0,r0,_PAGE_HPTEFLAGS@l
 	andc	r30,r30,r0
+#ifdef CONFIG_PPC_64K_PAGES
+	oris	r30,r30,_PAGE_HPTE_SUB0@h
+#else
 	ori	r30,r30,_PAGE_HASHPTE
+#endif
 	/* Phyical address in r5 */
 	rldicl	r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
 	sldi	r5,r5,PAGE_SHIFT
......
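
The two andi./rldicl. replacements above are the assembly side of that test: instead of checking the single _PAGE_HASHPTE bit, the code extracts the 16-bit sub-page field starting at PTE bit 12 and lets the dot-form instruction set CR0 for the following branch. A rough C equivalent, offered only as a reading aid (the field position is inferred from the rldicl operands, not quoted from a header):

#include <stdint.h>

/* What "rldicl. r0,r31,64-12,48" computes: rotate the PTE right by 12
 * and keep the low 16 bits, i.e. PTE bits 12..27, the sub-page HPTE
 * valid/slot field.  A non-zero result means the PTE is already hashed.
 */
static uint64_t hpte_sub_bits(uint64_t pte)
{
	return (pte >> 12) & 0xffff;
}
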
@@ -458,8 +458,7 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 		old_pte = pte_val(*ptep);
 		if (old_pte & _PAGE_BUSY)
 			goto out;
-		new_pte = old_pte | _PAGE_BUSY |
-			_PAGE_ACCESSED | _PAGE_HASHPTE;
+		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
 	} while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
 					 old_pte, new_pte));
@@ -499,8 +498,11 @@ repeat:
 				HPTES_PER_GROUP) & ~0x7UL;
 		/* clear HPTE slot informations in new PTE */
+#ifdef CONFIG_PPC_64K_PAGES
+		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
+#else
 		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
+#endif
 		/* Add in WIMG bits */
 		/* XXX We should store these in the pte */
 		/* --BenH: I think they are ... */
......
@@ -41,6 +41,7 @@
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 /* PTE bits */
+#define _PAGE_HASHPTE	0x0400 /* software: pte has an associated HPTE */
 #define _PAGE_SECONDARY	0x8000 /* software: HPTE is in secondary group */
 #define _PAGE_GROUP_IX	0x7000 /* software: HPTE index within group */
 #define _PAGE_F_SECOND	_PAGE_SECONDARY
......
@@ -75,6 +75,20 @@ static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
 #define _PAGE_COMBO	0x10000000 /* this is a combo 4k page */
 #define _PAGE_4K_PFN	0x20000000 /* PFN is for a single 4k page */
+
+/* For 64K page, we don't have a separate _PAGE_HASHPTE bit. Instead,
+ * we set that to be the whole sub-bits mask. The C code will only
+ * test this, so a multi-bit mask will work. For combo pages, this
+ * is equivalent as effectively, the old _PAGE_HASHPTE was an OR of
+ * all the sub bits. For real 64k pages, we now have the assembly set
+ * _PAGE_HPTE_SUB0 in addition to setting the HIDX bits which overlap
+ * that mask. This is fine as long as the HIDX bits are never set on
+ * a PTE that isn't hashed, which is the case today.
+ *
+ * A little nit is for the huge page C code, which does the hashing
+ * in C, we need to provide which bit to use.
+ */
+#define _PAGE_HASHPTE	_PAGE_HPTE_SUB
 /* Note the full page bits must be in the same location as for normal
  * 4k pages as the same asssembly will be used to insert 64K pages
  * wether the kernel has CONFIG_PPC_64K_PAGES or not
@@ -83,8 +97,7 @@ static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
 #define _PAGE_F_GIX	0x00007000 /* full page: hidx bits */
 /* PTE flags to conserve for HPTE identification */
-#define _PAGE_HPTEFLAGS	(_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_HPTE_SUB |\
-			 _PAGE_COMBO)
+#define _PAGE_HPTEFLAGS	(_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_COMBO)
 /* Shift to put page number into pte.
  *
......
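
With _PAGE_HASHPTE defined as the whole sub-page mask, the "clear old HPTE bookkeeping, then mark the PTE hashed" step used by the hugetlb hunk above reduces to the pattern sketched here. This is a minimal sketch under assumed bit values; the EX_* names are placeholders for the real _PAGE_* macros.

#include <stdint.h>

/* Assumed example values, mirroring the structure of the new
 * _PAGE_HPTEFLAGS definition rather than quoting exact constants. */
#define EX_PAGE_BUSY       0x0800UL
#define EX_PAGE_COMBO      0x10000000UL
#define EX_PAGE_HPTE_SUB   0x0ffff000UL   /* == _PAGE_HASHPTE on 64K configs */
#define EX_PAGE_HPTE_SUB0  0x08000000UL
#define EX_PAGE_HPTEFLAGS  (EX_PAGE_BUSY | EX_PAGE_HPTE_SUB | EX_PAGE_COMBO)

static uint64_t prepare_hashed_pte(uint64_t new_pte)
{
	/* Drop BUSY, COMBO and any stale sub-page/slot bits, then set
	 * SUB0 so a later "pte & _PAGE_HASHPTE" test sees the page as
	 * hashed (the real 64K / huge page case).
	 */
	return (new_pte & ~EX_PAGE_HPTEFLAGS) | EX_PAGE_HPTE_SUB0;
}

The remaining hunks move the 0x0400 definition into the 4K-specific header (above) and drop it from the common ppc64 PTE bits (below), which is what actually frees the bit on 64K configurations.
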
@@ -91,7 +91,6 @@
 #define _PAGE_DIRTY	0x0080 /* C: page changed */
 #define _PAGE_ACCESSED	0x0100 /* R: page referenced */
 #define _PAGE_RW	0x0200 /* software: user write access allowed */
-#define _PAGE_HASHPTE	0x0400 /* software: pte has an associated HPTE */
 #define _PAGE_BUSY	0x0800 /* software: PTE & hash are busy */
 #define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
......