Commit e701d269 authored by Benjamin Herrenschmidt, committed by Josh Boyer

[POWERPC] 4xx: Fix 4xx flush_tlb_page()

On 4xx CPUs, the current implementation of flush_tlb_page() uses
a low level _tlbie() assembly function that only works for the
current PID. Thus, invalidations caused by, for example, a COW
fault triggered by get_user_pages() from a different context will
not work properly, causing, among other things, gdb breakpoints
to fail.

This patch adds a "pid" argument to _tlbie() on 4xx processors
and uses it to flush entries in the right context. FSL BookE
also gets the argument, but it seems they don't need it (their
tlbivax form ignores the PID when invalidating, according to the
documentation I have).
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Josh Boyer <jwboyer@linux.vnet.ibm.com>
parent 57d75561
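
Condensed view of the interface change (a sketch assembled from the hunks below; the prototype and the flush_tlb_page() body are taken verbatim from the tlbflush.h hunk):

	/* _tlbie() now takes the PID naming the context whose entry should
	 * be invalidated; previously the current SPRN_PID was used
	 * implicitly. */
	extern void _tlbie(unsigned long address, unsigned int pid);

	/* Callers pass the target mm's context id, so a flush issued from a
	 * different context (e.g. a COW fault via get_user_pages()) still
	 * hits the right TLB entry: */
	static inline void flush_tlb_page(struct vm_area_struct *vma,
					  unsigned long vmaddr)
	{
		_tlbie(vmaddr, vma->vm_mm->context.id);
	}

	/* Kernel-global mappings, such as the early serial TLB entry purged
	 * in the platform files, pass PID 0: _tlbie(UART0_IO_BASE, 0); */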
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -288,7 +288,16 @@ _GLOBAL(_tlbia)
  */
 _GLOBAL(_tlbie)
 #if defined(CONFIG_40x)
+	/* We run the search with interrupts disabled because we have to change
+	 * the PID and I don't want to preempt when that happens.
+	 */
+	mfmsr	r5
+	mfspr	r6,SPRN_PID
+	wrteei	0
+	mtspr	SPRN_PID,r4
 	tlbsx.	r3, 0, r3
+	mtspr	SPRN_PID,r6
+	wrtee	r5
 	bne	10f
 	sync
 	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
@@ -297,23 +306,23 @@ _GLOBAL(_tlbie)
 	tlbwe	r3, r3, TLB_TAG
 	isync
 10:
+
 #elif defined(CONFIG_44x)
-	mfspr	r4,SPRN_MMUCR
-	mfspr	r5,SPRN_PID		/* Get PID */
-	rlwimi	r4,r5,0,24,31		/* Set TID */
+	mfspr	r5,SPRN_MMUCR
+	rlwimi	r5,r4,0,24,31		/* Set TID */
 
 	/* We have to run the search with interrupts disabled, even critical
 	 * and debug interrupts (in fact the only critical exceptions we have
 	 * are debug and machine check). Otherwise an interrupt which causes
 	 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
-	mfmsr	r5
+	mfmsr	r4
 	lis	r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
 	addi	r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
-	andc	r6,r5,r6
+	andc	r6,r4,r6
 	mtmsr	r6
-	mtspr	SPRN_MMUCR,r4
+	mtspr	SPRN_MMUCR,r5
 	tlbsx.	r3, 0, r3
-	mtmsr	r5
+	mtmsr	r4
 	bne	10f
 	sync
 	/* There are only 64 TLB entries, so r3 < 64,
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -309,7 +309,7 @@ good_area:
 			set_bit(PG_arch_1, &page->flags);
 		}
 		pte_update(ptep, 0, _PAGE_HWEXEC);
-		_tlbie(address);
+		_tlbie(address, mm->context.id);
 		pte_unmap_unlock(ptep, ptl);
 		up_read(&mm->mmap_sem);
 		return 0;
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -61,12 +61,12 @@ extern unsigned long total_lowmem;
 #define mmu_mapin_ram()		(0UL)
 
 #elif defined(CONFIG_4xx)
-#define flush_HPTE(X, va, pg)	_tlbie(va)
+#define flush_HPTE(pid, va, pg)	_tlbie(va, pid)
 extern void MMU_init_hw(void);
 extern unsigned long mmu_mapin_ram(void);
 
 #elif defined(CONFIG_FSL_BOOKE)
-#define flush_HPTE(X, va, pg)	_tlbie(va)
+#define flush_HPTE(pid, va, pg)	_tlbie(va, pid)
 extern void MMU_init_hw(void);
 extern unsigned long mmu_mapin_ram(void);
 extern void adjust_total_lowmem(void);
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -224,7 +224,16 @@ _GLOBAL(_tlbia)
  */
 _GLOBAL(_tlbie)
 #if defined(CONFIG_40x)
+	/* We run the search with interrupts disabled because we have to change
+	 * the PID and I don't want to preempt when that happens.
+	 */
+	mfmsr	r5
+	mfspr	r6,SPRN_PID
+	wrteei	0
+	mtspr	SPRN_PID,r4
 	tlbsx.	r3, 0, r3
+	mtspr	SPRN_PID,r6
+	wrtee	r5
 	bne	10f
 	sync
 	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
@@ -234,22 +243,21 @@ _GLOBAL(_tlbie)
 	isync
 10:
 #elif defined(CONFIG_44x)
-	mfspr	r4,SPRN_MMUCR
-	mfspr	r5,SPRN_PID		/* Get PID */
-	rlwimi	r4,r5,0,24,31		/* Set TID */
+	mfspr	r5,SPRN_MMUCR
+	rlwimi	r5,r4,0,24,31		/* Set TID */
 
 	/* We have to run the search with interrupts disabled, even critical
 	 * and debug interrupts (in fact the only critical exceptions we have
 	 * are debug and machine check). Otherwise an interrupt which causes
 	 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
-	mfmsr	r5
+	mfmsr	r4
 	lis	r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
 	addi	r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
-	andc	r6,r5,r6
+	andc	r6,r4,r6
 	mtmsr	r6
-	mtspr	SPRN_MMUCR,r4
+	mtspr	SPRN_MMUCR,r5
 	tlbsx.	r3, 0, r3
-	mtmsr	r5
+	mtmsr	r4
 	bne	10f
 	sync
 	/* There are only 64 TLB entries, so r3 < 64,
--- a/arch/ppc/mm/fault.c
+++ b/arch/ppc/mm/fault.c
@@ -227,7 +227,7 @@ good_area:
 			set_bit(PG_arch_1, &page->flags);
 		}
 		pte_update(ptep, 0, _PAGE_HWEXEC);
-		_tlbie(address);
+		_tlbie(address, mm->context.id);
 		pte_unmap_unlock(ptep, ptl);
 		up_read(&mm->mmap_sem);
 		return 0;
--- a/arch/ppc/mm/mmu_decl.h
+++ b/arch/ppc/mm/mmu_decl.h
@@ -54,12 +54,12 @@ extern unsigned int num_tlbcam_entries;
 #define mmu_mapin_ram()		(0UL)
 
 #elif defined(CONFIG_4xx)
-#define flush_HPTE(X, va, pg)	_tlbie(va)
+#define flush_HPTE(pid, va, pg)	_tlbie(va, pid)
 extern void MMU_init_hw(void);
 extern unsigned long mmu_mapin_ram(void);
 
 #elif defined(CONFIG_FSL_BOOKE)
-#define flush_HPTE(X, va, pg)	_tlbie(va)
+#define flush_HPTE(pid, va, pg)	_tlbie(va, pid)
 extern void MMU_init_hw(void);
 extern unsigned long mmu_mapin_ram(void);
 extern void adjust_total_lowmem(void);
--- a/arch/ppc/platforms/ebony.c
+++ b/arch/ppc/platforms/ebony.c
@@ -236,7 +236,7 @@ ebony_early_serial_map(void)
 	gen550_init(0, &port);
 
 	/* Purge TLB entry added in head_44x.S for early serial access */
-	_tlbie(UART0_IO_BASE);
+	_tlbie(UART0_IO_BASE, 0);
 #endif
 
 	port.membase = ioremap64(PPC440GP_UART1_ADDR, 8);
--- a/arch/ppc/platforms/ocotea.c
+++ b/arch/ppc/platforms/ocotea.c
@@ -259,7 +259,7 @@ ocotea_early_serial_map(void)
 	gen550_init(0, &port);
 
 	/* Purge TLB entry added in head_44x.S for early serial access */
-	_tlbie(UART0_IO_BASE);
+	_tlbie(UART0_IO_BASE, 0);
 #endif
 
 	port.membase = ioremap64(PPC440GX_UART1_ADDR, 8);
--- a/arch/ppc/platforms/taishan.c
+++ b/arch/ppc/platforms/taishan.c
@@ -316,7 +316,7 @@ taishan_early_serial_map(void)
 	gen550_init(0, &port);
 
 	/* Purge TLB entry added in head_44x.S for early serial access */
-	_tlbie(UART0_IO_BASE);
+	_tlbie(UART0_IO_BASE, 0);
 #endif
 
 	port.membase = ioremap64(PPC440GX_UART1_ADDR, 8);
--- a/include/asm-powerpc/tlbflush.h
+++ b/include/asm-powerpc/tlbflush.h
@@ -1,5 +1,6 @@
 #ifndef _ASM_POWERPC_TLBFLUSH_H
 #define _ASM_POWERPC_TLBFLUSH_H
+
 /*
  * TLB flushing:
  *
@@ -16,9 +17,6 @@
  */
 #ifdef __KERNEL__
 
-struct mm_struct;
-struct vm_area_struct;
-
 #if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
 /*
  * TLB flushing for software loaded TLB chips
@@ -28,7 +26,9 @@ struct vm_area_struct;
  * specific tlbie's
  */
 
-extern void _tlbie(unsigned long address);
+#include <linux/mm.h>
+
+extern void _tlbie(unsigned long address, unsigned int pid);
 
 #if defined(CONFIG_40x) || defined(CONFIG_8xx)
 #define _tlbia()	asm volatile ("tlbia; sync" : : : "memory")
@@ -44,13 +44,13 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 static inline void flush_tlb_page(struct vm_area_struct *vma,
 				  unsigned long vmaddr)
 {
-	_tlbie(vmaddr);
+	_tlbie(vmaddr, vma->vm_mm->context.id);
 }
 
 static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
 					 unsigned long vmaddr)
 {
-	_tlbie(vmaddr);
+	_tlbie(vmaddr, vma->vm_mm->context.id);
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,