Commit 8178d000 authored by Ingo Molnar

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/perfcounters into perfcounters/core

parents 8f28827a 20002ded
@@ -104,8 +104,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	else
 		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
-#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
-	/* Second case is 32-bit with 64-bit PTE in SMP mode. In this case, we
+#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+	/* Second case is 32-bit with 64-bit PTE. In this case, we
 	 * can just store as long as we do the two halves in the right order
 	 * with a barrier in between. This is possible because we take care,
 	 * in the hash code, to pre-invalidate if the PTE was already hashed,
@@ -140,7 +140,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 #else
 	/* Anything else just stores the PTE normally. That covers all 64-bit
-	 * cases, and 32-bit non-hash with 64-bit PTEs in UP mode
+	 * cases, and 32-bit non-hash with 32-bit PTEs.
 	 */
 	*ptep = pte;
 #endif
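Note on the hunk above: the comment refers to storing the two 32-bit halves of a 64-bit PTE in a fixed order with a barrier between them, which is safe only because the hash code pre-invalidates a previously hashed PTE. A minimal C sketch of that idea follows; the helper name and the assumption about which half carries the valid bit are illustrative, not the kernel's actual code (which uses inline assembly).

/*
 * Illustrative sketch only: store a 64-bit PTE as two 32-bit halves.
 * The half that does NOT contain the valid/present bit is written
 * first, then a barrier, then the half that makes the PTE look valid,
 * so a concurrent reader never observes a "present" PTE paired with a
 * stale other half.  (Which half holds the valid bit is an assumption
 * here; it depends on the PTE layout.)
 */
static inline void set_pte_two_halves(volatile unsigned int *ptep_hi,
				      volatile unsigned int *ptep_lo,
				      unsigned long long pte)
{
	*ptep_hi = (unsigned int)(pte >> 32);		/* half without the valid bit */
	__asm__ __volatile__("eieio" : : : "memory");	/* order the two stores */
	*ptep_lo = (unsigned int)pte;			/* half with the valid bit */
}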
...
@@ -97,7 +97,7 @@ obj64-$(CONFIG_AUDIT)		+= compat_audit.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o
-obj-$(CONFIG_PPC_PERF_CTRS)	+= perf_counter.o
+obj-$(CONFIG_PPC_PERF_CTRS)	+= perf_counter.o perf_callchain.o
 obj64-$(CONFIG_PPC_PERF_CTRS)	+= power4-pmu.o ppc970-pmu.o power5-pmu.o \
 				   power5+-pmu.o power6-pmu.o power7-pmu.o
 obj32-$(CONFIG_PPC_PERF_CTRS)	+= mpc7450-pmu.o
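perf_callchain.o is new in this merge; its source (presumably the new perf_callchain.c, whose diff is collapsed in this view) records call chains from the PMU interrupt, including user stacks. Very roughly, a user-level walk follows the powerpc stack back chain, reading each frame from user memory with non-faulting accesses since it runs at interrupt time. A hedged sketch with hypothetical names, not the actual file's code:

/*
 * Hedged sketch of a user back-chain walk.  Each powerpc stack frame
 * begins with a pointer to the caller's frame; the saved return
 * address sits at a small ABI-defined offset from the frame base
 * (LR_SAVE_OFFSET below is an illustrative assumption).
 */
#define LR_SAVE_OFFSET	4	/* depends on the ABI in use */

static void sketch_user_callchain(unsigned long sp, unsigned int max_entries)
{
	unsigned long next_sp, lr;
	unsigned int n = 0;

	while (sp && n < max_entries) {
		/* Non-faulting reads of user memory at interrupt time. */
		if (__copy_from_user_inatomic(&next_sp, (void __user *)sp,
					      sizeof(next_sp)) ||
		    __copy_from_user_inatomic(&lr,
					      (void __user *)(sp + LR_SAVE_OFFSET),
					      sizeof(lr)))
			break;		/* page not resident or bad pointer */
		/* record lr as a callchain entry here */
		n++;
		sp = next_sp;		/* follow the back chain */
	}
}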
...
@@ -67,6 +67,8 @@ int main(void)
 	DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id));
 #ifdef CONFIG_PPC64
 	DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
+	DEFINE(SIGSEGV, SIGSEGV);
+	DEFINE(NMI_MASK, NMI_MASK);
 #else
 	DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
 #endif /* CONFIG_PPC64 */
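The two DEFINE() entries exist so that SIGSEGV and NMI_MASK become visible to assembly code via the generated asm-offsets.h. For context, the asm-offsets mechanism works roughly as sketched below (the macro lives in include/linux/kbuild.h; the exact text here is from memory and may differ slightly):

/* Each DEFINE() emits a marker line into the compiler's assembly output. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

/*
 * The build post-processes the generated assembly, turning each
 * "->NMI_MASK <value> NMI_MASK" marker into
 * "#define NMI_MASK <value>" in asm-offsets.h, which .S files include.
 */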
...
@@ -729,6 +729,11 @@ BEGIN_FTR_SECTION
 	bne-	do_ste_alloc		/* If so handle it */
 END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
 
+	clrrdi	r11,r1,THREAD_SHIFT
+	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
+	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
+	bne	77f			/* then don't call hash_page now */
+
 	/*
 	 * On iSeries, we soft-disable interrupts here, then
 	 * hard-enable interrupts so that the hash_page code can spin on
@@ -833,6 +838,20 @@ handle_page_fault:
 	bl	.low_hash_fault
 	b	.ret_from_except
 
+/*
+ * We come here as a result of a DSI at a point where we don't want
+ * to call hash_page, such as when we are accessing memory (possibly
+ * user memory) inside a PMU interrupt that occurred while interrupts
+ * were soft-disabled.  We want to invoke the exception handler for
+ * the access, or panic if there isn't a handler.
+ */
+77:	bl	.save_nvgprs
+	mr	r4,r3
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	li	r5,SIGSEGV
+	bl	.bad_page_fault
+	b	.ret_from_except
+
 /* here we have a segment miss */
 do_ste_alloc:
 	bl	.ste_allocate		/* try to insert stab entry */
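For readers not fluent in ppc64 assembly, the instructions added in the two hunks above implement roughly the following logic; this is a C-level paraphrase for illustration, not code that exists in the tree:

/* Paraphrase of the new DSI-handler path (illustrative only). */
if (current_thread_info()->preempt_count & NMI_MASK) {
	/*
	 * The data access fault happened in an "NMI" context, i.e. an
	 * interrupt (such as a PMU interrupt) taken while interrupts
	 * were soft-disabled.  Don't call hash_page; hand the fault
	 * straight to the exception handler, which will apply an
	 * exception-table fixup or report the bad access.
	 */
	bad_page_fault(regs, dar, SIGSEGV);
	return;
}
/* otherwise fall through to the normal hash_page path */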
...
This diff is collapsed.
@@ -92,15 +92,13 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
 		     : "memory" );
 }
 
-void slb_flush_and_rebolt(void)
+static void __slb_flush_and_rebolt(void)
 {
 	/* If you change this make sure you change SLB_NUM_BOLTED
 	 * appropriately too. */
 	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
 	unsigned long ksp_esid_data, ksp_vsid_data;
 
-	WARN_ON(!irqs_disabled());
-
 	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
 	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
 	lflags = SLB_VSID_KERNEL | linear_llp;
@@ -117,12 +115,6 @@ void slb_flush_and_rebolt(void)
 		ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
 	}
 
-	/*
-	 * We can't take a PMU exception in the following code, so hard
-	 * disable interrupts.
-	 */
-	hard_irq_disable();
-
 	/* We need to do this all in asm, so we're sure we don't touch
 	 * the stack between the slbia and rebolting it. */
 	asm volatile("isync\n"
@@ -139,6 +131,21 @@ void slb_flush_and_rebolt(void)
 		     : "memory");
 }
 
+void slb_flush_and_rebolt(void)
+{
+	WARN_ON(!irqs_disabled());
+
+	/*
+	 * We can't take a PMU exception in the following code, so hard
+	 * disable interrupts.
+	 */
+	hard_irq_disable();
+
+	__slb_flush_and_rebolt();
+	get_paca()->slb_cache_ptr = 0;
+}
+
 void slb_vmalloc_update(void)
 {
 	unsigned long vflags;
@@ -180,12 +187,20 @@ static inline int esids_match(unsigned long addr1, unsigned long addr2)
 /* Flush all user entries from the segment table of the current processor. */
 void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 {
-	unsigned long offset = get_paca()->slb_cache_ptr;
+	unsigned long offset;
 	unsigned long slbie_data = 0;
 	unsigned long pc = KSTK_EIP(tsk);
 	unsigned long stack = KSTK_ESP(tsk);
 	unsigned long unmapped_base;
 
+	/*
+	 * We need interrupts hard-disabled here, not just soft-disabled,
+	 * so that a PMU interrupt can't occur, which might try to access
+	 * user memory (to get a stack trace) and possibly cause an SLB miss
+	 * which would update the slb_cache/slb_cache_ptr fields in the PACA.
+	 */
+	hard_irq_disable();
+	offset = get_paca()->slb_cache_ptr;
 	if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
 	    offset <= SLB_CACHE_ENTRIES) {
 		int i;
@@ -200,7 +215,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	}
 		asm volatile("isync" : : : "memory");
 	} else {
-		slb_flush_and_rebolt();
+		__slb_flush_and_rebolt();
 	}
 
 	/* Workaround POWER5 < DD2.1 issue */
...
@@ -164,7 +164,7 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
 {
 	struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr;
 	struct stab_entry *ste;
-	unsigned long offset = __get_cpu_var(stab_cache_ptr);
+	unsigned long offset;
 	unsigned long pc = KSTK_EIP(tsk);
 	unsigned long stack = KSTK_ESP(tsk);
 	unsigned long unmapped_base;
@@ -172,6 +172,15 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
 	/* Force previous translations to complete.  DRENG */
 	asm volatile("isync" : : : "memory");
 
+	/*
+	 * We need interrupts hard-disabled here, not just soft-disabled,
+	 * so that a PMU interrupt can't occur, which might try to access
+	 * user memory (to get a stack trace) and possibly cause an STAB miss
+	 * which would update the stab_cache/stab_cache_ptr per-cpu variables.
+	 */
+	hard_irq_disable();
+
+	offset = __get_cpu_var(stab_cache_ptr);
 	if (offset <= NR_STAB_CACHE_ENTRIES) {
 		int i;
...