Commit 4f0dbc27, authored Aug 20, 2009 by Benjamin Herrenschmidt

Merge commit 'paulus-perf/master' into next

Parents: 3c15a688, 20002ded
Showing 7 changed files with 588 additions and 16 deletions.
arch/powerpc/include/asm/pgtable.h      +3    -3
arch/powerpc/kernel/Makefile            +1    -1
arch/powerpc/kernel/asm-offsets.c       +2    -0
arch/powerpc/kernel/exceptions-64s.S    +19   -0
arch/powerpc/kernel/perf_callchain.c    +527  -0
arch/powerpc/mm/slb.c                   +26   -11
arch/powerpc/mm/stab.c                  +10   -1
arch/powerpc/include/asm/pgtable.h
@@ -104,8 +104,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	else
 		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
 
-#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
-	/* Second case is 32-bit with 64-bit PTE in SMP mode.  In this case, we
+#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+	/* Second case is 32-bit with 64-bit PTE.  In this case, we
 	 * can just store as long as we do the two halves in the right order
 	 * with a barrier in between. This is possible because we take care,
 	 * in the hash code, to pre-invalidate if the PTE was already hashed,
@@ -140,7 +140,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 
 #else
 	/* Anything else just stores the PTE normally. That covers all 64-bit
-	 * cases, and 32-bit non-hash with 64-bit PTEs in UP mode
+	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
 	*ptep = pte;
 #endif
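The context above explains why the CONFIG_SMP qualifier could be dropped: a 32-bit kernel with 64-bit PTEs can always publish the PTE as two ordered 32-bit stores with a barrier in between, because the hash code pre-invalidates any PTE that was already hashed. A minimal sketch of that store pattern, with hypothetical names (the kernel's real version is inline assembly in this same file, storing the PFN word first and the flags word, which carries the valid bit, last):

	#include <stdint.h>

	/* Hypothetical sketch of the "two halves plus barrier" store
	 * described above; the function and parameter layout are
	 * illustrative, not the kernel's actual interface. */
	static void set_pte_two_halves(volatile uint32_t *valid_half,
				       volatile uint32_t *other_half,
				       uint64_t pte)
	{
		*other_half = (uint32_t)(pte >> 32);	 /* harmless half first */
		__atomic_thread_fence(__ATOMIC_RELEASE); /* order the two stores */
		*valid_half = (uint32_t)pte;		 /* PTE becomes visible here */
	}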
arch/powerpc/kernel/Makefile
@@ -97,7 +97,7 @@ obj64-$(CONFIG_AUDIT) += compat_audit.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o
-obj-$(CONFIG_PPC_PERF_CTRS)	+= perf_counter.o
+obj-$(CONFIG_PPC_PERF_CTRS)	+= perf_counter.o perf_callchain.o
 obj64-$(CONFIG_PPC_PERF_CTRS)	+= power4-pmu.o ppc970-pmu.o power5-pmu.o \
 				   power5+-pmu.o power6-pmu.o power7-pmu.o
 obj32-$(CONFIG_PPC_PERF_CTRS)	+= mpc7450-pmu.o
arch/powerpc/kernel/asm-offsets.c
@@ -69,6 +69,8 @@ int main(void)
 	DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id));
 #ifdef CONFIG_PPC64
 	DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
+	DEFINE(SIGSEGV, SIGSEGV);
+	DEFINE(NMI_MASK, NMI_MASK);
 #else
 	DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
 #endif /* CONFIG_PPC64 */
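SIGSEGV and NMI_MASK are exported here so the assembly added to exceptions-64s.S below can use them as immediates. For reference, asm-offsets.c works by being compiled to assembler output, from which the build scrapes marker lines into a generated header; the DEFINE macro is roughly the following (quoted from memory of the era's include/linux/kbuild.h, so treat as approximate):

	/* Each DEFINE() emits a "->SYM value" marker into the generated .s
	 * file; kbuild scrapes these markers into asm-offsets.h, which
	 * assembly sources can then #include. */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	#define BLANK() asm volatile("\n->" : : )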
arch/powerpc/kernel/exceptions-64s.S
@@ -731,6 +731,11 @@ BEGIN_FTR_SECTION
 	bne-	do_ste_alloc		/* If so handle it */
 END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
 
+	clrrdi	r11,r1,THREAD_SHIFT
+	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
+	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
+	bne	77f			/* then don't call hash_page now */
+
 	/*
 	 * On iSeries, we soft-disable interrupts here, then
 	 * hard-enable interrupts so that the hash_page code can spin on
@@ -835,6 +840,20 @@ handle_page_fault:
 	bl	.low_hash_fault
 	b	.ret_from_except
 
+/*
+ * We come here as a result of a DSI at a point where we don't want
+ * to call hash_page, such as when we are accessing memory (possibly
+ * user memory) inside a PMU interrupt that occurred while interrupts
+ * were soft-disabled.  We want to invoke the exception handler for
+ * the access, or panic if there isn't a handler.
+ */
+77:	bl	.save_nvgprs
+	mr	r4,r3
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	li	r5,SIGSEGV
+	bl	.bad_page_fault
+	b	.ret_from_except
+
 	/* here we have a segment miss */
 do_ste_alloc:
 	bl	.ste_allocate		/* try to insert stab entry */
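In C terms, the five new instructions at the start of the data-access path amount to a preempt-count test, roughly the following (a paraphrase of the assembly above, not code from this commit):

	#include <linux/hardirq.h>	/* preempt_count(), NMI_MASK */

	/* Rough C equivalent of the clrrdi/lwz/andis. sequence: the
	 * thread_info sits at the base of the kernel stack, and its preempt
	 * count carries the NMI bits.  If they are set, we are inside a PMU
	 * "NMI" and must not call hash_page, so the fault is routed to the
	 * new 77: handler, which reports it via bad_page_fault() instead. */
	static inline int in_pmu_nmi(void)
	{
		return (preempt_count() & NMI_MASK) != 0;
	}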
arch/powerpc/kernel/perf_callchain.c (new file, 0 → 100644)

This diff is collapsed (527 additions).
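The collapsed file is what motivates the rest of the merge: it implements perf callchain sampling on powerpc, which walks the user stack from inside a PMU interrupt. A hedged sketch of the kind of frame walk involved (illustrative only, not the contents of perf_callchain.c; read_user_word() is a stand-in for a fault-tolerant read such as __get_user_inatomic(), and the offsets follow the 64-bit ABI's back-chain/LR-save layout):

	#include <stdint.h>

	/* Stand-in for a fault-tolerant user read; returns nonzero on fault.
	 * Stubbed so the sketch is self-contained; a real version must never
	 * fault into hash_page -- which is exactly what the exceptions-64s.S
	 * and slb.c/stab.c changes in this merge guarantee. */
	static int read_user_word(uint64_t addr, uint64_t *val)
	{
		(void)addr; (void)val;
		return 1;	/* pretend the read faulted */
	}

	/* Hypothetical user-stack walk: each frame stores the caller's stack
	 * pointer at offset 0 and the saved LR at offset 16 (64-bit ABI). */
	static void walk_user_stack(uint64_t sp, void (*emit)(uint64_t ip))
	{
		uint64_t next_sp, lr;

		while (sp) {
			if (read_user_word(sp, &next_sp))	/* back-chain */
				break;
			if (read_user_word(sp + 16, &lr))	/* saved LR */
				break;
			if (lr)
				emit(lr);	/* record return address */
			if (next_sp <= sp)	/* guard against cycles */
				break;
			sp = next_sp;
		}
	}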
arch/powerpc/mm/slb.c
@@ -92,15 +92,13 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
 		     : "memory" );
 }
 
-void slb_flush_and_rebolt(void)
+static void __slb_flush_and_rebolt(void)
 {
 	/* If you change this make sure you change SLB_NUM_BOLTED
 	 * appropriately too. */
 	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
 	unsigned long ksp_esid_data, ksp_vsid_data;
 
-	WARN_ON(!irqs_disabled());
-
 	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
 	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
 	lflags = SLB_VSID_KERNEL | linear_llp;
@@ -117,12 +115,6 @@ void slb_flush_and_rebolt(void)
 		ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
 	}
 
-	/*
-	 * We can't take a PMU exception in the following code, so hard
-	 * disable interrupts.
-	 */
-	hard_irq_disable();
-
 	/* We need to do this all in asm, so we're sure we don't touch
 	 * the stack between the slbia and rebolting it. */
 	asm volatile("isync\n"
@@ -139,6 +131,21 @@ void slb_flush_and_rebolt(void)
 		     : "memory" );
 }
 
+void slb_flush_and_rebolt(void)
+{
+
+	WARN_ON(!irqs_disabled());
+
+	/*
+	 * We can't take a PMU exception in the following code, so hard
+	 * disable interrupts.
+	 */
+	hard_irq_disable();
+
+	__slb_flush_and_rebolt();
+	get_paca()->slb_cache_ptr = 0;
+}
+
 void slb_vmalloc_update(void)
 {
 	unsigned long vflags;
@@ -180,12 +187,20 @@ static inline int esids_match(unsigned long addr1, unsigned long addr2)
 /* Flush all user entries from the segment table of the current processor. */
 void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 {
-	unsigned long offset = get_paca()->slb_cache_ptr;
+	unsigned long offset;
 	unsigned long slbie_data = 0;
 	unsigned long pc = KSTK_EIP(tsk);
 	unsigned long stack = KSTK_ESP(tsk);
 	unsigned long exec_base;
 
+	/*
+	 * We need interrupts hard-disabled here, not just soft-disabled,
+	 * so that a PMU interrupt can't occur, which might try to access
+	 * user memory (to get a stack trace) and possible cause an SLB miss
+	 * which would update the slb_cache/slb_cache_ptr fields in the PACA.
+	 */
+	hard_irq_disable();
+	offset = get_paca()->slb_cache_ptr;
 	if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
 	    offset <= SLB_CACHE_ENTRIES) {
 		int i;
@@ -200,7 +215,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 		}
 		asm volatile("isync" : : : "memory");
 	} else {
-		slb_flush_and_rebolt();
+		__slb_flush_and_rebolt();
 	}
 
 	/* Workaround POWER5 < DD2.1 issue */
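Two things happen in this file. First, switch_slb() now reads slb_cache_ptr only after hard-disabling interrupts, since a PMU interrupt taking an SLB miss could otherwise update the cache underneath it; the stab.c hunk below applies the same fix to the per-cpu stab cache. Second, slb_flush_and_rebolt() is split into a public wrapper that establishes that environment and a __-prefixed worker for callers that already have. A generic sketch of that split, with illustrative names (not kernel code):

	#include <stdbool.h>

	/* Stand-ins for the kernel primitives, hypothetical for this sketch. */
	static bool irqs_hard_off;
	static void hard_irq_disable(void) { irqs_hard_off = true; }

	struct lookup_cache { unsigned long ptr; };

	/* Worker: assumes the caller has already hard-disabled interrupts. */
	static void __flush_and_rebolt(struct lookup_cache *c)
	{
		/* the flush itself; safe because no PMU interrupt can intervene */
		(void)c;
	}

	/* Public entry point: set up the environment, then delegate. */
	void flush_and_rebolt(struct lookup_cache *c)
	{
		hard_irq_disable();
		__flush_and_rebolt(c);
		c->ptr = 0;	/* reset the cache index while still protected */
	}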
arch/powerpc/mm/stab.c
@@ -164,7 +164,7 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
 {
 	struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr;
 	struct stab_entry *ste;
-	unsigned long offset = __get_cpu_var(stab_cache_ptr);
+	unsigned long offset;
 	unsigned long pc = KSTK_EIP(tsk);
 	unsigned long stack = KSTK_ESP(tsk);
 	unsigned long unmapped_base;
@@ -172,6 +172,15 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
 	/* Force previous translations to complete. DRENG */
 	asm volatile("isync" : : : "memory");
 
+	/*
+	 * We need interrupts hard-disabled here, not just soft-disabled,
+	 * so that a PMU interrupt can't occur, which might try to access
+	 * user memory (to get a stack trace) and possible cause an STAB miss
+	 * which would update the stab_cache/stab_cache_ptr per-cpu variables.
+	 */
+	hard_irq_disable();
+
+	offset = __get_cpu_var(stab_cache_ptr);
 	if (offset <= NR_STAB_CACHE_ENTRIES) {
 		int i;