Commit 8a1ca8ce authored by Linus Torvalds

Merge branch 'perfcounters-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'perfcounters-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (574 commits)
  perf_counter: Turn off by default
  perf_counter: Add counter->id to the throttle event
  perf_counter: Better align code
  perf_counter: Rename L2 to LL cache
  perf_counter: Standardize event names
  perf_counter: Rename enums
  perf_counter tools: Clean up u64 usage
  perf_counter: Rename perf_counter_limit sysctl
  perf_counter: More paranoia settings
  perf_counter: powerpc: Implement generalized cache events for POWER processors
  perf_counters: powerpc: Add support for POWER7 processors
  perf_counter: Accurate period data
  perf_counter: Introduce struct for sample data
  perf_counter tools: Normalize data using per sample period data
  perf_counter: Annotate exit ctx recursion
  perf_counter tools: Propagate signals properly
  perf_counter tools: Small frequency related fixes
  perf_counter: More aggressive frequency adjustment
  perf_counter/x86: Fix the model number of Intel Core2 processors
  perf_counter, x86: Correct some event and umask values for Intel processors
  ...
parents b640f042 940010c5
......@@ -4403,6 +4403,16 @@ S: Maintained
F: include/linux/delayacct.h
F: kernel/delayacct.c
PERFORMANCE COUNTER SUBSYSTEM
P: Peter Zijlstra
M: a.p.zijlstra@chello.nl
P: Paul Mackerras
M: paulus@samba.org
P: Ingo Molnar
M: mingo@elte.hu
L: linux-kernel@vger.kernel.org
S: Supported
PERSONALITY HANDLING
P: Christoph Hellwig
M: hch@infradead.org
......
......@@ -131,5 +131,44 @@ static inline int irqs_disabled_flags(unsigned long flags)
*/
struct irq_chip;
#ifdef CONFIG_PERF_COUNTERS
static inline unsigned long test_perf_counter_pending(void)
{
unsigned long x;
asm volatile("lbz %0,%1(13)"
: "=r" (x)
: "i" (offsetof(struct paca_struct, perf_counter_pending)));
return x;
}
static inline void set_perf_counter_pending(void)
{
asm volatile("stb %0,%1(13)" : :
"r" (1),
"i" (offsetof(struct paca_struct, perf_counter_pending)));
}
static inline void clear_perf_counter_pending(void)
{
asm volatile("stb %0,%1(13)" : :
"r" (0),
"i" (offsetof(struct paca_struct, perf_counter_pending)));
}
extern void perf_counter_do_pending(void);
#else
static inline unsigned long test_perf_counter_pending(void)
{
return 0;
}
static inline void set_perf_counter_pending(void) {}
static inline void clear_perf_counter_pending(void) {}
static inline void perf_counter_do_pending(void) {}
#endif /* CONFIG_PERF_COUNTERS */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HW_IRQ_H */
......@@ -99,6 +99,7 @@ struct paca_struct {
u8 soft_enabled; /* irq soft-enable flag */
u8 hard_enabled; /* set if irqs are enabled in MSR */
u8 io_sync; /* writel() needs spin_unlock sync */
u8 perf_counter_pending; /* PM interrupt while soft-disabled */
/* Stuff for accurate time accounting */
u64 user_time; /* accumulated usermode TB ticks */
......
/*
* Performance counter support - PowerPC-specific definitions.
*
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/types.h>
#define MAX_HWCOUNTERS 8
#define MAX_EVENT_ALTERNATIVES 8
#define MAX_LIMITED_HWCOUNTERS 2
/*
* This struct provides the constants and functions needed to
* describe the PMU on a particular POWER-family CPU.
*/
struct power_pmu {
int n_counter;
int max_alternatives;
u64 add_fields;
u64 test_adder;
int (*compute_mmcr)(u64 events[], int n_ev,
unsigned int hwc[], u64 mmcr[]);
int (*get_constraint)(u64 event, u64 *mskp, u64 *valp);
int (*get_alternatives)(u64 event, unsigned int flags,
u64 alt[]);
void (*disable_pmc)(unsigned int pmc, u64 mmcr[]);
int (*limited_pmc_event)(u64 event);
u32 flags;
int n_generic;
int *generic_events;
int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
};
extern struct power_pmu *ppmu;
/*
* Values for power_pmu.flags
*/
#define PPMU_LIMITED_PMC5_6 1 /* PMC5/6 have limited function */
#define PPMU_ALT_SIPR 2 /* uses alternate posn for SIPR/HV */
/*
* Values for flags to get_alternatives()
*/
#define PPMU_LIMITED_PMC_OK 1 /* can put this on a limited PMC */
#define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */
#define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */
struct pt_regs;
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs) perf_misc_flags(regs)
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
/*
* The power_pmu.get_constraint function returns a 64-bit value and
* a 64-bit mask that express the constraints between this event and
* other events.
*
* The value and mask are divided up into (non-overlapping) bitfields
* of three different types:
*
* Select field: this expresses the constraint that some set of bits
* in MMCR* needs to be set to a specific value for this event. For a
* select field, the mask contains 1s in every bit of the field, and
* the value contains a unique value for each possible setting of the
* MMCR* bits. The constraint checking code will ensure that two events
* that set the same field in their masks have the same value in their
* value dwords.
*
* Add field: this expresses the constraint that there can be at most
* N events in a particular class. A field of k bits can be used for
* N <= 2^(k-1) - 1. The mask has the most significant bit of the field
* set (and the other bits 0), and the value has only the least significant
* bit of the field set. In addition, the 'add_fields' and 'test_adder'
* in the struct power_pmu for this processor come into play. The
* add_fields value contains 1 in the LSB of the field, and the
* test_adder contains 2^(k-1) - 1 - N in the field.
*
* NAND field: this expresses the constraint that you may not have events
* in all of a set of classes. (For example, on PPC970, you can't select
* events from the FPU, ISU and IDU simultaneously, although any two are
* possible.) For N classes, the field is N+1 bits wide, and each class
* is assigned one bit from the least-significant N bits. The mask has
* only the most-significant bit set, and the value has only the bit
* for the event's class set. The test_adder has the least significant
* bit set in the field.
*
* If an event is not subject to the constraint expressed by a particular
* field, then it will have 0 in both the mask and value for that field.
*/
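To make the add-field arithmetic concrete, here is a small stand-alone sketch. It is not the kernel's checker (the real logic, power_check_constraints() in the powerpc perf_counter code, is more involved); the field position, the N = 2 limit and all helper names below are invented for illustration. It shows how summing each event's constraint value and then adding test_adder sets the field's most-significant bit exactly when more than N events fall into one class.

	#include <stdio.h>
	#include <stdint.h>

	/* toy add field: 3 bits wide (k = 3) at bit 4, allowing N = 2 events */
	#define FIELD_SH	4
	#define CLASS_VALUE	(1ull << FIELD_SH)	/* LSB of the field */
	#define CLASS_MASK	(4ull << FIELD_SH)	/* MSB of the field */
	#define ADD_FIELDS	(1ull << FIELD_SH)	/* 1 in the LSB */
	#define TEST_ADDER	((uint64_t)(4 - 1 - 2) << FIELD_SH) /* 2^(k-1) - 1 - N */

	static int class_over_limit(int n_events)
	{
		uint64_t sum = 0;
		int i;

		/* every event in the class contributes the LSB of its add field */
		for (i = 0; i < n_events; ++i)
			sum += CLASS_VALUE & ADD_FIELDS;

		/* adding test_adder carries into the MSB iff n_events > N */
		return ((sum + TEST_ADDER) & CLASS_MASK) != 0;
	}

	int main(void)
	{
		int n;

		for (n = 1; n <= 4; ++n)
			printf("%d event(s): %s\n", n,
			       class_over_limit(n) ? "over the limit" : "fits");
		return 0;
	}

With N = 2, two events still fit (0x20 + 0x10 = 0x30, MSB clear) while a third pushes the field to 0x40 and trips the mask, which is exactly the per-class limit the comment describes.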
......@@ -492,11 +492,13 @@
#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
#define SPRN_MMCR1 798
#define SPRN_MMCRA 0x312
#define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */
#define MMCRA_SIHV 0x10000000UL /* state of MSR HV when SIAR set */
#define MMCRA_SIPR 0x08000000UL /* state of MSR PR when SIAR set */
#define MMCRA_SLOT 0x07000000UL /* SLOT bits (37-39) */
#define MMCRA_SLOT_SHIFT 24
#define MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */
#define POWER6_MMCRA_SDSYNC 0x0000080000000000ULL /* SDAR/SIAR synced */
#define POWER6_MMCRA_SIHV 0x0000040000000000ULL
#define POWER6_MMCRA_SIPR 0x0000020000000000ULL
#define POWER6_MMCRA_THRM 0x00000020UL
......
......@@ -322,6 +322,6 @@ SYSCALL_SPU(epoll_create1)
SYSCALL_SPU(dup3)
SYSCALL_SPU(pipe2)
SYSCALL(inotify_init1)
SYSCALL(ni_syscall)
SYSCALL_SPU(perf_counter_open)
COMPAT_SYS_SPU(preadv)
COMPAT_SYS_SPU(pwritev)
......@@ -341,6 +341,7 @@
#define __NR_dup3 316
#define __NR_pipe2 317
#define __NR_inotify_init1 318
#define __NR_perf_counter_open 319
#define __NR_preadv 320
#define __NR_pwritev 321
......
......@@ -94,6 +94,9 @@ obj64-$(CONFIG_AUDIT) += compat_audit.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o power4-pmu.o ppc970-pmu.o \
power5-pmu.o power5+-pmu.o power6-pmu.o \
power7-pmu.o
obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o
......
......@@ -131,6 +131,7 @@ int main(void)
DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_counter_pending));
DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
......
......@@ -526,6 +526,15 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
2:
TRACE_AND_RESTORE_IRQ(r5);
#ifdef CONFIG_PERF_COUNTERS
/* check paca->perf_counter_pending if we're enabling ints */
lbz r3,PACAPERFPEND(r13)
and. r3,r3,r5
beq 27f
bl .perf_counter_do_pending
27:
#endif /* CONFIG_PERF_COUNTERS */
/* extract EE bit and use it to restore paca->hard_enabled */
ld r3,_MSR(r1)
rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */
......
......@@ -135,6 +135,11 @@ notrace void raw_local_irq_restore(unsigned long en)
iseries_handle_interrupts();
}
if (test_perf_counter_pending()) {
clear_perf_counter_pending();
perf_counter_do_pending();
}
/*
* if (get_paca()->hard_enabled) return;
* But again we need to take care that gcc gets hard_enabled directly
......
/*
* Performance counter support for POWER7 processors.
*
* Copyright 2009 Paul Mackerras, IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/perf_counter.h>
#include <asm/reg.h>
/*
* Bits in event code for POWER7
*/
#define PM_PMC_SH 16 /* PMC number (1-based) for direct events */
#define PM_PMC_MSK 0xf
#define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH)
#define PM_UNIT_SH 12 /* TTMMUX number and setting - unit select */
#define PM_UNIT_MSK 0xf
#define PM_COMBINE_SH 11 /* Combined event bit */
#define PM_COMBINE_MSK 1
#define PM_COMBINE_MSKS 0x800
#define PM_L2SEL_SH 8 /* L2 event select */
#define PM_L2SEL_MSK 7
#define PM_PMCSEL_MSK 0xff
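For orientation, one of the event codes used later in this file decodes as follows with these masks: 0xc880 has pmc = (0xc880 >> 16) & 0xf = 0 (no fixed PMC, so any of PMC1-4 may be assigned), unit = (0xc880 >> 12) & 0xf = 0xc, combine = (0xc880 >> 11) & 1 = 1, l2sel = 0 and psel = 0x80, while a simple direct event such as 0x1e carries nothing but psel = 0x1e.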
/*
* Bits in MMCR1 for POWER7
*/
#define MMCR1_TTM0SEL_SH 60
#define MMCR1_TTM1SEL_SH 56
#define MMCR1_TTM2SEL_SH 52
#define MMCR1_TTM3SEL_SH 48
#define MMCR1_TTMSEL_MSK 0xf
#define MMCR1_L2SEL_SH 45
#define MMCR1_L2SEL_MSK 7
#define MMCR1_PMC1_COMBINE_SH 35
#define MMCR1_PMC2_COMBINE_SH 34
#define MMCR1_PMC3_COMBINE_SH 33
#define MMCR1_PMC4_COMBINE_SH 32
#define MMCR1_PMC1SEL_SH 24
#define MMCR1_PMC2SEL_SH 16
#define MMCR1_PMC3SEL_SH 8
#define MMCR1_PMC4SEL_SH 0
#define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8)
#define MMCR1_PMCSEL_MSK 0xff
/*
* Bits in MMCRA
*/
/*
* Layout of constraint bits:
* 6666555555555544444444443333333333222222222211111111110000000000
* 3210987654321098765432109876543210987654321098765432109876543210
*                                                 [  ><><><><><><>
*                                                  NC P6P5P4P3P2P1
*
* NC - number of counters
* 15: NC error 0x8000
* 12-14: number of events needing PMC1-4 0x7000
*
* P6
* 11: P6 error 0x800
* 10-11: Count of events needing PMC6
*
* P1..P5
* 0-9: Count of events needing PMC1..PMC5
*/
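As a worked example of this layout (derived from power7_get_constraint() below and the power7_pmu values at the end of this file): a direct event that encodes PMC3 gives sh = (3 - 1) * 2 = 4, so the mask gets 2 << 4 = 0x20 and the value gets 1 << 4 = 0x10; because pmc < 5 the function also ORs 0x8000 into the mask and 0x1000 into the value for the NC field. The resulting pair mask = 0x8020 / value = 0x1010 therefore encodes two add-field constraints at once: at most one event may claim PMC3 (the 2-bit field at bits 4-5), and at most four events in total may need a counter from the PMC1-4 pool (the limit of four follows from test_adder = 0x3000, i.e. 2^3 - 1 - 4 = 3 in the 12-14 count field).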
static int power7_get_constraint(u64 event, u64 *maskp, u64 *valp)
{
int pmc, sh;
u64 mask = 0, value = 0;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
if (pmc) {
if (pmc > 6)
return -1;
sh = (pmc - 1) * 2;
mask |= 2 << sh;
value |= 1 << sh;
if (pmc >= 5 && !(event == 0x500fa || event == 0x600f4))
return -1;
}
if (pmc < 5) {
/* need a counter from PMC1-4 set */
mask |= 0x8000;
value |= 0x1000;
}
*maskp = mask;
*valp = value;
return 0;
}
#define MAX_ALT 2 /* at most 2 alternatives for any event */
static const unsigned int event_alternatives[][MAX_ALT] = {
{ 0x200f2, 0x300f2 }, /* PM_INST_DISP */
{ 0x200f4, 0x600f4 }, /* PM_RUN_CYC */
{ 0x400fa, 0x500fa }, /* PM_RUN_INST_CMPL */
};
/*
* Scan the alternatives table for a match and return the
* index into the alternatives table if found, else -1.
*/
static int find_alternative(u64 event)
{
int i, j;
for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
if (event < event_alternatives[i][0])
break;
for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
if (event == event_alternatives[i][j])
return i;
}
return -1;
}
static s64 find_alternative_decode(u64 event)
{
int pmc, psel;
/* this only handles the 4x decode events */
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
psel = event & PM_PMCSEL_MSK;
if ((pmc == 2 || pmc == 4) && (psel & ~7) == 0x40)
return event - (1 << PM_PMC_SH) + 8;
if ((pmc == 1 || pmc == 3) && (psel & ~7) == 0x48)
return event + (1 << PM_PMC_SH) - 8;
return -1;
}
static int power7_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
int i, j, nalt = 1;
s64 ae;
alt[0] = event;
nalt = 1;
i = find_alternative(event);
if (i >= 0) {
for (j = 0; j < MAX_ALT; ++j) {
ae = event_alternatives[i][j];
if (ae && ae != event)
alt[nalt++] = ae;
}
} else {
ae = find_alternative_decode(event);
if (ae > 0)
alt[nalt++] = ae;
}
if (flags & PPMU_ONLY_COUNT_RUN) {
/*
* We're only counting in RUN state,
* so PM_CYC is equivalent to PM_RUN_CYC
* and PM_INST_CMPL === PM_RUN_INST_CMPL.
* This doesn't include alternatives that don't provide
* any extra flexibility in assigning PMCs.
*/
j = nalt;
for (i = 0; i < nalt; ++i) {
switch (alt[i]) {
case 0x1e: /* PM_CYC */
alt[j++] = 0x600f4; /* PM_RUN_CYC */
break;
case 0x600f4: /* PM_RUN_CYC */
alt[j++] = 0x1e;
break;
case 0x2: /* PM_PPC_CMPL */
alt[j++] = 0x500fa; /* PM_RUN_INST_CMPL */
break;
case 0x500fa: /* PM_RUN_INST_CMPL */
alt[j++] = 0x2; /* PM_PPC_CMPL */
break;
}
}
nalt = j;
}
return nalt;
}
/*
* Returns 1 if event counts things relating to marked instructions
* and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
*/
static int power7_marked_instr_event(u64 event)
{
int pmc, psel;
int unit;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
psel = event & PM_PMCSEL_MSK & ~1; /* trim off edge/level bit */
if (pmc >= 5)
return 0;
switch (psel >> 4) {
case 2:
return pmc == 2 || pmc == 4;
case 3:
if (psel == 0x3c)
return pmc == 1;
if (psel == 0x3e)
return pmc != 2;
return 1;
case 4:
case 5:
return unit == 0xd;
case 6:
if (psel == 0x64)
return pmc >= 3;
case 8:
return unit == 0xd;
}
return 0;
}
static int power7_compute_mmcr(u64 event[], int n_ev,
unsigned int hwc[], u64 mmcr[])
{
u64 mmcr1 = 0;
u64 mmcra = 0;
unsigned int pmc, unit, combine, l2sel, psel;
unsigned int pmc_inuse = 0;
int i;
/* First pass to count resource use */
for (i = 0; i < n_ev; ++i) {
pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
if (pmc) {
if (pmc > 6)
return -1;
if (pmc_inuse & (1 << (pmc - 1)))
return -1;
pmc_inuse |= 1 << (pmc - 1);
}
}
/* Second pass: assign PMCs, set all MMCR1 fields */
for (i = 0; i < n_ev; ++i) {
pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
combine = (event[i] >> PM_COMBINE_SH) & PM_COMBINE_MSK;
l2sel = (event[i] >> PM_L2SEL_SH) & PM_L2SEL_MSK;
psel = event[i] & PM_PMCSEL_MSK;
if (!pmc) {
/* Bus event or any-PMC direct event */
for (pmc = 0; pmc < 4; ++pmc) {
if (!(pmc_inuse & (1 << pmc)))
break;
}
if (pmc >= 4)
return -1;
pmc_inuse |= 1 << pmc;
} else {
/* Direct or decoded event */
--pmc;
}
if (pmc <= 3) {
mmcr1 |= (u64) unit << (MMCR1_TTM0SEL_SH - 4 * pmc);
mmcr1 |= (u64) combine << (MMCR1_PMC1_COMBINE_SH - pmc);
mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc);
if (unit == 6) /* L2 events */
mmcr1 |= (u64) l2sel << MMCR1_L2SEL_SH;
}
if (power7_marked_instr_event(event[i]))
mmcra |= MMCRA_SAMPLE_ENABLE;
hwc[i] = pmc;
}
/* Return MMCRx values */
mmcr[0] = 0;
if (pmc_inuse & 1)
mmcr[0] = MMCR0_PMC1CE;
if (pmc_inuse & 0x3e)
mmcr[0] |= MMCR0_PMCjCE;
mmcr[1] = mmcr1;
mmcr[2] = mmcra;
return 0;
}
static void power7_disable_pmc(unsigned int pmc, u64 mmcr[])
{
if (pmc <= 3)
mmcr[1] &= ~(0xffULL << MMCR1_PMCSEL_SH(pmc));
}
static int power7_generic_events[] = {
[PERF_COUNT_CPU_CYCLES] = 0x1e,
[PERF_COUNT_INSTRUCTIONS] = 2,
[PERF_COUNT_CACHE_REFERENCES] = 0xc880, /* LD_REF_L1_LSU */
[PERF_COUNT_CACHE_MISSES] = 0x400f0, /* LD_MISS_L1 */
[PERF_COUNT_BRANCH_INSTRUCTIONS] = 0x10068, /* BRU_FIN */
[PERF_COUNT_BRANCH_MISSES] = 0x400f6, /* BR_MPRED */
};
#define C(x) PERF_COUNT_HW_CACHE_##x
/*
* Table of generalized cache-related events.
* 0 means not supported, -1 means nonsensical, other values
* are event codes.
*/
static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0x400f0, 0xc880 },
[C(OP_WRITE)] = { 0, 0x300f0 },
[C(OP_PREFETCH)] = { 0xd8b8, 0 },
},
[C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0, 0x200fc },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { 0x408a, 0 },
},
[C(LL)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0x6080, 0x6084 },
[C(OP_WRITE)] = { 0x6082, 0x6086 },
[C(OP_PREFETCH)] = { 0, 0 },
},
[C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0, 0x300fc },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
[C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0, 0x400fc },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
[C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0x10068, 0x400f6 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
};
struct power_pmu power7_pmu = {
.n_counter = 6,
.max_alternatives = MAX_ALT + 1,
.add_fields = 0x1555ull,
.test_adder = 0x3000ull,
.compute_mmcr = power7_compute_mmcr,
.get_constraint = power7_get_constraint,
.get_alternatives = power7_get_alternatives,
.disable_pmc = power7_disable_pmc,
.n_generic = ARRAY_SIZE(power7_generic_events),
.generic_events = power7_generic_events,
.cache_events = &power7_cache_events,
};
......@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_counter.h>
#include <asm/firmware.h>
#include <asm/page.h>
......@@ -170,6 +171,8 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
die("Weird page fault", regs, SIGSEGV);
}
perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
* kernel and should generate an OOPS. Unfortunately, in the case of an
......@@ -309,6 +312,8 @@ good_area:
}
if (ret & VM_FAULT_MAJOR) {
current->maj_flt++;
perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
#ifdef CONFIG_PPC_SMLPAR
if (firmware_has_feature(FW_FEATURE_CMO)) {
preempt_disable();
......@@ -316,8 +321,11 @@ good_area:
preempt_enable();
}
#endif
} else
} else {
current->min_flt++;
perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
up_read(&mm->mmap_sem);
return 0;
......
config PPC64
bool "64-bit kernel"
default n
select HAVE_PERF_COUNTERS
help
This option selects whether a 32-bit or a 64-bit kernel
will be built.
......
......@@ -739,6 +739,7 @@ config X86_UP_IOAPIC
config X86_LOCAL_APIC
def_bool y
depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
select HAVE_PERF_COUNTERS if (!M386 && !M486)
config X86_IO_APIC
def_bool y
......
......@@ -831,4 +831,5 @@ ia32_sys_call_table:
.quad compat_sys_preadv
.quad compat_sys_pwritev
.quad compat_sys_rt_tgsigqueueinfo /* 335 */
.quad sys_perf_counter_open
ia32_syscall_end:
......@@ -49,7 +49,7 @@ BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
#ifdef CONFIG_PERF_COUNTERS
BUILD_INTERRUPT(perf_counter_interrupt, LOCAL_PERF_VECTOR)
BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR)
#endif
#ifdef CONFIG_X86_MCE_P4THERMAL
......
......@@ -13,6 +13,8 @@ typedef struct {
unsigned int irq_spurious_count;
#endif
unsigned int generic_irqs; /* arch dependent */
unsigned int apic_perf_irqs;
unsigned int apic_pending_irqs;
#ifdef CONFIG_SMP
unsigned int irq_resched_count;
unsigned int irq_call_count;
......
......@@ -29,6 +29,8 @@
extern void apic_timer_interrupt(void);
extern void generic_interrupt(void);
extern void error_interrupt(void);
extern void perf_pending_interrupt(void);
extern void spurious_interrupt(void);
extern void thermal_interrupt(void);
extern void reschedule_interrupt(void);
......
#ifndef _ASM_X86_INTEL_ARCH_PERFMON_H
#define _ASM_X86_INTEL_ARCH_PERFMON_H
#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
#define MSR_ARCH_PERFMON_PERFCTR1 0xc2
#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
#define MSR_ARCH_PERFMON_EVENTSEL1 0x187
#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22)
#define ARCH_PERFMON_EVENTSEL_INT (1 << 20)
#define ARCH_PERFMON_EVENTSEL_OS (1 << 17)
#define ARCH_PERFMON_EVENTSEL_USR (1 << 16)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX (0)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
union cpuid10_eax {
struct {
unsigned int version_id:8;
unsigned int num_counters:8;
unsigned int bit_width:8;
unsigned int mask_length:8;
} split;
unsigned int full;
};
#endif /* _ASM_X86_INTEL_ARCH_PERFMON_H */
......@@ -108,14 +108,14 @@
#define LOCAL_TIMER_VECTOR 0xef
/*
* Performance monitoring interrupt vector:
* Generic system vector for platform specific use
*/
#define LOCAL_PERF_VECTOR 0xee
#define GENERIC_INTERRUPT_VECTOR 0xed
/*
* Generic system vector for platform specific use
* Performance monitoring pending work vector:
*/
#define GENERIC_INTERRUPT_VECTOR 0xed
#define LOCAL_PENDING_VECTOR 0xec
/*
* First APIC vector available to drivers: (vectors 0x30-0xee) we
......
#ifndef _ASM_X86_PERF_COUNTER_H
#define _ASM_X86_PERF_COUNTER_H
/*
* Performance counter hw details:
*/
#define X86_PMC_MAX_GENERIC 8
#define X86_PMC_MAX_FIXED 3
#define X86_PMC_IDX_GENERIC 0
#define X86_PMC_IDX_FIXED 32
#define X86_PMC_IDX_MAX 64
#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
#define MSR_ARCH_PERFMON_PERFCTR1 0xc2
#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
#define MSR_ARCH_PERFMON_EVENTSEL1 0x187
#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22)
#define ARCH_PERFMON_EVENTSEL_INT (1 << 20)
#define ARCH_PERFMON_EVENTSEL_OS (1 << 17)
#define ARCH_PERFMON_EVENTSEL_USR (1 << 16)
/*
* Includes eventsel and unit mask as well:
*/
#define ARCH_PERFMON_EVENT_MASK 0xffff
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
#define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6
/*
* Intel "Architectural Performance Monitoring" CPUID
* detection/enumeration details:
*/
union cpuid10_eax {
struct {
unsigned int version_id:8;
unsigned int num_counters:8;
unsigned int bit_width:8;
unsigned int mask_length:8;
} split;
unsigned int full;
};
union cpuid10_edx {
struct {
unsigned int num_counters_fixed:4;
unsigned int reserved:28;
} split;
unsigned int full;
};
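These unions mirror the register layout of CPUID leaf 0xA. As a rough illustration of how they are meant to be consumed, the user-space sketch below (illustrative only; the kernel uses its own cpuid helpers, and this snippet merely assumes a GCC-style <cpuid.h> plus the union definitions above) reads the leaf and prints the enumerated counter parameters:

	#include <stdio.h>
	#include <cpuid.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;
		union cpuid10_eax ea;
		union cpuid10_edx ed;

		if (!__get_cpuid(0x0a, &eax, &ebx, &ecx, &edx))
			return 1;	/* CPUID leaf 0xA not available */

		ea.full = eax;
		ed.full = edx;
		/* num_counters_fixed is only meaningful for version_id > 1 */
		printf("arch perfmon v%u: %u generic counters (%u bits), %u fixed counters\n",
		       ea.split.version_id, ea.split.num_counters,
		       ea.split.bit_width, ed.split.num_counters_fixed);
		return 0;
	}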
/*
* Fixed-purpose performance counters:
*/
/*
* All 3 fixed-mode PMCs are configured via this single MSR:
*/
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d
/*
* The counts are available in three separate MSRs:
*/
/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309
#define X86_PMC_IDX_FIXED_INSTRUCTIONS (X86_PMC_IDX_FIXED + 0)
/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a
#define X86_PMC_IDX_FIXED_CPU_CYCLES (X86_PMC_IDX_FIXED + 1)
/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b
#define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2)
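To sketch how that single control MSR is laid out: per the Intel SDM, each fixed counter owns a 4-bit field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL (bit 0 of the field enables counting in ring 0, bit 1 in rings 1-3, bit 3 requests a PMI on overflow). The helper below is an illustration under that assumption, not code from this patch; the macro and function names are made up, and actually writing the MSR of course requires wrmsr in kernel mode.

	#define FIXED_CTRL_BITS(idx, bits)	((unsigned long long)(bits) << ((idx) * 4))
	#define FIXED_EN_OS	0x1	/* count in ring 0 */
	#define FIXED_EN_USR	0x2	/* count in rings 1-3 */
	#define FIXED_PMI	0x8	/* interrupt on overflow */

	/* value to write to MSR_ARCH_PERFMON_FIXED_CTR_CTRL to enable all fixed PMCs */
	static inline unsigned long long fixed_ctrl_enable_all(void)
	{
		unsigned long long val = 0;
		int i;

		for (i = 0; i < X86_PMC_MAX_FIXED; i++)
			val |= FIXED_CTRL_BITS(i, FIXED_EN_OS | FIXED_EN_USR | FIXED_PMI);
		return val;
	}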
extern void set_perf_counter_pending(void);
#define clear_perf_counter_pending() do { } while (0)
#define test_perf_counter_pending() (0)
#ifdef CONFIG_PERF_COUNTERS
extern void init_hw_perf_counters(void);
extern void perf_counters_lapic_init(void);
#else
static inline void init_hw_perf_counters(void) { }
static inline void perf_counters_lapic_init(void) { }
#endif
#endif /* _ASM_X86_PERF_COUNTER_H */
......@@ -341,6 +341,7 @@
#define __NR_preadv 333
#define __NR_pwritev 334
#define __NR_rt_tgsigqueueinfo 335
#define __NR_perf_counter_open 336
#ifdef __KERNEL__
......
......@@ -659,7 +659,8 @@ __SYSCALL(__NR_preadv, sys_preadv)
__SYSCALL(__NR_pwritev, sys_pwritev)
#define __NR_rt_tgsigqueueinfo 297
__SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
#define __NR_perf_counter_open 298
__SYSCALL(__NR_perf_counter_open, sys_perf_counter_open)
#ifndef __NO_STUBS
#define __ARCH_WANT_OLD_READDIR
......
......@@ -6,7 +6,6 @@
* 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
* 2000-2002 x86-64 support by Andi Kleen
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
......