Commit 0e078e2f authored by Thomas Gleixner, committed by Ingo Molnar

x86: prepare merging arch/x86/kernel/apic_32/64.c

Shuffle code around, so we get a readable diff.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 3a12d93d
@@ -563,6 +563,9 @@ static void local_apic_timer_interrupt(void)
 		return;
 	}
+	/*
+	 * the NMI deadlock-detector uses this.
+	 */
 	per_cpu(irq_stat, cpu).apic_timer_irqs++;
 	evt->event_handler(evt);
@@ -617,7 +620,7 @@ int setup_profiling_timer(unsigned int multiplier)
 void clear_local_APIC(void)
 {
 	int maxlvt = lapic_get_maxlvt();
-	unsigned long v;
+	u32 v;
 	/*
 	 * Masking an LVT entry can trigger a local APIC error
@@ -1209,50 +1212,6 @@ int __init APIC_init_uniprocessor (void)
 	return 0;
 }
/*
* APIC command line parameters
*/
static int __init parse_lapic(char *arg)
{
enable_local_apic = 1;
return 0;
}
early_param("lapic", parse_lapic);
static int __init parse_nolapic(char *arg)
{
enable_local_apic = -1;
clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
return 0;
}
early_param("nolapic", parse_nolapic);
static int __init parse_disable_lapic_timer(char *arg)
{
local_apic_timer_disabled = 1;
return 0;
}
early_param("nolapic_timer", parse_disable_lapic_timer);
static int __init parse_lapic_timer_c2_ok(char *arg)
{
local_apic_timer_c2_ok = 1;
return 0;
}
early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
static int __init apic_set_verbosity(char *str)
{
if (strcmp("debug", str) == 0)
apic_verbosity = APIC_DEBUG;
else if (strcmp("verbose", str) == 0)
apic_verbosity = APIC_VERBOSE;
return 1;
}
__setup("apic=", apic_set_verbosity);
 /*
  * Local APIC interrupts
  */
@@ -1565,3 +1524,46 @@ device_initcall(init_lapic_sysfs);
 static void apic_pm_activate(void) { }
 #endif /* CONFIG_PM */
/*
* APIC command line parameters
*/
static int __init parse_lapic(char *arg)
{
enable_local_apic = 1;
return 0;
}
early_param("lapic", parse_lapic);
static int __init parse_nolapic(char *arg)
{
enable_local_apic = -1;
clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
return 0;
}
early_param("nolapic", parse_nolapic);
static int __init parse_disable_lapic_timer(char *arg)
{
local_apic_timer_disabled = 1;
return 0;
}
early_param("nolapic_timer", parse_disable_lapic_timer);
static int __init parse_lapic_timer_c2_ok(char *arg)
{
local_apic_timer_c2_ok = 1;
return 0;
}
early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
static int __init apic_set_verbosity(char *str)
{
if (strcmp("debug", str) == 0)
apic_verbosity = APIC_DEBUG;
else if (strcmp("verbose", str) == 0)
apic_verbosity = APIC_VERBOSE;
return 1;
}
__setup("apic=", apic_set_verbosity);
@@ -44,6 +44,7 @@
 int apic_verbosity;
 int disable_apic_timer __cpuinitdata;
 static int apic_calibrate_pmtmr __initdata;
+int disable_apic;
 /* Local APIC timer works in C2? */
 int local_apic_timer_c2_ok;
@@ -60,10 +61,8 @@ static int lapic_next_event(unsigned long delta,
 			    struct clock_event_device *evt);
 static void lapic_timer_setup(enum clock_event_mode mode,
 			      struct clock_event_device *evt);
 static void lapic_timer_broadcast(cpumask_t mask);
-static void apic_pm_activate(void);
-static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen);
 static struct clock_event_device lapic_clockevent = {
 	.name		= "lapic",
@@ -78,57 +77,34 @@ static struct clock_event_device lapic_clockevent = {
 };
 static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
-static int lapic_next_event(unsigned long delta,
-			    struct clock_event_device *evt)
-{
-	apic_write(APIC_TMICT, delta);
-	return 0;
-}
-
-static void lapic_timer_setup(enum clock_event_mode mode,
-			      struct clock_event_device *evt)
-{
-	unsigned long flags;
-	unsigned int v;
-
-	/* Lapic used as dummy for broadcast ? */
-	if (evt->features & CLOCK_EVT_FEAT_DUMMY)
-		return;
-
-	local_irq_save(flags);
-
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-	case CLOCK_EVT_MODE_ONESHOT:
-		__setup_APIC_LVTT(calibration_result,
-				  mode != CLOCK_EVT_MODE_PERIODIC, 1);
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		v = apic_read(APIC_LVTT);
-		v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
-		apic_write(APIC_LVTT, v);
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		/* Nothing to do here */
-		break;
-	}
-
-	local_irq_restore(flags);
-}
-
-/*
- * Local APIC timer broadcast function
- */
-static void lapic_timer_broadcast(cpumask_t mask)
-{
-#ifdef CONFIG_SMP
-	send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
-#endif
-}
+/*
+ * Get the LAPIC version
+ */
+static inline int lapic_get_version(void)
+{
+	return GET_APIC_VERSION(apic_read(APIC_LVR));
+}
+
+/*
+ * Check, if the APIC is integrated or a seperate chip
+ */
+static inline int lapic_is_integrated(void)
+{
+	return 1;
+}
+
+/*
+ * Check, whether this is a modern or a first generation APIC
+ */
+static int modern_apic(void)
+{
+	/* AMD systems use old APIC versions, so check the CPU */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+	    boot_cpu_data.x86 >= 0xf)
+		return 1;
+	return lapic_get_version() >= 0x14;
+}
static void apic_pm_activate(void);
 void apic_wait_icr_idle(void)
 {
 	while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
@@ -151,7 +127,10 @@ u32 safe_apic_wait_icr_idle(void)
 	return send_status;
 }
-void enable_NMI_through_LVT0 (void * dummy)
+/**
+ * enable_NMI_through_LVT0 - enable NMI through local vector table 0
+ */
+void enable_NMI_through_LVT0(void *dummy)
 {
 	unsigned int v;
@@ -160,6 +139,9 @@ void enable_NMI_through_LVT0 (void * dummy)
 	apic_write(APIC_LVT0, v);
 }
/**
* lapic_get_maxlvt - get the maximum number of local vector table entries
*/
 int lapic_get_maxlvt(void)
 {
 	unsigned int v, maxlvt;
@@ -169,184 +151,476 @@ int lapic_get_maxlvt(void)
 	return maxlvt;
 }
void clear_local_APIC(void) /*
* This function sets up the local APIC timer, with a timeout of
* 'clocks' APIC bus clock. During calibration we actually call
* this function twice on the boot CPU, once with a bogus timeout
* value, second time for real. The other (noncalibrating) CPUs
* call this function only once, with the real, calibrated value.
*
* We do reads before writes even if unnecessary, to get around the
* P5 APIC double write bug.
*/
static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
{ {
int maxlvt; unsigned int lvtt_value, tmp_value;
unsigned int v;
maxlvt = lapic_get_maxlvt(); lvtt_value = LOCAL_TIMER_VECTOR;
if (!oneshot)
lvtt_value |= APIC_LVT_TIMER_PERIODIC;
if (!irqen)
lvtt_value |= APIC_LVT_MASKED;
/* apic_write(APIC_LVTT, lvtt_value);
* Masking an LVT entry can trigger a local APIC error
* if the vector is zero. Mask LVTERR first to prevent this.
*/
if (maxlvt >= 3) {
v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
}
/*
* Careful: we have to set masks only first to deassert
* any level-triggered sources.
*/
v = apic_read(APIC_LVTT);
apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
v = apic_read(APIC_LVT0);
apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
v = apic_read(APIC_LVT1);
apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
if (maxlvt >= 4) {
v = apic_read(APIC_LVTPC);
apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
}
/* /*
* Clean APIC state for other OSs: * Divide PICLK by 16
*/ */
apic_write(APIC_LVTT, APIC_LVT_MASKED); tmp_value = apic_read(APIC_TDCR);
apic_write(APIC_LVT0, APIC_LVT_MASKED); apic_write(APIC_TDCR, (tmp_value
apic_write(APIC_LVT1, APIC_LVT_MASKED); & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
if (maxlvt >= 3) | APIC_TDR_DIV_16);
apic_write(APIC_LVTERR, APIC_LVT_MASKED);
if (maxlvt >= 4) if (!oneshot)
apic_write(APIC_LVTPC, APIC_LVT_MASKED); apic_write(APIC_TMICT, clocks);
apic_write(APIC_ESR, 0);
apic_read(APIC_ESR);
} }
void disconnect_bsp_APIC(int virt_wire_setup) /*
* Setup extended LVT (K8 specific)
*/
void setup_APIC_extended_lvt(unsigned char lvt_off, unsigned char vector,
unsigned char msg_type, unsigned char mask)
{ {
/* Go back to Virtual Wire compatibility mode */ unsigned long reg = (lvt_off << 4) + K8_APIC_EXT_LVT_BASE;
unsigned long value; unsigned int v = (mask << 16) | (msg_type << 8) | vector;
/* For the spurious interrupt use vector F, and enable it */
value = apic_read(APIC_SPIV);
value &= ~APIC_VECTOR_MASK;
value |= APIC_SPIV_APIC_ENABLED;
value |= 0xf;
apic_write(APIC_SPIV, value);
if (!virt_wire_setup) {
/*
* For LVT0 make it edge triggered, active high,
* external and enabled
*/
value = apic_read(APIC_LVT0);
value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED );
value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
apic_write(APIC_LVT0, value);
} else {
/* Disable LVT0 */
apic_write(APIC_LVT0, APIC_LVT_MASKED);
}
/* For LVT1 make it edge triggered, active high, nmi and enabled */ apic_write(reg, v);
value = apic_read(APIC_LVT1);
value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
apic_write(APIC_LVT1, value);
} }
void disable_local_APIC(void) /*
* Program the next event, relative to now
*/
static int lapic_next_event(unsigned long delta,
struct clock_event_device *evt)
{ {
unsigned int value; apic_write(APIC_TMICT, delta);
return 0;
clear_local_APIC();
/*
* Disable APIC (implies clearing of registers
* for 82489DX!).
*/
value = apic_read(APIC_SPIV);
value &= ~APIC_SPIV_APIC_ENABLED;
apic_write(APIC_SPIV, value);
} }
void lapic_shutdown(void) /*
* Setup the lapic timer in periodic or oneshot mode
*/
static void lapic_timer_setup(enum clock_event_mode mode,
struct clock_event_device *evt)
{ {
unsigned long flags; unsigned long flags;
unsigned int v;
if (!cpu_has_apic) /* Lapic used as dummy for broadcast ? */
if (evt->features & CLOCK_EVT_FEAT_DUMMY)
return; return;
local_irq_save(flags); local_irq_save(flags);
disable_local_APIC(); switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
case CLOCK_EVT_MODE_ONESHOT:
__setup_APIC_LVTT(calibration_result,
mode != CLOCK_EVT_MODE_PERIODIC, 1);
break;
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
v = apic_read(APIC_LVTT);
v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
apic_write(APIC_LVTT, v);
break;
case CLOCK_EVT_MODE_RESUME:
/* Nothing to do here */
break;
}
local_irq_restore(flags); local_irq_restore(flags);
} }
/* /*
* This is to verify that we're looking at a real local APIC. * Local APIC timer broadcast function
* Check these against your board if the CPUs aren't getting
* started for no apparent reason.
*/ */
int __init verify_local_APIC(void) static void lapic_timer_broadcast(cpumask_t mask)
{ {
unsigned int reg0, reg1; #ifdef CONFIG_SMP
send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
/* #endif
* The version register is read-only in a real APIC. }
*/
reg0 = apic_read(APIC_LVR);
apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
reg1 = apic_read(APIC_LVR);
apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);
/* /*
* The two version reads above should print the same * Setup the local APIC timer for this CPU. Copy the initilized values
* numbers. If the second one is different, then we * of the boot CPU and register the clock event in the framework.
* poke at a non-APIC. */
*/ static void setup_APIC_timer(void)
if (reg1 != reg0) {
return 0; struct clock_event_device *levt = &__get_cpu_var(lapic_events);
/* memcpy(levt, &lapic_clockevent, sizeof(*levt));
* Check if the version looks reasonably. levt->cpumask = cpumask_of_cpu(smp_processor_id());
*/
reg1 = GET_APIC_VERSION(reg0);
if (reg1 == 0x00 || reg1 == 0xff)
return 0;
reg1 = lapic_get_maxlvt();
if (reg1 < 0x02 || reg1 == 0xff)
return 0;
/* clockevents_register_device(levt);
* The ID register is read/write in a real APIC. }
*/
reg0 = apic_read(APIC_ID);
apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
reg1 = apic_read(APIC_ID);
apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
apic_write(APIC_ID, reg0);
if (reg1 != (reg0 ^ APIC_ID_MASK))
return 0;
/* /*
* In this function we calibrate APIC bus clocks to the external
* timer. Unfortunately we cannot use jiffies and the timer irq
* to calibrate, since some later bootup code depends on getting
* the first irq? Ugh.
*
* We want to do the calibration only once since we
* want to have local timer irqs syncron. CPUs connected
* by the same APIC bus have the very same bus frequency.
* And we want to have irqs off anyways, no accidental
* APIC irq that way.
*/
#define TICK_COUNT 100000000
static void __init calibrate_APIC_clock(void)
{
unsigned apic, apic_start;
unsigned long tsc, tsc_start;
int result;
local_irq_disable();
/*
* Put whatever arbitrary (but long enough) timeout
* value into the APIC clock, we just want to get the
* counter running for calibration.
*
* No interrupt enable !
*/
__setup_APIC_LVTT(250000000, 0, 0);
apic_start = apic_read(APIC_TMCCT);
#ifdef CONFIG_X86_PM_TIMER
if (apic_calibrate_pmtmr && pmtmr_ioport) {
pmtimer_wait(5000); /* 5ms wait */
apic = apic_read(APIC_TMCCT);
result = (apic_start - apic) * 1000L / 5;
} else
#endif
{
rdtscll(tsc_start);
do {
apic = apic_read(APIC_TMCCT);
rdtscll(tsc);
} while ((tsc - tsc_start) < TICK_COUNT &&
(apic_start - apic) < TICK_COUNT);
result = (apic_start - apic) * 1000L * tsc_khz /
(tsc - tsc_start);
}
local_irq_enable();
printk(KERN_DEBUG "APIC timer calibration result %d\n", result);
printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n",
result / 1000 / 1000, result / 1000 % 1000);
/* Calculate the scaled math multiplication factor */
lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC, 32);
lapic_clockevent.max_delta_ns =
clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
lapic_clockevent.min_delta_ns =
clockevent_delta2ns(0xF, &lapic_clockevent);
calibration_result = result / HZ;
}
void __init setup_boot_APIC_clock(void)
{
/*
* The local apic timer can be disabled via the kernel commandline.
* Register the lapic timer as a dummy clock event source on SMP
* systems, so the broadcast mechanism is used. On UP systems simply
* ignore it.
*/
if (disable_apic_timer) {
printk(KERN_INFO "Disabling APIC timer\n");
/* No broadcast on UP ! */
if (num_possible_cpus() > 1)
setup_APIC_timer();
return;
}
printk(KERN_INFO "Using local APIC timer interrupts.\n");
calibrate_APIC_clock();
/*
* If nmi_watchdog is set to IO_APIC, we need the
* PIT/HPET going. Otherwise register lapic as a dummy
* device.
*/
if (nmi_watchdog != NMI_IO_APIC)
lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
else
printk(KERN_WARNING "APIC timer registered as dummy,"
" due to nmi_watchdog=1!\n");
setup_APIC_timer();
}
/*
* AMD C1E enabled CPUs have a real nasty problem: Some BIOSes set the
* C1E flag only in the secondary CPU, so when we detect the wreckage
* we already have enabled the boot CPU local apic timer. Check, if
* disable_apic_timer is set and the DUMMY flag is cleared. If yes,
* set the DUMMY flag again and force the broadcast mode in the
* clockevents layer.
*/
void __cpuinit check_boot_apic_timer_broadcast(void)
{
if (!disable_apic_timer ||
(lapic_clockevent.features & CLOCK_EVT_FEAT_DUMMY))
return;
printk(KERN_INFO "AMD C1E detected late. Force timer broadcast.\n");
lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY;
local_irq_enable();
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, &boot_cpu_id);
local_irq_disable();
}
void __cpuinit setup_secondary_APIC_clock(void)
{
check_boot_apic_timer_broadcast();
setup_APIC_timer();
}
/*
* The guts of the apic timer interrupt
*/
static void local_apic_timer_interrupt(void)
{
int cpu = smp_processor_id();
struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
/*
* Normally we should not be here till LAPIC has been initialized but
* in some cases like kdump, its possible that there is a pending LAPIC
* timer interrupt from previous kernel's context and is delivered in
* new kernel the moment interrupts are enabled.
*
* Interrupts are enabled early and LAPIC is setup much later, hence
* its possible that when we get here evt->event_handler is NULL.
* Check for event_handler being NULL and discard the interrupt as
* spurious.
*/
if (!evt->event_handler) {
printk(KERN_WARNING
"Spurious LAPIC timer interrupt on cpu %d\n", cpu);
/* Switch it off */
lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
return;
}
/*
* the NMI deadlock-detector uses this.
*/
add_pda(apic_timer_irqs, 1);
evt->event_handler(evt);
}
/*
* Local APIC timer interrupt. This is the most natural way for doing
* local interrupts, but local timer interrupts can be emulated by
* broadcast interrupts too. [in case the hw doesn't support APIC timers]
*
* [ if a single-CPU system runs an SMP kernel then we call the local
* interrupt as well. Thus we cannot inline the local irq ... ]
*/
void smp_apic_timer_interrupt(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
/*
* NOTE! We'd better ACK the irq immediately,
* because timer handling can be slow.
*/
ack_APIC_irq();
/*
* update_process_times() expects us to have done irq_enter().
* Besides, if we don't timer interrupts ignore the global
* interrupt lock, which is the WrongThing (tm) to do.
*/
exit_idle();
irq_enter();
local_apic_timer_interrupt();
irq_exit();
set_irq_regs(old_regs);
}
int setup_profiling_timer(unsigned int multiplier)
{
return -EINVAL;
}
/*
* Local APIC start and shutdown
*/
/**
* clear_local_APIC - shutdown the local APIC
*
* This is called, when a CPU is disabled and before rebooting, so the state of
* the local APIC has no dangling leftovers. Also used to cleanout any BIOS
* leftovers during boot.
*/
void clear_local_APIC(void)
{
int maxlvt = lapic_get_maxlvt();
u32 v;
/*
* Masking an LVT entry can trigger a local APIC error
* if the vector is zero. Mask LVTERR first to prevent this.
*/
if (maxlvt >= 3) {
v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
}
/*
* Careful: we have to set masks only first to deassert
* any level-triggered sources.
*/
v = apic_read(APIC_LVTT);
apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
v = apic_read(APIC_LVT0);
apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
v = apic_read(APIC_LVT1);
apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
if (maxlvt >= 4) {
v = apic_read(APIC_LVTPC);
apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
}
/*
* Clean APIC state for other OSs:
*/
apic_write(APIC_LVTT, APIC_LVT_MASKED);
apic_write(APIC_LVT0, APIC_LVT_MASKED);
apic_write(APIC_LVT1, APIC_LVT_MASKED);
if (maxlvt >= 3)
apic_write(APIC_LVTERR, APIC_LVT_MASKED);
if (maxlvt >= 4)
apic_write(APIC_LVTPC, APIC_LVT_MASKED);
apic_write(APIC_ESR, 0);
apic_read(APIC_ESR);
}
/**
* disable_local_APIC - clear and disable the local APIC
*/
void disable_local_APIC(void)
{
unsigned int value;
clear_local_APIC();
/*
* Disable APIC (implies clearing of registers
* for 82489DX!).
*/
value = apic_read(APIC_SPIV);
value &= ~APIC_SPIV_APIC_ENABLED;
apic_write(APIC_SPIV, value);
}
void lapic_shutdown(void)
{
unsigned long flags;
if (!cpu_has_apic)
return;
local_irq_save(flags);
disable_local_APIC();
local_irq_restore(flags);
}
/*
* This is to verify that we're looking at a real local APIC.
* Check these against your board if the CPUs aren't getting
* started for no apparent reason.
*/
int __init verify_local_APIC(void)
{
unsigned int reg0, reg1;
/*
* The version register is read-only in a real APIC.
*/
reg0 = apic_read(APIC_LVR);
apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
reg1 = apic_read(APIC_LVR);
apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);
/*
* The two version reads above should print the same
* numbers. If the second one is different, then we
* poke at a non-APIC.
*/
if (reg1 != reg0)
return 0;
/*
* Check if the version looks reasonably.
*/
reg1 = GET_APIC_VERSION(reg0);
if (reg1 == 0x00 || reg1 == 0xff)
return 0;
reg1 = lapic_get_maxlvt();
if (reg1 < 0x02 || reg1 == 0xff)
return 0;
/*
* The ID register is read/write in a real APIC.
*/
reg0 = apic_read(APIC_ID);
apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
reg1 = apic_read(APIC_ID);
apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
apic_write(APIC_ID, reg0);
if (reg1 != (reg0 ^ APIC_ID_MASK))
return 0;
 /*
  * The next two are just to see if we have sane values.
  * They're only really relevant if we're in Virtual Wire
  * compatibility mode, but most boxes are anymore.
  */
 	reg0 = apic_read(APIC_LVT0);
-	apic_printk(APIC_DEBUG,"Getting LVT0: %x\n", reg0);
+	apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0);
 	reg1 = apic_read(APIC_LVT1);
 	apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);
 	return 1;
 }
+/**
+ * sync_Arb_IDs - synchronize APIC bus arbitration IDs
+ */
 void __init sync_Arb_IDs(void)
 {
 	/* Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 */
-	unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
-	if (ver >= 0x14)	/* P4 or higher */
+	if (modern_apic())
 		return;
 /*
@@ -398,7 +672,10 @@ void __init init_bsp_APIC(void)
 	apic_write(APIC_LVT1, value);
 }
-void __cpuinit setup_local_APIC (void)
+/**
+ * setup_local_APIC - setup the local APIC
+ */
+void __cpuinit setup_local_APIC(void)
 {
 	unsigned int value, maxlvt;
 	int i, j;
@@ -517,162 +794,8 @@ void __cpuinit setup_local_APIC (void)
 	nmi_watchdog_default();
 	setup_apic_nmi_watchdog(NULL);
 	apic_pm_activate();
}
#ifdef CONFIG_PM
static struct {
/* 'active' is true if the local APIC was enabled by us and
not the BIOS; this signifies that we are also responsible
for disabling it before entering apm/acpi suspend */
int active;
/* r/w apic fields */
unsigned int apic_id;
unsigned int apic_taskpri;
unsigned int apic_ldr;
unsigned int apic_dfr;
unsigned int apic_spiv;
unsigned int apic_lvtt;
unsigned int apic_lvtpc;
unsigned int apic_lvt0;
unsigned int apic_lvt1;
unsigned int apic_lvterr;
unsigned int apic_tmict;
unsigned int apic_tdcr;
unsigned int apic_thmr;
} apic_pm_state;
static int lapic_suspend(struct sys_device *dev, pm_message_t state)
{
unsigned long flags;
int maxlvt;
if (!apic_pm_state.active)
return 0;
maxlvt = lapic_get_maxlvt();
apic_pm_state.apic_id = apic_read(APIC_ID);
apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
apic_pm_state.apic_ldr = apic_read(APIC_LDR);
apic_pm_state.apic_dfr = apic_read(APIC_DFR);
apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
if (maxlvt >= 4)
apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
#ifdef CONFIG_X86_MCE_INTEL
if (maxlvt >= 5)
apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
#endif
local_irq_save(flags);
disable_local_APIC();
local_irq_restore(flags);
return 0;
}
static int lapic_resume(struct sys_device *dev)
{
unsigned int l, h;
unsigned long flags;
int maxlvt;
if (!apic_pm_state.active)
return 0;
maxlvt = lapic_get_maxlvt();
local_irq_save(flags);
rdmsr(MSR_IA32_APICBASE, l, h);
l &= ~MSR_IA32_APICBASE_BASE;
l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
wrmsr(MSR_IA32_APICBASE, l, h);
apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
apic_write(APIC_ID, apic_pm_state.apic_id);
apic_write(APIC_DFR, apic_pm_state.apic_dfr);
apic_write(APIC_LDR, apic_pm_state.apic_ldr);
apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
#ifdef CONFIG_X86_MCE_INTEL
if (maxlvt >= 5)
apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
#endif
if (maxlvt >= 4)
apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
apic_write(APIC_ESR, 0);
apic_read(APIC_ESR);
apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
apic_write(APIC_ESR, 0);
apic_read(APIC_ESR);
local_irq_restore(flags);
return 0;
}
static struct sysdev_class lapic_sysclass = {
.name = "lapic",
.resume = lapic_resume,
.suspend = lapic_suspend,
};
static struct sys_device device_lapic = {
.id = 0,
.cls = &lapic_sysclass,
};
static void __cpuinit apic_pm_activate(void)
{
apic_pm_state.active = 1;
}
static int __init init_lapic_sysfs(void)
{
int error;
if (!cpu_has_apic)
return 0;
/* XXX: remove suspend/resume procs if !apic_pm_state.active? */
error = sysdev_class_register(&lapic_sysclass);
if (!error)
error = sysdev_register(&device_lapic);
return error;
}
device_initcall(init_lapic_sysfs);
#else /* CONFIG_PM */
static void apic_pm_activate(void) { }
#endif /* CONFIG_PM */
static int __init apic_set_verbosity(char *str)
{
if (str == NULL) {
skip_ioapic_setup = 0;
ioapic_force = 1;
return 0;
}
if (strcmp("debug", str) == 0)
apic_verbosity = APIC_DEBUG;
else if (strcmp("verbose", str) == 0)
apic_verbosity = APIC_VERBOSE;
else {
printk(KERN_WARNING "APIC Verbosity level %s not recognised"
" use apic=verbose or apic=debug\n", str);
return -EINVAL;
}
return 0;
} }
early_param("apic", apic_set_verbosity);
 /*
  * Detect and enable local APICs on non-SMP boards.
@@ -680,8 +803,7 @@ early_param("apic", apic_set_verbosity);
  * On AMD64 we trust the BIOS - if it says no APIC it is likely
  * not correctly set up (usually the APIC timer won't work etc.)
  */
-static int __init detect_init_APIC (void)
+static int __init detect_init_APIC(void)
 {
 	if (!cpu_has_apic) {
 		printk(KERN_INFO "No local APIC present\n");
@@ -693,6 +815,9 @@ static int __init detect_init_APIC (void)
 		return 0;
 	}
+/**
+ * init_apic_mappings - initialize APIC mappings
+ */
 void __init init_apic_mappings(void)
 {
 	unsigned long apic_phys;
@@ -725,264 +850,267 @@ void __init init_apic_mappings(void)
 }
/* /*
* This function sets up the local APIC timer, with a timeout of * This initializes the IO-APIC and APIC hardware if this is
* 'clocks' APIC bus clock. During calibration we actually call * a UP kernel.
* this function twice on the boot CPU, once with a bogus timeout
* value, second time for real. The other (noncalibrating) CPUs
* call this function only once, with the real, calibrated value.
*
* We do reads before writes even if unnecessary, to get around the
* P5 APIC double write bug.
*/ */
int __init APIC_init_uniprocessor(void)
static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
{ {
unsigned int lvtt_value, tmp_value; if (disable_apic) {
printk(KERN_INFO "Apic disabled\n");
lvtt_value = LOCAL_TIMER_VECTOR; return -1;
if (!oneshot) }
lvtt_value |= APIC_LVT_TIMER_PERIODIC; if (!cpu_has_apic) {
if (!irqen) disable_apic = 1;
lvtt_value |= APIC_LVT_MASKED; printk(KERN_INFO "Apic disabled by BIOS\n");
return -1;
apic_write(APIC_LVTT, lvtt_value); }
/*
* Divide PICLK by 16
*/
tmp_value = apic_read(APIC_TDCR);
apic_write(APIC_TDCR, (tmp_value
& ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
| APIC_TDR_DIV_16);
if (!oneshot) verify_local_APIC();
apic_write(APIC_TMICT, clocks);
}
static void setup_APIC_timer(void) phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
{ apic_write(APIC_ID, SET_APIC_ID(boot_cpu_id));
struct clock_event_device *levt = &__get_cpu_var(lapic_events);
memcpy(levt, &lapic_clockevent, sizeof(*levt)); setup_local_APIC();
levt->cpumask = cpumask_of_cpu(smp_processor_id());
clockevents_register_device(levt); if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
setup_IO_APIC();
else
nr_ioapics = 0;
setup_boot_APIC_clock();
check_nmi_watchdog();
return 0;
} }
/* /*
* In this function we calibrate APIC bus clocks to the external * Local APIC interrupts
* timer. Unfortunately we cannot use jiffies and the timer irq
* to calibrate, since some later bootup code depends on getting
* the first irq? Ugh.
*
* We want to do the calibration only once since we
* want to have local timer irqs syncron. CPUs connected
* by the same APIC bus have the very same bus frequency.
* And we want to have irqs off anyways, no accidental
* APIC irq that way.
*/ */
#define TICK_COUNT 100000000 /*
* This interrupt should _never_ happen with our APIC/SMP architecture
static void __init calibrate_APIC_clock(void) */
asmlinkage void smp_spurious_interrupt(void)
{ {
unsigned apic, apic_start; unsigned int v;
unsigned long tsc, tsc_start; exit_idle();
int result; irq_enter();
local_irq_disable();
/* /*
* Put whatever arbitrary (but long enough) timeout * Check if this really is a spurious interrupt and ACK it
* value into the APIC clock, we just want to get the * if it is a vectored one. Just in case...
* counter running for calibration. * Spurious interrupts should not be ACKed.
*
* No interrupt enable !
*/ */
__setup_APIC_LVTT(250000000, 0, 0); v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
apic_start = apic_read(APIC_TMCCT); ack_APIC_irq();
#ifdef CONFIG_X86_PM_TIMER
if (apic_calibrate_pmtmr && pmtmr_ioport) {
pmtimer_wait(5000); /* 5ms wait */
apic = apic_read(APIC_TMCCT);
result = (apic_start - apic) * 1000L / 5;
} else
#endif
{
rdtscll(tsc_start);
do {
apic = apic_read(APIC_TMCCT);
rdtscll(tsc);
} while ((tsc - tsc_start) < TICK_COUNT &&
(apic_start - apic) < TICK_COUNT);
result = (apic_start - apic) * 1000L * tsc_khz /
(tsc - tsc_start);
}
local_irq_enable();
printk(KERN_DEBUG "APIC timer calibration result %d\n", result); add_pda(irq_spurious_count, 1);
irq_exit();
}
printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n", /*
result / 1000 / 1000, result / 1000 % 1000); * This interrupt should never happen with our APIC/SMP architecture
*/
asmlinkage void smp_error_interrupt(void)
{
unsigned int v, v1;
/* Calculate the scaled math multiplication factor */ exit_idle();
lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC, 32); irq_enter();
lapic_clockevent.max_delta_ns = /* First tickle the hardware, only then report what went on. -- REW */
clockevent_delta2ns(0x7FFFFF, &lapic_clockevent); v = apic_read(APIC_ESR);
lapic_clockevent.min_delta_ns = apic_write(APIC_ESR, 0);
clockevent_delta2ns(0xF, &lapic_clockevent); v1 = apic_read(APIC_ESR);
ack_APIC_irq();
atomic_inc(&irq_err_count);
calibration_result = result / HZ; /* Here is what the APIC error bits mean:
0: Send CS error
1: Receive CS error
2: Send accept error
3: Receive accept error
4: Reserved
5: Send illegal vector
6: Received illegal vector
7: Illegal register address
*/
printk(KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
smp_processor_id(), v , v1);
irq_exit();
} }
void __init setup_boot_APIC_clock (void) void disconnect_bsp_APIC(int virt_wire_setup)
{ {
/* /* Go back to Virtual Wire compatibility mode */
* The local apic timer can be disabled via the kernel commandline. unsigned long value;
* Register the lapic timer as a dummy clock event source on SMP
* systems, so the broadcast mechanism is used. On UP systems simply
* ignore it.
*/
if (disable_apic_timer) {
printk(KERN_INFO "Disabling APIC timer\n");
/* No broadcast on UP ! */
if (num_possible_cpus() > 1)
setup_APIC_timer();
return;
}
printk(KERN_INFO "Using local APIC timer interrupts.\n"); /* For the spurious interrupt use vector F, and enable it */
calibrate_APIC_clock(); value = apic_read(APIC_SPIV);
value &= ~APIC_VECTOR_MASK;
value |= APIC_SPIV_APIC_ENABLED;
value |= 0xf;
apic_write(APIC_SPIV, value);
/* if (!virt_wire_setup) {
* If nmi_watchdog is set to IO_APIC, we need the /*
* PIT/HPET going. Otherwise register lapic as a dummy * For LVT0 make it edge triggered, active high,
* device. * external and enabled
*/ */
if (nmi_watchdog != NMI_IO_APIC) value = apic_read(APIC_LVT0);
lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY; value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
else APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
printk(KERN_WARNING "APIC timer registered as dummy," APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
" due to nmi_watchdog=1!\n"); value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
apic_write(APIC_LVT0, value);
} else {
/* Disable LVT0 */
apic_write(APIC_LVT0, APIC_LVT_MASKED);
}
setup_APIC_timer(); /* For LVT1 make it edge triggered, active high, nmi and enabled */
value = apic_read(APIC_LVT1);
value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
apic_write(APIC_LVT1, value);
} }
/* /*
* AMD C1E enabled CPUs have a real nasty problem: Some BIOSes set the * Power management
* C1E flag only in the secondary CPU, so when we detect the wreckage
* we already have enabled the boot CPU local apic timer. Check, if
* disable_apic_timer is set and the DUMMY flag is cleared. If yes,
* set the DUMMY flag again and force the broadcast mode in the
* clockevents layer.
*/ */
void __cpuinit check_boot_apic_timer_broadcast(void) #ifdef CONFIG_PM
{
if (!disable_apic_timer || static struct {
(lapic_clockevent.features & CLOCK_EVT_FEAT_DUMMY)) /* 'active' is true if the local APIC was enabled by us and
return; not the BIOS; this signifies that we are also responsible
for disabling it before entering apm/acpi suspend */
int active;
/* r/w apic fields */
unsigned int apic_id;
unsigned int apic_taskpri;
unsigned int apic_ldr;
unsigned int apic_dfr;
unsigned int apic_spiv;
unsigned int apic_lvtt;
unsigned int apic_lvtpc;
unsigned int apic_lvt0;
unsigned int apic_lvt1;
unsigned int apic_lvterr;
unsigned int apic_tmict;
unsigned int apic_tdcr;
unsigned int apic_thmr;
} apic_pm_state;
static int lapic_suspend(struct sys_device *dev, pm_message_t state)
{
unsigned long flags;
int maxlvt;
printk(KERN_INFO "AMD C1E detected late. Force timer broadcast.\n"); if (!apic_pm_state.active)
lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY; return 0;
local_irq_enable(); maxlvt = lapic_get_maxlvt();
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, &boot_cpu_id);
local_irq_disable();
}
void __cpuinit setup_secondary_APIC_clock(void) apic_pm_state.apic_id = apic_read(APIC_ID);
{ apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
check_boot_apic_timer_broadcast(); apic_pm_state.apic_ldr = apic_read(APIC_LDR);
setup_APIC_timer(); apic_pm_state.apic_dfr = apic_read(APIC_DFR);
apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
if (maxlvt >= 4)
apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
#ifdef CONFIG_X86_MCE_INTEL
if (maxlvt >= 5)
apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
#endif
local_irq_save(flags);
disable_local_APIC();
local_irq_restore(flags);
return 0;
} }
int setup_profiling_timer(unsigned int multiplier) static int lapic_resume(struct sys_device *dev)
{ {
return -EINVAL; unsigned int l, h;
} unsigned long flags;
int maxlvt;
void setup_APIC_extended_lvt(unsigned char lvt_off, unsigned char vector, if (!apic_pm_state.active)
unsigned char msg_type, unsigned char mask) return 0;
{
unsigned long reg = (lvt_off << 4) + K8_APIC_EXT_LVT_BASE;
unsigned int v = (mask << 16) | (msg_type << 8) | vector;
apic_write(reg, v);
}
/* maxlvt = lapic_get_maxlvt();
* Local timer interrupt handler. It does both profiling and
* process statistics/rescheduling.
*
* We do profiling in every local tick, statistics/rescheduling
* happen only every 'profiling multiplier' ticks. The default
* multiplier is 1 and it can be changed by writing the new multiplier
* value into /proc/profile.
*/
static void smp_local_timer_interrupt(void) local_irq_save(flags);
{ rdmsr(MSR_IA32_APICBASE, l, h);
int cpu = smp_processor_id(); l &= ~MSR_IA32_APICBASE_BASE;
struct clock_event_device *evt = &per_cpu(lapic_events, cpu); l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
wrmsr(MSR_IA32_APICBASE, l, h);
apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
apic_write(APIC_ID, apic_pm_state.apic_id);
apic_write(APIC_DFR, apic_pm_state.apic_dfr);
apic_write(APIC_LDR, apic_pm_state.apic_ldr);
apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
#ifdef CONFIG_X86_MCE_INTEL
if (maxlvt >= 5)
apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
#endif
if (maxlvt >= 4)
apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
apic_write(APIC_ESR, 0);
apic_read(APIC_ESR);
apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
apic_write(APIC_ESR, 0);
apic_read(APIC_ESR);
local_irq_restore(flags);
return 0;
}
/* static struct sysdev_class lapic_sysclass = {
* Normally we should not be here till LAPIC has been initialized but .name = "lapic",
* in some cases like kdump, its possible that there is a pending LAPIC .resume = lapic_resume,
* timer interrupt from previous kernel's context and is delivered in .suspend = lapic_suspend,
* new kernel the moment interrupts are enabled. };
*
* Interrupts are enabled early and LAPIC is setup much later, hence
* its possible that when we get here evt->event_handler is NULL.
* Check for event_handler being NULL and discard the interrupt as
* spurious.
*/
if (!evt->event_handler) {
printk(KERN_WARNING
"Spurious LAPIC timer interrupt on cpu %d\n", cpu);
/* Switch it off */
lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
return;
}
/* static struct sys_device device_lapic = {
* the NMI deadlock-detector uses this. .id = 0,
*/ .cls = &lapic_sysclass,
add_pda(apic_timer_irqs, 1); };
evt->event_handler(evt); static void __cpuinit apic_pm_activate(void)
{
apic_pm_state.active = 1;
} }
/* static int __init init_lapic_sysfs(void)
* Local APIC timer interrupt. This is the most natural way for doing
* local interrupts, but local timer interrupts can be emulated by
* broadcast interrupts too. [in case the hw doesn't support APIC timers]
*
* [ if a single-CPU system runs an SMP kernel then we call the local
* interrupt as well. Thus we cannot inline the local irq ... ]
*/
void smp_apic_timer_interrupt(struct pt_regs *regs)
{ {
struct pt_regs *old_regs = set_irq_regs(regs); int error;
if (!cpu_has_apic)
/* return 0;
* NOTE! We'd better ACK the irq immediately, /* XXX: remove suspend/resume procs if !apic_pm_state.active? */
* because timer handling can be slow. error = sysdev_class_register(&lapic_sysclass);
*/ if (!error)
ack_APIC_irq(); error = sysdev_register(&device_lapic);
/* return error;
* update_process_times() expects us to have done irq_enter().
* Besides, if we don't timer interrupts ignore the global
* interrupt lock, which is the WrongThing (tm) to do.
*/
exit_idle();
irq_enter();
smp_local_timer_interrupt();
irq_exit();
set_irq_regs(old_regs);
} }
device_initcall(init_lapic_sysfs);
#else /* CONFIG_PM */
static void apic_pm_activate(void) { }
#endif /* CONFIG_PM */
 /*
  * apic_is_clustered_box() -- Check if we can expect good TSC
@@ -1032,91 +1160,28 @@ __cpuinit int apic_is_clustered_box(void)
 }
/* /*
* This interrupt should _never_ happen with our APIC/SMP architecture * APIC command line parameters
*/
asmlinkage void smp_spurious_interrupt(void)
{
unsigned int v;
exit_idle();
irq_enter();
/*
* Check if this really is a spurious interrupt and ACK it
* if it is a vectored one. Just in case...
* Spurious interrupts should not be ACKed.
*/
v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
ack_APIC_irq();
add_pda(irq_spurious_count, 1);
irq_exit();
}
/*
* This interrupt should never happen with our APIC/SMP architecture
*/
asmlinkage void smp_error_interrupt(void)
{
unsigned int v, v1;
exit_idle();
irq_enter();
/* First tickle the hardware, only then report what went on. -- REW */
v = apic_read(APIC_ESR);
apic_write(APIC_ESR, 0);
v1 = apic_read(APIC_ESR);
ack_APIC_irq();
atomic_inc(&irq_err_count);
/* Here is what the APIC error bits mean:
0: Send CS error
1: Receive CS error
2: Send accept error
3: Receive accept error
4: Reserved
5: Send illegal vector
6: Received illegal vector
7: Illegal register address
*/
printk (KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
smp_processor_id(), v , v1);
irq_exit();
}
int disable_apic;
/*
* This initializes the IO-APIC and APIC hardware if this is
* a UP kernel.
*/ */
int __init APIC_init_uniprocessor (void) static int __init apic_set_verbosity(char *str)
{ {
if (disable_apic) { if (str == NULL) {
printk(KERN_INFO "Apic disabled\n"); skip_ioapic_setup = 0;
return -1; ioapic_force = 1;
return 0;
} }
if (!cpu_has_apic) { if (strcmp("debug", str) == 0)
disable_apic = 1; apic_verbosity = APIC_DEBUG;
printk(KERN_INFO "Apic disabled by BIOS\n"); else if (strcmp("verbose", str) == 0)
return -1; apic_verbosity = APIC_VERBOSE;
else {
printk(KERN_WARNING "APIC Verbosity level %s not recognised"
" use apic=verbose or apic=debug\n", str);
return -EINVAL;
} }
verify_local_APIC();
phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
apic_write(APIC_ID, SET_APIC_ID(boot_cpu_id));
setup_local_APIC();
if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
setup_IO_APIC();
else
nr_ioapics = 0;
setup_boot_APIC_clock();
check_nmi_watchdog();
return 0; return 0;
} }
early_param("apic", apic_set_verbosity);
 static __init int setup_disableapic(char *str)
 {
...