Commit 4420471f authored by Ingo Molnar's avatar Ingo Molnar

Merge branch 'x86/apic' into irq/numa

Conflicts:
	arch/x86/kernel/apic/io_apic.c

Merge reason: non-trivial interaction between ongoing work in io_apic.c
              and the NUMA migration feature in the irq tree.
Signed-off-by: default avatarIngo Molnar <mingo@elte.hu>
parents 15e957d0 e0e42142
...@@ -1565,6 +1565,9 @@ and is between 256 and 4096 characters. It is defined in the file ...@@ -1565,6 +1565,9 @@ and is between 256 and 4096 characters. It is defined in the file
noinitrd [RAM] Tells the kernel not to load any configured noinitrd [RAM] Tells the kernel not to load any configured
initial RAM disk. initial RAM disk.
nointremap [X86-64, Intel-IOMMU] Do not enable interrupt
remapping.
nointroute [IA-64] nointroute [IA-64]
nojitter [IA64] Disables jitter checking for ITC timers. nojitter [IA64] Disables jitter checking for ITC timers.
......
...@@ -349,7 +349,7 @@ config X86_UV ...@@ -349,7 +349,7 @@ config X86_UV
depends on X86_64 depends on X86_64
depends on X86_EXTENDED_PLATFORM depends on X86_EXTENDED_PLATFORM
depends on NUMA depends on NUMA
select X86_X2APIC depends on X86_X2APIC
---help--- ---help---
This option is needed in order to support SGI Ultraviolet systems. This option is needed in order to support SGI Ultraviolet systems.
If you don't have one of these, you should say N here. If you don't have one of these, you should say N here.
......
...@@ -107,8 +107,7 @@ extern u32 native_safe_apic_wait_icr_idle(void); ...@@ -107,8 +107,7 @@ extern u32 native_safe_apic_wait_icr_idle(void);
extern void native_apic_icr_write(u32 low, u32 id); extern void native_apic_icr_write(u32 low, u32 id);
extern u64 native_apic_icr_read(void); extern u64 native_apic_icr_read(void);
#define EIM_8BIT_APIC_ID 0 extern int x2apic_mode;
#define EIM_32BIT_APIC_ID 1
#ifdef CONFIG_X86_X2APIC #ifdef CONFIG_X86_X2APIC
/* /*
...@@ -166,10 +165,9 @@ static inline u64 native_x2apic_icr_read(void) ...@@ -166,10 +165,9 @@ static inline u64 native_x2apic_icr_read(void)
return val; return val;
} }
extern int x2apic, x2apic_phys; extern int x2apic_phys;
extern void check_x2apic(void); extern void check_x2apic(void);
extern void enable_x2apic(void); extern void enable_x2apic(void);
extern void enable_IR_x2apic(void);
extern void x2apic_icr_write(u32 low, u32 id); extern void x2apic_icr_write(u32 low, u32 id);
static inline int x2apic_enabled(void) static inline int x2apic_enabled(void)
{ {
...@@ -183,6 +181,8 @@ static inline int x2apic_enabled(void) ...@@ -183,6 +181,8 @@ static inline int x2apic_enabled(void)
return 1; return 1;
return 0; return 0;
} }
#define x2apic_supported() (cpu_has_x2apic)
#else #else
static inline void check_x2apic(void) static inline void check_x2apic(void)
{ {
...@@ -190,28 +190,20 @@ static inline void check_x2apic(void) ...@@ -190,28 +190,20 @@ static inline void check_x2apic(void)
static inline void enable_x2apic(void) static inline void enable_x2apic(void)
{ {
} }
static inline void enable_IR_x2apic(void)
{
}
static inline int x2apic_enabled(void) static inline int x2apic_enabled(void)
{ {
return 0; return 0;
} }
#define x2apic 0 #define x2apic_preenabled 0
#define x2apic_supported() 0
#endif #endif
extern int get_physical_broadcast(void); extern void enable_IR_x2apic(void);
#ifdef CONFIG_X86_X2APIC extern int get_physical_broadcast(void);
static inline void ack_x2APIC_irq(void)
{
/* Docs say use 0 for future compatibility */
native_apic_msr_write(APIC_EOI, 0);
}
#endif
extern void apic_disable(void);
extern int lapic_get_maxlvt(void); extern int lapic_get_maxlvt(void);
extern void clear_local_APIC(void); extern void clear_local_APIC(void);
extern void connect_bsp_APIC(void); extern void connect_bsp_APIC(void);
...@@ -252,7 +244,7 @@ static inline void lapic_shutdown(void) { } ...@@ -252,7 +244,7 @@ static inline void lapic_shutdown(void) { }
#define local_apic_timer_c2_ok 1 #define local_apic_timer_c2_ok 1
static inline void init_apic_mappings(void) { } static inline void init_apic_mappings(void) { }
static inline void disable_local_APIC(void) { } static inline void disable_local_APIC(void) { }
static inline void apic_disable(void) { }
#endif /* !CONFIG_X86_LOCAL_APIC */ #endif /* !CONFIG_X86_LOCAL_APIC */
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
......
...@@ -60,8 +60,4 @@ extern struct irq_chip i8259A_chip; ...@@ -60,8 +60,4 @@ extern struct irq_chip i8259A_chip;
extern void mask_8259A(void); extern void mask_8259A(void);
extern void unmask_8259A(void); extern void unmask_8259A(void);
#ifdef CONFIG_X86_32
extern void init_ISA_irqs(void);
#endif
#endif /* _ASM_X86_I8259_H */ #endif /* _ASM_X86_I8259_H */
...@@ -161,15 +161,11 @@ extern int io_apic_set_pci_routing(struct device *dev, int ioapic, int pin, ...@@ -161,15 +161,11 @@ extern int io_apic_set_pci_routing(struct device *dev, int ioapic, int pin,
extern int (*ioapic_renumber_irq)(int ioapic, int irq); extern int (*ioapic_renumber_irq)(int ioapic, int irq);
extern void ioapic_init_mappings(void); extern void ioapic_init_mappings(void);
#ifdef CONFIG_X86_64
extern struct IO_APIC_route_entry **alloc_ioapic_entries(void); extern struct IO_APIC_route_entry **alloc_ioapic_entries(void);
extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries); extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries);
extern int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries); extern int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
extern void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries); extern void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries); extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
extern void reinit_intr_remapped_IO_APIC(int intr_remapping,
struct IO_APIC_route_entry **ioapic_entries);
#endif
extern void probe_nr_irqs_gsi(void); extern void probe_nr_irqs_gsi(void);
......
#ifndef _ASM_X86_IRQ_REMAPPING_H #ifndef _ASM_X86_IRQ_REMAPPING_H
#define _ASM_X86_IRQ_REMAPPING_H #define _ASM_X86_IRQ_REMAPPING_H
#define IRTE_DEST(dest) ((x2apic) ? dest : dest << 8) #define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)
#endif /* _ASM_X86_IRQ_REMAPPING_H */ #endif /* _ASM_X86_IRQ_REMAPPING_H */
...@@ -34,6 +34,7 @@ ...@@ -34,6 +34,7 @@
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
# define SYSCALL_VECTOR 0x80 # define SYSCALL_VECTOR 0x80
# define IA32_SYSCALL_VECTOR 0x80
#else #else
# define IA32_SYSCALL_VECTOR 0x80 # define IA32_SYSCALL_VECTOR 0x80
#endif #endif
......
...@@ -33,7 +33,6 @@ struct x86_quirks { ...@@ -33,7 +33,6 @@ struct x86_quirks {
int (*setup_ioapic_ids)(void); int (*setup_ioapic_ids)(void);
}; };
extern void x86_quirk_pre_intr_init(void);
extern void x86_quirk_intr_init(void); extern void x86_quirk_intr_init(void);
extern void x86_quirk_trap_init(void); extern void x86_quirk_trap_init(void);
......
...@@ -28,7 +28,7 @@ CFLAGS_paravirt.o := $(nostackp) ...@@ -28,7 +28,7 @@ CFLAGS_paravirt.o := $(nostackp)
obj-y := process_$(BITS).o signal.o entry_$(BITS).o obj-y := process_$(BITS).o signal.o entry_$(BITS).o
obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
obj-y += time_$(BITS).o ioport.o ldt.o dumpstack.o obj-y += time_$(BITS).o ioport.o ldt.o dumpstack.o
obj-y += setup.o i8259.o irqinit_$(BITS).o obj-y += setup.o i8259.o irqinit.o
obj-$(CONFIG_X86_VISWS) += visws_quirks.o obj-$(CONFIG_X86_VISWS) += visws_quirks.o
obj-$(CONFIG_X86_32) += probe_roms_32.o obj-$(CONFIG_X86_32) += probe_roms_32.o
obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
......
This diff is collapsed.
...@@ -145,7 +145,7 @@ es7000_rename_gsi(int ioapic, int gsi) ...@@ -145,7 +145,7 @@ es7000_rename_gsi(int ioapic, int gsi)
return gsi; return gsi;
} }
static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip) static int __cpuinit wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
{ {
unsigned long vect = 0, psaival = 0; unsigned long vect = 0, psaival = 0;
......
...@@ -489,121 +489,6 @@ static void ioapic_mask_entry(int apic, int pin) ...@@ -489,121 +489,6 @@ static void ioapic_mask_entry(int apic, int pin)
spin_unlock_irqrestore(&ioapic_lock, flags); spin_unlock_irqrestore(&ioapic_lock, flags);
} }
#ifdef CONFIG_SMP
static void send_cleanup_vector(struct irq_cfg *cfg)
{
cpumask_var_t cleanup_mask;
if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
unsigned int i;
cfg->move_cleanup_count = 0;
for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
cfg->move_cleanup_count++;
for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
} else {
cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
free_cpumask_var(cleanup_mask);
}
cfg->move_in_progress = 0;
}
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
{
int apic, pin;
struct irq_pin_list *entry;
u8 vector = cfg->vector;
entry = cfg->irq_2_pin;
for (;;) {
unsigned int reg;
if (!entry)
break;
apic = entry->apic;
pin = entry->pin;
/*
* With interrupt-remapping, destination information comes
* from interrupt-remapping table entry.
*/
if (!irq_remapped(irq))
io_apic_write(apic, 0x11 + pin*2, dest);
reg = io_apic_read(apic, 0x10 + pin*2);
reg &= ~IO_APIC_REDIR_VECTOR_MASK;
reg |= vector;
io_apic_modify(apic, 0x10 + pin*2, reg);
if (!entry->next)
break;
entry = entry->next;
}
}
static int
assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
/*
* Either sets desc->affinity to a valid value, and returns
* ->cpu_mask_to_apicid of that, or returns BAD_APICID and
* leaves desc->affinity untouched.
*/
static unsigned int
set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
{
struct irq_cfg *cfg;
unsigned int irq;
if (!cpumask_intersects(mask, cpu_online_mask))
return BAD_APICID;
irq = desc->irq;
cfg = desc->chip_data;
if (assign_irq_vector(irq, cfg, mask))
return BAD_APICID;
cpumask_copy(desc->affinity, mask);
return apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain);
}
static int
set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
{
struct irq_cfg *cfg;
unsigned long flags;
unsigned int dest;
unsigned int irq;
int ret = -1;
irq = desc->irq;
cfg = desc->chip_data;
spin_lock_irqsave(&ioapic_lock, flags);
dest = set_desc_affinity(desc, mask);
if (dest != BAD_APICID) {
/* Only the high 8 bits are valid. */
dest = SET_APIC_LOGICAL_ID(dest);
__target_IO_APIC_irq(irq, dest, cfg);
ret = 0;
}
spin_unlock_irqrestore(&ioapic_lock, flags);
return ret;
}
static int
set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
{
struct irq_desc *desc;
desc = irq_to_desc(irq);
return set_ioapic_affinity_irq_desc(desc, mask);
}
#endif /* CONFIG_SMP */
/* /*
* The common case is 1:1 IRQ<->pin mappings. Sometimes there are * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
* shared ISA-space IRQs, so we have to support them. We are super * shared ISA-space IRQs, so we have to support them. We are super
...@@ -822,7 +707,6 @@ static int __init ioapic_pirq_setup(char *str) ...@@ -822,7 +707,6 @@ static int __init ioapic_pirq_setup(char *str)
__setup("pirq=", ioapic_pirq_setup); __setup("pirq=", ioapic_pirq_setup);
#endif /* CONFIG_X86_32 */ #endif /* CONFIG_X86_32 */
#ifdef CONFIG_INTR_REMAP
struct IO_APIC_route_entry **alloc_ioapic_entries(void) struct IO_APIC_route_entry **alloc_ioapic_entries(void)
{ {
int apic; int apic;
...@@ -920,20 +804,6 @@ int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries) ...@@ -920,20 +804,6 @@ int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
return 0; return 0;
} }
void reinit_intr_remapped_IO_APIC(int intr_remapping,
struct IO_APIC_route_entry **ioapic_entries)
{
/*
* for now plain restore of previous settings.
* TBD: In the case of OS enabling interrupt-remapping,
* IO-APIC RTE's need to be setup to point to interrupt-remapping
* table entries. for now, do a plain restore, and wait for
* the setup_IO_APIC_irqs() to do proper initialization.
*/
restore_IO_APIC_setup(ioapic_entries);
}
void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries) void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries)
{ {
int apic; int apic;
...@@ -943,7 +813,6 @@ void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries) ...@@ -943,7 +813,6 @@ void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries)
kfree(ioapic_entries); kfree(ioapic_entries);
} }
#endif
/* /*
* Find the IRQ entry number of a certain pin. * Find the IRQ entry number of a certain pin.
...@@ -2332,6 +2201,118 @@ static int ioapic_retrigger_irq(unsigned int irq) ...@@ -2332,6 +2201,118 @@ static int ioapic_retrigger_irq(unsigned int irq)
*/ */
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
static void send_cleanup_vector(struct irq_cfg *cfg)
{
cpumask_var_t cleanup_mask;
if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
unsigned int i;
cfg->move_cleanup_count = 0;
for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
cfg->move_cleanup_count++;
for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
} else {
cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
free_cpumask_var(cleanup_mask);
}
cfg->move_in_progress = 0;
}
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
{
int apic, pin;
struct irq_pin_list *entry;
u8 vector = cfg->vector;
entry = cfg->irq_2_pin;
for (;;) {
unsigned int reg;
if (!entry)
break;
apic = entry->apic;
pin = entry->pin;
/*
* With interrupt-remapping, destination information comes
* from interrupt-remapping table entry.
*/
if (!irq_remapped(irq))
io_apic_write(apic, 0x11 + pin*2, dest);
reg = io_apic_read(apic, 0x10 + pin*2);
reg &= ~IO_APIC_REDIR_VECTOR_MASK;
reg |= vector;
io_apic_modify(apic, 0x10 + pin*2, reg);
if (!entry->next)
break;
entry = entry->next;
}
}
static int
assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
/*
* Either sets desc->affinity to a valid value, and returns
* ->cpu_mask_to_apicid of that, or returns BAD_APICID and
* leaves desc->affinity untouched.
*/
static unsigned int
set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
{
struct irq_cfg *cfg;
unsigned int irq;
if (!cpumask_intersects(mask, cpu_online_mask))
return BAD_APICID;
irq = desc->irq;
cfg = desc->chip_data;
if (assign_irq_vector(irq, cfg, mask))
return BAD_APICID;
cpumask_copy(desc->affinity, mask);
return apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain);
}
static int
set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
{
struct irq_cfg *cfg;
unsigned long flags;
unsigned int dest;
unsigned int irq;
int ret = -1;
irq = desc->irq;
cfg = desc->chip_data;
spin_lock_irqsave(&ioapic_lock, flags);
dest = set_desc_affinity(desc, mask);
if (dest != BAD_APICID) {
/* Only the high 8 bits are valid. */
dest = SET_APIC_LOGICAL_ID(dest);
__target_IO_APIC_irq(irq, dest, cfg);
ret = 0;
}
spin_unlock_irqrestore(&ioapic_lock, flags);
return ret;
}
static int
set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
{
struct irq_desc *desc;
desc = irq_to_desc(irq);
return set_ioapic_affinity_irq_desc(desc, mask);
}
#ifdef CONFIG_INTR_REMAP #ifdef CONFIG_INTR_REMAP
...@@ -2478,53 +2459,6 @@ static void irq_complete_move(struct irq_desc **descp) ...@@ -2478,53 +2459,6 @@ static void irq_complete_move(struct irq_desc **descp)
static inline void irq_complete_move(struct irq_desc **descp) {} static inline void irq_complete_move(struct irq_desc **descp) {}
#endif #endif
static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
{
int apic, pin;
struct irq_pin_list *entry;
entry = cfg->irq_2_pin;
for (;;) {
if (!entry)
break;
apic = entry->apic;
pin = entry->pin;
io_apic_eoi(apic, pin);
entry = entry->next;
}
}
static void
eoi_ioapic_irq(struct irq_desc *desc)
{
struct irq_cfg *cfg;
unsigned long flags;
unsigned int irq;
irq = desc->irq;
cfg = desc->chip_data;
spin_lock_irqsave(&ioapic_lock, flags);
__eoi_ioapic_irq(irq, cfg);
spin_unlock_irqrestore(&ioapic_lock, flags);
}
#ifdef CONFIG_X86_X2APIC
static void ack_x2apic_level(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
ack_x2APIC_irq();
eoi_ioapic_irq(desc);
}
static void ack_x2apic_edge(unsigned int irq)
{
ack_x2APIC_irq();
}
#endif
static void ack_apic_edge(unsigned int irq) static void ack_apic_edge(unsigned int irq)
{ {
struct irq_desc *desc = irq_to_desc(irq); struct irq_desc *desc = irq_to_desc(irq);
...@@ -2588,9 +2522,6 @@ static void ack_apic_level(unsigned int irq) ...@@ -2588,9 +2522,6 @@ static void ack_apic_level(unsigned int irq)
*/ */
ack_APIC_irq(); ack_APIC_irq();
if (irq_remapped(irq))
eoi_ioapic_irq(desc);
/* Now we can move and renable the irq */ /* Now we can move and renable the irq */
if (unlikely(do_unmask_irq)) { if (unlikely(do_unmask_irq)) {
/* Only migrate the irq if the ack has been received. /* Only migrate the irq if the ack has been received.
...@@ -2637,22 +2568,50 @@ static void ack_apic_level(unsigned int irq) ...@@ -2637,22 +2568,50 @@ static void ack_apic_level(unsigned int irq)
} }
#ifdef CONFIG_INTR_REMAP #ifdef CONFIG_INTR_REMAP
static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
{
int apic, pin;
struct irq_pin_list *entry;
entry = cfg->irq_2_pin;
for (;;) {
if (!entry)
break;
apic = entry->apic;
pin = entry->pin;
io_apic_eoi(apic, pin);
entry = entry->next;
}
}
static void
eoi_ioapic_irq(struct irq_desc *desc)
{
struct irq_cfg *cfg;
unsigned long flags;
unsigned int irq;
irq = desc->irq;
cfg = desc->chip_data;
spin_lock_irqsave(&ioapic_lock, flags);
__eoi_ioapic_irq(irq, cfg);
spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void ir_ack_apic_edge(unsigned int irq) static void ir_ack_apic_edge(unsigned int irq)
{ {
#ifdef CONFIG_X86_X2APIC ack_APIC_irq();
if (x2apic_enabled())
return ack_x2apic_edge(irq);
#endif
return ack_apic_edge(irq);
} }
static void ir_ack_apic_level(unsigned int irq) static void ir_ack_apic_level(unsigned int irq)
{ {
#ifdef CONFIG_X86_X2APIC struct irq_desc *desc = irq_to_desc(irq);
if (x2apic_enabled())
return ack_x2apic_level(irq); ack_APIC_irq();
#endif eoi_ioapic_irq(desc);
return ack_apic_level(irq);
} }
#endif /* CONFIG_INTR_REMAP */ #endif /* CONFIG_INTR_REMAP */
......
...@@ -50,7 +50,7 @@ static struct apic *apic_probe[] __initdata = { ...@@ -50,7 +50,7 @@ static struct apic *apic_probe[] __initdata = {
void __init default_setup_apic_routing(void) void __init default_setup_apic_routing(void)
{ {
#ifdef CONFIG_X86_X2APIC #ifdef CONFIG_X86_X2APIC
if (x2apic && (apic != &apic_x2apic_phys && if (x2apic_mode && (apic != &apic_x2apic_phys &&
#ifdef CONFIG_X86_UV #ifdef CONFIG_X86_UV
apic != &apic_x2apic_uv_x && apic != &apic_x2apic_uv_x &&
#endif #endif
......
...@@ -173,13 +173,6 @@ static inline int is_WPEG(struct rio_detail *rio){ ...@@ -173,13 +173,6 @@ static inline int is_WPEG(struct rio_detail *rio){
rio->type == LookOutAWPEG || rio->type == LookOutBWPEG); rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
} }
/* In clustered mode, the high nibble of APIC ID is a cluster number.
* The low nibble is a 4-bit bitmap. */
#define XAPIC_DEST_CPUS_SHIFT 4
#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
#define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER) #define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER)
static const struct cpumask *summit_target_cpus(void) static const struct cpumask *summit_target_cpus(void)
......
...@@ -105,7 +105,7 @@ static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask) ...@@ -105,7 +105,7 @@ static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
cpumask_set_cpu(cpu, retmask); cpumask_set_cpu(cpu, retmask);
} }
static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip) static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
{ {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
unsigned long val; unsigned long val;
......
...@@ -24,9 +24,9 @@ void (*generic_interrupt_extension)(void) = NULL; ...@@ -24,9 +24,9 @@ void (*generic_interrupt_extension)(void) = NULL;
*/ */
void ack_bad_irq(unsigned int irq) void ack_bad_irq(unsigned int irq)
{ {
printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq); if (printk_ratelimit())
pr_err("unexpected IRQ trap at vector %02x\n", irq);
#ifdef CONFIG_X86_LOCAL_APIC
/* /*
* Currently unexpected vectors happen only on SMP and APIC. * Currently unexpected vectors happen only on SMP and APIC.
* We _must_ ack these because every local APIC has only N * We _must_ ack these because every local APIC has only N
...@@ -36,9 +36,7 @@ void ack_bad_irq(unsigned int irq) ...@@ -36,9 +36,7 @@ void ack_bad_irq(unsigned int irq)
* completely. * completely.
* But only ack when the APIC is enabled -AK * But only ack when the APIC is enabled -AK
*/ */
if (cpu_has_apic) ack_APIC_irq();
ack_APIC_irq();
#endif
} }
#define irq_stats(x) (&per_cpu(irq_stat, x)) #define irq_stats(x) (&per_cpu(irq_stat, x))
...@@ -178,7 +176,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu) ...@@ -178,7 +176,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
sum += irq_stats(cpu)->irq_thermal_count; sum += irq_stats(cpu)->irq_thermal_count;
# ifdef CONFIG_X86_64 # ifdef CONFIG_X86_64
sum += irq_stats(cpu)->irq_threshold_count; sum += irq_stats(cpu)->irq_threshold_count;
#endif # endif
#endif #endif
return sum; return sum;
} }
...@@ -213,14 +211,11 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs) ...@@ -213,14 +211,11 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
irq = __get_cpu_var(vector_irq)[vector]; irq = __get_cpu_var(vector_irq)[vector];
if (!handle_irq(irq, regs)) { if (!handle_irq(irq, regs)) {
#ifdef CONFIG_X86_64 ack_APIC_irq();
if (!disable_apic)
ack_APIC_irq();
#endif
if (printk_ratelimit()) if (printk_ratelimit())
printk(KERN_EMERG "%s: %d.%d No irq handler for vector (irq %d)\n", pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n",
__func__, smp_processor_id(), vector, irq); __func__, smp_processor_id(), vector, irq);
} }
irq_exit(); irq_exit();
......
#include <linux/linkage.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/signal.h> #include <linux/signal.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/ioport.h> #include <linux/ioport.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/random.h> #include <linux/random.h>
#include <linux/kprobes.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/kernel_stat.h> #include <linux/kernel_stat.h>
#include <linux/sysdev.h> #include <linux/sysdev.h>
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <asm/atomic.h> #include <asm/atomic.h>
#include <asm/system.h> #include <asm/system.h>
#include <asm/timer.h> #include <asm/timer.h>
#include <asm/hw_irq.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/desc.h> #include <asm/desc.h>
#include <asm/apic.h> #include <asm/apic.h>
...@@ -22,7 +27,23 @@ ...@@ -22,7 +27,23 @@
#include <asm/i8259.h> #include <asm/i8259.h>
#include <asm/traps.h> #include <asm/traps.h>
/*
* ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
* (these are usually mapped to vectors 0x30-0x3f)
*/
/*
* The IO-APIC gives us many more interrupt sources. Most of these
* are unused but an SMP system is supposed to have enough memory ...
* sometimes (mostly wrt. hw bugs) we get corrupted vectors all
* across the spectrum, so we really want to be prepared to get all
* of these. Plus, more powerful systems might have more than 64
* IO-APIC registers.
*
* (these are usually mapped into the 0x30-0xff vector range)
*/
#ifdef CONFIG_X86_32
/* /*
* Note that on a 486, we don't want to do a SIGFPE on an irq13 * Note that on a 486, we don't want to do a SIGFPE on an irq13
* as the irq is unreliable, and exception 16 works correctly * as the irq is unreliable, and exception 16 works correctly
...@@ -52,30 +73,7 @@ static struct irqaction fpu_irq = { ...@@ -52,30 +73,7 @@ static struct irqaction fpu_irq = {
.handler = math_error_irq, .handler = math_error_irq,
.name = "fpu", .name = "fpu",
}; };
void __init init_ISA_irqs(void)
{
int i;
#ifdef CONFIG_X86_LOCAL_APIC
init_bsp_APIC();
#endif #endif
init_8259A(0);
/*
* 16 old-style INTA-cycle interrupts:
*/
for (i = 0; i < NR_IRQS_LEGACY; i++) {
struct irq_desc *desc = irq_to_desc(i);
desc->status = IRQ_DISABLED;
desc->action = NULL;
desc->depth = 1;
set_irq_chip_and_handler_name(i, &i8259A_chip,
handle_level_irq, "XT");
}
}
/* /*
* IRQ2 is cascade interrupt to second interrupt controller * IRQ2 is cascade interrupt to second interrupt controller
...@@ -118,29 +116,37 @@ int vector_used_by_percpu_irq(unsigned int vector) ...@@ -118,29 +116,37 @@ int vector_used_by_percpu_irq(unsigned int vector)
return 0; return 0;
} }
/* Overridden in paravirt.c */ static void __init init_ISA_irqs(void)
void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
void __init native_init_IRQ(void)
{ {
int i; int i;
/* Execute any quirks before the call gates are initialised: */ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
x86_quirk_pre_intr_init(); init_bsp_APIC();
#endif
init_8259A(0);
/* /*
* Cover the whole vector space, no vector can escape * 16 old-style INTA-cycle interrupts:
* us. (some of these will be overridden and become
* 'special' SMP interrupts)
*/ */
for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) { for (i = 0; i < NR_IRQS_LEGACY; i++) {
/* SYSCALL_VECTOR was reserved in trap_init. */ struct irq_desc *desc = irq_to_desc(i);
if (i != SYSCALL_VECTOR)
set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]); desc->status = IRQ_DISABLED;
desc->action = NULL;
desc->depth = 1;
set_irq_chip_and_handler_name(i, &i8259A_chip,
handle_level_irq, "XT");
} }
}
/* Overridden in paravirt.c */
void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_SMP) static void __init smp_intr_init(void)
{
#ifdef CONFIG_SMP
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
/* /*
* The reschedule interrupt is a CPU-to-CPU reschedule-helper * The reschedule interrupt is a CPU-to-CPU reschedule-helper
* IPI, driven by wakeup. * IPI, driven by wakeup.
...@@ -160,16 +166,27 @@ void __init native_init_IRQ(void) ...@@ -160,16 +166,27 @@ void __init native_init_IRQ(void)
/* IPI for generic function call */ /* IPI for generic function call */
alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
/* IPI for single call function */ /* IPI for generic single function call */
alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
call_function_single_interrupt); call_function_single_interrupt);
/* Low priority IPI to cleanup after moving an irq */ /* Low priority IPI to cleanup after moving an irq */
set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors); set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
#endif #endif
#endif /* CONFIG_SMP */
}
static void __init apic_intr_init(void)
{
smp_intr_init();
#ifdef CONFIG_X86_64
alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
#endif
#ifdef CONFIG_X86_LOCAL_APIC #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
/* self generated IPI for local APIC timer */ /* self generated IPI for local APIC timer */
alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt); alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
...@@ -179,16 +196,67 @@ void __init native_init_IRQ(void) ...@@ -179,16 +196,67 @@ void __init native_init_IRQ(void)
/* IPI vectors for APIC spurious and error interrupts */ /* IPI vectors for APIC spurious and error interrupts */
alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt); alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
/* Performance monitoring interrupts: */
# ifdef CONFIG_PERF_COUNTERS
alloc_intr_gate(LOCAL_PERF_VECTOR, perf_counter_interrupt);
alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt);
# endif
#endif #endif
#ifdef CONFIG_X86_32
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_MCE_P4THERMAL) #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_MCE_P4THERMAL)
/* thermal monitor LVT interrupt */ /* thermal monitor LVT interrupt */
alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
#endif #endif
#endif
}
/**
* x86_quirk_pre_intr_init - initialisation prior to setting up interrupt vectors
*
* Description:
* Perform any necessary interrupt initialisation prior to setting up
* the "ordinary" interrupt call gates. For legacy reasons, the ISA
* interrupts should be initialised here if the machine emulates a PC
* in any way.
**/
static void __init x86_quirk_pre_intr_init(void)
{
#ifdef CONFIG_X86_32
if (x86_quirks->arch_pre_intr_init) {
if (x86_quirks->arch_pre_intr_init())
return;
}
#endif
init_ISA_irqs();
}
void __init native_init_IRQ(void)
{
int i;
/* Execute any quirks before the call gates are initialised: */
x86_quirk_pre_intr_init();
apic_intr_init();
/*
* Cover the whole vector space, no vector can escape
* us. (some of these will be overridden and become
* 'special' SMP interrupts)
*/
for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
/* IA32_SYSCALL_VECTOR could be used in trap_init already. */
if (!test_bit(i, used_vectors))
set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]);
}
if (!acpi_ioapic) if (!acpi_ioapic)
setup_irq(2, &irq2); setup_irq(2, &irq2);
#ifdef CONFIG_X86_32
/* /*
* Call quirks after call gates are initialised (usually add in * Call quirks after call gates are initialised (usually add in
* the architecture specific gates): * the architecture specific gates):
...@@ -203,4 +271,5 @@ void __init native_init_IRQ(void) ...@@ -203,4 +271,5 @@ void __init native_init_IRQ(void)
setup_irq(FPU_IRQ, &fpu_irq); setup_irq(FPU_IRQ, &fpu_irq);
irq_ctx_init(smp_processor_id()); irq_ctx_init(smp_processor_id());
#endif
} }
#include <linux/linkage.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/sysdev.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/hw_irq.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/apic.h>
#include <asm/i8259.h>
/*
* ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
* (these are usually mapped to vectors 0x30-0x3f)
*/
/*
* The IO-APIC gives us many more interrupt sources. Most of these
* are unused but an SMP system is supposed to have enough memory ...
* sometimes (mostly wrt. hw bugs) we get corrupted vectors all
* across the spectrum, so we really want to be prepared to get all
* of these. Plus, more powerful systems might have more than 64
* IO-APIC registers.
*
* (these are usually mapped into the 0x30-0xff vector range)
*/
/*
* IRQ2 is cascade interrupt to second interrupt controller
*/
/* Placeholder action for IRQ2: the cascade line never delivers a real IRQ. */
static struct irqaction irq2 = {
	.handler = no_action,	/* nothing to do; claiming the line is the point */
	.name = "cascade",
};
/*
 * Per-CPU vector -> IRQ translation table.  The 16 legacy ISA IRQs are
 * statically bound to their IRQ0_VECTOR..IRQ15_VECTOR slots; every other
 * vector starts out unassigned (-1) and is filled in dynamically.
 */
DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
	[0 ... IRQ0_VECTOR - 1] = -1,
	[IRQ0_VECTOR] = 0,
	[IRQ1_VECTOR] = 1,
	[IRQ2_VECTOR] = 2,
	[IRQ3_VECTOR] = 3,
	[IRQ4_VECTOR] = 4,
	[IRQ5_VECTOR] = 5,
	[IRQ6_VECTOR] = 6,
	[IRQ7_VECTOR] = 7,
	[IRQ8_VECTOR] = 8,
	[IRQ9_VECTOR] = 9,
	[IRQ10_VECTOR] = 10,
	[IRQ11_VECTOR] = 11,
	[IRQ12_VECTOR] = 12,
	[IRQ13_VECTOR] = 13,
	[IRQ14_VECTOR] = 14,
	[IRQ15_VECTOR] = 15,
	[IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
};
/*
 * Return 1 if @vector is bound to an IRQ on any online CPU, 0 otherwise.
 * Scans each online CPU's vector_irq[] translation slot.
 */
int vector_used_by_percpu_irq(unsigned int vector)
{
	int used = 0;
	int cpu;

	for_each_online_cpu(cpu) {
		if (per_cpu(vector_irq, cpu)[vector] != -1) {
			used = 1;
			break;
		}
	}

	return used;
}
/*
 * Set up the 16 legacy (ISA/XT-PIC) interrupts: reset each legacy
 * irq_desc to a disabled, action-free state and hand it to the i8259A
 * chip with the level-type flow handler.
 */
static void __init init_ISA_irqs(void)
{
	int i;
	init_bsp_APIC();
	init_8259A(0);	/* 0: presumably !auto-EOI mode — see init_8259A() */
	for (i = 0; i < NR_IRQS_LEGACY; i++) {
		struct irq_desc *desc = irq_to_desc(i);
		/* Start disabled with no handler attached; depth 1 matches
		 * the disabled state so enable_irq() balances correctly. */
		desc->status = IRQ_DISABLED;
		desc->action = NULL;
		desc->depth = 1;
		/*
		 * 16 old-style INTA-cycle interrupts:
		 */
		set_irq_chip_and_handler_name(i, &i8259A_chip,
					      handle_level_irq, "XT");
	}
}
void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
/*
 * Install the SMP inter-processor-interrupt gates.  Compiles to an
 * empty function on !CONFIG_SMP kernels.
 */
static void __init smp_intr_init(void)
{
#ifdef CONFIG_SMP
	/*
	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
	 * IPI, driven by wakeup.
	 */
	alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
	/* IPIs for invalidation: one vector per invalidation slot */
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
	/* IPI for generic function call */
	alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
	/* IPI for generic single function call */
	alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
			call_function_single_interrupt);
	/* Low priority IPI to cleanup after moving an irq */
	set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
	/* Mark the cleanup vector reserved so the generic vector-fill
	 * loop in native_init_IRQ() will not reassign it. */
	set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
#endif
}
/*
 * Install the local-APIC interrupt gates (thermal, threshold, timer,
 * generic, spurious and error vectors), plus the SMP IPIs via
 * smp_intr_init().
 */
static void __init apic_intr_init(void)
{
	smp_intr_init();
	alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
	alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
	/* self generated IPI for local APIC timer */
	alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
	/* generic IPI for platform specific use */
	alloc_intr_gate(GENERIC_INTERRUPT_VECTOR, generic_interrupt);
	/* IPI vectors for APIC spurious and error interrupts */
	alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
	alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
}
/*
 * Default IRQ setup: legacy 8259A IRQs, a gate for every external
 * vector, then the APIC/SMP vectors (which override some of the
 * generic gates installed by the loop below).
 */
void __init native_init_IRQ(void)
{
	int i;

	init_ISA_irqs();

	/*
	 * Cover the whole vector space, no vector can escape
	 * us. (some of these will be overridden and become
	 * 'special' SMP interrupts)
	 */
	for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
		/*
		 * Skip anything already reserved in used_vectors rather
		 * than special-casing only IA32_SYSCALL_VECTOR: trap_init()
		 * marks the syscall vector (and all trap vectors) there,
		 * so this stays correct if more vectors get reserved.
		 */
		if (!test_bit(i, used_vectors))
			set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]);
	}

	apic_intr_init();

	if (!acpi_ioapic)
		setup_irq(2, &irq2);
}
...@@ -996,24 +996,6 @@ void __init setup_arch(char **cmdline_p) ...@@ -996,24 +996,6 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
/**
* x86_quirk_pre_intr_init - initialisation prior to setting up interrupt vectors
*
* Description:
* Perform any necessary interrupt initialisation prior to setting up
* the "ordinary" interrupt call gates. For legacy reasons, the ISA
* interrupts should be initialised here if the machine emulates a PC
* in any way.
**/
void __init x86_quirk_pre_intr_init(void)
{
if (x86_quirks->arch_pre_intr_init) {
if (x86_quirks->arch_pre_intr_init())
return;
}
init_ISA_irqs();
}
/** /**
* x86_quirk_intr_init - post gate setup interrupt initialisation * x86_quirk_intr_init - post gate setup interrupt initialisation
* *
......
...@@ -193,19 +193,19 @@ void smp_call_function_single_interrupt(struct pt_regs *regs) ...@@ -193,19 +193,19 @@ void smp_call_function_single_interrupt(struct pt_regs *regs)
} }
struct smp_ops smp_ops = { struct smp_ops smp_ops = {
.smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
.smp_prepare_cpus = native_smp_prepare_cpus, .smp_prepare_cpus = native_smp_prepare_cpus,
.smp_cpus_done = native_smp_cpus_done, .smp_cpus_done = native_smp_cpus_done,
.smp_send_stop = native_smp_send_stop, .smp_send_stop = native_smp_send_stop,
.smp_send_reschedule = native_smp_send_reschedule, .smp_send_reschedule = native_smp_send_reschedule,
.cpu_up = native_cpu_up, .cpu_up = native_cpu_up,
.cpu_die = native_cpu_die, .cpu_die = native_cpu_die,
.cpu_disable = native_cpu_disable, .cpu_disable = native_cpu_disable,
.play_dead = native_play_dead, .play_dead = native_play_dead,
.send_call_func_ipi = native_send_call_func_ipi, .send_call_func_ipi = native_send_call_func_ipi,
.send_call_func_single_ipi = native_send_call_func_single_ipi, .send_call_func_single_ipi = native_send_call_func_single_ipi,
}; };
EXPORT_SYMBOL_GPL(smp_ops); EXPORT_SYMBOL_GPL(smp_ops);
...@@ -504,7 +504,7 @@ void __inquire_remote_apic(int apicid) ...@@ -504,7 +504,7 @@ void __inquire_remote_apic(int apicid)
* INIT, INIT, STARTUP sequence will reset the chip hard for us, and this * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
* won't ... remember to clear down the APIC, etc later. * won't ... remember to clear down the APIC, etc later.
*/ */
int __devinit int __cpuinit
wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip) wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
{ {
unsigned long send_status, accept_status = 0; unsigned long send_status, accept_status = 0;
...@@ -538,7 +538,7 @@ wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip) ...@@ -538,7 +538,7 @@ wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
return (send_status | accept_status); return (send_status | accept_status);
} }
int __devinit static int __cpuinit
wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
{ {
unsigned long send_status, accept_status = 0; unsigned long send_status, accept_status = 0;
...@@ -822,10 +822,12 @@ do_rest: ...@@ -822,10 +822,12 @@ do_rest:
/* mark "stuck" area as not stuck */ /* mark "stuck" area as not stuck */
*((volatile unsigned long *)trampoline_base) = 0; *((volatile unsigned long *)trampoline_base) = 0;
/* if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
* Cleanup possible dangling ends... /*
*/ * Cleanup possible dangling ends...
smpboot_restore_warm_reset_vector(); */
smpboot_restore_warm_reset_vector();
}
return boot_error; return boot_error;
} }
......
...@@ -969,11 +969,8 @@ void __init trap_init(void) ...@@ -969,11 +969,8 @@ void __init trap_init(void)
for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
set_bit(i, used_vectors); set_bit(i, used_vectors);
#ifdef CONFIG_X86_64
set_bit(IA32_SYSCALL_VECTOR, used_vectors); set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#else
set_bit(SYSCALL_VECTOR, used_vectors);
#endif
/* /*
* Should be a barrier for any external CPU state: * Should be a barrier for any external CPU state:
*/ */
......
...@@ -1968,15 +1968,6 @@ static int __init init_dmars(void) ...@@ -1968,15 +1968,6 @@ static int __init init_dmars(void)
} }
} }
#ifdef CONFIG_INTR_REMAP
if (!intr_remapping_enabled) {
ret = enable_intr_remapping(0);
if (ret)
printk(KERN_ERR
"IOMMU: enable interrupt remapping failed\n");
}
#endif
/* /*
* For each rmrr * For each rmrr
* for each dev attached to rmrr * for each dev attached to rmrr
......
...@@ -15,6 +15,14 @@ static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; ...@@ -15,6 +15,14 @@ static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num; static int ir_ioapic_num;
int intr_remapping_enabled; int intr_remapping_enabled;
static int disable_intremap;
static __init int setup_nointremap(char *str)
{
disable_intremap = 1;
return 0;
}
early_param("nointremap", setup_nointremap);
struct irq_2_iommu { struct irq_2_iommu {
struct intel_iommu *iommu; struct intel_iommu *iommu;
u16 irte_index; u16 irte_index;
...@@ -420,20 +428,6 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) ...@@ -420,20 +428,6 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
readl, (sts & DMA_GSTS_IRTPS), sts); readl, (sts & DMA_GSTS_IRTPS), sts);
spin_unlock_irqrestore(&iommu->register_lock, flags); spin_unlock_irqrestore(&iommu->register_lock, flags);
if (mode == 0) {
spin_lock_irqsave(&iommu->register_lock, flags);
		/* enable compatibility format interrupt pass through */
cmd = iommu->gcmd | DMA_GCMD_CFI;
iommu->gcmd |= DMA_GCMD_CFI;
writel(cmd, iommu->reg + DMAR_GCMD_REG);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_CFIS), sts);
spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/* /*
* global invalidation of interrupt entry cache before enabling * global invalidation of interrupt entry cache before enabling
* interrupt-remapping. * interrupt-remapping.
...@@ -513,6 +507,23 @@ end: ...@@ -513,6 +507,23 @@ end:
spin_unlock_irqrestore(&iommu->register_lock, flags); spin_unlock_irqrestore(&iommu->register_lock, flags);
} }
int __init intr_remapping_supported(void)
{
struct dmar_drhd_unit *drhd;
if (disable_intremap)
return 0;
for_each_drhd_unit(drhd) {
struct intel_iommu *iommu = drhd->iommu;
if (!ecap_ir_support(iommu->ecap))
return 0;
}
return 1;
}
int __init enable_intr_remapping(int eim) int __init enable_intr_remapping(int eim)
{ {
struct dmar_drhd_unit *drhd; struct dmar_drhd_unit *drhd;
......
...@@ -108,6 +108,7 @@ struct irte { ...@@ -108,6 +108,7 @@ struct irte {
}; };
#ifdef CONFIG_INTR_REMAP #ifdef CONFIG_INTR_REMAP
extern int intr_remapping_enabled; extern int intr_remapping_enabled;
extern int intr_remapping_supported(void);
extern int enable_intr_remapping(int); extern int enable_intr_remapping(int);
extern void disable_intr_remapping(void); extern void disable_intr_remapping(void);
extern int reenable_intr_remapping(int); extern int reenable_intr_remapping(int);
...@@ -157,6 +158,8 @@ static inline struct intel_iommu *map_ioapic_to_ir(int apic) ...@@ -157,6 +158,8 @@ static inline struct intel_iommu *map_ioapic_to_ir(int apic)
} }
#define irq_remapped(irq) (0) #define irq_remapped(irq) (0)
#define enable_intr_remapping(mode) (-1) #define enable_intr_remapping(mode) (-1)
#define disable_intr_remapping() (0)
#define reenable_intr_remapping(mode) (0)
#define intr_remapping_enabled (0) #define intr_remapping_enabled (0)
#endif #endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment