Commit a77d2e08 authored by Linus Torvalds


Merge branch 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (30 commits)
  x86, apic: Enable lapic nmi watchdog on AMD Family 11h
  x86: Remove unnecessary mdelay() from cpu_disable_common()
  x86, ioapic: Document another case when level irq is seen as an edge
  x86, ioapic: Fix the EOI register detection mechanism
  x86, io-apic: Move the effort of clearing remoteIRR explicitly before migrating the irq
  x86: SGI UV: Map low MMR ranges
  x86: apic: Print out SRAT table APIC id in hex
  x86: Re-get cfg_new in case reuse/move irq_desc
  x86: apic: Remove not needed #ifdef
  x86: io-apic: IO-APIC MMIO should not fail on resource insertion
  x86: Remove asm/apicnum.h
  x86: apic: Do not use stacked physid_mask_t
  x86, apic: Get rid of apicid_to_cpu_present assign on 64-bit
  x86, ioapic: Use snrpintf while set names for IO-APIC resourses
  x86, apic: Use PAGE_SIZE instead of numbers
  x86: Remove local_irq_enable()/local_irq_disable() in fixup_irqs()
  x86: Use EOI register in io-apic on intel platforms
  x86: Force irq complete move during cpu offline
  x86: Remove move_cleanup_count from irq_cfg
  x86, intr-remap: Avoid irq_chip mask/unmask in fixup_irqs() for intr-remapping
  ...
parents 897e81be 7d1849af
@@ -344,6 +344,15 @@ and is between 256 and 4096 characters. It is defined in the file
Change the amount of debugging information output
when initialising the APIC and IO-APIC components.
show_lapic= [APIC,X86] Advanced Programmable Interrupt Controller
Limit APIC dumping. The parameter defines the maximum
number of local APICs to be dumped. It can also be set
to "all", meaning no limit.
Format: { 1 (default) | 2 | ... | all }.
The parameter is valid only if apic=debug or
apic=verbose is specified.
Example: apic=debug show_lapic=all
apm= [APM] Advanced Power Management
See header of arch/x86/kernel/apm_32.c.
...
@@ -297,20 +297,20 @@ struct apic {
int disable_esr;
int dest_logical;
-unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
+unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid);
unsigned long (*check_apicid_present)(int apicid);
void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
void (*init_apic_ldr)(void);
-physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
+void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
void (*setup_apic_routing)(void);
int (*multi_timer_check)(int apic, int irq);
int (*apicid_to_node)(int logical_apicid);
int (*cpu_to_logical_apicid)(int cpu);
int (*cpu_present_to_apicid)(int mps_cpu);
-physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
+void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap);
void (*setup_portio_remap)(void);
int (*check_phys_apicid_present)(int phys_apicid);
void (*enable_apic_mode)(void);
@@ -488,6 +488,8 @@ static inline unsigned int read_apic_id(void)
extern void default_setup_apic_routing(void);
+extern struct apic apic_noop;
#ifdef CONFIG_X86_32
extern struct apic apic_default;
@@ -532,9 +534,9 @@ default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
return (unsigned int)(mask1 & mask2 & mask3);
}
-static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid)
+static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid)
{
-return physid_isset(apicid, bitmap);
+return physid_isset(apicid, *map);
}
static inline unsigned long default_check_apicid_present(int bit)
@@ -542,9 +544,9 @@ static inline unsigned long default_check_apicid_present(int bit)
return physid_isset(bit, phys_cpu_present_map);
}
-static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map)
+static inline void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
-return phys_map;
+*retmap = *phys_map;
}
/* Mapping from cpu number to logical apicid */
@@ -583,11 +585,6 @@ extern int default_cpu_present_to_apicid(int mps_cpu);
extern int default_check_phys_apicid_present(int phys_apicid);
#endif
-static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid)
-{
-return physid_mask_of_physid(phys_apicid);
-}
#endif /* CONFIG_X86_LOCAL_APIC */
#ifdef CONFIG_X86_32
...
@@ -11,6 +11,12 @@
#define IO_APIC_DEFAULT_PHYS_BASE 0xfec00000
#define APIC_DEFAULT_PHYS_BASE 0xfee00000
+/*
+ * This is the IO-APIC register space as specified
+ * by Intel docs:
+ */
+#define IO_APIC_SLOT_SIZE 1024
#define APIC_ID 0x20
#define APIC_LVR 0x30
...
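IO_APIC_SLOT_SIZE above is what sizes the per-controller register window when the IO-APIC MMIO resources are inserted. A minimal standalone sketch of that arithmetic (ioapic_window() is a hypothetical helper for illustration, not the kernel's io_apic.c):

#include <stdio.h>

#define IO_APIC_SLOT_SIZE 1024 /* one register window per IO-APIC */

/* Hypothetical helper: [start, end] of the MMIO window for one
 * IO-APIC, given its physical base address from the MP/ACPI tables. */
static void ioapic_window(unsigned long phys_base,
                          unsigned long *start, unsigned long *end)
{
    *start = phys_base;
    *end = phys_base + IO_APIC_SLOT_SIZE - 1;
}

int main(void)
{
    unsigned long s, e;

    ioapic_window(0xfec00000UL, &s, &e); /* IO_APIC_DEFAULT_PHYS_BASE */
    printf("IO-APIC window: %#lx-%#lx\n", s, e); /* 0xfec00000-0xfec003ff */
    return 0;
}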
#ifndef _ASM_X86_APICNUM_H
#define _ASM_X86_APICNUM_H
/* define MAX_IO_APICS */
#ifdef CONFIG_X86_32
# define MAX_IO_APICS 64
#else
# define MAX_IO_APICS 128
# define MAX_LOCAL_APIC 32768
#endif
#endif /* _ASM_X86_APICNUM_H */
@@ -85,8 +85,26 @@ static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
irq_attr->polarity = polarity;
}
-extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin,
-struct io_apic_irq_attr *irq_attr);
+/*
+ * This is performance-critical, we want to do it O(1)
+ *
+ * Most irqs are mapped 1:1 with pins.
+ */
+struct irq_cfg {
+struct irq_pin_list *irq_2_pin;
+cpumask_var_t domain;
+cpumask_var_t old_domain;
+u8 vector;
+u8 move_in_progress : 1;
+};
+
+extern struct irq_cfg *irq_cfg(unsigned int);
+extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *);
+extern void send_cleanup_vector(struct irq_cfg *);
+
+struct irq_desc;
+extern unsigned int set_desc_affinity(struct irq_desc *, const struct cpumask *);
+
+extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr);
extern void setup_ioapic_dest(void);
extern void enable_IO_APIC(void);
...
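The comment above irq_cfg promises O(1) dispatch: on each CPU the vector number indexes a table straight to its irq, and the per-irq irq_cfg is reached without any search. A minimal sketch of the idea (standalone C with hypothetical fixed-size tables, not the kernel's actual layout, which hangs irq_cfg off irq_desc and keeps vector_irq per-CPU):

#include <stddef.h>

#define NR_VECTORS 256
#define NR_IRQS 1024

struct irq_cfg {
    unsigned char vector;           /* vector assigned to this irq */
    unsigned char move_in_progress; /* migration to a new CPU pending? */
};

/* One slot per vector: vector number -> irq number, -1 when unused. */
static int vector_irq[NR_VECTORS];

/* irq number -> routing state. */
static struct irq_cfg cfg_table[NR_IRQS];

/* O(1): dispatching a vector is two array lookups, never a search. */
static struct irq_cfg *cfg_for_vector(int vector)
{
    int irq = vector_irq[vector];

    return (irq < 0) ? NULL : &cfg_table[irq];
}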
@@ -34,6 +34,7 @@ static inline int irq_canonicalize(int irq)
#ifdef CONFIG_HOTPLUG_CPU
#include <linux/cpumask.h>
extern void fixup_irqs(void);
+extern void irq_force_complete_move(int);
#endif
extern void (*generic_interrupt_extension)(void);
...
@@ -163,14 +163,16 @@ typedef struct physid_mask physid_mask_t;
#define physids_shift_left(d, s, n) \
bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS)
-#define physids_coerce(map) ((map).mask[0])
+static inline unsigned long physids_coerce(physid_mask_t *map)
+{
+return map->mask[0];
+}
-#define physids_promote(physids) \
-({ \
-physid_mask_t __physid_mask = PHYSID_MASK_NONE; \
-__physid_mask.mask[0] = physids; \
-__physid_mask; \
-})
+static inline void physids_promote(unsigned long physids, physid_mask_t *map)
+{
+physids_clear(*map);
+map->mask[0] = physids;
+}
/* Note: will create very large stack frames if physid_mask_t is big */
#define physid_mask_of_physid(physid) \
...
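The conversion above replaces value-returning macros (which put a whole physid_mask_t on the stack) with pointer-based inlines. A standalone sketch of the same semantics, assuming a simplified four-word mask type rather than the real kernel headers:

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the kernel's physid_mask_t. */
typedef struct { unsigned long mask[4]; } physid_mask_t;

static unsigned long physids_coerce(physid_mask_t *map)
{
    return map->mask[0];    /* low word only, as in the kernel helper */
}

static void physids_promote(unsigned long physids, physid_mask_t *map)
{
    memset(map, 0, sizeof(*map));   /* physids_clear() equivalent */
    map->mask[0] = physids;
}

int main(void)
{
    physid_mask_t map;

    physids_promote(0xFFUL, &map);  /* APIC IDs 0..7 present */
    printf("low word: %#lx\n", physids_coerce(&map));   /* prints 0xff */
    return 0;
}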
@@ -25,12 +25,14 @@ struct uv_IO_APIC_route_entry {
dest : 32;
};
-extern struct irq_chip uv_irq_chip;
-
-extern int arch_enable_uv_irq(char *, unsigned int, int, int, unsigned long);
-extern void arch_disable_uv_irq(int, unsigned long);
-
-extern int uv_setup_irq(char *, int, int, unsigned long);
-extern void uv_teardown_irq(unsigned int, int, unsigned long);
+enum {
+UV_AFFINITY_ALL,
+UV_AFFINITY_NODE,
+UV_AFFINITY_CPU
+};
+
+extern int uv_irq_2_mmr_info(int, unsigned long *, int *);
+extern int uv_setup_irq(char *, int, int, unsigned long, int);
+extern void uv_teardown_irq(unsigned int);
#endif /* _ASM_X86_UV_UV_IRQ_H */
@@ -2,7 +2,7 @@
# Makefile for local APIC drivers and for the IO-APIC code
#
-obj-$(CONFIG_X86_LOCAL_APIC) += apic.o probe_$(BITS).o ipi.o nmi.o
+obj-$(CONFIG_X86_LOCAL_APIC) += apic.o apic_noop.o probe_$(BITS).o ipi.o nmi.o
obj-$(CONFIG_X86_IO_APIC) += io_apic.o
obj-$(CONFIG_SMP) += ipi.o
...
@@ -241,28 +241,13 @@ static int modern_apic(void)
}
/*
- * bare function to substitute write operation
- * and it's _that_ fast :)
- */
-static void native_apic_write_dummy(u32 reg, u32 v)
-{
-WARN_ON_ONCE((cpu_has_apic || !disable_apic));
-}
-
-static u32 native_apic_read_dummy(u32 reg)
-{
-WARN_ON_ONCE((cpu_has_apic && !disable_apic));
-return 0;
-}
-
-/*
- * right after this call apic->write/read doesn't do anything
- * note that there is no restore operation it works one way
+ * right after this call apic become NOOP driven
+ * so apic->write/read doesn't do anything
 */
void apic_disable(void)
{
-apic->read = native_apic_read_dummy;
-apic->write = native_apic_write_dummy;
+pr_info("APIC: switched to apic NOOP\n");
+apic = &apic_noop;
}
void native_apic_wait_icr_idle(void)
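Swapping in apic_noop replaces the earlier per-function dummies with a whole substitute ops table: one pointer assignment stubs out every operation at once. A small standalone sketch of that pattern (hypothetical struct ops, not the kernel's struct apic):

#include <stdio.h>

/* Minimal ops table, standing in for struct apic. */
struct ops {
    void (*write)(unsigned int reg, unsigned int val);
    unsigned int (*read)(unsigned int reg);
};

static void noop_write(unsigned int reg, unsigned int val) { /* ignore */ }
static unsigned int noop_read(unsigned int reg) { return 0; }

static struct ops ops_noop = { .write = noop_write, .read = noop_read };

/* Callers always go through this pointer, never a direct call. */
static struct ops *cur_ops = &ops_noop;

/* One assignment disables the whole subsystem; no per-method patching. */
static void subsystem_disable(void)
{
    printf("switched to NOOP driver\n");
    cur_ops = &ops_noop;
}

int main(void)
{
    subsystem_disable();
    cur_ops->write(0x20, 0);            /* silently ignored */
    return (int)cur_ops->read(0x20);    /* always 0 */
}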
@@ -459,7 +444,7 @@ static void lapic_timer_setup(enum clock_event_mode mode,
v = apic_read(APIC_LVTT);
v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
apic_write(APIC_LVTT, v);
-apic_write(APIC_TMICT, 0xffffffff);
+apic_write(APIC_TMICT, 0);
break;
case CLOCK_EVT_MODE_RESUME:
/* Nothing to do here */
@@ -1392,14 +1377,11 @@ void __init enable_IR_x2apic(void)
unsigned long flags;
struct IO_APIC_route_entry **ioapic_entries = NULL;
int ret, x2apic_enabled = 0;
-int dmar_table_init_ret = 0;
+int dmar_table_init_ret;
-#ifdef CONFIG_INTR_REMAP
dmar_table_init_ret = dmar_table_init();
-if (dmar_table_init_ret)
-pr_debug("dmar_table_init() failed with %d:\n",
-dmar_table_init_ret);
-#endif
+if (dmar_table_init_ret && !x2apic_supported())
+return;
ioapic_entries = alloc_ioapic_entries();
if (!ioapic_entries) {
...
/*
* NOOP APIC driver.
*
 * Does almost nothing and should be substituted by a real apic driver via
 * the probe routine.
 *
 * If the apic is disabled (for whatever reason), it still lets callers
 * invoke (some) apic routines, such as self-IPI, without uglifying their
 * code with special cases.
*/
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
#include <asm/setup.h>
#include <linux/smp.h>
#include <asm/ipi.h>
#include <linux/interrupt.h>
#include <asm/acpi.h>
#include <asm/e820.h>
static void noop_init_apic_ldr(void) { }
static void noop_send_IPI_mask(const struct cpumask *cpumask, int vector) { }
static void noop_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) { }
static void noop_send_IPI_allbutself(int vector) { }
static void noop_send_IPI_all(int vector) { }
static void noop_send_IPI_self(int vector) { }
static void noop_apic_wait_icr_idle(void) { }
static void noop_apic_icr_write(u32 low, u32 id) { }
static int noop_wakeup_secondary_cpu(int apicid, unsigned long start_eip)
{
return -1;
}
static u32 noop_safe_apic_wait_icr_idle(void)
{
return 0;
}
static u64 noop_apic_icr_read(void)
{
return 0;
}
static int noop_cpu_to_logical_apicid(int cpu)
{
return 0;
}
static int noop_phys_pkg_id(int cpuid_apic, int index_msb)
{
return 0;
}
static unsigned int noop_get_apic_id(unsigned long x)
{
return 0;
}
static int noop_probe(void)
{
/*
* NOOP apic should not ever be
* enabled via probe routine
*/
return 0;
}
static int noop_apic_id_registered(void)
{
/*
 * to be really "pedantic" we should pass
 * read_apic_id() here, but since the NOOP
 * driver assumes APIC ID = 0, let's save
 * a few cycles
*/
return physid_isset(0, phys_cpu_present_map);
}
static const struct cpumask *noop_target_cpus(void)
{
/* only BSP here */
return cpumask_of(0);
}
static unsigned long noop_check_apicid_used(physid_mask_t *map, int apicid)
{
return physid_isset(apicid, *map);
}
static unsigned long noop_check_apicid_present(int bit)
{
return physid_isset(bit, phys_cpu_present_map);
}
static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
if (cpu != 0)
pr_warning("APIC: Vector allocated for non-BSP cpu\n");
cpumask_clear(retmask);
cpumask_set_cpu(cpu, retmask);
}
int noop_apicid_to_node(int logical_apicid)
{
/* we're always on node 0 */
return 0;
}
static u32 noop_apic_read(u32 reg)
{
WARN_ON_ONCE((cpu_has_apic && !disable_apic));
return 0;
}
static void noop_apic_write(u32 reg, u32 v)
{
WARN_ON_ONCE((cpu_has_apic || !disable_apic));
}
struct apic apic_noop = {
.name = "noop",
.probe = noop_probe,
.acpi_madt_oem_check = NULL,
.apic_id_registered = noop_apic_id_registered,
.irq_delivery_mode = dest_LowestPrio,
/* logical delivery broadcast to all CPUs: */
.irq_dest_mode = 1,
.target_cpus = noop_target_cpus,
.disable_esr = 0,
.dest_logical = APIC_DEST_LOGICAL,
.check_apicid_used = noop_check_apicid_used,
.check_apicid_present = noop_check_apicid_present,
.vector_allocation_domain = noop_vector_allocation_domain,
.init_apic_ldr = noop_init_apic_ldr,
.ioapic_phys_id_map = default_ioapic_phys_id_map,
.setup_apic_routing = NULL,
.multi_timer_check = NULL,
.apicid_to_node = noop_apicid_to_node,
.cpu_to_logical_apicid = noop_cpu_to_logical_apicid,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
.apicid_to_cpu_present = physid_set_mask_of_physid,
.setup_portio_remap = NULL,
.check_phys_apicid_present = default_check_phys_apicid_present,
.enable_apic_mode = NULL,
.phys_pkg_id = noop_phys_pkg_id,
.mps_oem_check = NULL,
.get_apic_id = noop_get_apic_id,
.set_apic_id = NULL,
.apic_id_mask = 0x0F << 24,
.cpu_mask_to_apicid = default_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
.send_IPI_mask = noop_send_IPI_mask,
.send_IPI_mask_allbutself = noop_send_IPI_mask_allbutself,
.send_IPI_allbutself = noop_send_IPI_allbutself,
.send_IPI_all = noop_send_IPI_all,
.send_IPI_self = noop_send_IPI_self,
.wakeup_secondary_cpu = noop_wakeup_secondary_cpu,
/* should be safe */
.trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
.wait_for_init_deassert = NULL,
.smp_callin_clear_local_apic = NULL,
.inquire_remote_apic = NULL,
.read = noop_apic_read,
.write = noop_apic_write,
.icr_read = noop_apic_icr_read,
.icr_write = noop_apic_icr_write,
.wait_icr_idle = noop_apic_wait_icr_idle,
.safe_wait_icr_idle = noop_safe_apic_wait_icr_idle,
};
@@ -35,7 +35,7 @@ static const struct cpumask *bigsmp_target_cpus(void)
#endif
}
-static unsigned long bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid)
+static unsigned long bigsmp_check_apicid_used(physid_mask_t *map, int apicid)
{
return 0;
}
@@ -93,11 +93,6 @@ static int bigsmp_cpu_present_to_apicid(int mps_cpu)
return BAD_APICID;
}
-static physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid)
-{
-return physid_mask_of_physid(phys_apicid);
-}
/* Mapping from cpu number to logical apicid */
static inline int bigsmp_cpu_to_logical_apicid(int cpu)
{
@@ -106,10 +101,10 @@ static inline int bigsmp_cpu_to_logical_apicid(int cpu)
return cpu_physical_id(cpu);
}
-static physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map)
+static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
/* For clustered we don't have a good way to do this yet - hack */
-return physids_promote(0xFFL);
+physids_promote(0xFFL, retmap);
}
static int bigsmp_check_phys_apicid_present(int phys_apicid)
@@ -230,7 +225,7 @@ struct apic apic_bigsmp = {
.apicid_to_node = bigsmp_apicid_to_node,
.cpu_to_logical_apicid = bigsmp_cpu_to_logical_apicid,
.cpu_present_to_apicid = bigsmp_cpu_present_to_apicid,
-.apicid_to_cpu_present = bigsmp_apicid_to_cpu_present,
+.apicid_to_cpu_present = physid_set_mask_of_physid,
.setup_portio_remap = NULL,
.check_phys_apicid_present = bigsmp_check_phys_apicid_present,
.enable_apic_mode = NULL,
...
@@ -466,11 +466,11 @@ static const struct cpumask *es7000_target_cpus(void)
return cpumask_of(smp_processor_id());
}
-static unsigned long
-es7000_check_apicid_used(physid_mask_t bitmap, int apicid)
+static unsigned long es7000_check_apicid_used(physid_mask_t *map, int apicid)
{
return 0;
}
static unsigned long es7000_check_apicid_present(int bit)
{
return physid_isset(bit, phys_cpu_present_map);
@@ -539,14 +539,10 @@ static int es7000_cpu_present_to_apicid(int mps_cpu)
static int cpu_id;
-static physid_mask_t es7000_apicid_to_cpu_present(int phys_apicid)
+static void es7000_apicid_to_cpu_present(int phys_apicid, physid_mask_t *retmap)
{
-physid_mask_t mask;
-
-mask = physid_mask_of_physid(cpu_id);
+physid_set_mask_of_physid(cpu_id, retmap);
++cpu_id;
-
-return mask;
}
/* Mapping from cpu number to logical apicid */
@@ -561,10 +557,10 @@ static int es7000_cpu_to_logical_apicid(int cpu)
#endif
}
-static physid_mask_t es7000_ioapic_phys_id_map(physid_mask_t phys_map)
+static void es7000_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
/* For clustered we don't have a good way to do this yet - hack */
-return physids_promote(0xff);
+physids_promote(0xFFL, retmap);
}
static int es7000_check_phys_apicid_present(int cpu_physical_apicid)
...
@@ -334,10 +334,9 @@ static inline const struct cpumask *numaq_target_cpus(void)
return cpu_all_mask;
}
-static inline unsigned long
-numaq_check_apicid_used(physid_mask_t bitmap, int apicid)
+static unsigned long numaq_check_apicid_used(physid_mask_t *map, int apicid)
{
-return physid_isset(apicid, bitmap);
+return physid_isset(apicid, *map);
}
static inline unsigned long numaq_check_apicid_present(int bit)
@@ -371,10 +370,10 @@ static inline int numaq_multi_timer_check(int apic, int irq)
return apic != 0 && irq == 0;
}
-static inline physid_mask_t numaq_ioapic_phys_id_map(physid_mask_t phys_map)
+static inline void numaq_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
/* We don't have a good way to do this yet - hack */
-return physids_promote(0xFUL);
+return physids_promote(0xFUL, retmap);
}
static inline int numaq_cpu_to_logical_apicid(int cpu)
@@ -402,12 +401,12 @@ static inline int numaq_apicid_to_node(int logical_apicid)
return logical_apicid >> 4;
}
-static inline physid_mask_t numaq_apicid_to_cpu_present(int logical_apicid)
+static void numaq_apicid_to_cpu_present(int logical_apicid, physid_mask_t *retmap)
{
int node = numaq_apicid_to_node(logical_apicid);
int cpu = __ffs(logical_apicid & 0xf);
-return physid_mask_of_physid(cpu + 4*node);
+physid_set_mask_of_physid(cpu + 4*node, retmap);
}
/* Where the IO area was mapped on multiquad, always 0 otherwise */
...
@@ -108,7 +108,7 @@ struct apic apic_default = {
.apicid_to_node = default_apicid_to_node,
.cpu_to_logical_apicid = default_cpu_to_logical_apicid,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
-.apicid_to_cpu_present = default_apicid_to_cpu_present,
+.apicid_to_cpu_present = physid_set_mask_of_physid,
.setup_portio_remap = NULL,
.check_phys_apicid_present = default_check_phys_apicid_present,
.enable_apic_mode = NULL,
...
@@ -183,7 +183,7 @@ static const struct cpumask *summit_target_cpus(void)
return cpumask_of(0);
}
-static unsigned long summit_check_apicid_used(physid_mask_t bitmap, int apicid)
+static unsigned long summit_check_apicid_used(physid_mask_t *map, int apicid)
{
return 0;
}
@@ -261,15 +261,15 @@ static int summit_cpu_present_to_apicid(int mps_cpu)
return BAD_APICID;
}
-static physid_mask_t summit_ioapic_phys_id_map(physid_mask_t phys_id_map)
+static void summit_ioapic_phys_id_map(physid_mask_t *phys_id_map, physid_mask_t *retmap)
{
/* For clustered we don't have a good way to do this yet - hack */
-return physids_promote(0x0F);
+physids_promote(0x0FL, retmap);
}
-static physid_mask_t summit_apicid_to_cpu_present(int apicid)
+static void summit_apicid_to_cpu_present(int apicid, physid_mask_t *retmap)
{
-return physid_mask_of_physid(0);
+physid_set_mask_of_physid(0, retmap);
}
static int summit_check_phys_apicid_present(int physical_apicid)
...
@@ -409,6 +409,12 @@ static __init void map_mmioh_high(int max_pnode)
map_high("MMIOH", mmioh.s.base, shift, max_pnode, map_uc);
}
+static __init void map_low_mmrs(void)
+{
+init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
+init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
+}
+
static __init void uv_rtc_init(void)
{
long status;
@@ -550,6 +556,8 @@ void __init uv_system_init(void)
unsigned long mmr_base, present, paddr;
unsigned short pnode_mask;
+map_low_mmrs();
+
m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
m_val = m_n_config.s.m_skt;
n_val = m_n_config.s.n_skt;
...
@@ -712,7 +712,7 @@ static void probe_nmi_watchdog(void)
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 &&
-boot_cpu_data.x86 != 16)
+boot_cpu_data.x86 != 16 && boot_cpu_data.x86 != 17)
return;
wd_ops = &k7_wd_ops;
break;
...
@@ -274,3 +274,93 @@ void smp_generic_interrupt(struct pt_regs *regs)
}
EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
unsigned int irq, vector;
static int warned;
struct irq_desc *desc;
for_each_irq_desc(irq, desc) {
int break_affinity = 0;
int set_affinity = 1;
const struct cpumask *affinity;
if (!desc)
continue;
if (irq == 2)
continue;
/* interrupts are disabled at this point */
spin_lock(&desc->lock);
affinity = desc->affinity;
if (!irq_has_action(irq) ||
cpumask_equal(affinity, cpu_online_mask)) {
spin_unlock(&desc->lock);
continue;
}
/*
* Complete the irq move. This cpu is going down and for
* non intr-remapping case, we can't wait till this interrupt
* arrives at this cpu before completing the irq move.
*/
irq_force_complete_move(irq);
if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
break_affinity = 1;
affinity = cpu_all_mask;
}
if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->mask)
desc->chip->mask(irq);
if (desc->chip->set_affinity)
desc->chip->set_affinity(irq, affinity);
else if (!(warned++))
set_affinity = 0;
if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask)
desc->chip->unmask(irq);
spin_unlock(&desc->lock);
if (break_affinity && set_affinity)
printk("Broke affinity for irq %i\n", irq);
else if (!set_affinity)
printk("Cannot set affinity for irq %i\n", irq);
}
/*
 * We can remove mdelay() and then send spurious interrupts to
* new cpu targets for all the irqs that were handled previously by
* this cpu. While it works, I have seen spurious interrupt messages
* (nothing wrong but still...).
*
* So for now, retain mdelay(1) and check the IRR and then send those
* interrupts to new targets as this cpu is already offlined...
*/
mdelay(1);
for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
unsigned int irr;
if (__get_cpu_var(vector_irq)[vector] < 0)
continue;
irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
if (irr & (1 << (vector % 32))) {
irq = __get_cpu_var(vector_irq)[vector];
desc = irq_to_desc(irq);
spin_lock(&desc->lock);
if (desc->chip->retrigger)
desc->chip->retrigger(irq);
spin_unlock(&desc->lock);
}
}
}
#endif
@@ -211,48 +211,3 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
return true;
}
#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
unsigned int irq;
struct irq_desc *desc;
for_each_irq_desc(irq, desc) {
const struct cpumask *affinity;
if (!desc)
continue;
if (irq == 2)
continue;
affinity = desc->affinity;
if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
printk("Breaking affinity for irq %i\n", irq);
affinity = cpu_all_mask;
}
if (desc->chip->set_affinity)
desc->chip->set_affinity(irq, affinity);
else if (desc->action)
printk_once("Cannot set affinity for irq %i\n", irq);
}
#if 0
barrier();
/* Ingo Molnar says: "after the IO-APIC masks have been redirected
[note the nop - the interrupt-enable boundary on x86 is two
instructions from sti] - to flush out pending hardirqs and
IPIs. After this point nothing is supposed to reach this CPU." */
__asm__ __volatile__("sti; nop; cli");
barrier();
#else
/* That doesn't seem sufficient. Give it 1ms. */
local_irq_enable();
mdelay(1);
local_irq_disable();
#endif
}
#endif
@@ -62,64 +62,6 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
return true;
}
#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
unsigned int irq;
static int warned;
struct irq_desc *desc;
for_each_irq_desc(irq, desc) {
int break_affinity = 0;
int set_affinity = 1;
const struct cpumask *affinity;
if (!desc)
continue;
if (irq == 2)
continue;
/* interrupts are disabled at this point */
spin_lock(&desc->lock);
affinity = desc->affinity;
if (!irq_has_action(irq) ||
cpumask_equal(affinity, cpu_online_mask)) {
spin_unlock(&desc->lock);
continue;
}
if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
break_affinity = 1;
affinity = cpu_all_mask;
}
if (desc->chip->mask)
desc->chip->mask(irq);
if (desc->chip->set_affinity)
desc->chip->set_affinity(irq, affinity);
else if (!(warned++))
set_affinity = 0;
if (desc->chip->unmask)
desc->chip->unmask(irq);
spin_unlock(&desc->lock);
if (break_affinity && set_affinity)
printk("Broke affinity for irq %i\n", irq);
else if (!set_affinity)
printk("Cannot set affinity for irq %i\n", irq);
}
/* That doesn't seem sufficient. Give it 1ms. */
local_irq_enable();
mdelay(1);
local_irq_disable();
}
#endif
extern void call_softirq(void);
...
@@ -1250,16 +1250,7 @@ static void __ref remove_cpu_from_maps(int cpu)
void cpu_disable_common(void)
{
int cpu = smp_processor_id();
-/*
- * HACK:
- * Allow any queued timer interrupts to get serviced
- * This is only a temporary solution until we cleanup
- * fixup_irqs as we do for IA64.
- */
-local_irq_enable();
-mdelay(1);
-local_irq_disable();
remove_siblinginfo(cpu);
/* It's now safe to remove this processor from the online map */
...
@@ -9,10 +9,25 @@
 */
#include <linux/module.h>
+#include <linux/rbtree.h>
#include <linux/irq.h>
#include <asm/apic.h>
#include <asm/uv/uv_irq.h>
+#include <asm/uv/uv_hub.h>
+
+/* MMR offset and pnode of hub sourcing interrupts for a given irq */
+struct uv_irq_2_mmr_pnode {
+struct rb_node list;
+unsigned long offset;
+int pnode;
+int irq;
+};
+
+static spinlock_t uv_irq_lock;
+static struct rb_root uv_irq_root;
+
+static int uv_set_irq_affinity(unsigned int, const struct cpumask *);
static void uv_noop(unsigned int irq)
{
@@ -39,25 +54,214 @@ struct irq_chip uv_irq_chip = {
.unmask = uv_noop,
.eoi = uv_ack_apic,
.end = uv_noop,
+.set_affinity = uv_set_irq_affinity,
};
/*
* Add offset and pnode information of the hub sourcing interrupts to the
* rb tree for a specific irq.
*/
static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade)
{
struct rb_node **link = &uv_irq_root.rb_node;
struct rb_node *parent = NULL;
struct uv_irq_2_mmr_pnode *n;
struct uv_irq_2_mmr_pnode *e;
unsigned long irqflags;
n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL,
uv_blade_to_memory_nid(blade));
if (!n)
return -ENOMEM;
n->irq = irq;
n->offset = offset;
n->pnode = uv_blade_to_pnode(blade);
spin_lock_irqsave(&uv_irq_lock, irqflags);
/* Find the right place in the rbtree: */
while (*link) {
parent = *link;
e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list);
if (unlikely(irq == e->irq)) {
/* irq entry exists */
e->pnode = uv_blade_to_pnode(blade);
e->offset = offset;
spin_unlock_irqrestore(&uv_irq_lock, irqflags);
kfree(n);
return 0;
}
if (irq < e->irq)
link = &(*link)->rb_left;
else
link = &(*link)->rb_right;
}
/* Insert the node into the rbtree. */
rb_link_node(&n->list, parent, link);
rb_insert_color(&n->list, &uv_irq_root);
spin_unlock_irqrestore(&uv_irq_lock, irqflags);
return 0;
}
/* Retrieve offset and pnode information from the rb tree for a specific irq */
int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
{
struct uv_irq_2_mmr_pnode *e;
struct rb_node *n;
unsigned long irqflags;
spin_lock_irqsave(&uv_irq_lock, irqflags);
n = uv_irq_root.rb_node;
while (n) {
e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
if (e->irq == irq) {
*offset = e->offset;
*pnode = e->pnode;
spin_unlock_irqrestore(&uv_irq_lock, irqflags);
return 0;
}
if (irq < e->irq)
n = n->rb_left;
else
n = n->rb_right;
}
spin_unlock_irqrestore(&uv_irq_lock, irqflags);
return -1;
}
/*
* Re-target the irq to the specified CPU and enable the specified MMR located
* on the specified blade to allow the sending of MSIs to the specified CPU.
*/
static int
arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
unsigned long mmr_offset, int restrict)
{
const struct cpumask *eligible_cpu = cpumask_of(cpu);
struct irq_desc *desc = irq_to_desc(irq);
struct irq_cfg *cfg;
int mmr_pnode;
unsigned long mmr_value;
struct uv_IO_APIC_route_entry *entry;
int err;
BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
sizeof(unsigned long));
cfg = irq_cfg(irq);
err = assign_irq_vector(irq, cfg, eligible_cpu);
if (err != 0)
return err;
if (restrict == UV_AFFINITY_CPU)
desc->status |= IRQ_NO_BALANCING;
else
desc->status |= IRQ_MOVE_PCNTXT;
set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
irq_name);
mmr_value = 0;
entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
entry->vector = cfg->vector;
entry->delivery_mode = apic->irq_delivery_mode;
entry->dest_mode = apic->irq_dest_mode;
entry->polarity = 0;
entry->trigger = 0;
entry->mask = 0;
entry->dest = apic->cpu_mask_to_apicid(eligible_cpu);
mmr_pnode = uv_blade_to_pnode(mmr_blade);
uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
if (cfg->move_in_progress)
send_cleanup_vector(cfg);
return irq;
}
/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * no longer allowed to be sent.
*/
static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
{
unsigned long mmr_value;
struct uv_IO_APIC_route_entry *entry;
BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
sizeof(unsigned long));
mmr_value = 0;
entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
entry->mask = 1;
uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
}
static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask)
{
struct irq_desc *desc = irq_to_desc(irq);
struct irq_cfg *cfg = desc->chip_data;
unsigned int dest;
unsigned long mmr_value;
struct uv_IO_APIC_route_entry *entry;
unsigned long mmr_offset;
unsigned mmr_pnode;
dest = set_desc_affinity(desc, mask);
if (dest == BAD_APICID)
return -1;
mmr_value = 0;
entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
entry->vector = cfg->vector;
entry->delivery_mode = apic->irq_delivery_mode;
entry->dest_mode = apic->irq_dest_mode;
entry->polarity = 0;
entry->trigger = 0;
entry->mask = 0;
entry->dest = dest;
/* Get previously stored MMR and pnode of hub sourcing interrupts */
if (uv_irq_2_mmr_info(irq, &mmr_offset, &mmr_pnode))
return -1;
uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
if (cfg->move_in_progress)
send_cleanup_vector(cfg);
return 0;
}
/*
 * Set up a mapping of an available irq and vector, and enable the specified
 * MMR that defines the MSI that is to be sent to the specified CPU when an
 * interrupt is raised.
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
-unsigned long mmr_offset)
+unsigned long mmr_offset, int restrict)
{
-int irq;
-int ret;
+int irq, ret;
-irq = create_irq();
+irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade));
if (irq <= 0)
return -EBUSY;
-ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset);
-if (ret != irq)
+ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
+restrict);
+if (ret == irq)
+uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
+else
destroy_irq(irq);
return ret;
@@ -71,9 +275,28 @@ EXPORT_SYMBOL_GPL(uv_setup_irq);
 *
 * Set mmr_blade and mmr_offset to what was passed in on uv_setup_irq().
 */
-void uv_teardown_irq(unsigned int irq, int mmr_blade, unsigned long mmr_offset)
+void uv_teardown_irq(unsigned int irq)
{
-arch_disable_uv_irq(mmr_blade, mmr_offset);
+struct uv_irq_2_mmr_pnode *e;
+struct rb_node *n;
+unsigned long irqflags;
+
+spin_lock_irqsave(&uv_irq_lock, irqflags);
+n = uv_irq_root.rb_node;
+while (n) {
+e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
+if (e->irq == irq) {
+arch_disable_uv_irq(e->pnode, e->offset);
+rb_erase(n, &uv_irq_root);
+kfree(e);
+break;
+}
+if (irq < e->irq)
+n = n->rb_left;
+else
+n = n->rb_right;
+}
+spin_unlock_irqrestore(&uv_irq_lock, irqflags);
destroy_irq(irq);
}
EXPORT_SYMBOL_GPL(uv_teardown_irq);
@@ -183,7 +183,7 @@ static void __init MP_processor_info(struct mpc_cpu *m)
return;
}
-apic_cpus = apic->apicid_to_cpu_present(m->apicid);
+apic->apicid_to_cpu_present(m->apicid, &apic_cpus);
physids_or(phys_cpu_present_map, phys_cpu_present_map, apic_cpus);
/*
 * Validate version
...
@@ -136,7 +136,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
apicid_to_node[apic_id] = node;
node_set(node, cpu_nodes_parsed);
acpi_numa = 1;
-printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
+printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u\n",
pxm, apic_id, node);
}
@@ -170,7 +170,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
apicid_to_node[apic_id] = node;
node_set(node, cpu_nodes_parsed);
acpi_numa = 1;
-printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
+printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u\n",
pxm, apic_id, node);
}
...
@@ -106,7 +106,8 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
#if defined CONFIG_X86_64
-mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset);
+mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
+UV_AFFINITY_CPU);
if (mq->irq < 0) {
dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
-mq->irq);
@@ -136,7 +137,7 @@ static void
xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
{
#if defined CONFIG_X86_64
-uv_teardown_irq(mq->irq, mq->mmr_blade, mq->mmr_offset);
+uv_teardown_irq(mq->irq);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
int mmr_pnode;
...