Commit e8c48efd authored by Yinghai Lu, committed by Ingo Molnar

x86: mach_summit to summit

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent c7e7964c
@@ -30,7 +30,7 @@
 #include <linux/init.h>
 #include <asm/io.h>
 #include <asm/bios_ebda.h>
-#include <asm/mach-summit/mach_mpparse.h>
+#include <asm/summit/mpparse.h>
 static struct rio_table_hdr *rio_table_hdr __initdata;
 static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata;
...
@@ -11,11 +11,11 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/init.h>
-#include <asm/mach-summit/mach_apicdef.h>
+#include <asm/summit/apicdef.h>
 #include <linux/smp.h>
-#include <asm/mach-summit/mach_apic.h>
-#include <asm/mach-summit/mach_ipi.h>
-#include <asm/mach-summit/mach_mpparse.h>
+#include <asm/summit/apic.h>
+#include <asm/summit/ipi.h>
+#include <asm/summit/mpparse.h>
 static int probe_summit(void)
 {
...
-#ifndef ASM_X86__MACH_SUMMIT__MACH_APICDEF_H
-#define ASM_X86__MACH_SUMMIT__MACH_APICDEF_H
-#define APIC_ID_MASK (0xFF<<24)
-static inline unsigned get_apic_id(unsigned long x)
-{
-	return (((x)>>24)&0xFF);
-}
-#define GET_APIC_ID(x) get_apic_id(x)
-#endif /* ASM_X86__MACH_SUMMIT__MACH_APICDEF_H */
-#ifndef ASM_X86__MACH_SUMMIT__MACH_APIC_H
-#define ASM_X86__MACH_SUMMIT__MACH_APIC_H
+#ifndef __ASM_SUMMIT_APIC_H
+#define __ASM_SUMMIT_APIC_H
 #include <asm/smp.h>
@@ -21,7 +21,7 @@ static inline cpumask_t target_cpus(void)
 	 * Just start on cpu 0. IRQ balancing will spread load
 	 */
 	return cpumask_of_cpu(0);
 }
 #define TARGET_CPUS (target_cpus())
 #define INT_DELIVERY_MODE (dest_LowestPrio)
@@ -30,10 +30,10 @@ static inline cpumask_t target_cpus(void)
 static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
 {
 	return 0;
 }
 /* we don't use the phys_cpu_present_map to indicate apicid presence */
 static inline unsigned long check_apicid_present(int bit)
 {
 	return 1;
 }
@@ -122,7 +122,7 @@ static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map)
 static inline physid_mask_t apicid_to_cpu_present(int apicid)
 {
-	return physid_mask_of_physid(apicid);
+	return physid_mask_of_physid(0);
 }
 static inline void setup_portio_remap(void)
@@ -143,22 +143,22 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;
 	num_bits_set = cpus_weight(cpumask);
 	/* Return id to all */
 	if (num_bits_set == NR_CPUS)
 		return (int) 0xFF;
 	/*
 	 * The cpus in the mask must all be on the apic cluster. If are not
 	 * on the same apicid cluster return default value of TARGET_CPUS.
 	 */
 	cpu = first_cpu(cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
 		if (cpu_isset(cpu, cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) !=
 					apicid_cluster(new_apicid)){
 				printk ("%s: Not a valid mask!\n",__FUNCTION__);
 				return 0xFF;
@@ -182,4 +182,4 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
 	return hard_smp_processor_id() >> index_msb;
 }
-#endif /* ASM_X86__MACH_SUMMIT__MACH_APIC_H */
+#endif /* __ASM_SUMMIT_APIC_H */
+#ifndef __ASM_SUMMIT_APICDEF_H
+#define __ASM_SUMMIT_APICDEF_H
+#define APIC_ID_MASK (0xFF<<24)
+static inline unsigned get_apic_id(unsigned long x)
+{
+	return (x>>24)&0xFF;
+}
+#define GET_APIC_ID(x) get_apic_id(x)
+#endif
-#ifndef ASM_X86__MACH_SUMMIT__MACH_IPI_H
-#define ASM_X86__MACH_SUMMIT__MACH_IPI_H
+#ifndef __ASM_SUMMIT_IPI_H
+#define __ASM_SUMMIT_IPI_H
 void send_IPI_mask_sequence(cpumask_t mask, int vector);
@@ -22,4 +22,4 @@ static inline void send_IPI_all(int vector)
 	send_IPI_mask(cpu_online_map, vector);
 }
-#endif /* ASM_X86__MACH_SUMMIT__MACH_IPI_H */
+#endif /* __ASM_SUMMIT_IPI_H */
-#ifndef ASM_X86__MACH_SUMMIT__IRQ_VECTORS_LIMITS_H
-#define ASM_X86__MACH_SUMMIT__IRQ_VECTORS_LIMITS_H
+#ifndef _ASM_IRQ_VECTORS_LIMITS_H
+#define _ASM_IRQ_VECTORS_LIMITS_H
 /*
  * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs,
@@ -11,4 +11,4 @@
 #define NR_IRQS 224
 #define NR_IRQ_VECTORS 1024
-#endif /* ASM_X86__MACH_SUMMIT__IRQ_VECTORS_LIMITS_H */
+#endif /* _ASM_IRQ_VECTORS_LIMITS_H */
-#ifndef ASM_X86__MACH_SUMMIT__MACH_MPPARSE_H
-#define ASM_X86__MACH_SUMMIT__MACH_MPPARSE_H
+#ifndef __ASM_SUMMIT_MPPARSE_H
+#define __ASM_SUMMIT_MPPARSE_H
-#include <mach_apic.h>
 #include <asm/tsc.h>
 extern int use_cyclone;
@@ -12,11 +11,11 @@ extern void setup_summit(void);
 #define setup_summit() {}
 #endif
 static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
 		char *productid)
 {
 	if (!strncmp(oem, "IBM ENSW", 8) &&
 			(!strncmp(productid, "VIGIL SMP", 9)
 			|| !strncmp(productid, "EXA", 3)
 			|| !strncmp(productid, "RUTHLESS SMP", 12))){
 		mark_tsc_unstable("Summit based system");
@@ -107,4 +106,4 @@ static inline int is_WPEG(struct rio_detail *rio){
 		rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
 }
-#endif /* ASM_X86__MACH_SUMMIT__MACH_MPPARSE_H */
+#endif /* __ASM_SUMMIT_MPPARSE_H */