Commit 5ad57078 authored by Paul Mackerras

powerpc: Merge smp.c and smp.h

This also moves setup_cpu_maps to setup-common.c (calling it
smp_setup_cpu_maps) and uses it on both 32-bit and 64-bit.
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent c3df69cd
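Not part of the patch, but the arrangement the merge relies on (see the asm/smp.h changes at the end of this diff): SMP kernels get the real smp_setup_cpu_maps() from setup-common.c, while UP kernels get a no-op macro, so both the 32-bit and 64-bit setup paths can call it unconditionally. A minimal sketch of that pattern:

/* Sketch only -- mirrors the declarations this patch adds to the merged smp.h. */
#ifdef CONFIG_SMP
void smp_setup_cpu_maps(void);   /* real implementation in setup-common.c */
#else
#define smp_setup_cpu_maps()     /* UP build: the call compiles away to nothing */
#endif

/* Callers (setup_arch on 32-bit, setup_system on 64-bit) can then simply do:
 *     finish_device_tree();
 *     smp_setup_cpu_maps();
 */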
@@ -41,6 +41,7 @@ obj-$(CONFIG_PPC_OF) += prom_init.o
obj-$(CONFIG_MODULES) += ppc_ksyms.o
obj-$(CONFIG_BOOTX_TEXT) += btext.o
obj-$(CONFIG_6xx) += idle_6xx.o
obj-$(CONFIG_SMP) += smp.o
ifeq ($(CONFIG_PPC_ISERIES),y)
$(obj)/head_64.o: $(obj)/lparmap.s
@@ -49,8 +50,9 @@ endif
else
# stuff used from here for ARCH=ppc or ARCH=ppc64
smpobj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PPC64) += traps.o process.o init_task.o time.o \
setup-common.o
setup-common.o $(smpobj-y)
endif
......
@@ -170,11 +170,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
}
#ifdef CONFIG_SMP
#ifdef CONFIG_PPC64 /* XXX for now */
pvr = per_cpu(pvr, cpu_id);
#else
pvr = cpu_data[cpu_id].pvr;
#endif
#else
pvr = mfspr(SPRN_PVR);
#endif
@@ -408,3 +404,118 @@ static int __init set_preferred_console(void)
}
console_initcall(set_preferred_console);
#endif /* CONFIG_PPC_MULTIPLATFORM */
#ifdef CONFIG_SMP
/**
* setup_cpu_maps - initialize the following cpu maps:
* cpu_possible_map
* cpu_present_map
* cpu_sibling_map
*
* Having the possible map set up early allows us to restrict allocations
* of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
*
* We do not initialize the online map here; cpus set their own bits in
* cpu_online_map as they come up.
*
* This function is valid only for Open Firmware systems. finish_device_tree
* must be called before using this.
*
* While we're here, we may as well set the "physical" cpu ids in the paca.
*/
void __init smp_setup_cpu_maps(void)
{
struct device_node *dn = NULL;
int cpu = 0;
int swap_cpuid = 0;
while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
int *intserv;
int j, len = sizeof(u32), nthreads = 1;
intserv = (int *)get_property(dn, "ibm,ppc-interrupt-server#s",
&len);
if (intserv)
nthreads = len / sizeof(int);
else {
intserv = (int *) get_property(dn, "reg", NULL);
if (!intserv)
intserv = &cpu; /* assume logical == phys */
}
for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
cpu_set(cpu, cpu_present_map);
set_hard_smp_processor_id(cpu, intserv[j]);
if (intserv[j] == boot_cpuid_phys)
swap_cpuid = cpu;
cpu_set(cpu, cpu_possible_map);
cpu++;
}
}
/* Swap CPU id 0 with boot_cpuid_phys, so we can always assume that
* boot cpu is logical 0.
*/
if (boot_cpuid_phys != get_hard_smp_processor_id(0)) {
u32 tmp;
tmp = get_hard_smp_processor_id(0);
set_hard_smp_processor_id(0, boot_cpuid_phys);
set_hard_smp_processor_id(swap_cpuid, tmp);
}
#ifdef CONFIG_PPC64
/*
* On pSeries LPAR, we need to know how many cpus
* could possibly be added to this partition.
*/
if (systemcfg->platform == PLATFORM_PSERIES_LPAR &&
(dn = of_find_node_by_path("/rtas"))) {
int num_addr_cell, num_size_cell, maxcpus;
unsigned int *ireg;
num_addr_cell = prom_n_addr_cells(dn);
num_size_cell = prom_n_size_cells(dn);
ireg = (unsigned int *)
get_property(dn, "ibm,lrdr-capacity", NULL);
if (!ireg)
goto out;
maxcpus = ireg[num_addr_cell + num_size_cell];
/* Double maxcpus for processors which have SMT capability */
if (cpu_has_feature(CPU_FTR_SMT))
maxcpus *= 2;
if (maxcpus > NR_CPUS) {
printk(KERN_WARNING
"Partition configured for %d cpus, "
"operating system maximum is %d.\n",
maxcpus, NR_CPUS);
maxcpus = NR_CPUS;
} else
printk(KERN_INFO "Partition configured for %d cpus.\n",
maxcpus);
for (cpu = 0; cpu < maxcpus; cpu++)
cpu_set(cpu, cpu_possible_map);
out:
of_node_put(dn);
}
/*
* Do the sibling map; assume only two threads per processor.
*/
for_each_cpu(cpu) {
cpu_set(cpu, cpu_sibling_map[cpu]);
if (cpu_has_feature(CPU_FTR_SMT))
cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
}
systemcfg->processorCount = num_present_cpus();
#endif /* CONFIG_PPC64 */
}
#endif /* CONFIG_SMP */
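As an illustration of the boot-cpu swap above (hypothetical numbers, not from the patch): if the device tree enumerates physical ids 0, 1, 2, 3 in order but the kernel was entered on physical cpu 2, the swap leaves logical 0 mapped to physical 2 and logical 2 mapped to physical 0, so the boot cpu is always logical 0. A standalone toy model of just that step:

/* Toy userspace model, not kernel code; hard_id[] stands in for the per-cpu
 * hw_cpu_id values set via set_hard_smp_processor_id(). */
#include <stdio.h>

int main(void)
{
        int hard_id[4] = { 0, 1, 2, 3 }; /* logical ids in device-tree order */
        int boot_cpuid_phys = 2;         /* assumed physical id of boot cpu  */
        int swap_cpuid = 2;              /* logical id that matched it above */

        if (boot_cpuid_phys != hard_id[0]) {
                int tmp = hard_id[0];
                hard_id[0] = boot_cpuid_phys;
                hard_id[swap_cpuid] = tmp;
        }
        printf("logical 0 -> phys %d, logical %d -> phys %d\n",
               hard_id[0], swap_cpuid, hard_id[swap_cpuid]);
        /* prints: logical 0 -> phys 2, logical 2 -> phys 0 */
        return 0;
}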
@@ -288,6 +288,8 @@ void __init setup_arch(char **cmdline_p)
unflatten_device_tree();
finish_device_tree();
smp_setup_cpu_maps();
#ifdef CONFIG_BOOTX_TEXT
init_boot_display();
#endif
......
@@ -181,114 +181,8 @@ static int __init early_smt_enabled(char *p)
}
early_param("smt-enabled", early_smt_enabled);
/**
* setup_cpu_maps - initialize the following cpu maps:
* cpu_possible_map
* cpu_present_map
* cpu_sibling_map
*
* Having the possible map set up early allows us to restrict allocations
* of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
*
* We do not initialize the online map here; cpus set their own bits in
* cpu_online_map as they come up.
*
* This function is valid only for Open Firmware systems. finish_device_tree
* must be called before using this.
*
* While we're here, we may as well set the "physical" cpu ids in the paca.
*/
static void __init setup_cpu_maps(void)
{
struct device_node *dn = NULL;
int cpu = 0;
int swap_cpuid = 0;
check_smt_enabled();
while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
u32 *intserv;
int j, len = sizeof(u32), nthreads;
intserv = (u32 *)get_property(dn, "ibm,ppc-interrupt-server#s",
&len);
if (!intserv)
intserv = (u32 *)get_property(dn, "reg", NULL);
nthreads = len / sizeof(u32);
for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
cpu_set(cpu, cpu_present_map);
set_hard_smp_processor_id(cpu, intserv[j]);
if (intserv[j] == boot_cpuid_phys)
swap_cpuid = cpu;
cpu_set(cpu, cpu_possible_map);
cpu++;
}
}
/* Swap CPU id 0 with boot_cpuid_phys, so we can always assume that
* boot cpu is logical 0.
*/
if (boot_cpuid_phys != get_hard_smp_processor_id(0)) {
u32 tmp;
tmp = get_hard_smp_processor_id(0);
set_hard_smp_processor_id(0, boot_cpuid_phys);
set_hard_smp_processor_id(swap_cpuid, tmp);
}
/*
* On pSeries LPAR, we need to know how many cpus
* could possibly be added to this partition.
*/
if (systemcfg->platform == PLATFORM_PSERIES_LPAR &&
(dn = of_find_node_by_path("/rtas"))) {
int num_addr_cell, num_size_cell, maxcpus;
unsigned int *ireg;
num_addr_cell = prom_n_addr_cells(dn);
num_size_cell = prom_n_size_cells(dn);
ireg = (unsigned int *)
get_property(dn, "ibm,lrdr-capacity", NULL);
if (!ireg)
goto out;
maxcpus = ireg[num_addr_cell + num_size_cell];
/* Double maxcpus for processors which have SMT capability */
if (cpu_has_feature(CPU_FTR_SMT))
maxcpus *= 2;
if (maxcpus > NR_CPUS) {
printk(KERN_WARNING
"Partition configured for %d cpus, "
"operating system maximum is %d.\n",
maxcpus, NR_CPUS);
maxcpus = NR_CPUS;
} else
printk(KERN_INFO "Partition configured for %d cpus.\n",
maxcpus);
for (cpu = 0; cpu < maxcpus; cpu++)
cpu_set(cpu, cpu_possible_map);
out:
of_node_put(dn);
}
/*
* Do the sibling map; assume only two threads per processor.
*/
for_each_cpu(cpu) {
cpu_set(cpu, cpu_sibling_map[cpu]);
if (cpu_has_feature(CPU_FTR_SMT))
cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
}
systemcfg->processorCount = num_present_cpus();
}
#else
#define check_smt_enabled()
#endif /* CONFIG_SMP */
extern struct machdep_calls pSeries_md;
@@ -417,6 +311,8 @@ void smp_release_cpus(void)
DBG(" <- smp_release_cpus()\n");
}
#else
#define smp_release_cpus()
#endif /* CONFIG_SMP || CONFIG_KEXEC */
/*
@@ -608,17 +504,13 @@ void __init setup_system(void)
parse_early_param();
#ifdef CONFIG_SMP
/*
* iSeries has already initialized the cpu maps at this point.
*/
setup_cpu_maps();
check_smt_enabled();
smp_setup_cpu_maps();
/* Release secondary cpus out of their spinloops at 0x60 now that
* we can map physical -> logical CPU ids
*/
smp_release_cpus();
#endif
printk("Starting Linux PPC64 %s\n", system_utsname.version);
......
@@ -39,13 +39,18 @@
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/xmon.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/abs_addr.h>
#include <asm/mpic.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
int smp_hw_index[NR_CPUS];
struct thread_info *secondary_ti;
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
@@ -60,6 +65,7 @@ cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
/* SMP operations for this machine */
struct smp_ops_t *smp_ops;
static volatile unsigned int cpu_callin_map[NR_CPUS];
@@ -89,7 +95,9 @@ void __devinit smp_mpic_setup_cpu(int cpu)
{
mpic_setup_this_cpu();
}
#endif /* CONFIG_MPIC */
#ifdef CONFIG_PPC64
void __devinit smp_generic_kick_cpu(int nr)
{
BUG_ON(nr < 0 || nr >= NR_CPUS);
@@ -102,8 +110,7 @@ void __devinit smp_generic_kick_cpu(int nr)
paca[nr].cpu_start = 1;
smp_mb();
}
#endif /* CONFIG_MPIC */
#endif
void smp_message_recv(int msg, struct pt_regs *regs)
{
@@ -111,15 +118,10 @@ void smp_message_recv(int msg, struct pt_regs *regs)
case PPC_MSG_CALL_FUNCTION:
smp_call_function_interrupt();
break;
case PPC_MSG_RESCHEDULE:
case PPC_MSG_RESCHEDULE:
/* XXX Do we have to do this? */
set_need_resched();
break;
#if 0
case PPC_MSG_MIGRATE_TASK:
/* spare */
break;
#endif
#ifdef CONFIG_DEBUGGER
case PPC_MSG_DEBUGGER_BREAK:
debugger_ipi(regs);
@@ -171,8 +173,8 @@ static struct call_data_struct {
int wait;
} *call_data;
/* delay of at least 8 seconds on 1GHz cpu */
#define SMP_CALL_TIMEOUT (1UL << (30 + 3))
/* delay of at least 8 seconds */
#define SMP_CALL_TIMEOUT 8
/*
* This function sends a 'generic call function' IPI to all other CPUs
@@ -194,7 +196,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
{
struct call_data_struct data;
int ret = -1, cpus;
unsigned long timeout;
u64 timeout;
/* Can deadlock when called with interrupts disabled */
WARN_ON(irqs_disabled());
@@ -220,11 +222,12 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
/* Send a message to all other CPUs and wait for them to respond */
smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION);
timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;
/* Wait for response */
timeout = SMP_CALL_TIMEOUT;
while (atomic_read(&data.started) != cpus) {
HMT_low();
if (--timeout == 0) {
if (get_tb() >= timeout) {
printk("smp_call_function on cpu %d: other cpus not "
"responding (%d)\n", smp_processor_id(),
atomic_read(&data.started));
@@ -234,10 +237,9 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
}
if (wait) {
timeout = SMP_CALL_TIMEOUT;
while (atomic_read(&data.finished) != cpus) {
HMT_low();
if (--timeout == 0) {
if (get_tb() >= timeout) {
printk("smp_call_function on cpu %d: other "
"cpus not finishing (%d/%d)\n",
smp_processor_id(),
@@ -251,7 +253,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
ret = 0;
out:
out:
call_data = NULL;
HMT_medium();
spin_unlock(&call_lock);
@@ -313,8 +315,11 @@ static void __init smp_create_idle(unsigned int cpu)
p = fork_idle(cpu);
if (IS_ERR(p))
panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
#ifdef CONFIG_PPC64
paca[cpu].__current = p;
#endif
current_set[cpu] = p->thread_info;
p->thread_info->cpu = cpu;
}
void __init smp_prepare_cpus(unsigned int max_cpus)
@@ -333,18 +338,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
smp_store_cpu_info(boot_cpuid);
cpu_callin_map[boot_cpuid] = 1;
#ifndef CONFIG_PPC_ISERIES
paca[boot_cpuid].next_jiffy_update_tb = tb_last_stamp = get_tb();
/*
* Should update do_gtod.stamp_xsec.
* For now we leave it which means the time can be some
* number of msecs off until someone does a settimeofday()
*/
do_gtod.varp->tb_orig_stamp = tb_last_stamp;
systemcfg->tb_orig_stamp = tb_last_stamp;
#endif
max_cpus = smp_ops->probe();
smp_space_timers(max_cpus);
@@ -359,8 +352,9 @@ void __devinit smp_prepare_boot_cpu(void)
BUG_ON(smp_processor_id() != boot_cpuid);
cpu_set(boot_cpuid, cpu_online_map);
#ifdef CONFIG_PPC64
paca[boot_cpuid].__current = current;
#endif
current_set[boot_cpuid] = current->thread_info;
}
@@ -444,13 +438,16 @@ int __devinit __cpu_up(unsigned int cpu)
{
int c;
secondary_ti = current_set[cpu];
if (!cpu_enable(cpu))
return 0;
if (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))
return -EINVAL;
#ifdef CONFIG_PPC64
paca[cpu].default_decr = tb_ticks_per_jiffy;
#endif
/* Make sure callin-map entry is 0 (can be leftover a CPU
* hotplug
@@ -513,7 +510,7 @@ int __devinit start_secondary(void *unused)
current->active_mm = &init_mm;
smp_store_cpu_info(cpu);
set_dec(paca[cpu].default_decr);
set_dec(tb_ticks_per_jiffy);
cpu_callin_map[cpu] = 1;
smp_ops->setup_cpu(cpu);
......
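The SMP_CALL_TIMEOUT change above replaces a raw loop count (1UL << 33 decrements, documented as roughly 8 seconds only on a 1GHz cpu) with a value in seconds converted to an absolute timebase deadline, get_tb() + SMP_CALL_TIMEOUT * tb_ticks_per_sec, so the wait is 8 real seconds regardless of clock rate or how long each loop pass takes. A userspace analogue of the new pattern (CLOCK_MONOTONIC standing in for the timebase; sketch, not kernel code):

/* Userspace sketch: now_ns() plays the role of get_tb(); the 8-second budget
 * is fixed in wall-clock time rather than in loop iterations. */
#include <stdio.h>
#include <time.h>

static unsigned long long now_ns(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int main(void)
{
        const unsigned long long deadline = now_ns() + 8ULL * 1000000000ULL;
        volatile int responded = 0;  /* stand-in for atomic_read(&data.started) */

        while (!responded) {
                if (now_ns() >= deadline) {
                        printf("peers did not respond within 8 seconds\n");
                        break;
                }
        }
        return 0;
}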
@@ -35,43 +35,6 @@
#include <asm/smp.h>
#include <asm/mpic.h>
extern unsigned long smp_chrp_cpu_nr;
static int __init smp_chrp_probe(void)
{
struct device_node *cpus = NULL;
unsigned int *reg;
int reglen;
int ncpus = 0;
int cpuid;
unsigned int phys;
/* Count CPUs in the device-tree */
cpuid = 1; /* the boot cpu is logical cpu 0 */
while ((cpus = of_find_node_by_type(cpus, "cpu")) != NULL) {
phys = ncpus;
reg = (unsigned int *) get_property(cpus, "reg", &reglen);
if (reg && reglen >= sizeof(unsigned int))
/* hmmm, not having a reg property would be bad */
phys = *reg;
if (phys != boot_cpuid_phys) {
set_hard_smp_processor_id(cpuid, phys);
++cpuid;
}
++ncpus;
}
printk(KERN_INFO "CHRP SMP probe found %d cpus\n", ncpus);
/* Nothing more to do if less than 2 of them */
if (ncpus <= 1)
return 1;
mpic_request_ipis();
return ncpus;
}
static void __devinit smp_chrp_kick_cpu(int nr)
{
*(unsigned long *)KERNELBASE = nr;
@@ -114,7 +77,7 @@ void __devinit smp_chrp_take_timebase(void)
/* CHRP with openpic */
struct smp_ops_t chrp_smp_ops = {
.message_pass = smp_mpic_message_pass,
.probe = smp_chrp_probe,
.probe = smp_mpic_probe,
.kick_cpu = smp_chrp_kick_cpu,
.setup_cpu = smp_chrp_setup_cpu,
.give_timebase = smp_chrp_give_timebase,
......
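The hand-rolled smp_chrp_probe() removed above duplicated work that smp_setup_cpu_maps() now does for every platform (counting cpu nodes and recording their physical ids), so chrp_smp_ops can point at the generic smp_mpic_probe() instead. A sketch of what that generic probe amounts to, assuming the possible map has already been populated (not taken from this diff):

/* Sketch of the generic MPIC probe; assumes smp_setup_cpu_maps() already
 * filled cpu_possible_map from the device tree. */
int __init smp_mpic_probe(void)
{
        int nr_cpus;

        nr_cpus = cpus_weight(cpu_possible_map);

        if (nr_cpus > 1)
                mpic_request_ipis();   /* only needed when there is someone to IPI */

        return nr_cpus;
}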
@@ -45,7 +45,6 @@ obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-mapping.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_KGDB) += ppc-stub.o
obj-$(CONFIG_SMP) += smp.o smp-tbsync.o
obj-$(CONFIG_TAU) += temp.o
ifndef CONFIG_E200
obj-$(CONFIG_FSL_BOOKE) += perfmon_fsl_booke.o
......
@@ -126,7 +126,7 @@ skip:
seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
}
#endif
#ifdef CONFIG_SMP
#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
/* should this be per processor send/receive? */
seq_printf(p, "IPI (recv/sent): %10u/%u\n",
atomic_read(&ipi_recv), atomic_read(&ipi_sent));
......
@@ -33,7 +33,6 @@ obj-$(CONFIG_PPC_PSERIES) += udbg_16550.o
obj-$(CONFIG_KEXEC) += machine_kexec.o
obj-$(CONFIG_EEH) += eeh.o
obj-$(CONFIG_PROC_FS) += proc_ppc64.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_MODULES) += module.o
ifneq ($(CONFIG_PPC_MERGE),y)
obj-$(CONFIG_MODULES) += ppc_ksyms.o
......
/*
* smp.h: PPC64 specific SMP code.
* smp.h: PowerPC-specific SMP code.
*
* Original was a copy of sparc smp.h. Now heavily modified
* for PPC.
@@ -13,9 +13,9 @@
* 2 of the License, or (at your option) any later version.
*/
#ifndef _ASM_POWERPC_SMP_H
#define _ASM_POWERPC_SMP_H
#ifdef __KERNEL__
#ifndef _PPC64_SMP_H
#define _PPC64_SMP_H
#include <linux/config.h>
#include <linux/threads.h>
@@ -24,7 +24,9 @@
#ifndef __ASSEMBLY__
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
extern int boot_cpuid;
extern int boot_cpuid_phys;
@@ -45,8 +47,19 @@ void generic_cpu_die(unsigned int cpu);
void generic_mach_cpu_die(void);
#endif
#ifdef CONFIG_PPC64
#define raw_smp_processor_id() (get_paca()->paca_index)
#define hard_smp_processor_id() (get_paca()->hw_cpu_id)
#else
/* 32-bit */
extern int smp_hw_index[];
#define raw_smp_processor_id() (current_thread_info()->cpu)
#define hard_smp_processor_id() (smp_hw_index[smp_processor_id()])
#define get_hard_smp_processor_id(cpu) (smp_hw_index[(cpu)])
#define set_hard_smp_processor_id(cpu, phys)\
(smp_hw_index[(cpu)] = (phys))
#endif
extern cpumask_t cpu_sibling_map[NR_CPUS];
@@ -65,21 +78,35 @@ extern cpumask_t cpu_sibling_map[NR_CPUS];
void smp_init_iSeries(void);
void smp_init_pSeries(void);
void smp_init_cell(void);
void smp_setup_cpu_maps(void);
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
#else
/* for UP */
#define smp_setup_cpu_maps()
#define smp_release_cpus()
#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC64
#define get_hard_smp_processor_id(CPU) (paca[(CPU)].hw_cpu_id)
#define set_hard_smp_processor_id(CPU, VAL) \
do { (paca[(CPU)].hw_cpu_id = (VAL)); } while (0)
#else
/* 32-bit */
#ifndef CONFIG_SMP
#define get_hard_smp_processor_id(cpu) boot_cpuid_phys
#define set_hard_smp_processor_id(cpu, phys)
#endif
#endif
extern int smt_enabled_at_boot;
extern int smp_mpic_probe(void);
extern void smp_mpic_setup_cpu(int cpu);
extern void smp_generic_kick_cpu(int nr);
extern void smp_release_cpus(void);
extern void smp_generic_give_timebase(void);
extern void smp_generic_take_timebase(void);
@@ -88,5 +115,5 @@ extern struct smp_ops_t *smp_ops;
#endif /* __ASSEMBLY__ */
#endif /* !(_PPC64_SMP_H) */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SMP_H) */
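With the merged header, code shared between the two word sizes can use a single accessor for the logical-to-physical cpu id mapping. A hypothetical caller (not from the patch) showing how the macro resolves per configuration:

/* Hypothetical example; report_boot_cpu_mapping() is not a kernel function. */
#include <linux/kernel.h>
#include <asm/smp.h>

static void report_boot_cpu_mapping(void)
{
        /* get_hard_smp_processor_id(0) resolves to paca[0].hw_cpu_id on 64-bit,
         * smp_hw_index[0] on 32-bit SMP, and boot_cpuid_phys on 32-bit UP. */
        printk(KERN_INFO "logical cpu 0 is physical cpu %d\n",
               get_hard_smp_processor_id(0));
}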