Commit b142eb3a authored by Paul Mackerras's avatar Paul Mackerras

Merge branch 'for-2.6.22' of...

Merge branch 'for-2.6.22' of master.kernel.org:/pub/scm/linux/kernel/git/arnd/cell-2.6 into for-2.6.22
parents 13177c8b c6d34481
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.21-rc3
-# Fri Mar 9 23:34:53 2007
+# Linux kernel version: 2.6.21-rc6
+# Mon Apr 23 20:46:48 2007
#
CONFIG_PPC64=y
CONFIG_64BIT=y
@@ -139,11 +139,31 @@ CONFIG_PPC_MULTIPLATFORM=y
# CONFIG_PPC_PMAC is not set
# CONFIG_PPC_MAPLE is not set
# CONFIG_PPC_PASEMI is not set
+CONFIG_PPC_CELLEB=y
+CONFIG_PPC_PS3=y
+
+#
+# PS3 Platform Options
+#
+# CONFIG_PS3_ADVANCED is not set
+CONFIG_PS3_HTAB_SIZE=20
+# CONFIG_PS3_DYNAMIC_DMA is not set
+CONFIG_PS3_USE_LPAR_ADDR=y
+CONFIG_PS3_VUART=y
+CONFIG_PS3_PS3AV=y
+CONFIG_PS3_SYS_MANAGER=y
CONFIG_PPC_CELL=y
CONFIG_PPC_CELL_NATIVE=y
CONFIG_PPC_IBM_CELL_BLADE=y
-CONFIG_PPC_PS3=y
-CONFIG_PPC_CELLEB=y
+
+#
+# Cell Broadband Engine options
+#
+CONFIG_SPU_FS=m
+CONFIG_SPU_BASE=y
+CONFIG_CBE_RAS=y
+CONFIG_CBE_THERM=m
+CONFIG_CBE_CPUFREQ=m
CONFIG_PPC_NATIVE=y
CONFIG_UDBG_RTAS_CONSOLE=y
CONFIG_PPC_UDBG_BEAT=y
@@ -174,26 +194,6 @@ CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
# CONFIG_WANT_EARLY_SERIAL is not set
CONFIG_MPIC=y
-
-#
-# Cell Broadband Engine options
-#
-CONFIG_SPU_FS=m
-CONFIG_SPU_BASE=y
-CONFIG_CBE_RAS=y
-CONFIG_CBE_THERM=m
-CONFIG_CBE_CPUFREQ=m
-
-#
-# PS3 Platform Options
-#
-# CONFIG_PS3_ADVANCED is not set
-CONFIG_PS3_HTAB_SIZE=20
-# CONFIG_PS3_DYNAMIC_DMA is not set
-CONFIG_PS3_USE_LPAR_ADDR=y
-CONFIG_PS3_VUART=y
-CONFIG_PS3_PS3AV=y
-CONFIG_PS3_SYS_MANAGER=y

#
# Kernel options
#
@@ -534,7 +534,6 @@ CONFIG_BLK_DEV_GENERIC=y
# CONFIG_BLK_DEV_OPTI621 is not set
CONFIG_BLK_DEV_IDEDMA_PCI=y
# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
-CONFIG_IDEDMA_PCI_AUTO=y
# CONFIG_IDEDMA_ONLYDISK is not set
CONFIG_BLK_DEV_AEC62XX=y
# CONFIG_BLK_DEV_ALI15X3 is not set
@@ -561,11 +560,10 @@ CONFIG_BLK_DEV_SIIMAGE=y
# CONFIG_BLK_DEV_TRM290 is not set
# CONFIG_BLK_DEV_VIA82CXXX is not set
# CONFIG_BLK_DEV_TC86C001 is not set
-CONFIG_BLK_DEV_IDE_CELLEB=y
+CONFIG_BLK_DEV_CELLEB=y
# CONFIG_IDE_ARM is not set
CONFIG_BLK_DEV_IDEDMA=y
# CONFIG_IDEDMA_IVB is not set
-CONFIG_IDEDMA_AUTO=y
# CONFIG_BLK_DEV_HD is not set

#
@@ -937,7 +935,7 @@ CONFIG_UNIX98_PTYS=y
# CONFIG_LEGACY_PTYS is not set
CONFIG_HVC_DRIVER=y
CONFIG_HVC_RTAS=y
-# CONFIG_HVC_BEAT is not set
+CONFIG_HVC_BEAT=y

#
# IPMI
@@ -1482,6 +1480,8 @@ CONFIG_NLS_ISO8859_15=m
# Distributed Lock Manager
#
# CONFIG_DLM is not set
+# CONFIG_UCC_SLOW is not set
+# CONFIG_UCC_FAST is not set

#
# Library routines
@@ -1540,6 +1540,7 @@ CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_FAULT_INJECTION is not set
# CONFIG_DEBUG_STACKOVERFLOW is not set
# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
CONFIG_DEBUGGER=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
......
@@ -37,6 +37,7 @@
#include <asm/system.h>
#include "../platforms/cell/interrupt.h"
+#include "../platforms/cell/cbe_regs.h"

#define PPU_CYCLES_EVENT_NUM 1 /* event number for CYCLES */
#define PPU_CYCLES_GRP_NUM   1 /* special group number for identifying
......
@@ -28,6 +28,8 @@
#include <asm/processor.h>
#include <asm/prom.h>
#include <asm/time.h>
+#include <asm/pmi.h>
+#include <asm/of_platform.h>

#include "cbe_regs.h"
@@ -68,6 +70,38 @@ static u64 MIC_Slow_Next_Timer_table[] = {
 * hardware specific functions
 */
+static struct of_device *pmi_dev;
+
+static int set_pmode_pmi(int cpu, unsigned int pmode)
+{
+    int ret;
+    pmi_message_t pmi_msg;
+#ifdef DEBUG
+    u64 time;
+#endif
+
+    pmi_msg.type = PMI_TYPE_FREQ_CHANGE;
+    pmi_msg.data1 = cbe_cpu_to_node(cpu);
+    pmi_msg.data2 = pmode;
+
+#ifdef DEBUG
+    time = (u64) get_cycles();
+#endif
+
+    pmi_send_message(pmi_dev, pmi_msg);
+    ret = pmi_msg.data2;
+
+    pr_debug("PMI returned slow mode %d\n", ret);
+
+#ifdef DEBUG
+    time = (u64) get_cycles() - time; /* actual cycles (not cpu cycles!) */
+    time = 1000000000 * time / CLOCK_TICK_RATE; /* time in ns (10^-9) */
+    pr_debug("had to wait %lu ns for a transition\n", time);
+#endif
+    return ret;
+}
static int get_pmode(int cpu)
{
    int ret;
@@ -79,7 +113,7 @@ static int get_pmode(int cpu)
    return ret;
}

-static int set_pmode(int cpu, unsigned int pmode)
+static int set_pmode_reg(int cpu, unsigned int pmode)
{
    struct cbe_pmd_regs __iomem *pmd_regs;
    struct cbe_mic_tm_regs __iomem *mic_tm_regs;
@@ -120,6 +154,39 @@ static int set_pmode(int cpu, unsigned int pmode)
    return 0;
}
+static int set_pmode(int cpu, unsigned int slow_mode) {
+    if(pmi_dev)
+        return set_pmode_pmi(cpu, slow_mode);
+    else
+        return set_pmode_reg(cpu, slow_mode);
+}
+
+static void cbe_cpufreq_handle_pmi(struct of_device *dev, pmi_message_t pmi_msg)
+{
+    struct cpufreq_policy policy;
+    u8 cpu;
+    u8 cbe_pmode_new;
+
+    BUG_ON (pmi_msg.type != PMI_TYPE_FREQ_CHANGE);
+
+    cpu = cbe_node_to_cpu(pmi_msg.data1);
+    cbe_pmode_new = pmi_msg.data2;
+
+    cpufreq_get_policy(&policy, cpu);
+
+    policy.max = min(policy.max, cbe_freqs[cbe_pmode_new].frequency);
+    policy.min = min(policy.min, policy.max);
+
+    pr_debug("cbe_handle_pmi: new policy.min=%d policy.max=%d\n", policy.min, policy.max);
+    cpufreq_set_policy(&policy);
+}
+
+static struct pmi_handler cbe_pmi_handler = {
+    .type               = PMI_TYPE_FREQ_CHANGE,
+    .handle_pmi_message = cbe_cpufreq_handle_pmi,
+};
/*
 * cpufreq functions
 */
@@ -234,11 +301,23 @@ static struct cpufreq_driver cbe_cpufreq_driver = {

static int __init cbe_cpufreq_init(void)
{
+    struct device_node *np;
+
+    np = of_find_node_by_type(NULL, "ibm,pmi");
+
+    pmi_dev = of_find_device_by_node(np);
+
+    if (pmi_dev)
+        pmi_register_handler(pmi_dev, &cbe_pmi_handler);
+
    return cpufreq_register_driver(&cbe_cpufreq_driver);
}

static void __exit cbe_cpufreq_exit(void)
{
+    if(pmi_dev)
+        pmi_unregister_handler(pmi_dev, &cbe_pmi_handler);
+
    cpufreq_unregister_driver(&cbe_cpufreq_driver);
}
......
@@ -14,6 +14,8 @@
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
+#include <asm/of_device.h>
+#include <asm/of_platform.h>

#include "cbe_regs.h"
@@ -27,6 +29,7 @@
static struct cbe_regs_map
{
    struct device_node *cpu_node;
+   struct device_node *be_node;
    struct cbe_pmd_regs __iomem *pmd_regs;
    struct cbe_iic_regs __iomem *iic_regs;
    struct cbe_mic_tm_regs __iomem *mic_tm_regs;
@@ -37,30 +40,43 @@ static int cbe_regs_map_count;
static struct cbe_thread_map
{
    struct device_node *cpu_node;
+   struct device_node *be_node;
    struct cbe_regs_map *regs;
+   unsigned int thread_id;
+   unsigned int cbe_id;
} cbe_thread_map[NR_CPUS];

+static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = CPU_MASK_NONE };
+static cpumask_t cbe_first_online_cpu = CPU_MASK_NONE;
+
static struct cbe_regs_map *cbe_find_map(struct device_node *np)
{
    int i;
    struct device_node *tmp_np;

-   if (strcasecmp(np->type, "spe") == 0) {
-       if (np->data == NULL) {
-           /* walk up path until cpu node was found */
-           tmp_np = np->parent;
-           while (tmp_np != NULL && strcasecmp(tmp_np->type, "cpu") != 0)
-               tmp_np = tmp_np->parent;
-
-           np->data = cbe_find_map(tmp_np);
-       }
-       return np->data;
-   }
-
-   for (i = 0; i < cbe_regs_map_count; i++)
-       if (cbe_regs_maps[i].cpu_node == np)
-           return &cbe_regs_maps[i];
-   return NULL;
+   if (strcasecmp(np->type, "spe")) {
+       for (i = 0; i < cbe_regs_map_count; i++)
+           if (cbe_regs_maps[i].cpu_node == np ||
+               cbe_regs_maps[i].be_node == np)
+               return &cbe_regs_maps[i];
+       return NULL;
+   }
+
+   if (np->data)
+       return np->data;
+
+   /* walk up path until cpu or be node was found */
+   tmp_np = np;
+   do {
+       tmp_np = tmp_np->parent;
+       /* on a correct devicetree we wont get up to root */
+       BUG_ON(!tmp_np);
+   } while (strcasecmp(tmp_np->type, "cpu") &&
+            strcasecmp(tmp_np->type, "be"));
+
+   np->data = cbe_find_map(tmp_np);
+
+   return np->data;
}
struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np)
@@ -130,49 +146,69 @@ struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu)
}
EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs);
-/* FIXME
- * This is little more than a stub at the moment. It should be
- * fleshed out so that it works for both SMT and non-SMT, no
- * matter if the passed cpu is odd or even.
- * For SMT enabled, returns 0 for even-numbered cpu; otherwise 1.
- * For SMT disabled, returns 0 for all cpus.
- */
u32 cbe_get_hw_thread_id(int cpu)
{
-   return (cpu & 1);
+   return cbe_thread_map[cpu].thread_id;
}
EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id);

-void __init cbe_regs_init(void)
+u32 cbe_cpu_to_node(int cpu)
{
-   int i;
-   struct device_node *cpu;
+   return cbe_thread_map[cpu].cbe_id;
+}
+EXPORT_SYMBOL_GPL(cbe_cpu_to_node);

-   /* Build local fast map of CPUs */
-   for_each_possible_cpu(i)
-       cbe_thread_map[i].cpu_node = of_get_cpu_node(i, NULL);
+u32 cbe_node_to_cpu(int node)
+{
+   return find_first_bit( (unsigned long *) &cbe_local_mask[node], sizeof(cpumask_t));
+}
+EXPORT_SYMBOL_GPL(cbe_node_to_cpu);

-   /* Find maps for each device tree CPU */
-   for_each_node_by_type(cpu, "cpu") {
-       struct cbe_regs_map *map = &cbe_regs_maps[cbe_regs_map_count++];
+static struct device_node *cbe_get_be_node(int cpu_id)
+{
+   struct device_node *np;
+
+   for_each_node_by_type (np, "be") {
+       int len,i;
+       const phandle *cpu_handle;
+
+       cpu_handle = of_get_property(np, "cpus", &len);
+
+       for (i=0; i<len; i++)
+           if (of_find_node_by_phandle(cpu_handle[i]) == of_get_cpu_node(cpu_id, NULL))
+               return np;
+   }
+
+   return NULL;
+}
+
+void __init cbe_fill_regs_map(struct cbe_regs_map *map)
+{
+   if(map->be_node) {
+       struct device_node *be, *np;
+
+       be = map->be_node;
+
+       for_each_node_by_type(np, "pervasive")
+           if (of_get_parent(np) == be)
+               map->pmd_regs = of_iomap(np, 0);
+
+       for_each_node_by_type(np, "CBEA-Internal-Interrupt-Controller")
+           if (of_get_parent(np) == be)
+               map->iic_regs = of_iomap(np, 2);
+
+       for_each_node_by_type(np, "mic-tm")
+           if (of_get_parent(np) == be)
+               map->mic_tm_regs = of_iomap(np, 0);
+   } else {
+       struct device_node *cpu;
        /* That hack must die die die ! */
        const struct address_prop {
            unsigned long address;
            unsigned int len;
        } __attribute__((packed)) *prop;

+       cpu = map->cpu_node;
-       if (cbe_regs_map_count > MAX_CBE) {
-           printk(KERN_ERR "cbe_regs: More BE chips than supported"
-                  "!\n");
-           cbe_regs_map_count--;
-           return;
-       }
-       map->cpu_node = cpu;
-       for_each_possible_cpu(i)
-           if (cbe_thread_map[i].cpu_node == cpu)
-               cbe_thread_map[i].regs = map;

        prop = of_get_property(cpu, "pervasive", NULL);
        if (prop != NULL)
@@ -188,3 +224,50 @@ void __init cbe_regs_init(void)
    }
}
+
+void __init cbe_regs_init(void)
+{
+   int i;
+   unsigned int thread_id;
+   struct device_node *cpu;
+
+   /* Build local fast map of CPUs */
+   for_each_possible_cpu(i) {
+       cbe_thread_map[i].cpu_node = of_get_cpu_node(i, &thread_id);
+       cbe_thread_map[i].be_node = cbe_get_be_node(i);
+       cbe_thread_map[i].thread_id = thread_id;
+   }
+
+   /* Find maps for each device tree CPU */
+   for_each_node_by_type(cpu, "cpu") {
+       struct cbe_regs_map *map;
+       unsigned int cbe_id;
+
+       cbe_id = cbe_regs_map_count++;
+       map = &cbe_regs_maps[cbe_id];
+
+       if (cbe_regs_map_count > MAX_CBE) {
+           printk(KERN_ERR "cbe_regs: More BE chips than supported"
+                  "!\n");
+           cbe_regs_map_count--;
+           return;
+       }
+       map->cpu_node = cpu;
+
+       for_each_possible_cpu(i) {
+           struct cbe_thread_map *thread = &cbe_thread_map[i];
+
+           if (thread->cpu_node == cpu) {
+               thread->regs = map;
+               thread->cbe_id = cbe_id;
+               map->be_node = thread->be_node;
+               cpu_set(i, cbe_local_mask[cbe_id]);
+               if(thread->thread_id == 0)
+                   cpu_set(i, cbe_first_online_cpu);
+           }
+       }
+
+       cbe_fill_regs_map(map);
+   }
+}
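
The three exports above replace the old "cpu >> 1 is the node, cpu & 1 is the thread" guesswork with lookups in cbe_thread_map. A minimal sketch of how they fit together, assuming cbe_regs_init() has already run (example_node_mapping exists only for this illustration):

static void example_node_mapping(int cpu)
{
    u32 node   = cbe_cpu_to_node(cpu);        /* which Cell BE chip owns this cpu */
    u32 first  = cbe_node_to_cpu(node);       /* first cpu set in that chip's local mask */
    u32 thread = cbe_get_hw_thread_id(cpu);   /* SMT thread id taken from the device tree */

    pr_debug("cpu %d: node %u, first cpu %u, hw thread %u\n",
             cpu, node, first, thread);
}

cbe_cpufreq.c above uses exactly this pair: cbe_cpu_to_node() when sending a PMI frequency-change message, cbe_node_to_cpu() when handling one.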
@@ -255,6 +255,11 @@ struct cbe_mic_tm_regs {
extern struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np);
extern struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu);

+/* some utility functions to deal with SMT */
+extern u32 cbe_get_hw_thread_id(int cpu);
+extern u32 cbe_cpu_to_node(int cpu);
+extern u32 cbe_node_to_cpu(int node);
+
/* Init this module early */
extern void cbe_regs_init(void);
......
/*
 * thermal support for the cell processor
 *
+ * This module adds some sysfs attributes to cpu and spu nodes.
+ * Base for measurements are the digital thermal sensors (DTS)
+ * located on the chip.
+ * The accuracy is 2 degrees, starting from 65 up to 125 degrees celsius
+ * The attributes can be found under
+ * /sys/devices/system/cpu/cpuX/thermal
+ * /sys/devices/system/spu/spuX/thermal
+ *
+ * The following attributes are added for each node:
+ * temperature:
+ *     contains the current temperature measured by the DTS
+ * throttle_begin:
+ *     throttling begins when temperature is greater or equal to
+ *     throttle_begin. Setting this value to 125 prevents throttling.
+ * throttle_end:
+ *     throttling is being ceased, if the temperature is lower than
+ *     throttle_end. Due to a delay between applying throttling and
+ *     a reduced temperature this value should be less than throttle_begin.
+ *     A value equal to throttle_begin provides only a very little hysteresis.
+ * throttle_full_stop:
+ *     If the temperatrue is greater or equal to throttle_full_stop,
+ *     full throttling is applied to the cpu or spu. This value should be
+ *     greater than throttle_begin and throttle_end. Setting this value to
+ *     65 prevents the unit from running code at all.
+ *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Christian Krafft <krafft@de.ibm.com>
@@ -31,6 +56,26 @@
#include "cbe_regs.h"
#include "spu_priv1_mmio.h"
+#define TEMP_MIN 65
+#define TEMP_MAX 125
+
+#define SYSDEV_PREFIX_ATTR(_prefix,_name,_mode)                        \
+struct sysdev_attribute attr_ ## _prefix ## _ ## _name = {             \
+    .attr = { .name = __stringify(_name), .mode = _mode },             \
+    .show  = _prefix ## _show_ ## _name,                               \
+    .store = _prefix ## _store_ ## _name,                              \
+};
+
+static inline u8 reg_to_temp(u8 reg_value)
+{
+    return ((reg_value & 0x3f) << 1) + TEMP_MIN;
+}
+
+static inline u8 temp_to_reg(u8 temp)
+{
+    return ((temp - TEMP_MIN) >> 1) & 0x3f;
+}
+
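
For reference, SYSDEV_PREFIX_ATTR(spu, throttle_end, 0600), used further down, expands to roughly the following (hand-expanded here for illustration only):

struct sysdev_attribute attr_spu_throttle_end = {
    .attr  = { .name = "throttle_end", .mode = 0600 },
    .show  = spu_show_throttle_end,
    .store = spu_store_throttle_end,
};

so every prefix/name pair passed to the macro must provide matching _show_ and _store_ callbacks.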
static struct cbe_pmd_regs __iomem *get_pmd_regs(struct sys_device *sysdev)
{
    struct spu *spu;
@@ -58,20 +103,81 @@ static u8 spu_read_register_value(struct sys_device *sysdev, union spe_reg __iom
static ssize_t spu_show_temp(struct sys_device *sysdev, char *buf)
{
-   int value;
+   u8 value;
    struct cbe_pmd_regs __iomem *pmd_regs;

    pmd_regs = get_pmd_regs(sysdev);

    value = spu_read_register_value(sysdev, &pmd_regs->ts_ctsr1);
-   /* clear all other bits */
-   value &= 0x3F;
-   /* temp is stored in steps of 2 degrees */
-   value *= 2;
-   /* base temp is 65 degrees */
-   value += 65;
-
-   return sprintf(buf, "%d\n", (int) value);
+
+   return sprintf(buf, "%d\n", reg_to_temp(value));
+}
+
+static ssize_t show_throttle(struct cbe_pmd_regs __iomem *pmd_regs, char *buf, int pos)
+{
+   u64 value;
+
+   value = in_be64(&pmd_regs->tm_tpr.val);
+   /* access the corresponding byte */
+   value >>= pos;
+   value &= 0x3F;
+
+   return sprintf(buf, "%d\n", reg_to_temp(value));
+}
+
+static ssize_t store_throttle(struct cbe_pmd_regs __iomem *pmd_regs, const char *buf, size_t size, int pos)
+{
+   u64 reg_value;
+   int temp;
+   u64 new_value;
+   int ret;
+
+   ret = sscanf(buf, "%u", &temp);
+
+   if (ret != 1 || temp < TEMP_MIN || temp > TEMP_MAX)
+       return -EINVAL;
+
+   new_value = temp_to_reg(temp);
+
+   reg_value = in_be64(&pmd_regs->tm_tpr.val);
+
+   /* zero out bits for new value */
+   reg_value &= ~(0xffull << pos);
+   /* set bits to new value */
+   reg_value |= new_value << pos;
+
+   out_be64(&pmd_regs->tm_tpr.val, reg_value);
+   return size;
+}
+
+static ssize_t spu_show_throttle_end(struct sys_device *sysdev, char *buf)
+{
+   return show_throttle(get_pmd_regs(sysdev), buf, 0);
+}
+
+static ssize_t spu_show_throttle_begin(struct sys_device *sysdev, char *buf)
+{
+   return show_throttle(get_pmd_regs(sysdev), buf, 8);
+}
+
+static ssize_t spu_show_throttle_full_stop(struct sys_device *sysdev, char *buf)
+{
+   return show_throttle(get_pmd_regs(sysdev), buf, 16);
+}
+
+static ssize_t spu_store_throttle_end(struct sys_device *sysdev, const char *buf, size_t size)
+{
+   return store_throttle(get_pmd_regs(sysdev), buf, size, 0);
+}
+
+static ssize_t spu_store_throttle_begin(struct sys_device *sysdev, const char *buf, size_t size)
+{
+   return store_throttle(get_pmd_regs(sysdev), buf, size, 8);
+}
+
+static ssize_t spu_store_throttle_full_stop(struct sys_device *sysdev, const char *buf, size_t size)
+{
+   return store_throttle(get_pmd_regs(sysdev), buf, size, 16);
}
static ssize_t ppe_show_temp(struct sys_device *sysdev, char *buf, int pos)
@@ -82,16 +188,9 @@ static ssize_t ppe_show_temp(struct sys_device *sysdev, char *buf, int pos)
    pmd_regs = cbe_get_cpu_pmd_regs(sysdev->id);
    value = in_be64(&pmd_regs->ts_ctsr2);

-   /* access the corresponding byte */
-   value >>= pos;
-   /* clear all other bits */
-   value &= 0x3F;
-   /* temp is stored in steps of 2 degrees */
-   value *= 2;
-   /* base temp is 65 degrees */
-   value += 65;
+   value = (value >> pos) & 0x3f;

-   return sprintf(buf, "%d\n", (int) value);
+   return sprintf(buf, "%d\n", reg_to_temp(value));
}
@@ -108,13 +207,52 @@ static ssize_t ppe_show_temp1(struct sys_device *sysdev, char *buf)
    return ppe_show_temp(sysdev, buf, 0);
}
+static ssize_t ppe_show_throttle_end(struct sys_device *sysdev, char *buf)
+{
+   return show_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, 32);
+}
+
+static ssize_t ppe_show_throttle_begin(struct sys_device *sysdev, char *buf)
+{
+   return show_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, 40);
+}
+
+static ssize_t ppe_show_throttle_full_stop(struct sys_device *sysdev, char *buf)
+{
+   return show_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, 48);
+}
+
+static ssize_t ppe_store_throttle_end(struct sys_device *sysdev, const char *buf, size_t size)
+{
+   return store_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, size, 32);
+}
+
+static ssize_t ppe_store_throttle_begin(struct sys_device *sysdev, const char *buf, size_t size)
+{
+   return store_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, size, 40);
+}
+
+static ssize_t ppe_store_throttle_full_stop(struct sys_device *sysdev, const char *buf, size_t size)
+{
+   return store_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, size, 48);
+}
static struct sysdev_attribute attr_spu_temperature = {
    .attr = {.name = "temperature", .mode = 0400 },
    .show = spu_show_temp,
};

+static SYSDEV_PREFIX_ATTR(spu, throttle_end, 0600);
+static SYSDEV_PREFIX_ATTR(spu, throttle_begin, 0600);
+static SYSDEV_PREFIX_ATTR(spu, throttle_full_stop, 0600);
+
static struct attribute *spu_attributes[] = {
    &attr_spu_temperature.attr,
+   &attr_spu_throttle_end.attr,
+   &attr_spu_throttle_begin.attr,
+   &attr_spu_throttle_full_stop.attr,
    NULL,
};
@@ -133,9 +271,16 @@ static struct sysdev_attribute attr_ppe_temperature1 = {
    .show = ppe_show_temp1,
};

+static SYSDEV_PREFIX_ATTR(ppe, throttle_end, 0600);
+static SYSDEV_PREFIX_ATTR(ppe, throttle_begin, 0600);
+static SYSDEV_PREFIX_ATTR(ppe, throttle_full_stop, 0600);
+
static struct attribute *ppe_attributes[] = {
    &attr_ppe_temperature0.attr,
    &attr_ppe_temperature1.attr,
+   &attr_ppe_throttle_end.attr,
+   &attr_ppe_throttle_begin.attr,
+   &attr_ppe_throttle_full_stop.attr,
    NULL,
};
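
The register encoding behind these attributes is the one described in the new file header comment: 6 bits, 2-degree steps, 65 degree base. A standalone user-space sanity check of that mapping, mirroring reg_to_temp()/temp_to_reg() above (the _ex names exist only in this sketch):

#include <assert.h>

static unsigned reg_to_temp_ex(unsigned reg)  { return ((reg & 0x3f) << 1) + 65; }
static unsigned temp_to_reg_ex(unsigned temp) { return (temp - 65) >> 1; }

int main(void)
{
    assert(reg_to_temp_ex(0x00) == 65);   /* lowest reportable temperature */
    assert(reg_to_temp_ex(0x1e) == 125);  /* 0x1e maps to TEMP_MAX (125) */
    assert(temp_to_reg_ex(85) == 10 && reg_to_temp_ex(10) == 85);
    return 0;
}

Writing 85 to throttle_begin therefore stores the 6-bit value 10 into the corresponding byte of tm_tpr, as store_throttle() above does.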
......
@@ -3,11 +3,13 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
+#include <linux/reboot.h>

#include <asm/reg.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/machdep.h>
+#include <asm/rtas.h>

#include "ras.h"
#include "cbe_regs.h"
@@ -82,6 +84,164 @@ static int cbe_machine_check_handler(struct pt_regs *regs)
    return 0;
}
+struct ptcal_area {
+   struct list_head list;
+   int nid;
+   int order;
+   struct page *pages;
+};
+
+static LIST_HEAD(ptcal_list);
+
+static int ptcal_start_tok, ptcal_stop_tok;
+
+static int __init cbe_ptcal_enable_on_node(int nid, int order)
+{
+   struct ptcal_area *area;
+   int ret = -ENOMEM;
+   unsigned long addr;
+
+#ifdef CONFIG_CRASH_DUMP
+   rtas_call(ptcal_stop_tok, 1, 1, NULL, nid);
+#endif
+
+   area = kmalloc(sizeof(*area), GFP_KERNEL);
+   if (!area)
+       goto out_err;
+
+   area->nid = nid;
+   area->order = order;
+   area->pages = alloc_pages_node(area->nid, GFP_KERNEL, area->order);
+
+   if (!area->pages)
+       goto out_free_area;
+
+   addr = __pa(page_address(area->pages));
+
+   ret = -EIO;
+   if (rtas_call(ptcal_start_tok, 3, 1, NULL, area->nid,
+               (unsigned int)(addr >> 32),
+               (unsigned int)(addr & 0xffffffff))) {
+       printk(KERN_ERR "%s: error enabling PTCAL on node %d!\n",
+               __FUNCTION__, nid);
+       goto out_free_pages;
+   }
+
+   list_add(&area->list, &ptcal_list);
+
+   return 0;
+
+out_free_pages:
+   __free_pages(area->pages, area->order);
+out_free_area:
+   kfree(area);
+out_err:
+   return ret;
+}
+
+static int __init cbe_ptcal_enable(void)
+{
+   const u32 *size;
+   struct device_node *np;
+   int order, found_mic = 0;
+
+   np = of_find_node_by_path("/rtas");
+   if (!np)
+       return -ENODEV;
+   size = get_property(np, "ibm,cbe-ptcal-size", NULL);
+   if (!size)
+       return -ENODEV;
+
+   pr_debug("%s: enabling PTCAL, size = 0x%x\n", __FUNCTION__, *size);
+   order = get_order(*size);
+   of_node_put(np);
+
+   /* support for malta device trees, with be@/mic@ nodes */
+   for_each_node_by_type(np, "mic-tm") {
+       cbe_ptcal_enable_on_node(of_node_to_nid(np), order);
+       found_mic = 1;
+   }
+
+   if (found_mic)
+       return 0;
+
+   /* support for older device tree - use cpu nodes */
+   for_each_node_by_type(np, "cpu") {
+       const u32 *nid = get_property(np, "node-id", NULL);
+       if (!nid) {
+           printk(KERN_ERR "%s: node %s is missing node-id?\n",
+                   __FUNCTION__, np->full_name);
+           continue;
+       }
+       cbe_ptcal_enable_on_node(*nid, order);
+       found_mic = 1;
+   }
+
+   return found_mic ? 0 : -ENODEV;
+}
+
+static int cbe_ptcal_disable(void)
+{
+   struct ptcal_area *area, *tmp;
+   int ret = 0;
+
+   pr_debug("%s: disabling PTCAL\n", __FUNCTION__);
+
+   list_for_each_entry_safe(area, tmp, &ptcal_list, list) {
+       /* disable ptcal on this node */
+       if (rtas_call(ptcal_stop_tok, 1, 1, NULL, area->nid)) {
+           printk(KERN_ERR "%s: error disabling PTCAL "
+                   "on node %d!\n", __FUNCTION__,
+                   area->nid);
+           ret = -EIO;
+           continue;
+       }
+
+       /* ensure we can access the PTCAL area */
+       memset(page_address(area->pages), 0,
+               1 << (area->order + PAGE_SHIFT));
+
+       /* clean up */
+       list_del(&area->list);
+       __free_pages(area->pages, area->order);
+       kfree(area);
+   }
+
+   return ret;
+}
+
+static int cbe_ptcal_notify_reboot(struct notifier_block *nb,
+       unsigned long code, void *data)
+{
+   return cbe_ptcal_disable();
+}
+
+static struct notifier_block cbe_ptcal_reboot_notifier = {
+   .notifier_call = cbe_ptcal_notify_reboot
+};
+
+int __init cbe_ptcal_init(void)
+{
+   int ret;
+   ptcal_start_tok = rtas_token("ibm,cbe-start-ptcal");
+   ptcal_stop_tok = rtas_token("ibm,cbe-stop-ptcal");
+
+   if (ptcal_start_tok == RTAS_UNKNOWN_SERVICE
+           || ptcal_stop_tok == RTAS_UNKNOWN_SERVICE)
+       return -ENODEV;
+
+   ret = register_reboot_notifier(&cbe_ptcal_reboot_notifier);
+   if (ret) {
+       printk(KERN_ERR "Can't disable PTCAL, so not enabling\n");
+       return ret;
+   }
+
+   return cbe_ptcal_enable();
+}
+arch_initcall(cbe_ptcal_init);
+
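
cbe_ptcal_enable_on_node() hands the 64-bit physical address of the reserved area to RTAS as two 32-bit arguments. A standalone illustration of that split (the address value is made up for this example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t addr = 0x0000000123456000ULL;        /* hypothetical PTCAL area address */
    uint32_t hi = (uint32_t)(addr >> 32);         /* 0x00000001 */
    uint32_t lo = (uint32_t)(addr & 0xffffffff);  /* 0x23456000 */

    printf("ibm,cbe-start-ptcal args: nid, 0x%08x, 0x%08x\n", hi, lo);
    return 0;
}

The reboot notifier exists so the area can be stopped and scrubbed before the kernel goes away, presumably leaving the memory in a known state for whatever boots next.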
void __init cbe_ras_init(void)
{
    unsigned long hid0;
......
@@ -33,7 +33,7 @@
#include <asm/of_platform.h>
#include <asm/io.h>
#include <asm/pmi.h>
+#include <asm/prom.h>

struct pmi_data {
    struct list_head handler;
@@ -49,21 +49,6 @@ struct pmi_data {
};
-static void __iomem *of_iomap(struct device_node *np)
-{
-   struct resource res;
-
-   if (of_address_to_resource(np, 0, &res))
-       return NULL;
-
-   pr_debug("Resource start: 0x%lx\n", res.start);
-   pr_debug("Resource end: 0x%lx\n", res.end);
-
-   return ioremap(res.start, 1 + res.end - res.start);
-}
-
static int pmi_irq_handler(int irq, void *dev_id)
{
    struct pmi_data *data;
@@ -118,6 +103,7 @@ out:

static struct of_device_id pmi_match[] = {
    { .type = "ibm,pmi", .name = "ibm,pmi" },
+   { .type = "ibm,pmi" },
    {},
};
@@ -153,7 +139,7 @@ static int pmi_of_probe(struct of_device *dev,
        goto out;
    }

-   data->pmi_reg = of_iomap(np);
+   data->pmi_reg = of_iomap(np, 0);
    if (!data->pmi_reg) {
        printk(KERN_ERR "pmi: invalid register address.\n");
        rc = -EFAULT;
@@ -279,6 +265,9 @@ void pmi_register_handler(struct of_device *device,
    struct pmi_data *data;
    data = device->dev.driver_data;

+   if (!data)
+       return;
+
    spin_lock(&data->handler_spinlock);
    list_add_tail(&handler->node, &data->handler);
    spin_unlock(&data->handler_spinlock);
@@ -289,10 +278,12 @@ void pmi_unregister_handler(struct of_device *device,
        struct pmi_handler *handler)
{
    struct pmi_data *data;
+   data = device->dev.driver_data;

-   pr_debug("pmi: unregistering handler %p\n", handler);
+   if (!data)
+       return;

-   data = device->dev.driver_data;
+   pr_debug("pmi: unregistering handler %p\n", handler);

    spin_lock(&data->handler_spinlock);
    list_del(&handler->node);
......
@@ -97,11 +97,6 @@ extern void cbe_disable_pm_interrupts(u32 cpu);
extern u32 cbe_get_and_clear_pm_interrupts(u32 cpu);
extern void cbe_sync_irq(int node);

-/* Utility functions, macros */
-extern u32 cbe_get_hw_thread_id(int cpu);
-
-#define cbe_cpu_to_node(cpu) ((cpu) >> 1)
-
#define CBE_COUNT_SUPERVISOR_MODE 0
#define CBE_COUNT_HYPERVISOR_MODE 1
#define CBE_COUNT_PROBLEM_MODE 2
......
@@ -20,6 +20,7 @@
#include <linux/platform_device.h>
#include <asm/irq.h>
#include <asm/atomic.h>
+#include <asm/io.h>

/* Definitions used by the flattened device tree */
#define OF_DT_HEADER 0xd00dfeed /* marker */
@@ -355,6 +356,16 @@ static inline int of_irq_to_resource(struct device_node *dev, int index, struct
    return irq;
}
+static inline void __iomem *of_iomap(struct device_node *np, int index)
+{
+   struct resource res;
+
+   if (of_address_to_resource(np, index, &res))
+       return NULL;
+
+   return ioremap(res.start, 1 + res.end - res.start);
+}
+
#endif /* __KERNEL__ */
#endif /* _POWERPC_PROM_H */
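
A minimal usage sketch for the new inline, patterned on the cbe_fill_regs_map() call sites above (example_probe and the error value are illustrative only):

static int example_probe(struct device_node *np)
{
    void __iomem *regs = of_iomap(np, 0);   /* map the node's first register resource */

    if (!regs)
        return -ENOMEM;

    /* ... access the registers with in_be64()/out_be64() ... */

    iounmap(regs);
    return 0;
}

Callers no longer need to open-code of_address_to_resource() plus ioremap(), which is exactly the boilerplate the pmi.c change above deletes.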