Commit 2656c076 authored by Linus Torvalds
parents 811803c5 f365cfd0
......@@ -598,19 +598,6 @@ config ARCH_MEMORY_PROBE
def_bool y
depends on MEMORY_HOTPLUG
# Some NUMA nodes have memory ranges that span
# other nodes. Even though a pfn is valid and
# between a node's start and end pfns, it may not
# reside on that node.
#
# This is a relatively temporary hack that should
# be able to go away when sparsemem is fully in
# place
config NODES_SPAN_OTHER_NODES
def_bool y
depends on NEED_MULTIPLE_NODES
config PPC_64K_PAGES
bool "64k page size"
depends on PPC64
......
......@@ -33,6 +33,8 @@ endif
export CROSS32CC CROSS32AS CROSS32LD CROSS32OBJCOPY
KBUILD_DEFCONFIG := $(shell uname -m)_defconfig
ifeq ($(CONFIG_PPC64),y)
OLDARCH := ppc64
SZ := 64
......@@ -111,9 +113,6 @@ cpu-as-$(CONFIG_E200) += -Wa,-me200
AFLAGS += $(cpu-as-y)
CFLAGS += $(cpu-as-y)
# Default to the common case.
KBUILD_DEFCONFIG := common_defconfig
head-y := arch/powerpc/kernel/head_32.o
head-$(CONFIG_PPC64) := arch/powerpc/kernel/head_64.o
head-$(CONFIG_8xx) := arch/powerpc/kernel/head_8xx.o
......@@ -125,11 +124,11 @@ head-$(CONFIG_PPC64) += arch/powerpc/kernel/entry_64.o
head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o
core-y += arch/powerpc/kernel/ \
arch/$(OLDARCH)/kernel/ \
arch/powerpc/mm/ \
arch/powerpc/lib/ \
arch/powerpc/sysdev/ \
arch/powerpc/platforms/
core-$(CONFIG_PPC32) += arch/ppc/kernel/
core-$(CONFIG_MATH_EMULATION) += arch/ppc/math-emu/
core-$(CONFIG_XMON) += arch/powerpc/xmon/
core-$(CONFIG_APUS) += arch/ppc/amiga/
......@@ -165,7 +164,7 @@ define archhelp
@echo ' (your) ~/bin/installkernel or'
@echo ' (distribution) /sbin/installkernel or'
@echo ' install to $$(INSTALL_PATH) and run lilo'
@echo ' *_defconfig - Select default config from arch/$(ARCH)/ppc/configs'
@echo ' *_defconfig - Select default config from arch/$(ARCH)/configs'
endef
archclean:
......
......@@ -14,43 +14,42 @@
.text
.globl _zimage_start
_zimage_start:
bl reloc_offset
bl 1f
reloc_offset:
1:
mflr r0
lis r9,reloc_offset@ha
addi r9,r9,reloc_offset@l
lis r9,1b@ha
addi r9,r9,1b@l
subf. r0,r9,r0
beq clear_caches
beq 3f
reloc_got2:
lis r9,__got2_start@ha
addi r9,r9,__got2_start@l
lis r8,__got2_end@ha
addi r8,r8,__got2_end@l
subf. r8,r9,r8
beq clear_caches
beq 3f
srwi. r8,r8,2
mtctr r8
add r9,r0,r9
reloc_got2_loop:
2:
lwz r8,0(r9)
add r8,r8,r0
stw r8,0(r9)
addi r9,r9,4
bdnz reloc_got2_loop
bdnz 2b
clear_caches:
3:
lis r9,_start@h
add r9,r0,r9
lis r8,_etext@ha
addi r8,r8,_etext@l
add r8,r0,r8
1: dcbf r0,r9
4: dcbf r0,r9
icbi r0,r9
addi r9,r9,0x20
cmplwi 0,r9,8
blt 1b
blt 4b
sync
isync
......
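The rewritten boot entry code above computes the relocation offset (where the zImage actually runs minus where it was linked) with the bl 1f / mflr sequence, then walks the .got2 table adding that offset to every entry before flushing the caches; the change is only from named labels to local numeric labels. A minimal C model of the fixup loop, with hypothetical pointers standing in for the __got2_start/__got2_end linker symbols and the computed offset:

```c
#include <stdint.h>

/*
 * Minimal C model of the GOT2 fixup done by the assembly above: every
 * 32-bit entry between __got2_start and __got2_end has the load-time
 * relocation offset added to it.  The pointers and the offset are
 * hypothetical stand-ins for the linker symbols and for the value
 * computed with mflr/subf in the real code.
 */
static void relocate_got2(uint32_t *got2_start, uint32_t *got2_end,
			  uint32_t offset)
{
	if (offset == 0)	/* running at the link address: nothing to do */
		return;

	for (uint32_t *p = got2_start; p < got2_end; p++)
		*p += offset;	/* mirrors the lwz/add/stw loop at label 2: */
}
```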
......@@ -165,7 +165,6 @@ CONFIG_SPARSEMEM_EXTREME=y
# CONFIG_MEMORY_HOTPLUG is not set
CONFIG_SPLIT_PTLOCK_CPUS=4096
CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
CONFIG_NODES_SPAN_OTHER_NODES=y
# CONFIG_PPC_64K_PAGES is not set
CONFIG_SCHED_SMT=y
CONFIG_PROC_DEVICETREE=y
......
......@@ -12,12 +12,12 @@ CFLAGS_btext.o += -fPIC
endif
obj-y := semaphore.o cputable.o ptrace.o syscalls.o \
irq.o signal_32.o pmc.o vdso.o
irq.o align.o signal_32.o pmc.o vdso.o
obj-y += vdso32/
obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \
signal_64.o ptrace32.o systbl.o \
paca.o ioctl32.o cpu_setup_power4.o \
firmware.o sysfs.o udbg.o
firmware.o sysfs.o udbg.o idle_64.o
obj-$(CONFIG_PPC64) += vdso64/
obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
obj-$(CONFIG_POWER4) += idle_power4.o
......@@ -35,6 +35,7 @@ obj-$(CONFIG_PPC_PSERIES) += udbg_16550.o
obj-$(CONFIG_PPC_MAPLE) += udbg_16550.o
udbgscc-$(CONFIG_PPC64) := udbg_scc.o
obj-$(CONFIG_PPC_PMAC) += $(udbgscc-y)
obj64-$(CONFIG_PPC_MULTIPLATFORM) += nvram_64.o
ifeq ($(CONFIG_PPC_MERGE),y)
......@@ -78,5 +79,7 @@ smpobj-$(CONFIG_SMP) += smp.o
endif
obj-$(CONFIG_PPC64) += $(obj64-y)
extra-$(CONFIG_PPC_FPU) += fpu.o
extra-$(CONFIG_PPC64) += entry_64.o
......@@ -27,14 +27,6 @@
.text
.align 5
_GLOBAL(__delay)
cmpwi 0,r3,0
mtctr r3
beqlr
1: bdnz 1b
blr
/*
* This returns the high 64 bits of the product of two 64-bit numbers.
*/
......
......@@ -15,7 +15,7 @@ unsigned long __init rtas_get_boot_time(void)
{
int ret[8];
int error, wait_time;
unsigned long max_wait_tb;
u64 max_wait_tb;
max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
......@@ -45,7 +45,7 @@ void rtas_get_rtc_time(struct rtc_time *rtc_tm)
{
int ret[8];
int error, wait_time;
unsigned long max_wait_tb;
u64 max_wait_tb;
max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
......@@ -80,7 +80,7 @@ void rtas_get_rtc_time(struct rtc_time *rtc_tm)
int rtas_set_rtc_time(struct rtc_time *tm)
{
int error, wait_time;
unsigned long max_wait_tb;
u64 max_wait_tb;
max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
......
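The unsigned long to u64 change above matters because get_tb() returns the full 64-bit timebase while unsigned long is only 32 bits on ppc32, so the computed retry deadline would silently truncate. A standalone sketch of that deadline arithmetic, with made-up values standing in for get_tb(), tb_ticks_per_usec and MAX_RTC_WAIT:

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the retry deadline used by the RTAS RTC calls above.
 * fake_tb stands in for get_tb(), ticks_per_usec for tb_ticks_per_usec
 * and max_wait_ms for MAX_RTC_WAIT; all numbers are made up.  Holding
 * the deadline in a 64-bit type is the point of the diff: a 32-bit
 * unsigned long drops the upper bits of the timebase.
 */
int main(void)
{
	uint64_t fake_tb = 0x123456789ULL;	/* already wider than 32 bits */
	uint64_t ticks_per_usec = 512;		/* hypothetical clock rate    */
	unsigned int max_wait_ms = 64;		/* hypothetical wait limit    */

	uint64_t deadline = fake_tb + ticks_per_usec * 1000 * max_wait_ms;
	uint32_t truncated = (uint32_t)deadline; /* what a 32-bit long keeps  */

	printf("deadline=%llu truncated=%u\n",
	       (unsigned long long)deadline, truncated);
	return 0;
}
```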
......@@ -130,6 +130,34 @@ unsigned long tb_last_stamp;
*/
DEFINE_PER_CPU(unsigned long, last_jiffy);
void __delay(unsigned long loops)
{
unsigned long start;
int diff;
if (__USE_RTC()) {
start = get_rtcl();
do {
/* the RTCL register wraps at 1000000000 */
diff = get_rtcl() - start;
if (diff < 0)
diff += 1000000000;
} while (diff < loops);
} else {
start = get_tbl();
while (get_tbl() - start < loops)
HMT_low();
HMT_medium();
}
}
EXPORT_SYMBOL(__delay);
void udelay(unsigned long usecs)
{
__delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);
static __inline__ void timer_check_rtc(void)
{
/*
......
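In the RTC branch of the new __delay() above, the RTCL register counts up to 1 000 000 000 and then wraps to zero, so a raw (now - start) can come out negative when a wrap falls between the two reads; adding 1e9 recovers the true elapsed count. A standalone illustration of that wrap handling, with made-up sample values:

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Illustration of the wrap handling in the RTC branch of __delay():
 * RTCL wraps at 1e9, so the signed difference can go negative across
 * a wrap and adding 1e9 restores the real elapsed tick count.
 */
int main(void)
{
	uint32_t start = 999999000;	/* read just before the wrap */
	uint32_t now   = 500;		/* read just after the wrap  */

	int diff = now - start;		/* same pattern as __delay() */
	if (diff < 0)
		diff += 1000000000;

	printf("elapsed: %d ticks\n", diff);	/* prints 1500 */
	return 0;
}
```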
......@@ -257,6 +257,13 @@ void __init chrp_setup_arch(void)
if (rtas_token("display-character") >= 0)
ppc_md.progress = rtas_progress;
/* use RTAS time-of-day routines if available */
if (rtas_token("get-time-of-day") != RTAS_UNKNOWN_SERVICE) {
ppc_md.get_boot_time = rtas_get_boot_time;
ppc_md.get_rtc_time = rtas_get_rtc_time;
ppc_md.set_rtc_time = rtas_set_rtc_time;
}
#ifdef CONFIG_BOOTX_TEXT
if (ppc_md.progress == NULL && boot_text_mapped)
ppc_md.progress = btext_progress;
......@@ -505,9 +512,11 @@ void __init chrp_init(void)
ppc_md.halt = rtas_halt;
ppc_md.time_init = chrp_time_init;
ppc_md.calibrate_decr = chrp_calibrate_decr;
/* this may get overridden with rtas routines later... */
ppc_md.set_rtc_time = chrp_set_rtc_time;
ppc_md.get_rtc_time = chrp_get_rtc_time;
ppc_md.calibrate_decr = chrp_calibrate_decr;
#ifdef CONFIG_SMP
smp_ops = &chrp_smp_ops;
......
......@@ -34,6 +34,7 @@
#include <asm/machdep.h>
#include <asm/smp.h>
#include <asm/mpic.h>
#include <asm/rtas.h>
static void __devinit smp_chrp_kick_cpu(int nr)
{
......
......@@ -87,7 +87,6 @@ int chrp_set_rtc_time(struct rtc_time *tmarg)
chrp_cmos_clock_write((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
tm.tm_year -= 1900;
if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
BIN_TO_BCD(tm.tm_sec);
BIN_TO_BCD(tm.tm_min);
......@@ -156,7 +155,7 @@ void chrp_get_rtc_time(struct rtc_time *tm)
BCD_TO_BIN(mon);
BCD_TO_BIN(year);
}
if ((year += 1900) < 1970)
if (year < 70)
year += 100;
tm->tm_sec = sec;
tm->tm_min = min;
......
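The year fix above reflects that the CMOS RTC stores only a two-digit year while struct rtc_time keeps tm_year as years since 1900: values below 70 are taken to mean 20xx, the rest 19xx, and 1900 is never added to the stored field. A small sketch of that normalization:

```c
#include <stdio.h>

/*
 * Sketch of the two-digit CMOS year handling after the fix in
 * chrp_get_rtc_time(): tm_year is years since 1900, so a raw RTC
 * value below 70 means 20xx and anything else means 19xx.
 */
static int cmos_year_to_tm_year(int year)	/* year is 0..99 from the RTC */
{
	if (year < 70)
		year += 100;	/* 00..69 -> 2000..2069 (100..169 since 1900) */
	return year;		/* 70..99 -> 1970..1999 */
}

int main(void)
{
	printf("%d\n", cmos_year_to_tm_year(5) + 1900);		/* 2005 */
	printf("%d\n", cmos_year_to_tm_year(99) + 1900);	/* 1999 */
	return 0;
}
```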
......@@ -158,6 +158,11 @@ int maple_set_rtc_time(struct rtc_time *tm)
return 0;
}
static struct resource rtc_iores = {
.name = "rtc",
.flags = IORESOURCE_BUSY,
};
unsigned long __init maple_get_boot_time(void)
{
struct rtc_time tm;
......@@ -172,7 +177,11 @@ unsigned long __init maple_get_boot_time(void)
printk(KERN_INFO "Maple: No device node for RTC, assuming "
"legacy address (0x%x)\n", maple_rtc_addr);
}
rtc_iores.start = maple_rtc_addr;
rtc_iores.end = maple_rtc_addr + 7;
request_resource(&ioport_resource, &rtc_iores);
maple_get_rtc_time(&tm);
return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec);
......
......@@ -86,7 +86,8 @@ static int ibm_read_slot_reset_state;
static int ibm_read_slot_reset_state2;
static int ibm_slot_error_detail;
static int eeh_subsystem_enabled;
int eeh_subsystem_enabled;
EXPORT_SYMBOL(eeh_subsystem_enabled);
/* Lock to avoid races due to multiple reports of an error */
static DEFINE_SPINLOCK(confirm_error_lock);
......
......@@ -504,7 +504,7 @@ static void pseries_dedicated_idle(void)
lpaca->lppaca.idle = 1;
if (!need_resched()) {
start_snooze = __get_tb() +
start_snooze = get_tb() +
*smt_snooze_delay * tb_ticks_per_usec;
while (!need_resched() && !cpu_is_offline(cpu)) {
......@@ -518,7 +518,7 @@ static void pseries_dedicated_idle(void)
HMT_very_low();
if (*smt_snooze_delay != 0 &&
__get_tb() > start_snooze) {
get_tb() > start_snooze) {
HMT_medium();
dedicated_idle_sleep(cpu);
}
......
......@@ -13,7 +13,7 @@ extra-$(CONFIG_POWER4) += idle_power4.o
extra-y += vmlinux.lds
obj-y := entry.o traps.o idle.o time.o misc.o \
process.o align.o \
process.o \
setup.o \
ppc_htab.o
obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
......@@ -38,7 +38,7 @@ endif
# These are here while we do the architecture merge
else
obj-y := idle.o align.o
obj-y := idle.o
obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
obj-$(CONFIG_MODULES) += module.o
......
......@@ -45,7 +45,6 @@ static void update_bridge_base(struct pci_bus *bus, int i);
static void pcibios_fixup_resources(struct pci_dev* dev);
static void fixup_broken_pcnet32(struct pci_dev* dev);
static int reparent_resources(struct resource *parent, struct resource *res);
static void fixup_rev1_53c810(struct pci_dev* dev);
static void fixup_cpc710_pci64(struct pci_dev* dev);
#ifdef CONFIG_PPC_OF
static u8* pci_to_OF_bus_map;
......
......@@ -69,9 +69,33 @@ struct ppc_sys_spec ppc_sys_specs[] = {
},
},
{
.ppc_sys_name = "8343E",
.ppc_sys_name = "8347E",
.mask = 0xFFFF0000,
.value = 0x80540000,
.num_devices = 9,
.device_list = (enum ppc_sys_devices[])
{
MPC83xx_TSEC1, MPC83xx_TSEC2, MPC83xx_IIC1,
MPC83xx_IIC2, MPC83xx_DUART, MPC83xx_SEC2,
MPC83xx_USB2_DR, MPC83xx_USB2_MPH, MPC83xx_MDIO
},
},
{
.ppc_sys_name = "8347",
.mask = 0xFFFF0000,
.value = 0x80550000,
.num_devices = 8,
.device_list = (enum ppc_sys_devices[])
{
MPC83xx_TSEC1, MPC83xx_TSEC2, MPC83xx_IIC1,
MPC83xx_IIC2, MPC83xx_DUART,
MPC83xx_USB2_DR, MPC83xx_USB2_MPH, MPC83xx_MDIO
},
},
{
.ppc_sys_name = "8343E",
.mask = 0xFFFF0000,
.value = 0x80560000,
.num_devices = 8,
.device_list = (enum ppc_sys_devices[])
{
......@@ -83,7 +107,7 @@ struct ppc_sys_spec ppc_sys_specs[] = {
{
.ppc_sys_name = "8343",
.mask = 0xFFFF0000,
.value = 0x80550000,
.value = 0x80570000,
.num_devices = 7,
.device_list = (enum ppc_sys_devices[])
{
......
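Each ppc_sys_spec entry is matched against the chip's SVR by masking and comparing, so the new entries distinguish the MPC8347E (0x8054xxxx), MPC8347 (0x8055xxxx), MPC8343E (0x8056xxxx) and MPC8343 (0x8057xxxx) parts. A trimmed-down sketch of that lookup, assuming the usual mask-and-compare rule; the helper itself is only an illustration:

```c
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Model of ppc_sys_specs matching: an entry matches when
 * (svr & mask) == value.  Names and values mirror the table above. */
struct sys_spec {
	const char *name;
	uint32_t mask;
	uint32_t value;
};

static const struct sys_spec specs[] = {
	{ "8347E", 0xFFFF0000, 0x80540000 },
	{ "8347",  0xFFFF0000, 0x80550000 },
	{ "8343E", 0xFFFF0000, 0x80560000 },
	{ "8343",  0xFFFF0000, 0x80570000 },
};

static const char *identify(uint32_t svr)
{
	for (size_t i = 0; i < sizeof(specs) / sizeof(specs[0]); i++)
		if ((svr & specs[i].mask) == specs[i].value)
			return specs[i].name;
	return "unknown";
}

int main(void)
{
	printf("%s\n", identify(0x80550010));	/* prints "8347" */
	return 0;
}
```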
menu "Kernel hacking"
source "lib/Kconfig.debug"
config DEBUG_STACKOVERFLOW
bool "Check for stack overflows"
depends on DEBUG_KERNEL
help
This option will cause messages to be printed if free stack space
drops below a certain limit.
config KPROBES
bool "Kprobes"
depends on DEBUG_KERNEL
help
Kprobes allows you to trap at almost any kernel address and
execute a callback function. register_kprobe() establishes
a probepoint and specifies the callback. Kprobes is useful
for kernel debugging, non-intrusive instrumentation and testing.
If in doubt, say "N".
config DEBUG_STACK_USAGE
bool "Stack utilization instrumentation"
depends on DEBUG_KERNEL
help
Enables the display of the minimum amount of free stack which each
task has ever had available in the sysrq-T and sysrq-P debug output.
This option will slow down process creation somewhat.
config DEBUGGER
bool "Enable debugger hooks"
depends on DEBUG_KERNEL
help
Include in-kernel hooks for kernel debuggers. Unless you are
intending to debug the kernel, say N here.
config XMON
bool "Include xmon kernel debugger"
depends on DEBUGGER && !PPC_ISERIES
help
Include in-kernel hooks for the xmon kernel monitor/debugger.
Unless you are intending to debug the kernel, say N here.
Make sure to also enable CONFIG_BOOTX_TEXT on Macs. Otherwise
nothing will appear on the screen (xmon writes directly to the
framebuffer memory).
The cmdline option 'xmon' or 'xmon=early' will drop into xmon very
early during boot. 'xmon=on' will just enable the xmon debugger hooks.
'xmon=off' will disable the debugger hooks if CONFIG_XMON_DEFAULT is set.
config XMON_DEFAULT
bool "Enable xmon by default"
depends on XMON
help
xmon is normally disabled unless booted with 'xmon=on'.
Use 'xmon=off' to disable xmon init during runtime.
config IRQSTACKS
bool "Use separate kernel stacks when processing interrupts"
help
If you say Y here the kernel will use separate kernel stacks
for handling hard and soft interrupts. This can help avoid
overflowing the process kernel stacks.
endmenu
# This file is included by the global makefile so that you can add your own
# architecture-specific flags and dependencies. Remember to have actions
# for "archclean" and "archdep" for cleaning up and making dependencies for
# this architecture
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1994 by Linus Torvalds
# Changes for PPC by Gary Thomas
# Rewritten by Cort Dougan and Paul Mackerras
# Adjusted for PPC64 by Tom Gall
#
KERNELLOAD := 0xc000000000000000
# Set default 32 bits cross compilers for vdso and boot wrapper
CROSS32_COMPILE ?=
CROSS32CC := $(CROSS32_COMPILE)gcc
CROSS32AS := $(CROSS32_COMPILE)as
CROSS32LD := $(CROSS32_COMPILE)ld
CROSS32OBJCOPY := $(CROSS32_COMPILE)objcopy
# If we have a biarch compiler, use it for 32 bits cross compile if
# CROSS32_COMPILE wasn't explicitly defined, and add proper explicit
# target type to target compilers
HAS_BIARCH := $(call cc-option-yn, -m64)
ifeq ($(HAS_BIARCH),y)
ifeq ($(CROSS32_COMPILE),)
CROSS32CC := $(CC) -m32
CROSS32AS := $(AS) -a32
CROSS32LD := $(LD) -m elf32ppc
CROSS32OBJCOPY := $(OBJCOPY)
endif
override AS += -a64
override LD += -m elf64ppc
override CC += -m64
endif
export CROSS32CC CROSS32AS CROSS32LD CROSS32OBJCOPY
new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi)
ifeq ($(new_nm),y)
NM := $(NM) --synthetic
endif
CHECKFLAGS += -m64 -D__powerpc__ -D__powerpc64__
LDFLAGS := -m elf64ppc
LDFLAGS_vmlinux := -Bstatic -e $(KERNELLOAD) -Ttext $(KERNELLOAD)
CFLAGS += -msoft-float -pipe -mminimal-toc -mtraceback=none \
-mcall-aixdesc
# Temporary hack until we have migrated to asm-powerpc
CPPFLAGS += -Iarch/$(ARCH)/include
GCC_VERSION := $(call cc-version)
GCC_BROKEN_VEC := $(shell if [ $(GCC_VERSION) -lt 0400 ] ; then echo "y"; fi ;)
ifeq ($(CONFIG_POWER4_ONLY),y)
ifeq ($(CONFIG_ALTIVEC),y)
ifeq ($(GCC_BROKEN_VEC),y)
CFLAGS += $(call cc-option,-mcpu=970)
else
CFLAGS += $(call cc-option,-mcpu=power4)
endif
else
CFLAGS += $(call cc-option,-mcpu=power4)
endif
else
CFLAGS += $(call cc-option,-mtune=power4)
endif
# No AltiVec instruction when building kernel
CFLAGS += $(call cc-option, -mno-altivec)
# Enable unit-at-a-time mode when possible. It shrinks the
# kernel considerably.
CFLAGS += $(call cc-option,-funit-at-a-time)
head-y := arch/ppc64/kernel/head.o
head-y += arch/powerpc/kernel/fpu.o
head-y += arch/powerpc/kernel/entry_64.o
core-y += arch/ppc64/kernel/ arch/powerpc/kernel/
core-y += arch/powerpc/mm/
core-y += arch/powerpc/sysdev/
core-y += arch/powerpc/platforms/
core-y += arch/powerpc/lib/
core-$(CONFIG_XMON) += arch/powerpc/xmon/
drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
boot := arch/ppc64/boot
boottargets-$(CONFIG_PPC_PSERIES) += zImage zImage.initrd
boottargets-$(CONFIG_PPC_PMAC) += zImage.vmode zImage.initrd.vmode
boottargets-$(CONFIG_PPC_MAPLE) += zImage zImage.initrd
boottargets-$(CONFIG_PPC_ISERIES) += vmlinux.sminitrd vmlinux.initrd vmlinux.sm
boottargets-$(CONFIG_PPC_BPA) += zImage zImage.initrd
$(boottargets-y): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
bootimage-$(CONFIG_PPC_PSERIES) := $(boot)/zImage
bootimage-$(CONFIG_PPC_PMAC) := vmlinux
bootimage-$(CONFIG_PPC_MAPLE) := $(boot)/zImage
bootimage-$(CONFIG_PPC_BPA) := $(boot)/zImage
bootimage-$(CONFIG_PPC_ISERIES) := vmlinux
BOOTIMAGE := $(bootimage-y)
install: vmlinux
$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@
defaultimage-$(CONFIG_PPC_PSERIES) := zImage
defaultimage-$(CONFIG_PPC_PMAC) := zImage.vmode
defaultimage-$(CONFIG_PPC_MAPLE) := zImage
defaultimage-$(CONFIG_PPC_ISERIES) := vmlinux
KBUILD_IMAGE := $(defaultimage-y)
all: $(KBUILD_IMAGE)
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
# Temporary hack until we have migrated to asm-powerpc
$(Q)rm -rf arch/$(ARCH)/include
# Temporary hack until we have migrated to asm-powerpc
include/asm: arch/$(ARCH)/include/asm
arch/$(ARCH)/include/asm:
$(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi
$(Q)ln -fsn $(srctree)/include/asm-powerpc arch/$(ARCH)/include/asm
define archhelp
echo ' zImage.vmode - Compressed kernel image (arch/$(ARCH)/boot/zImage.vmode)'
echo ' zImage.initrd.vmode - Compressed kernel image with initrd attached,'
echo ' sourced from arch/$(ARCH)/boot/ramdisk.image.gz'
echo ' (arch/$(ARCH)/boot/zImage.initrd.vmode)'
echo ' zImage - zImage for pSeries machines'
echo ' zImage.initrd - zImage with initrd for pSeries machines'
endef
#
# Makefile for the linux ppc64 kernel.
#
obj-y += idle.o align.o
obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o
......@@ -26,6 +26,7 @@
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <asm/io.h>
#include <asm/prom.h>
......@@ -325,8 +326,8 @@ static void __init offb_init_nodriver(struct device_node *dp)
int *pp, i;
unsigned int len;
int width = 640, height = 480, depth = 8, pitch;
unsigned *up;
unsigned long address;
unsigned int rsize, *up;
unsigned long address = 0;
if ((pp = (int *) get_property(dp, "depth", &len)) != NULL
&& len == sizeof(int))
......@@ -344,10 +345,40 @@ static void __init offb_init_nodriver(struct device_node *dp)
pitch = 0x1000;
} else
pitch = width;
if ((up = (unsigned *) get_property(dp, "address", &len)) != NULL
&& len == sizeof(unsigned))
rsize = (unsigned long)pitch * (unsigned long)height *
(unsigned long)(depth / 8);
/* Try to match device to a PCI device in order to get a properly
* translated address rather than trying to decode the open firmware
* stuff in various incorrect ways
*/
#ifdef CONFIG_PCI
/* First try to locate the PCI device if any */
{
struct pci_dev *pdev = NULL;
for_each_pci_dev(pdev) {
if (dp == pci_device_to_OF_node(pdev))
break;
}
if (pdev) {
for (i = 0; i < 6 && address == 0; i++) {
if ((pci_resource_flags(pdev, i) &
IORESOURCE_MEM) &&
(pci_resource_len(pdev, i) >= rsize))
address = pci_resource_start(pdev, i);
}
pci_dev_put(pdev);
}
}
#endif /* CONFIG_PCI */
if (address == 0 &&
(up = (unsigned *) get_property(dp, "address", &len)) != NULL &&
len == sizeof(unsigned))
address = (u_long) * up;
else {
if (address == 0) {
for (i = 0; i < dp->n_addrs; ++i)
if (dp->addrs[i].size >=
pitch * height * depth / 8)
......
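The added offb code computes the framebuffer size from the mode (pitch * height * depth / 8) and then prefers a PCI memory BAR at least that large over the raw Open Firmware "address" property. A worked example of that size check, with made-up mode values and BAR length:

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Worked example of the size test added to offb_init_nodriver(): a BAR
 * is accepted only if it is a memory resource at least rsize bytes
 * long.  The mode values and the BAR length below are made up.
 */
int main(void)
{
	unsigned int width = 640, height = 480, depth = 8;
	unsigned int pitch = width;		/* no 0x1000 override in this case */
	unsigned long rsize = (unsigned long)pitch * height * (depth / 8);

	unsigned long bar_len = 8 * 1024 * 1024;	/* hypothetical 8 MB BAR */
	bool usable = bar_len >= rsize;

	/* rsize = 307200 bytes, so the 8 MB BAR qualifies */
	printf("rsize=%lu bytes, BAR %s\n", rsize, usable ? "usable" : "too small");
	return 0;
}
```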
......@@ -90,6 +90,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
#define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000)
#define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000)
#define CPU_FTR_BIG_PHYS ASM_CONST(0x0000000000080000)
#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000)
#ifdef __powerpc64__
/* Add the 64b processor unique features in the top half of the word */
......@@ -97,7 +98,6 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
#define CPU_FTR_16M_PAGE ASM_CONST(0x0000000200000000)
#define CPU_FTR_TLBIEL ASM_CONST(0x0000000400000000)
#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000800000000)
#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000001000000000)
#define CPU_FTR_IABR ASM_CONST(0x0000002000000000)
#define CPU_FTR_MMCRA ASM_CONST(0x0000004000000000)
#define CPU_FTR_CTRL ASM_CONST(0x0000008000000000)
......@@ -113,7 +113,6 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
#define CPU_FTR_16M_PAGE ASM_CONST(0x0)
#define CPU_FTR_TLBIEL ASM_CONST(0x0)
#define CPU_FTR_NOEXECUTE ASM_CONST(0x0)
#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0)
#define CPU_FTR_IABR ASM_CONST(0x0)
#define CPU_FTR_MMCRA ASM_CONST(0x0)
#define CPU_FTR_CTRL ASM_CONST(0x0)
......@@ -273,18 +272,21 @@ enum {
CPU_FTRS_POWER3_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
CPU_FTRS_POWER4_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_NODSISRALIGN,
CPU_FTRS_970_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_ALTIVEC_COMP |
CPU_FTR_MAYBE_CAN_NAP,
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN,
CPU_FTRS_8XX = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
CPU_FTRS_40X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
CPU_FTRS_44X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
CPU_FTRS_E200 = CPU_FTR_USE_TB,
CPU_FTRS_E500 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
CPU_FTRS_40X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
CPU_FTR_NODSISRALIGN,
CPU_FTRS_44X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
CPU_FTR_NODSISRALIGN,
CPU_FTRS_E200 = CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN,
CPU_FTRS_E500 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
CPU_FTR_NODSISRALIGN,
CPU_FTRS_E500_2 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
CPU_FTR_BIG_PHYS,
CPU_FTRS_GENERIC_32 = CPU_FTR_COMMON,
CPU_FTR_BIG_PHYS | CPU_FTR_NODSISRALIGN,
CPU_FTRS_GENERIC_32 = CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN,
#ifdef __powerpc64__
CPU_FTRS_POWER3 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
CPU_FTR_HPTE_TABLE | CPU_FTR_IABR,
......
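Moving CPU_FTR_NODSISRALIGN out of the 64-bit-only upper half and into the common low range lets the 32-bit CPU tables (40x, 44x, e200, e500, and so on) OR it into their feature masks, as the updated CPU_FTRS_* entries show. A sketch of how such a bit is consumed; the NODSISRALIGN value comes from the diff, the USE_TB value here is hypothetical:

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of CPU feature-bit usage: bits are OR-ed into a per-CPU
 * feature mask and later tested with a plain AND (cpu_has_feature()
 * in the kernel proper).
 */
#define CPU_FTR_USE_TB        0x0000000000000040ULL	/* hypothetical bit    */
#define CPU_FTR_NODSISRALIGN  0x0000000000100000ULL	/* value from the diff */

int main(void)
{
	uint64_t features = CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN;

	if (features & CPU_FTR_NODSISRALIGN)
		printf("alignment handler cannot rely on DSISR on this CPU\n");
	return 0;
}
```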
......@@ -13,43 +13,7 @@
* Anton Blanchard.
*/
extern unsigned long tb_ticks_per_usec;
#ifdef CONFIG_PPC64
/* define these here to prevent circular dependencies */
/* these instructions control the thread priority on multi-threaded cpus */
#define __HMT_low() asm volatile("or 1,1,1")
#define __HMT_medium() asm volatile("or 2,2,2")
#else
#define __HMT_low()
#define __HMT_medium()
#endif
#define __barrier() asm volatile("" ::: "memory")
static inline unsigned long __get_tb(void)
{
unsigned long rval;
asm volatile("mftb %0" : "=r" (rval));
return rval;
}
static inline void __delay(unsigned long loops)
{
unsigned long start = __get_tb();
while((__get_tb() - start) < loops)
__HMT_low();
__HMT_medium();
__barrier();
}
static inline void udelay(unsigned long usecs)
{
unsigned long loops = tb_ticks_per_usec * usecs;
__delay(loops);
}
extern void __delay(unsigned long loops);
extern void udelay(unsigned long usecs);
#endif /* _ASM_POWERPC_DELAY_H */
......@@ -30,6 +30,8 @@ struct device_node;
#ifdef CONFIG_EEH
extern int eeh_subsystem_enabled;
/* Values for eeh_mode bits in device_node */
#define EEH_MODE_SUPPORTED (1<<0)
#define EEH_MODE_NOCHECK (1<<1)
......@@ -75,7 +77,7 @@ void eeh_remove_device(struct pci_dev *);
* If this macro yields TRUE, the caller relays to eeh_check_failure()
* which does further tests out of line.
*/
#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0)
#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_subsystem_enabled)
/*
* Reads from a device which has been isolated by EEH will return
......
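Reads from a device that EEH has isolated return all-ones, so the fast-path macro compares the value against (type)~0 for the access width; the change additionally short-circuits the check when the EEH subsystem is disabled. A standalone illustration of what (type)~0 means for different widths, with eeh_enabled as a stand-in for eeh_subsystem_enabled:

```c
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustration of the EEH_POSSIBLE_ERROR() test: an isolated device
 * returns all-ones on reads, i.e. (type)~0 for the access width. */
static bool eeh_enabled = true;

#define POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_enabled)

int main(void)
{
	uint8_t  b  = 0xff;
	uint32_t w  = 0xffffffffu;
	uint32_t ok = 0x1234abcd;

	printf("%d %d %d\n",
	       POSSIBLE_ERROR(b, uint8_t),	/* 1: all-ones byte read */
	       POSSIBLE_ERROR(w, uint32_t),	/* 1: all-ones word read */
	       POSSIBLE_ERROR(ok, uint32_t));	/* 0: normal data        */
	return 0;
}
```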
......@@ -41,6 +41,10 @@ static inline int node_to_first_cpu(int node)
.cache_hot_time = (10*1000000), \
.cache_nice_tries = 1, \
.per_cpu_gain = 100, \
.busy_idx = 3, \
.idle_idx = 1, \
.newidle_idx = 2, \
.wake_idx = 1, \
.flags = SD_LOAD_BALANCE \
| SD_BALANCE_EXEC \
| SD_BALANCE_NEWIDLE \
......