Commit 8d6cea51 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/vapier/blackfin

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/vapier/blackfin: (27 commits)
  Blackfin: hook up new rt_tgsigqueueinfo syscall
  Blackfin: improve CLKIN_HZ config default
  Blackfin: initial support for ftrace grapher
  Blackfin: initial support for ftrace
  Blackfin: enable support for LOCKDEP
  Blackfin: add preliminary support for STACKTRACE
  Blackfin: move custom sections into sections.h
  Blackfin: punt unused/wrong mutex-dec.h
  Blackfin: add support for irqflags
  Blackfin: add support for bzip2/lzma compressed kernel images
  Blackfin: convert Kconfig style to def_bool
  Blackfin: bf548-ezkit: update smsc911x resources
  Blackfin: update aedos-ipipe code to upstream 1.10-00
  Blackfin: bf537-stamp: update ADP5520 resources
  Blackfin: bf518f-ezbrd: fix SPI CS for SPI flash
  Blackfin: define SPI IRQ in board resources
  Blackfin: do not configure the UART early if on wrong processor
  Blackfin: fix deadlock in SMP IPI handler
  Blackfin: fix flag storage for irq funcs
  Blackfin: push down exception oops checking
  ...
parents 6a454f71 61cdd7a2
...@@ -6,59 +6,65 @@ ...@@ -6,59 +6,65 @@
mainmenu "Blackfin Kernel Configuration" mainmenu "Blackfin Kernel Configuration"
config MMU config MMU
bool def_bool n
default n
config FPU config FPU
bool def_bool n
default n
config RWSEM_GENERIC_SPINLOCK config RWSEM_GENERIC_SPINLOCK
bool def_bool y
default y
config RWSEM_XCHGADD_ALGORITHM config RWSEM_XCHGADD_ALGORITHM
bool def_bool n
default n
config BLACKFIN config BLACKFIN
bool def_bool y
default y select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_IDE select HAVE_IDE
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_LZMA
select HAVE_OPROFILE select HAVE_OPROFILE
select ARCH_WANT_OPTIONAL_GPIOLIB select ARCH_WANT_OPTIONAL_GPIOLIB
config GENERIC_BUG
def_bool y
depends on BUG
config ZONE_DMA config ZONE_DMA
bool def_bool y
default y
config GENERIC_FIND_NEXT_BIT config GENERIC_FIND_NEXT_BIT
bool def_bool y
default y
config GENERIC_HWEIGHT config GENERIC_HWEIGHT
bool def_bool y
default y
config GENERIC_HARDIRQS config GENERIC_HARDIRQS
bool def_bool y
default y
config GENERIC_IRQ_PROBE config GENERIC_IRQ_PROBE
bool def_bool y
default y
config GENERIC_GPIO config GENERIC_GPIO
bool def_bool y
default y
config FORCE_MAX_ZONEORDER config FORCE_MAX_ZONEORDER
int int
default "14" default "14"
config GENERIC_CALIBRATE_DELAY config GENERIC_CALIBRATE_DELAY
bool def_bool y
default y
config LOCKDEP_SUPPORT
def_bool y
config STACKTRACE_SUPPORT
def_bool y
config TRACE_IRQFLAGS_SUPPORT
def_bool y
source "init/Kconfig" source "init/Kconfig"
...@@ -408,12 +414,12 @@ comment "Clock/PLL Setup" ...@@ -408,12 +414,12 @@ comment "Clock/PLL Setup"
config CLKIN_HZ config CLKIN_HZ
int "Frequency of the crystal on the board in Hz" int "Frequency of the crystal on the board in Hz"
default "10000000" if BFIN532_IP0X
default "11059200" if BFIN533_STAMP default "11059200" if BFIN533_STAMP
default "24576000" if PNAV10
default "25000000" # most people use this
default "27000000" if BFIN533_EZKIT default "27000000" if BFIN533_EZKIT
default "25000000" if (BFIN537_STAMP || BFIN527_EZKIT || H8606_HVSISTEMAS || BLACKSTAMP || BFIN526_EZBRD || BFIN538_EZKIT || BFIN518F-EZBRD)
default "30000000" if BFIN561_EZKIT default "30000000" if BFIN561_EZKIT
default "24576000" if PNAV10
default "10000000" if BFIN532_IP0X
help help
The frequency of CLKIN crystal oscillator on the board in Hz. The frequency of CLKIN crystal oscillator on the board in Hz.
Warning: This value should match the crystal on the board. Otherwise, Warning: This value should match the crystal on the board. Otherwise,
......
...@@ -137,7 +137,7 @@ archclean: ...@@ -137,7 +137,7 @@ archclean:
INSTALL_PATH ?= /tftpboot INSTALL_PATH ?= /tftpboot
boot := arch/$(ARCH)/boot boot := arch/$(ARCH)/boot
BOOT_TARGETS = vmImage BOOT_TARGETS = vmImage vmImage.bz2 vmImage.gz vmImage.lzma
PHONY += $(BOOT_TARGETS) install PHONY += $(BOOT_TARGETS) install
KBUILD_IMAGE := $(boot)/vmImage KBUILD_IMAGE := $(boot)/vmImage
...@@ -150,7 +150,10 @@ install: ...@@ -150,7 +150,10 @@ install:
$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install
define archhelp define archhelp
echo '* vmImage - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage)' echo '* vmImage - Alias to selected kernel format (vmImage.gz by default)'
echo ' vmImage.bz2 - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.bz2)'
echo '* vmImage.gz - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.gz)'
echo ' vmImage.lzma - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.lzma)'
echo ' install - Install kernel using' echo ' install - Install kernel using'
echo ' (your) ~/bin/$(CROSS_COMPILE)installkernel or' echo ' (your) ~/bin/$(CROSS_COMPILE)installkernel or'
echo ' (distribution) PATH: $(CROSS_COMPILE)installkernel or' echo ' (distribution) PATH: $(CROSS_COMPILE)installkernel or'
......
...@@ -8,24 +8,41 @@ ...@@ -8,24 +8,41 @@
MKIMAGE := $(srctree)/scripts/mkuboot.sh MKIMAGE := $(srctree)/scripts/mkuboot.sh
targets := vmImage targets := vmImage vmImage.bz2 vmImage.gz vmImage.lzma
extra-y += vmlinux.bin vmlinux.gz extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma
quiet_cmd_uimage = UIMAGE $@ quiet_cmd_uimage = UIMAGE $@
cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(ARCH) -O linux -T kernel \ cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(ARCH) -O linux -T kernel \
-C gzip -n 'Linux-$(KERNELRELEASE)' -a $(CONFIG_BOOT_LOAD) \ -C $(2) -n 'Linux-$(KERNELRELEASE)' -a $(CONFIG_BOOT_LOAD) \
-e $(shell $(NM) vmlinux | awk '$$NF == "__start" {print $$1}') \ -e $(shell $(NM) vmlinux | awk '$$NF == "__start" {print $$1}') \
-d $< $@ -d $< $@
$(obj)/vmlinux.bin: vmlinux FORCE $(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy) $(call if_changed,objcopy)
$(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
$(call if_changed,gzip) $(call if_changed,gzip)
$(obj)/vmImage: $(obj)/vmlinux.gz $(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
$(call if_changed,uimage) $(call if_changed,bzip2)
@$(kecho) 'Kernel: $@ is ready'
$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
$(call if_changed,lzma)
$(obj)/vmImage.bz2: $(obj)/vmlinux.bin.bz2
$(call if_changed,uimage,bzip2)
$(obj)/vmImage.gz: $(obj)/vmlinux.bin.gz
$(call if_changed,uimage,gzip)
$(obj)/vmImage.lzma: $(obj)/vmlinux.bin.lzma
$(call if_changed,uimage,lzma)
suffix-$(CONFIG_KERNEL_GZIP) := gz
suffix-$(CONFIG_KERNEL_BZIP2) := bz2
suffix-$(CONFIG_KERNEL_LZMA) := lzma
$(obj)/vmImage: $(obj)/vmImage.$(suffix-y)
@ln -sf $(notdir $<) $@
install: install:
sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)" sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)"
...@@ -90,7 +90,7 @@ static inline int atomic_test_mask(int mask, atomic_t *v) ...@@ -90,7 +90,7 @@ static inline int atomic_test_mask(int mask, atomic_t *v)
static inline void atomic_add(int i, atomic_t *v) static inline void atomic_add(int i, atomic_t *v)
{ {
long flags; unsigned long flags;
local_irq_save_hw(flags); local_irq_save_hw(flags);
v->counter += i; v->counter += i;
...@@ -99,7 +99,7 @@ static inline void atomic_add(int i, atomic_t *v) ...@@ -99,7 +99,7 @@ static inline void atomic_add(int i, atomic_t *v)
static inline void atomic_sub(int i, atomic_t *v) static inline void atomic_sub(int i, atomic_t *v)
{ {
long flags; unsigned long flags;
local_irq_save_hw(flags); local_irq_save_hw(flags);
v->counter -= i; v->counter -= i;
...@@ -110,7 +110,7 @@ static inline void atomic_sub(int i, atomic_t *v) ...@@ -110,7 +110,7 @@ static inline void atomic_sub(int i, atomic_t *v)
static inline int atomic_add_return(int i, atomic_t *v) static inline int atomic_add_return(int i, atomic_t *v)
{ {
int __temp = 0; int __temp = 0;
long flags; unsigned long flags;
local_irq_save_hw(flags); local_irq_save_hw(flags);
v->counter += i; v->counter += i;
...@@ -124,7 +124,7 @@ static inline int atomic_add_return(int i, atomic_t *v) ...@@ -124,7 +124,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
static inline int atomic_sub_return(int i, atomic_t *v) static inline int atomic_sub_return(int i, atomic_t *v)
{ {
int __temp = 0; int __temp = 0;
long flags; unsigned long flags;
local_irq_save_hw(flags); local_irq_save_hw(flags);
v->counter -= i; v->counter -= i;
...@@ -136,7 +136,7 @@ static inline int atomic_sub_return(int i, atomic_t *v) ...@@ -136,7 +136,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
static inline void atomic_inc(volatile atomic_t *v) static inline void atomic_inc(volatile atomic_t *v)
{ {
long flags; unsigned long flags;
local_irq_save_hw(flags); local_irq_save_hw(flags);
v->counter++; v->counter++;
...@@ -145,7 +145,7 @@ static inline void atomic_inc(volatile atomic_t *v) ...@@ -145,7 +145,7 @@ static inline void atomic_inc(volatile atomic_t *v)
static inline void atomic_dec(volatile atomic_t *v) static inline void atomic_dec(volatile atomic_t *v)
{ {
long flags; unsigned long flags;
local_irq_save_hw(flags); local_irq_save_hw(flags);
v->counter--; v->counter--;
...@@ -154,7 +154,7 @@ static inline void atomic_dec(volatile atomic_t *v) ...@@ -154,7 +154,7 @@ static inline void atomic_dec(volatile atomic_t *v)
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{ {
long flags; unsigned long flags;
local_irq_save_hw(flags); local_irq_save_hw(flags);
v->counter &= ~mask; v->counter &= ~mask;
...@@ -163,7 +163,7 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) ...@@ -163,7 +163,7 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
static inline void atomic_set_mask(unsigned int mask, atomic_t *v) static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{ {
long flags; unsigned long flags;
local_irq_save_hw(flags); local_irq_save_hw(flags);
v->counter |= mask; v->counter |= mask;
......
...@@ -31,7 +31,7 @@ ...@@ -31,7 +31,7 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <asm-generic/sections.h> #include <asm/sections.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/user.h> #include <asm/user.h>
#include <linux/linkage.h> #include <linux/linkage.h>
...@@ -99,15 +99,6 @@ extern const char bfin_board_name[]; ...@@ -99,15 +99,6 @@ extern const char bfin_board_name[];
extern unsigned long bfin_sic_iwr[]; extern unsigned long bfin_sic_iwr[];
extern unsigned vr_wakeup; extern unsigned vr_wakeup;
extern u16 _bfin_swrst; /* shadow for Software Reset Register (SWRST) */ extern u16 _bfin_swrst; /* shadow for Software Reset Register (SWRST) */
extern unsigned long _ramstart, _ramend, _rambase;
extern unsigned long memory_start, memory_end, physical_mem_end;
extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[],
_ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _sbss_b_l1[], _ebss_b_l1[],
_stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], _sbss_l2[],
_ebss_l2[], _l2_lma_start[];
/* only used when MTD_UCLINUX */
extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size;
#ifdef CONFIG_BFIN_ICACHE_LOCK #ifdef CONFIG_BFIN_ICACHE_LOCK
extern void cache_grab_lock(int way); extern void cache_grab_lock(int way);
......
...@@ -109,7 +109,8 @@ static inline void clear_bit(int nr, volatile unsigned long *addr) ...@@ -109,7 +109,8 @@ static inline void clear_bit(int nr, volatile unsigned long *addr)
static inline void change_bit(int nr, volatile unsigned long *addr) static inline void change_bit(int nr, volatile unsigned long *addr)
{ {
int mask, flags; int mask;
unsigned long flags;
unsigned long *ADDR = (unsigned long *)addr; unsigned long *ADDR = (unsigned long *)addr;
ADDR += nr >> 5; ADDR += nr >> 5;
......
...@@ -2,13 +2,58 @@ ...@@ -2,13 +2,58 @@
#define _BLACKFIN_BUG_H #define _BLACKFIN_BUG_H
#ifdef CONFIG_BUG #ifdef CONFIG_BUG
#define HAVE_ARCH_BUG
#define BUG() do { \ #define BFIN_BUG_OPCODE 0xefcd
dump_bfin_trace_buffer(); \
printk(KERN_EMERG "BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \ #ifdef CONFIG_DEBUG_BUGVERBOSE
panic("BUG!"); \
} while (0) #define _BUG_OR_WARN(flags) \
asm volatile( \
"1: .hword %0\n" \
" .section __bug_table,\"a\",@progbits\n" \
"2: .long 1b\n" \
" .long %1\n" \
" .short %2\n" \
" .short %3\n" \
" .org 2b + %4\n" \
" .previous" \
: \
: "i"(BFIN_BUG_OPCODE), "i"(__FILE__), \
"i"(__LINE__), "i"(flags), \
"i"(sizeof(struct bug_entry)))
#else
#define _BUG_OR_WARN(flags) \
asm volatile( \
"1: .hword %0\n" \
" .section __bug_table,\"a\",@progbits\n" \
"2: .long 1b\n" \
" .short %1\n" \
" .org 2b + %2\n" \
" .previous" \
: \
: "i"(BFIN_BUG_OPCODE), "i"(flags), \
"i"(sizeof(struct bug_entry)))
#endif /* CONFIG_DEBUG_BUGVERBOSE */
#define BUG() \
do { \
_BUG_OR_WARN(0); \
for (;;); \
} while (0)
#define WARN_ON(condition) \
({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
_BUG_OR_WARN(BUGFLAG_WARNING); \
unlikely(__ret_warn_on); \
})
#define HAVE_ARCH_BUG
#define HAVE_ARCH_WARN_ON
#endif #endif
......
...@@ -34,9 +34,13 @@ ...@@ -34,9 +34,13 @@
#define L1_CACHE_SHIFT_MAX 5 #define L1_CACHE_SHIFT_MAX 5
#if defined(CONFIG_SMP) && \ #if defined(CONFIG_SMP) && \
!defined(CONFIG_BFIN_CACHE_COHERENT) && \ !defined(CONFIG_BFIN_CACHE_COHERENT)
defined(CONFIG_BFIN_DCACHE) # if defined(CONFIG_BFIN_ICACHE)
#define __ARCH_SYNC_CORE_DCACHE # define __ARCH_SYNC_CORE_ICACHE
# endif
# if defined(CONFIG_BFIN_DCACHE)
# define __ARCH_SYNC_CORE_DCACHE
# endif
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
asmlinkage void __raw_smp_mark_barrier_asm(void); asmlinkage void __raw_smp_mark_barrier_asm(void);
asmlinkage void __raw_smp_check_barrier_asm(void); asmlinkage void __raw_smp_check_barrier_asm(void);
...@@ -51,6 +55,7 @@ static inline void smp_check_barrier(void) ...@@ -51,6 +55,7 @@ static inline void smp_check_barrier(void)
} }
void resync_core_dcache(void); void resync_core_dcache(void);
void resync_core_icache(void);
#endif #endif
#endif #endif
......
...@@ -37,6 +37,7 @@ extern void blackfin_dcache_flush_range(unsigned long start_address, unsigned lo ...@@ -37,6 +37,7 @@ extern void blackfin_dcache_flush_range(unsigned long start_address, unsigned lo
extern void blackfin_dcache_invalidate_range(unsigned long start_address, unsigned long end_address); extern void blackfin_dcache_invalidate_range(unsigned long start_address, unsigned long end_address);
extern void blackfin_dflush_page(void *page); extern void blackfin_dflush_page(void *page);
extern void blackfin_invalidate_entire_dcache(void); extern void blackfin_invalidate_entire_dcache(void);
extern void blackfin_invalidate_entire_icache(void);
#define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0)
...@@ -97,7 +98,7 @@ do { memcpy(dst, src, len); \ ...@@ -97,7 +98,7 @@ do { memcpy(dst, src, len); \
extern unsigned long reserved_mem_dcache_on; extern unsigned long reserved_mem_dcache_on;
extern unsigned long reserved_mem_icache_on; extern unsigned long reserved_mem_icache_on;
static inline int bfin_addr_dcachable(unsigned long addr) static inline int bfin_addr_dcacheable(unsigned long addr)
{ {
#ifdef CONFIG_BFIN_DCACHE #ifdef CONFIG_BFIN_DCACHE
if (addr < (_ramend - DMA_UNCACHED_REGION)) if (addr < (_ramend - DMA_UNCACHED_REGION))
......
...@@ -34,6 +34,7 @@ struct blackfin_cpudata { ...@@ -34,6 +34,7 @@ struct blackfin_cpudata {
unsigned int dmemctl; unsigned int dmemctl;
unsigned long loops_per_jiffy; unsigned long loops_per_jiffy;
unsigned long dcache_invld_count; unsigned long dcache_invld_count;
unsigned long icache_invld_count;
}; };
DECLARE_PER_CPU(struct blackfin_cpudata, cpu_data); DECLARE_PER_CPU(struct blackfin_cpudata, cpu_data);
......
/* empty */ /*
* Blackfin ftrace code
*
* Copyright 2009 Analog Devices Inc.
* Licensed under the GPL-2 or later.
*/
#ifndef __ASM_BFIN_FTRACE_H__
#define __ASM_BFIN_FTRACE_H__
#define MCOUNT_INSN_SIZE 8 /* sizeof mcount call: LINK + CALL */
#endif
...@@ -35,10 +35,10 @@ ...@@ -35,10 +35,10 @@
#include <asm/atomic.h> #include <asm/atomic.h>
#include <asm/traps.h> #include <asm/traps.h>
#define IPIPE_ARCH_STRING "1.9-01" #define IPIPE_ARCH_STRING "1.10-00"
#define IPIPE_MAJOR_NUMBER 1 #define IPIPE_MAJOR_NUMBER 1
#define IPIPE_MINOR_NUMBER 9 #define IPIPE_MINOR_NUMBER 10
#define IPIPE_PATCH_NUMBER 1 #define IPIPE_PATCH_NUMBER 0
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
#error "I-pipe/blackfin: SMP not implemented" #error "I-pipe/blackfin: SMP not implemented"
...@@ -54,10 +54,11 @@ do { \ ...@@ -54,10 +54,11 @@ do { \
#define task_hijacked(p) \ #define task_hijacked(p) \
({ \ ({ \
int __x__ = ipipe_current_domain != ipipe_root_domain; \ int __x__ = __ipipe_root_domain_p; \
/* We would need to clear the SYNC flag for the root domain */ \ __clear_bit(IPIPE_SYNC_FLAG, &ipipe_root_cpudom_var(status)); \
/* over the current processor in SMP mode. */ \ if (__x__) \
local_irq_enable_hw(); __x__; \ local_irq_enable_hw(); \
!__x__; \
}) })
struct ipipe_domain; struct ipipe_domain;
...@@ -179,23 +180,24 @@ static inline unsigned long __ipipe_ffnz(unsigned long ul) ...@@ -179,23 +180,24 @@ static inline unsigned long __ipipe_ffnz(unsigned long ul)
#define __ipipe_run_isr(ipd, irq) \ #define __ipipe_run_isr(ipd, irq) \
do { \ do { \
if (ipd == ipipe_root_domain) { \ if (!__ipipe_pipeline_head_p(ipd)) \
local_irq_enable_hw(); \ local_irq_enable_hw(); \
if (ipipe_virtual_irq_p(irq)) \ if (ipd == ipipe_root_domain) { \
if (unlikely(ipipe_virtual_irq_p(irq))) { \
irq_enter(); \
ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \ ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
else \ irq_exit(); \
} else \
ipd->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs)); \ ipd->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs)); \
local_irq_disable_hw(); \
} else { \ } else { \
__clear_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \ __clear_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
local_irq_enable_nohead(ipd); \
ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \ ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
/* Attempt to exit the outer interrupt level before \ /* Attempt to exit the outer interrupt level before \
* starting the deferred IRQ processing. */ \ * starting the deferred IRQ processing. */ \
local_irq_disable_nohead(ipd); \
__ipipe_run_irqtail(); \ __ipipe_run_irqtail(); \
__set_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \ __set_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
} \ } \
local_irq_disable_hw(); \
} while (0) } while (0)
#define __ipipe_syscall_watched_p(p, sc) \ #define __ipipe_syscall_watched_p(p, sc) \
......
...@@ -17,270 +17,17 @@ ...@@ -17,270 +17,17 @@
#ifndef _BFIN_IRQ_H_ #ifndef _BFIN_IRQ_H_
#define _BFIN_IRQ_H_ #define _BFIN_IRQ_H_
/* SYS_IRQS and NR_IRQS are defined in <mach-bf5xx/irq.h>*/ #include <linux/irqflags.h>
#include <mach/irq.h>
#include <asm/pda.h>
#include <asm/processor.h>
#ifdef CONFIG_SMP
/* Forward decl needed due to cdef inter dependencies */
static inline uint32_t __pure bfin_dspid(void);
# define blackfin_core_id() (bfin_dspid() & 0xff)
# define bfin_irq_flags cpu_pda[blackfin_core_id()].imask
#else
extern unsigned long bfin_irq_flags;
#endif
#ifdef CONFIG_IPIPE
#include <linux/ipipe_trace.h>
void __ipipe_unstall_root(void); /* SYS_IRQS and NR_IRQS are defined in <mach-bf5xx/irq.h> */
#include <mach/irq.h>
void __ipipe_restore_root(unsigned long flags);
#ifdef CONFIG_DEBUG_HWERR
# define __all_masked_irq_flags 0x3f
# define __save_and_cli_hw(x) \
__asm__ __volatile__( \
"cli %0;" \
"sti %1;" \
: "=&d"(x) \
: "d" (0x3F) \
)
#else
# define __all_masked_irq_flags 0x1f
# define __save_and_cli_hw(x) \
__asm__ __volatile__( \
"cli %0;" \
: "=&d"(x) \
)
#endif
#define irqs_enabled_from_flags_hw(x) ((x) != __all_masked_irq_flags)
#define raw_irqs_disabled_flags(flags) (!irqs_enabled_from_flags_hw(flags))
#define local_test_iflag_hw(x) irqs_enabled_from_flags_hw(x)
#define local_save_flags(x) \
do { \
(x) = __ipipe_test_root() ? \
__all_masked_irq_flags : bfin_irq_flags; \
barrier(); \
} while (0)
#define local_irq_save(x) \
do { \
(x) = __ipipe_test_and_stall_root() ? \
__all_masked_irq_flags : bfin_irq_flags; \
barrier(); \
} while (0)
static inline void local_irq_restore(unsigned long x)
{
barrier();
__ipipe_restore_root(x == __all_masked_irq_flags);
}
#define local_irq_disable() \
do { \
__ipipe_stall_root(); \
barrier(); \
} while (0)
static inline void local_irq_enable(void)
{
barrier();
__ipipe_unstall_root();
}
#define irqs_disabled() __ipipe_test_root()
#define local_save_flags_hw(x) \
__asm__ __volatile__( \
"cli %0;" \
"sti %0;" \
: "=d"(x) \
)
#define irqs_disabled_hw() \
({ \
unsigned long flags; \
local_save_flags_hw(flags); \
!irqs_enabled_from_flags_hw(flags); \
})
static inline unsigned long raw_mangle_irq_bits(int virt, unsigned long real)
{
/* Merge virtual and real interrupt mask bits into a single
32bit word. */
return (real & ~(1 << 31)) | ((virt != 0) << 31);
}
static inline int raw_demangle_irq_bits(unsigned long *x)
{
int virt = (*x & (1 << 31)) != 0;
*x &= ~(1L << 31);
return virt;
}
#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
#define local_irq_disable_hw() \
do { \
int _tmp_dummy; \
if (!irqs_disabled_hw()) \
ipipe_trace_begin(0x80000000); \
__asm__ __volatile__ ("cli %0;" : "=d" (_tmp_dummy) : ); \
} while (0)
#define local_irq_enable_hw() \
do { \
if (irqs_disabled_hw()) \
ipipe_trace_end(0x80000000); \
__asm__ __volatile__ ("sti %0;" : : "d"(bfin_irq_flags)); \
} while (0)
#define local_irq_save_hw(x) \
do { \
__save_and_cli_hw(x); \
if (local_test_iflag_hw(x)) \
ipipe_trace_begin(0x80000001); \
} while (0)
#define local_irq_restore_hw(x) \
do { \
if (local_test_iflag_hw(x)) { \
ipipe_trace_end(0x80000001); \
local_irq_enable_hw_notrace(); \
} \
} while (0)
#define local_irq_disable_hw_notrace() \
do { \
int _tmp_dummy; \
__asm__ __volatile__ ("cli %0;" : "=d" (_tmp_dummy) : ); \
} while (0)
#define local_irq_enable_hw_notrace() \
__asm__ __volatile__( \
"sti %0;" \
: \
: "d"(bfin_irq_flags) \
)
#define local_irq_save_hw_notrace(x) __save_and_cli_hw(x)
#define local_irq_restore_hw_notrace(x) \
do { \
if (local_test_iflag_hw(x)) \
local_irq_enable_hw_notrace(); \
} while (0)
#else /* CONFIG_IPIPE_TRACE_IRQSOFF */
#define local_irq_enable_hw() \
__asm__ __volatile__( \
"sti %0;" \
: \
: "d"(bfin_irq_flags) \
)
#define local_irq_disable_hw() \
do { \
int _tmp_dummy; \
__asm__ __volatile__ ( \
"cli %0;" \
: "=d" (_tmp_dummy)); \
} while (0)
#define local_irq_restore_hw(x) \
do { \
if (irqs_enabled_from_flags_hw(x)) \
local_irq_enable_hw(); \
} while (0)
#define local_irq_save_hw(x) __save_and_cli_hw(x)
#define local_irq_disable_hw_notrace() local_irq_disable_hw()
#define local_irq_enable_hw_notrace() local_irq_enable_hw()
#define local_irq_save_hw_notrace(x) local_irq_save_hw(x)
#define local_irq_restore_hw_notrace(x) local_irq_restore_hw(x)
#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */
#else /* !CONFIG_IPIPE */
/*
* Interrupt configuring macros.
*/
#define local_irq_disable() \
do { \
int __tmp_dummy; \
__asm__ __volatile__( \
"cli %0;" \
: "=d" (__tmp_dummy) \
); \
} while (0)
#define local_irq_enable() \
__asm__ __volatile__( \
"sti %0;" \
: \
: "d" (bfin_irq_flags) \
)
#ifdef CONFIG_DEBUG_HWERR
# define __save_and_cli(x) \
__asm__ __volatile__( \
"cli %0;" \
"sti %1;" \
: "=&d" (x) \
: "d" (0x3F) \
)
#else
# define __save_and_cli(x) \
__asm__ __volatile__( \
"cli %0;" \
: "=&d" (x) \
)
#endif
#define local_save_flags(x) \
__asm__ __volatile__( \
"cli %0;" \
"sti %0;" \
: "=d" (x) \
)
#ifdef CONFIG_DEBUG_HWERR
#define irqs_enabled_from_flags(x) (((x) & ~0x3f) != 0)
#else
#define irqs_enabled_from_flags(x) ((x) != 0x1f)
#endif
#define local_irq_restore(x) \
do { \
if (irqs_enabled_from_flags(x)) \
local_irq_enable(); \
} while (0)
/* For spinlocks etc */
#define local_irq_save(x) __save_and_cli(x)
#define irqs_disabled() \
({ \
unsigned long flags; \
local_save_flags(flags); \
!irqs_enabled_from_flags(flags); \
})
#define local_irq_save_hw(x) local_irq_save(x)
#define local_irq_restore_hw(x) local_irq_restore(x)
#define local_irq_enable_hw() local_irq_enable()
#define local_irq_disable_hw() local_irq_disable()
#define irqs_disabled_hw() irqs_disabled()
#endif /* !CONFIG_IPIPE */ /* Xenomai IPIPE helpers */
#define local_irq_restore_hw(x) local_irq_restore(x)
#define local_irq_save_hw(x) local_irq_save(x)
#define local_irq_enable_hw(x) local_irq_enable(x)
#define local_irq_disable_hw(x) local_irq_disable(x)
#define irqs_disabled_hw(x) irqs_disabled(x)
#if ANOMALY_05000244 && defined(CONFIG_BFIN_ICACHE) #if ANOMALY_05000244 && defined(CONFIG_BFIN_ICACHE)
# define NOP_PAD_ANOMALY_05000244 "nop; nop;" # define NOP_PAD_ANOMALY_05000244 "nop; nop;"
......
/*
* interface to Blackfin CEC
*
* Copyright 2009 Analog Devices Inc.
* Licensed under the GPL-2 or later.
*/
#ifndef __ASM_BFIN_IRQFLAGS_H__
#define __ASM_BFIN_IRQFLAGS_H__
#ifdef CONFIG_SMP
# include <asm/pda.h>
# include <asm/processor.h>
/* Forward decl needed due to cdef inter dependencies */
static inline uint32_t __pure bfin_dspid(void);
# define blackfin_core_id() (bfin_dspid() & 0xff)
# define bfin_irq_flags cpu_pda[blackfin_core_id()].imask
#else
extern unsigned long bfin_irq_flags;
#endif
static inline void bfin_sti(unsigned long flags)
{
asm volatile("sti %0;" : : "d" (flags));
}
static inline unsigned long bfin_cli(void)
{
unsigned long flags;
asm volatile("cli %0;" : "=d" (flags));
return flags;
}
static inline void raw_local_irq_disable(void)
{
bfin_cli();
}
static inline void raw_local_irq_enable(void)
{
bfin_sti(bfin_irq_flags);
}
#define raw_local_save_flags(flags) do { (flags) = bfin_read_IMASK(); } while (0)
#define raw_irqs_disabled_flags(flags) (((flags) & ~0x3f) == 0)
static inline void raw_local_irq_restore(unsigned long flags)
{
if (!raw_irqs_disabled_flags(flags))
raw_local_irq_enable();
}
static inline unsigned long __raw_local_irq_save(void)
{
unsigned long flags = bfin_cli();
#ifdef CONFIG_DEBUG_HWERR
bfin_sti(0x3f);
#endif
return flags;
}
#define raw_local_irq_save(flags) do { (flags) = __raw_local_irq_save(); } while (0)
#endif
/*
* include/asm-generic/mutex-dec.h
*
* Generic implementation of the mutex fastpath, based on atomic
* decrement/increment.
*/
#ifndef _ASM_GENERIC_MUTEX_DEC_H
#define _ASM_GENERIC_MUTEX_DEC_H
/**
* __mutex_fastpath_lock - try to take the lock by moving the count
* from 1 to a 0 value
* @count: pointer of type atomic_t
* @fail_fn: function to call if the original value was not 1
*
* Change the count from 1 to a value lower than 1, and call <fail_fn> if
* it wasn't 1 originally. This function MUST leave the value lower than
* 1 even when the "1" assertion wasn't true.
*/
static inline void
__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_dec_return(count) < 0))
fail_fn(count);
else
smp_mb();
}
/**
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
* from 1 to a 0 value
* @count: pointer of type atomic_t
* @fail_fn: function to call if the original value was not 1
*
* Change the count from 1 to a value lower than 1, and call <fail_fn> if
* it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
* or anything the slow path function returns.
*/
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_dec_return(count) < 0))
return fail_fn(count);
else {
smp_mb();
return 0;
}
}
/**
* __mutex_fastpath_unlock - try to promote the count from 0 to 1
* @count: pointer of type atomic_t
* @fail_fn: function to call if the original value was not 0
*
* Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
* In the failure case, this function is allowed to either set the value to
* 1, or to set it to a value lower than 1.
*
* If the implementation sets it to a value of lower than 1, then the
* __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
* to return 0 otherwise.
*/
static inline void
__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
{
smp_mb();
if (unlikely(atomic_inc_return(count) <= 0))
fail_fn(count);
}
#define __mutex_slowpath_needs_to_unlock() 1
/**
* __mutex_fastpath_trylock - try to acquire the mutex, without waiting
*
* @count: pointer of type atomic_t
* @fail_fn: fallback function
*
* Change the count from 1 to a value lower than 1, and return 0 (failure)
* if it wasn't 1 originally, or return 1 (success) otherwise. This function
* MUST leave the value lower than 1 even when the "1" assertion wasn't true.
* Additionally, if the value was < 0 originally, this function must not leave
* it to 0 on failure.
*
* If the architecture has no effective trylock variant, it should call the
* <fail_fn> spinlock-based trylock variant unconditionally.
*/
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
/*
* We have two variants here. The cmpxchg based one is the best one
* because it never induce a false contention state. It is included
* here because architectures using the inc/dec algorithms over the
* xchg ones are much more likely to support cmpxchg natively.
*
* If not we fall back to the spinlock based variant - that is
* just as efficient (and simpler) as a 'destructive' probing of
* the mutex state would be.
*/
#ifdef __HAVE_ARCH_CMPXCHG
if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
smp_mb();
return 1;
}
return 0;
#else
return fail_fn(count);
#endif
}
#endif
...@@ -4,4 +4,15 @@ ...@@ -4,4 +4,15 @@
/* nothing to see, move along */ /* nothing to see, move along */
#include <asm-generic/sections.h> #include <asm-generic/sections.h>
/* only used when MTD_UCLINUX */
extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size;
extern unsigned long _ramstart, _ramend, _rambase;
extern unsigned long memory_start, memory_end, physical_mem_end;
extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[],
_ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _sbss_b_l1[], _ebss_b_l1[],
_stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], _sbss_l2[],
_ebss_l2[], _l2_lma_start[];
#endif #endif
...@@ -35,10 +35,10 @@ ...@@ -35,10 +35,10 @@
#define _BLACKFIN_SYSTEM_H #define _BLACKFIN_SYSTEM_H
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/compiler.h> #include <linux/irqflags.h>
#include <mach/anomaly.h> #include <mach/anomaly.h>
#include <asm/cache.h>
#include <asm/pda.h> #include <asm/pda.h>
#include <asm/processor.h>
#include <asm/irq.h> #include <asm/irq.h>
/* /*
......
...@@ -380,8 +380,9 @@ ...@@ -380,8 +380,9 @@
#define __NR_inotify_init1 365 #define __NR_inotify_init1 365
#define __NR_preadv 366 #define __NR_preadv 366
#define __NR_pwritev 367 #define __NR_pwritev 367
#define __NR_rt_tgsigqueueinfo 368
#define __NR_syscall 368 #define __NR_syscall 369
#define NR_syscalls __NR_syscall #define NR_syscalls __NR_syscall
/* Old optional stuff no one actually uses */ /* Old optional stuff no one actually uses */
......
...@@ -15,6 +15,10 @@ else ...@@ -15,6 +15,10 @@ else
obj-y += time.o obj-y += time.o
endif endif
obj-$(CONFIG_FUNCTION_TRACER) += ftrace-entry.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
CFLAGS_REMOVE_ftrace.o = -pg
obj-$(CONFIG_IPIPE) += ipipe.o obj-$(CONFIG_IPIPE) += ipipe.o
obj-$(CONFIG_IPIPE_TRACE_MCOUNT) += mcount.o obj-$(CONFIG_IPIPE_TRACE_MCOUNT) += mcount.o
obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o
...@@ -23,6 +27,7 @@ obj-$(CONFIG_MODULES) += module.o ...@@ -23,6 +27,7 @@ obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_KGDB_TESTS) += kgdb_test.o obj-$(CONFIG_KGDB_TESTS) += kgdb_test.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
# the kgdb test puts code into L2 and without linker # the kgdb test puts code into L2 and without linker
# relaxation, we need to force long calls to/from it # relaxation, we need to force long calls to/from it
......
...@@ -453,10 +453,10 @@ void *dma_memcpy(void *pdst, const void *psrc, size_t size) ...@@ -453,10 +453,10 @@ void *dma_memcpy(void *pdst, const void *psrc, size_t size)
unsigned long src = (unsigned long)psrc; unsigned long src = (unsigned long)psrc;
size_t bulk, rest; size_t bulk, rest;
if (bfin_addr_dcachable(src)) if (bfin_addr_dcacheable(src))
blackfin_dcache_flush_range(src, src + size); blackfin_dcache_flush_range(src, src + size);
if (bfin_addr_dcachable(dst)) if (bfin_addr_dcacheable(dst))
blackfin_dcache_invalidate_range(dst, dst + size); blackfin_dcache_invalidate_range(dst, dst + size);
bulk = size & ~0xffff; bulk = size & ~0xffff;
......
...@@ -103,3 +103,8 @@ EXPORT_SYMBOL(__raw_smp_mark_barrier_asm); ...@@ -103,3 +103,8 @@ EXPORT_SYMBOL(__raw_smp_mark_barrier_asm);
EXPORT_SYMBOL(__raw_smp_check_barrier_asm); EXPORT_SYMBOL(__raw_smp_check_barrier_asm);
#endif #endif
#endif #endif
#ifdef CONFIG_FUNCTION_TRACER
extern void _mcount(void);
EXPORT_SYMBOL(_mcount);
#endif
...@@ -151,7 +151,7 @@ static noinline int dcplb_miss(unsigned int cpu) ...@@ -151,7 +151,7 @@ static noinline int dcplb_miss(unsigned int cpu)
d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB; d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_DCACHE #ifdef CONFIG_BFIN_DCACHE
if (bfin_addr_dcachable(addr)) { if (bfin_addr_dcacheable(addr)) {
d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND; d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#ifdef CONFIG_BFIN_WT #ifdef CONFIG_BFIN_WT
d_data |= CPLB_L1_AOW | CPLB_WT; d_data |= CPLB_L1_AOW | CPLB_WT;
......
...@@ -28,6 +28,7 @@ ...@@ -28,6 +28,7 @@
#include <asm/cplbinit.h> #include <asm/cplbinit.h>
#include <asm/cplb.h> #include <asm/cplb.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/traps.h>
/* /*
* WARNING * WARNING
...@@ -100,28 +101,6 @@ static inline void write_icplb_data(int cpu, int idx, unsigned long data, ...@@ -100,28 +101,6 @@ static inline void write_icplb_data(int cpu, int idx, unsigned long data,
#endif #endif
} }
/*
* Given the contents of the status register, return the index of the
* CPLB that caused the fault.
*/
static inline int faulting_cplb_index(int status)
{
int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
return 30 - signbits;
}
/*
* Given the contents of the status register and the DCPLB_DATA contents,
* return true if a write access should be permitted.
*/
static inline int write_permitted(int status, unsigned long data)
{
if (status & FAULT_USERSUPV)
return !!(data & CPLB_SUPV_WR);
else
return !!(data & CPLB_USER_WR);
}
/* Counters to implement round-robin replacement. */ /* Counters to implement round-robin replacement. */
static int icplb_rr_index[NR_CPUS] PDT_ATTR; static int icplb_rr_index[NR_CPUS] PDT_ATTR;
static int dcplb_rr_index[NR_CPUS] PDT_ATTR; static int dcplb_rr_index[NR_CPUS] PDT_ATTR;
...@@ -245,43 +224,16 @@ MGR_ATTR static int dcplb_miss(int cpu) ...@@ -245,43 +224,16 @@ MGR_ATTR static int dcplb_miss(int cpu)
return CPLB_RELOADED; return CPLB_RELOADED;
} }
MGR_ATTR static noinline int dcplb_protection_fault(int cpu)
{
int status = bfin_read_DCPLB_STATUS();
nr_dcplb_prot[cpu]++;
if (likely(status & FAULT_RW)) {
int idx = faulting_cplb_index(status);
unsigned long regaddr = DCPLB_DATA0 + idx * 4;
unsigned long data = bfin_read32(regaddr);
/* Check if fault is to dirty a clean page */
if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
write_permitted(status, data)) {
dcplb_tbl[cpu][idx].data = data;
bfin_write32(regaddr, data);
return CPLB_RELOADED;
}
}
return CPLB_PROT_VIOL;
}
MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs) MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs)
{ {
int cause = seqstat & 0x3f; int cause = seqstat & 0x3f;
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
switch (cause) { switch (cause) {
case 0x2C: case VEC_CPLB_I_M:
return icplb_miss(cpu); return icplb_miss(cpu);
case 0x26: case VEC_CPLB_M:
return dcplb_miss(cpu); return dcplb_miss(cpu);
default: default:
if (unlikely(cause == 0x23))
return dcplb_protection_fault(cpu);
return CPLB_UNKNOWN_ERR; return CPLB_UNKNOWN_ERR;
} }
} }
...@@ -202,11 +202,15 @@ asmlinkage void __init init_early_exception_vectors(void) ...@@ -202,11 +202,15 @@ asmlinkage void __init init_early_exception_vectors(void)
asmlinkage void __init early_trap_c(struct pt_regs *fp, void *retaddr) asmlinkage void __init early_trap_c(struct pt_regs *fp, void *retaddr)
{ {
/* This can happen before the uart is initialized, so initialize /* This can happen before the uart is initialized, so initialize
* the UART now * the UART now (but only if we are running on the processor we think
* we are compiled for - otherwise we write to MMRs that don't exist,
* and cause other problems. Nothing comes out the UART, but it does
* end up in the __buf_log.
*/ */
if (likely(early_console == NULL)) if (likely(early_console == NULL) && CPUID == bfin_cpuid())
setup_early_printk(DEFAULT_EARLY_PORT); setup_early_printk(DEFAULT_EARLY_PORT);
printk(KERN_EMERG "Early panic\n");
dump_bfin_mem(fp); dump_bfin_mem(fp);
show_regs(fp); show_regs(fp);
dump_bfin_trace_buffer(); dump_bfin_trace_buffer();
......
/*
* mcount and friends -- ftrace stuff
*
* Copyright (C) 2009 Analog Devices Inc.
* Licensed under the GPL-2 or later.
*/
#include <linux/linkage.h>
#include <asm/ftrace.h>
.text
/* GCC will have called us before setting up the function prologue, so we
* can clobber the normal scratch registers, but we need to make sure to
* save/restore the registers used for argument passing (R0-R2) in case
* the profiled function is using them. With data registers, R3 is the
* only one we can blow away. With pointer registers, we have P0-P2.
*
* Upon entry, the RETS will point to the top of the current profiled
* function. And since GCC setup the frame for us, the previous function
* will be waiting there. mmmm pie.
*/
/* Profiling entry point inserted by gcc -pg at the top of every traced
 * function.  Decides whether to invoke the plain function tracer, the
 * function-graph tracer, or nothing at all (stub pointers installed).
 */
ENTRY(__mcount)
/* save third function arg early so we can do testing below */
[--sp] = r2;
/* load the function pointer to the tracer */
p0.l = _ftrace_trace_function;
p0.h = _ftrace_trace_function;
r3 = [p0];
/* optional micro optimization: don't call the stub tracer */
r2.l = _ftrace_stub;
r2.h = _ftrace_stub;
cc = r2 == r3;
if ! cc jump .Ldo_trace;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* if the ftrace_graph_return function pointer is not set to
 * the ftrace_stub entry, call prepare_ftrace_return().
 */
p0.l = _ftrace_graph_return;
p0.h = _ftrace_graph_return;
r3 = [p0];
/* r2 still holds _ftrace_stub from the comparison above */
cc = r2 == r3;
if ! cc jump _ftrace_graph_caller;
/* similarly, if the ftrace_graph_entry function pointer is not
 * set to the ftrace_graph_entry_stub entry, ...
 */
p0.l = _ftrace_graph_entry;
p0.h = _ftrace_graph_entry;
r2.l = _ftrace_graph_entry_stub;
r2.h = _ftrace_graph_entry_stub;
r3 = [p0];
cc = r2 == r3;
if ! cc jump _ftrace_graph_caller;
#endif
/* nothing to trace: undo the early r2 save and return */
r2 = [sp++];
rts;
.Ldo_trace:
/* save first/second function arg and the return register */
[--sp] = r0;
[--sp] = r1;
[--sp] = rets;
/* setup the tracer function */
p0 = r3;
/* tracer(ulong frompc, ulong selfpc):
 * frompc: the pc that did the call to ...
 * selfpc: ... this location
 * the selfpc itself will need adjusting for the mcount call
 */
r1 = rets;
/* caller's saved return address lives in its frame at fp+4
 * (per the frame layout gcc sets up before calling mcount)
 */
r0 = [fp + 4];
r1 += -MCOUNT_INSN_SIZE;
/* call the tracer */
call (p0);
/* restore state and get out of dodge */
.Lfinish_trace:
/* shared epilogue -- _ftrace_graph_caller jumps back here too */
rets = [sp++];
r1 = [sp++];
r0 = [sp++];
r2 = [sp++];
.globl _ftrace_stub
/* _ftrace_stub doubles as the "no tracer installed" sentinel: a bare rts */
_ftrace_stub:
rts;
ENDPROC(__mcount)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* The prepare_ftrace_return() function is similar to the trace function
* except it takes a pointer to the location of the frompc. This is so
* the prepare_ftrace_return() can hijack it temporarily for probing
* purposes.
*/
/* Trampoline for the function-graph tracer: passes the ADDRESS of the
 * frompc slot (not its value) so prepare_ftrace_return() can rewrite it,
 * then rejoins __mcount's common restore path at .Lfinish_trace.
 */
ENTRY(_ftrace_graph_caller)
/* save first/second function arg and the return register */
[--sp] = r0;
[--sp] = r1;
[--sp] = rets;
/* arg0 = &frompc: the slot at fp+4 holding the caller's return address */
r0 = fp;
/* arg1 = selfpc: our RETS, adjusted back over the mcount call site */
r1 = rets;
r0 += 4;
r1 += -MCOUNT_INSN_SIZE;
call _prepare_ftrace_return;
/* reuse __mcount's epilogue to restore r0/r1/rets and return */
jump .Lfinish_trace;
ENDPROC(_ftrace_graph_caller)
/* Undo the rewrite caused by ftrace_graph_caller(). The common function
* ftrace_return_to_handler() will return the original rets so we can
* restore it and be on our way.
*/
/* Executed when a graph-traced function returns: asks the common ftrace
 * code for the original return address (which prepare_ftrace_return()
 * stashed away) and returns there, preserving the function's return
 * values across the call.
 */
ENTRY(_return_to_handler)
/* make sure original return values are saved */
[--sp] = p0;
[--sp] = r0;
[--sp] = r1;
/* get original return address */
call _ftrace_return_to_handler;
/* r0 now holds the real return address */
rets = r0;
/* anomaly 05000371 - make sure we have at least three instructions
 * between rets setting and the return
 */
r1 = [sp++];
r0 = [sp++];
p0 = [sp++];
rts;
ENDPROC(_return_to_handler)
#endif
/*
* ftrace graph code
*
* Copyright (C) 2009 Analog Devices Inc.
* Licensed under the GPL-2 or later.
*/
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* Hook the return address and push it in the stack of return addrs
* in current thread info.
*/
/*
 * Hook the return address of the function currently being traced: push
 * the real return address onto the per-task ftrace return stack, then
 * overwrite the saved RETS slot (*parent) with return_to_handler so the
 * function's exit can be traced as well.
 *
 * @parent:    pointer to the stack slot holding the caller's return address
 * @self_addr: address of the function being traced
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	struct ftrace_graph_ent trace;

	/* graph tracing is temporarily paused for this task */
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/* remember the original return address; bail if the stack is full */
	if (ftrace_push_return_trace(*parent, self_addr, &trace.depth) == -EBUSY)
		return;

	trace.func = self_addr;

	/* only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		/* undo the push above */
		current->curr_ret_stack--;
		return;
	}

	/* all is well -- hijack RETS so the return goes through our handler */
	*parent = (unsigned long)&return_to_handler;
}
#endif
...@@ -99,7 +99,7 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs) ...@@ -99,7 +99,7 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
* interrupt. * interrupt.
*/ */
m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR); m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);
this_domain = ipipe_current_domain; this_domain = __ipipe_current_domain;
if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))) if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control)))
head = &this_domain->p_link; head = &this_domain->p_link;
...@@ -212,7 +212,9 @@ void __ipipe_unstall_root_raw(void) ...@@ -212,7 +212,9 @@ void __ipipe_unstall_root_raw(void)
int __ipipe_syscall_root(struct pt_regs *regs) int __ipipe_syscall_root(struct pt_regs *regs)
{ {
struct ipipe_percpu_domain_data *p;
unsigned long flags; unsigned long flags;
int ret;
/* /*
* We need to run the IRQ tail hook whenever we don't * We need to run the IRQ tail hook whenever we don't
...@@ -231,29 +233,31 @@ int __ipipe_syscall_root(struct pt_regs *regs) ...@@ -231,29 +233,31 @@ int __ipipe_syscall_root(struct pt_regs *regs)
/* /*
* This routine either returns: * This routine either returns:
* 0 -- if the syscall is to be passed to Linux; * 0 -- if the syscall is to be passed to Linux;
* 1 -- if the syscall should not be passed to Linux, and no * >0 -- if the syscall should not be passed to Linux, and no
* tail work should be performed; * tail work should be performed;
* -1 -- if the syscall should not be passed to Linux but the * <0 -- if the syscall should not be passed to Linux but the
* tail work has to be performed (for handling signals etc). * tail work has to be performed (for handling signals etc).
*/ */
if (__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL) && if (!__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
__ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs) > 0) { return 0;
if (ipipe_root_domain_p && !in_atomic()) {
/* ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);
* Sync pending VIRQs before _TIF_NEED_RESCHED
* is tested. local_irq_save_hw(flags);
*/
local_irq_save_hw(flags); if (!__ipipe_root_domain_p) {
if ((ipipe_root_cpudom_var(irqpend_himask) & IPIPE_IRQMASK_VIRT) != 0) local_irq_restore_hw(flags);
__ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
local_irq_restore_hw(flags);
return -1;
}
return 1; return 1;
} }
return 0; p = ipipe_root_cpudom_ptr();
if ((p->irqpend_himask & IPIPE_IRQMASK_VIRT) != 0)
__ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
local_irq_restore_hw(flags);
return -ret;
} }
unsigned long ipipe_critical_enter(void (*syncfn) (void)) unsigned long ipipe_critical_enter(void (*syncfn) (void))
...@@ -329,9 +333,7 @@ asmlinkage void __ipipe_sync_root(void) ...@@ -329,9 +333,7 @@ asmlinkage void __ipipe_sync_root(void)
void ___ipipe_sync_pipeline(unsigned long syncmask) void ___ipipe_sync_pipeline(unsigned long syncmask)
{ {
struct ipipe_domain *ipd = ipipe_current_domain; if (__ipipe_root_domain_p) {
if (ipd == ipipe_root_domain) {
if (test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status))) if (test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
return; return;
} }
......
...@@ -1098,7 +1098,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) ...@@ -1098,7 +1098,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
CPUID, bfin_cpuid()); CPUID, bfin_cpuid());
seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n" seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
"stepping\t: %d\n", "stepping\t: %d ",
cpu, cclk/1000000, sclk/1000000, cpu, cclk/1000000, sclk/1000000,
#ifdef CONFIG_MPU #ifdef CONFIG_MPU
"mpu on", "mpu on",
...@@ -1107,7 +1107,16 @@ static int show_cpuinfo(struct seq_file *m, void *v) ...@@ -1107,7 +1107,16 @@ static int show_cpuinfo(struct seq_file *m, void *v)
#endif #endif
revid); revid);
seq_printf(m, "cpu MHz\t\t: %lu.%03lu/%lu.%03lu\n", if (bfin_revid() != bfin_compiled_revid()) {
if (bfin_compiled_revid() == -1)
seq_printf(m, "(Compiled for Rev none)");
else if (bfin_compiled_revid() == 0xffff)
seq_printf(m, "(Compiled for Rev any)");
else
seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid());
}
seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
cclk/1000000, cclk%1000000, cclk/1000000, cclk%1000000,
sclk/1000000, sclk%1000000); sclk/1000000, sclk%1000000);
seq_printf(m, "bogomips\t: %lu.%02lu\n" seq_printf(m, "bogomips\t: %lu.%02lu\n"
...@@ -1172,6 +1181,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) ...@@ -1172,6 +1181,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
#ifdef __ARCH_SYNC_CORE_DCACHE #ifdef __ARCH_SYNC_CORE_DCACHE
seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", cpudata->dcache_invld_count); seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", cpudata->dcache_invld_count);
#endif #endif
#ifdef __ARCH_SYNC_CORE_ICACHE
seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", cpudata->icache_invld_count);
#endif
#ifdef CONFIG_BFIN_ICACHE_LOCK #ifdef CONFIG_BFIN_ICACHE_LOCK
switch ((cpudata->imemctl >> 3) & WAYALL_L) { switch ((cpudata->imemctl >> 3) & WAYALL_L) {
case WAY0_L: case WAY0_L:
......
/*
* Blackfin stacktrace code (mostly copied from avr32)
*
* Copyright 2009 Analog Devices Inc.
* Licensed under the GPL-2 or later.
*/
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
#include <linux/module.h>
register unsigned long current_frame_pointer asm("FP");
/*
 * One Blackfin call frame as laid out on the stack: the caller's frame
 * pointer followed by the saved RETS (return address).
 */
struct stackframe {
unsigned long fp;   /* link to the caller's frame (higher address) */
unsigned long rets; /* saved return address for this frame */
};
/*
* Save stack-backtrace addresses into a stack_trace buffer.
*/
/*
 * Save stack-backtrace addresses into a stack_trace buffer by walking
 * the frame-pointer chain of the current task, starting from our own
 * frame.  Stops at the top of the task's stack page, when the buffer
 * is full, or when a frame pointer fails to move to a higher address.
 */
void save_stack_trace(struct stack_trace *trace)
{
	unsigned long stack_lo = (unsigned long)task_stack_page(current);
	unsigned long stack_hi = stack_lo + THREAD_SIZE;
	unsigned long fp = current_frame_pointer;
	int to_skip = trace->skip;

	while (fp >= stack_lo && fp <= stack_hi - sizeof(struct stackframe)) {
		struct stackframe *frame = (struct stackframe *)fp;

		if (to_skip) {
			to_skip--;
		} else {
			trace->entries[trace->nr_entries++] = frame->rets;
			if (trace->nr_entries >= trace->max_entries)
				break;
		}

		/*
		 * The next frame must be at a higher address than the
		 * current frame -- this bounds the walk and rejects loops.
		 */
		stack_lo = fp + sizeof(*frame);
		fp = frame->fp;
	}
}
EXPORT_SYMBOL_GPL(save_stack_trace);
This diff is collapsed.
...@@ -54,6 +54,7 @@ SECTIONS ...@@ -54,6 +54,7 @@ SECTIONS
SCHED_TEXT SCHED_TEXT
#endif #endif
LOCK_TEXT LOCK_TEXT
IRQENTRY_TEXT
KPROBES_TEXT KPROBES_TEXT
*(.text.*) *(.text.*)
*(.fixup) *(.fixup)
...@@ -166,6 +167,20 @@ SECTIONS ...@@ -166,6 +167,20 @@ SECTIONS
} }
PERCPU(4) PERCPU(4)
SECURITY_INIT SECURITY_INIT
/* we have to discard exit text and such at runtime, not link time, to
* handle embedded cross-section references (alt instructions, bug
* table, eh_frame, etc...)
*/
.exit.text :
{
EXIT_TEXT
}
.exit.data :
{
EXIT_DATA
}
.init.ramfs : .init.ramfs :
{ {
. = ALIGN(4); . = ALIGN(4);
...@@ -264,8 +279,6 @@ SECTIONS ...@@ -264,8 +279,6 @@ SECTIONS
/DISCARD/ : /DISCARD/ :
{ {
EXIT_TEXT
EXIT_DATA
*(.exitcall.exit) *(.exitcall.exit)
} }
} }
...@@ -116,6 +116,7 @@ __sum16 ip_compute_csum(const void *buff, int len) ...@@ -116,6 +116,7 @@ __sum16 ip_compute_csum(const void *buff, int len)
{ {
return (__force __sum16)~do_csum(buff, len); return (__force __sum16)~do_csum(buff, len);
} }
EXPORT_SYMBOL(ip_compute_csum);
/* /*
* copy from fs while checksumming, otherwise like csum_partial * copy from fs while checksumming, otherwise like csum_partial
...@@ -130,6 +131,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst, ...@@ -130,6 +131,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
memcpy(dst, (__force void *)src, len); memcpy(dst, (__force void *)src, len);
return csum_partial(dst, len, sum); return csum_partial(dst, len, sum);
} }
EXPORT_SYMBOL(csum_partial_copy_from_user);
/* /*
* copy from ds while checksumming, otherwise like csum_partial * copy from ds while checksumming, otherwise like csum_partial
......
...@@ -246,7 +246,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { ...@@ -246,7 +246,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.modalias = "m25p80", /* Name of spi_driver for this device */ .modalias = "m25p80", /* Name of spi_driver for this device */
.max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0, /* Framework bus number */ .bus_num = 0, /* Framework bus number */
.chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/ .chip_select = 2, /* On BF518F-EZBRD it's SPI0_SSEL2 */
.platform_data = &bfin_spi_flash_data, .platform_data = &bfin_spi_flash_data,
.controller_data = &spi_flash_chip_info, .controller_data = &spi_flash_chip_info,
.mode = SPI_MODE_3, .mode = SPI_MODE_3,
...@@ -369,6 +369,11 @@ static struct resource bfin_spi0_resource[] = { ...@@ -369,6 +369,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = { [1] = {
.start = CH_SPI0, .start = CH_SPI0,
.end = CH_SPI0, .end = CH_SPI0,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI0,
.end = IRQ_SPI0,
.flags = IORESOURCE_IRQ, .flags = IORESOURCE_IRQ,
}, },
}; };
...@@ -399,6 +404,11 @@ static struct resource bfin_spi1_resource[] = { ...@@ -399,6 +404,11 @@ static struct resource bfin_spi1_resource[] = {
[1] = { [1] = {
.start = CH_SPI1, .start = CH_SPI1,
.end = CH_SPI1, .end = CH_SPI1,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI1,
.end = IRQ_SPI1,
.flags = IORESOURCE_IRQ, .flags = IORESOURCE_IRQ,
}, },
}; };
......
...@@ -664,6 +664,11 @@ static struct resource bfin_spi0_resource[] = { ...@@ -664,6 +664,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = { [1] = {
.start = CH_SPI, .start = CH_SPI,
.end = CH_SPI, .end = CH_SPI,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI,
.end = IRQ_SPI,
.flags = IORESOURCE_IRQ, .flags = IORESOURCE_IRQ,
}, },
}; };
......
...@@ -467,6 +467,11 @@ static struct resource bfin_spi0_resource[] = { ...@@ -467,6 +467,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = { [1] = {
.start = CH_SPI, .start = CH_SPI,
.end = CH_SPI, .end = CH_SPI,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI,
.end = IRQ_SPI,
.flags = IORESOURCE_IRQ, .flags = IORESOURCE_IRQ,
}, },
}; };
......
...@@ -723,6 +723,11 @@ static struct resource bfin_spi0_resource[] = { ...@@ -723,6 +723,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = { [1] = {
.start = CH_SPI, .start = CH_SPI,
.end = CH_SPI, .end = CH_SPI,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI,
.end = IRQ_SPI,
.flags = IORESOURCE_IRQ, .flags = IORESOURCE_IRQ,
}, },
}; };
......
...@@ -266,6 +266,11 @@ static struct resource bfin_spi0_resource[] = { ...@@ -266,6 +266,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = { [1] = {
.start = CH_SPI, .start = CH_SPI,
.end = CH_SPI, .end = CH_SPI,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI,
.end = IRQ_SPI,
.flags = IORESOURCE_IRQ, .flags = IORESOURCE_IRQ,
} }
}; };
......
...@@ -162,6 +162,11 @@ static struct resource bfin_spi0_resource[] = { ...@@ -162,6 +162,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = { [1] = {
.start = CH_SPI, .start = CH_SPI,
.end = CH_SPI, .end = CH_SPI,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI,
.end = IRQ_SPI,
.flags = IORESOURCE_IRQ, .flags = IORESOURCE_IRQ,
} }
}; };
......
...@@ -160,6 +160,11 @@ static struct resource bfin_spi0_resource[] = { ...@@ -160,6 +160,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = { [1] = {
.start = CH_SPI, .start = CH_SPI,
.end = CH_SPI, .end = CH_SPI,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI,
.end = IRQ_SPI,
.flags = IORESOURCE_IRQ, .flags = IORESOURCE_IRQ,
} }
}; };
......
...@@ -196,6 +196,11 @@ static struct resource bfin_spi0_resource[] = { ...@@ -196,6 +196,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = { [1] = {
.start = CH_SPI, .start = CH_SPI,
.end = CH_SPI, .end = CH_SPI,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI,
.end = IRQ_SPI,
.flags = IORESOURCE_IRQ, .flags = IORESOURCE_IRQ,
} }
}; };
......
...@@ -299,6 +299,11 @@ static struct resource bfin_spi0_resource[] = { ...@@ -299,6 +299,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = { [1] = {
.start = CH_SPI, .start = CH_SPI,
.end = CH_SPI, .end = CH_SPI,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI,
.end = IRQ_SPI,
.flags = IORESOURCE_IRQ, .flags = IORESOURCE_IRQ,
} }
}; };
......
...@@ -182,8 +182,13 @@ static struct resource bfin_spi0_resource[] = { ...@@ -182,8 +182,13 @@ static struct resource bfin_spi0_resource[] = {
[1] = { [1] = {
.start = CH_SPI, .start = CH_SPI,
.end = CH_SPI, .end = CH_SPI,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI,
.end = IRQ_SPI,
.flags = IORESOURCE_IRQ, .flags = IORESOURCE_IRQ,
} },
}; };
/* SPI controller data */ /* SPI controller data */
......
...@@ -184,6 +184,11 @@ static struct resource bfin_spi0_resource[] = { ...@@ -184,6 +184,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = { [1] = {
.start = CH_SPI, .start = CH_SPI,
.end = CH_SPI, .end = CH_SPI,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI,
.end = IRQ_SPI,
.flags = IORESOURCE_IRQ, .flags = IORESOURCE_IRQ,
}, },
}; };
......
...@@ -398,8 +398,13 @@ static struct resource bfin_spi0_resource[] = { ...@@ -398,8 +398,13 @@ static struct resource bfin_spi0_resource[] = {
[1] = { [1] = {
.start = CH_SPI, .start = CH_SPI,
.end = CH_SPI, .end = CH_SPI,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI,
.end = IRQ_SPI,
.flags = IORESOURCE_IRQ, .flags = IORESOURCE_IRQ,
} },
}; };
/* SPI controller data */ /* SPI controller data */
......
...@@ -1345,7 +1345,7 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = { ...@@ -1345,7 +1345,7 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
#if defined(CONFIG_PMIC_ADP5520) || defined(CONFIG_PMIC_ADP5520_MODULE) #if defined(CONFIG_PMIC_ADP5520) || defined(CONFIG_PMIC_ADP5520_MODULE)
{ {
I2C_BOARD_INFO("pmic-adp5520", 0x32), I2C_BOARD_INFO("pmic-adp5520", 0x32),
.irq = IRQ_PF7, .irq = IRQ_PG0,
.platform_data = (void *)&adp5520_pdev_data, .platform_data = (void *)&adp5520_pdev_data,
}, },
#endif #endif
......
...@@ -182,6 +182,11 @@ static struct resource bfin_spi0_resource[] = { ...@@ -182,6 +182,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = { [1] = {
.start = CH_SPI, .start = CH_SPI,
.end = CH_SPI, .end = CH_SPI,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI,
.end = IRQ_SPI,
.flags = IORESOURCE_IRQ, .flags = IORESOURCE_IRQ,
} }
}; };
......
...@@ -352,6 +352,11 @@ static struct resource bfin_spi0_resource[] = { ...@@ -352,6 +352,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = { [1] = {
.start = CH_SPI0, .start = CH_SPI0,
.end = CH_SPI0, .end = CH_SPI0,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI0,
.end = IRQ_SPI0,
.flags = IORESOURCE_IRQ, .flags = IORESOURCE_IRQ,
} }
}; };
...@@ -366,6 +371,11 @@ static struct resource bfin_spi1_resource[] = { ...@@ -366,6 +371,11 @@ static struct resource bfin_spi1_resource[] = {
[1] = { [1] = {
.start = CH_SPI1, .start = CH_SPI1,
.end = CH_SPI1, .end = CH_SPI1,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI1,
.end = IRQ_SPI1,
.flags = IORESOURCE_IRQ, .flags = IORESOURCE_IRQ,
} }
}; };
......
...@@ -612,6 +612,11 @@ static struct resource bfin_spi0_resource[] = { ...@@ -612,6 +612,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = { [1] = {
.start = CH_SPI0, .start = CH_SPI0,
.end = CH_SPI0, .end = CH_SPI0,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI0,
.end = IRQ_SPI0,
.flags = IORESOURCE_IRQ, .flags = IORESOURCE_IRQ,
} }
}; };
...@@ -626,6 +631,11 @@ static struct resource bfin_spi1_resource[] = { ...@@ -626,6 +631,11 @@ static struct resource bfin_spi1_resource[] = {
[1] = { [1] = {
.start = CH_SPI1, .start = CH_SPI1,
.end = CH_SPI1, .end = CH_SPI1,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI1,
.end = IRQ_SPI1,
.flags = IORESOURCE_IRQ, .flags = IORESOURCE_IRQ,
} }
}; };
......
...@@ -396,6 +396,8 @@ static struct platform_device bfin_sir3_device = { ...@@ -396,6 +396,8 @@ static struct platform_device bfin_sir3_device = {
#endif #endif
#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
#include <linux/smsc911x.h>
static struct resource smsc911x_resources[] = { static struct resource smsc911x_resources[] = {
{ {
.name = "smsc911x-memory", .name = "smsc911x-memory",
...@@ -409,11 +411,22 @@ static struct resource smsc911x_resources[] = { ...@@ -409,11 +411,22 @@ static struct resource smsc911x_resources[] = {
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
}, },
}; };
static struct smsc911x_platform_config smsc911x_config = {
.flags = SMSC911X_USE_32BIT,
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
.irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
.phy_interface = PHY_INTERFACE_MODE_MII,
};
static struct platform_device smsc911x_device = { static struct platform_device smsc911x_device = {
.name = "smsc911x", .name = "smsc911x",
.id = 0, .id = 0,
.num_resources = ARRAY_SIZE(smsc911x_resources), .num_resources = ARRAY_SIZE(smsc911x_resources),
.resource = smsc911x_resources, .resource = smsc911x_resources,
.dev = {
.platform_data = &smsc911x_config,
},
}; };
#endif #endif
...@@ -741,6 +754,11 @@ static struct resource bfin_spi0_resource[] = { ...@@ -741,6 +754,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = { [1] = {
.start = CH_SPI0, .start = CH_SPI0,
.end = CH_SPI0, .end = CH_SPI0,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI0,
.end = IRQ_SPI0,
.flags = IORESOURCE_IRQ, .flags = IORESOURCE_IRQ,
} }
}; };
...@@ -755,6 +773,11 @@ static struct resource bfin_spi1_resource[] = { ...@@ -755,6 +773,11 @@ static struct resource bfin_spi1_resource[] = {
[1] = { [1] = {
.start = CH_SPI1, .start = CH_SPI1,
.end = CH_SPI1, .end = CH_SPI1,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI1,
.end = IRQ_SPI1,
.flags = IORESOURCE_IRQ, .flags = IORESOURCE_IRQ,
} }
}; };
......
...@@ -177,8 +177,13 @@ static struct resource bfin_spi0_resource[] = { ...@@ -177,8 +177,13 @@ static struct resource bfin_spi0_resource[] = {
[1] = { [1] = {
.start = CH_SPI, .start = CH_SPI,
.end = CH_SPI, .end = CH_SPI,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI,
.end = IRQ_SPI,
.flags = IORESOURCE_IRQ, .flags = IORESOURCE_IRQ,
} },
}; };
/* SPI controller data */ /* SPI controller data */
......
...@@ -304,6 +304,11 @@ static struct resource bfin_spi0_resource[] = { ...@@ -304,6 +304,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = { [1] = {
.start = CH_SPI, .start = CH_SPI,
.end = CH_SPI, .end = CH_SPI,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI,
.end = IRQ_SPI,
.flags = IORESOURCE_IRQ, .flags = IORESOURCE_IRQ,
} }
}; };
......
...@@ -16,9 +16,21 @@ ...@@ -16,9 +16,21 @@
void blackfin_invalidate_entire_dcache(void) void blackfin_invalidate_entire_dcache(void)
{ {
u32 dmem = bfin_read_DMEM_CONTROL(); u32 dmem = bfin_read_DMEM_CONTROL();
SSYNC();
bfin_write_DMEM_CONTROL(dmem & ~0xc); bfin_write_DMEM_CONTROL(dmem & ~0xc);
SSYNC(); SSYNC();
bfin_write_DMEM_CONTROL(dmem); bfin_write_DMEM_CONTROL(dmem);
SSYNC(); SSYNC();
} }
/* Invalidate the Entire Instruction cache by
* clearing IMC bit
*/
void blackfin_invalidate_entire_icache(void)
{
u32 imem = bfin_read_IMEM_CONTROL();
bfin_write_IMEM_CONTROL(imem & ~0x4);
SSYNC();
bfin_write_IMEM_CONTROL(imem);
SSYNC();
}
...@@ -42,6 +42,7 @@ ...@@ -42,6 +42,7 @@
#include <asm/thread_info.h> /* TIF_NEED_RESCHED */ #include <asm/thread_info.h> /* TIF_NEED_RESCHED */
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/trace.h> #include <asm/trace.h>
#include <asm/traps.h>
#include <asm/context.S> #include <asm/context.S>
...@@ -84,13 +85,15 @@ ENTRY(_ex_workaround_261) ...@@ -84,13 +85,15 @@ ENTRY(_ex_workaround_261)
if !cc jump _bfin_return_from_exception; if !cc jump _bfin_return_from_exception;
/* fall through */ /* fall through */
R7 = P4; R7 = P4;
R6 = 0x26; /* Data CPLB Miss */ R6 = VEC_CPLB_M; /* Data CPLB Miss */
cc = R6 == R7; cc = R6 == R7;
if cc jump _ex_dcplb_miss (BP); if cc jump _ex_dcplb_miss (BP);
R6 = 0x23; /* Data CPLB Miss */ #ifdef CONFIG_MPU
R6 = VEC_CPLB_VL; /* Data CPLB Violation */
cc = R6 == R7; cc = R6 == R7;
if cc jump _ex_dcplb_viol (BP); if cc jump _ex_dcplb_viol (BP);
/* Handle 0x23 Data CPLB Protection Violation #endif
/* Handle Data CPLB Protection Violation
* and Data CPLB Multiple Hits - Linux Trap Zero * and Data CPLB Multiple Hits - Linux Trap Zero
*/ */
jump _ex_trap_c; jump _ex_trap_c;
...@@ -270,7 +273,7 @@ ENTRY(_bfin_return_from_exception) ...@@ -270,7 +273,7 @@ ENTRY(_bfin_return_from_exception)
r6.l = lo(SEQSTAT_EXCAUSE); r6.l = lo(SEQSTAT_EXCAUSE);
r6.h = hi(SEQSTAT_EXCAUSE); r6.h = hi(SEQSTAT_EXCAUSE);
r7 = r7 & r6; r7 = r7 & r6;
r6 = 0x25; r6 = VEC_UNCOV;
CC = R7 == R6; CC = R7 == R6;
if CC JUMP _double_fault; if CC JUMP _double_fault;
#endif #endif
...@@ -1605,6 +1608,7 @@ ENTRY(_sys_call_table) ...@@ -1605,6 +1608,7 @@ ENTRY(_sys_call_table)
.long _sys_inotify_init1 /* 365 */ .long _sys_inotify_init1 /* 365 */
.long _sys_preadv .long _sys_preadv
.long _sys_pwritev .long _sys_pwritev
.long _sys_rt_tgsigqueueinfo
.rept NR_syscalls-(.-_sys_call_table)/4 .rept NR_syscalls-(.-_sys_call_table)/4
.long _sys_ni_syscall .long _sys_ni_syscall
......
...@@ -144,7 +144,7 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg) ...@@ -144,7 +144,7 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
static irqreturn_t ipi_handler(int irq, void *dev_instance) static irqreturn_t ipi_handler(int irq, void *dev_instance)
{ {
struct ipi_message *msg, *mg; struct ipi_message *msg;
struct ipi_message_queue *msg_queue; struct ipi_message_queue *msg_queue;
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
...@@ -154,7 +154,8 @@ static irqreturn_t ipi_handler(int irq, void *dev_instance) ...@@ -154,7 +154,8 @@ static irqreturn_t ipi_handler(int irq, void *dev_instance)
msg_queue->count++; msg_queue->count++;
spin_lock(&msg_queue->lock); spin_lock(&msg_queue->lock);
list_for_each_entry_safe(msg, mg, &msg_queue->head, list) { while (!list_empty(&msg_queue->head)) {
msg = list_entry(msg_queue->head.next, typeof(*msg), list);
list_del(&msg->list); list_del(&msg->list);
switch (msg->type) { switch (msg->type) {
case BFIN_IPI_RESCHEDULE: case BFIN_IPI_RESCHEDULE:
...@@ -221,7 +222,7 @@ int smp_call_function(void (*func)(void *info), void *info, int wait) ...@@ -221,7 +222,7 @@ int smp_call_function(void (*func)(void *info), void *info, int wait)
for_each_cpu_mask(cpu, callmap) { for_each_cpu_mask(cpu, callmap) {
msg_queue = &per_cpu(ipi_msg_queue, cpu); msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags); spin_lock_irqsave(&msg_queue->lock, flags);
list_add(&msg->list, &msg_queue->head); list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags); spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu); platform_send_ipi_cpu(cpu);
} }
...@@ -261,7 +262,7 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, ...@@ -261,7 +262,7 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
msg_queue = &per_cpu(ipi_msg_queue, cpu); msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags); spin_lock_irqsave(&msg_queue->lock, flags);
list_add(&msg->list, &msg_queue->head); list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags); spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu); platform_send_ipi_cpu(cpu);
...@@ -292,7 +293,7 @@ void smp_send_reschedule(int cpu) ...@@ -292,7 +293,7 @@ void smp_send_reschedule(int cpu)
msg_queue = &per_cpu(ipi_msg_queue, cpu); msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags); spin_lock_irqsave(&msg_queue->lock, flags);
list_add(&msg->list, &msg_queue->head); list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags); spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu); platform_send_ipi_cpu(cpu);
...@@ -320,7 +321,7 @@ void smp_send_stop(void) ...@@ -320,7 +321,7 @@ void smp_send_stop(void)
for_each_cpu_mask(cpu, callmap) { for_each_cpu_mask(cpu, callmap) {
msg_queue = &per_cpu(ipi_msg_queue, cpu); msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags); spin_lock_irqsave(&msg_queue->lock, flags);
list_add(&msg->list, &msg_queue->head); list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags); spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu); platform_send_ipi_cpu(cpu);
} }
...@@ -468,6 +469,17 @@ void smp_icache_flush_range_others(unsigned long start, unsigned long end) ...@@ -468,6 +469,17 @@ void smp_icache_flush_range_others(unsigned long start, unsigned long end)
} }
EXPORT_SYMBOL_GPL(smp_icache_flush_range_others); EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
#ifdef __ARCH_SYNC_CORE_ICACHE
void resync_core_icache(void)
{
unsigned int cpu = get_cpu();
blackfin_invalidate_entire_icache();
++per_cpu(cpu_data, cpu).icache_invld_count;
put_cpu();
}
EXPORT_SYMBOL(resync_core_icache);
#endif
#ifdef __ARCH_SYNC_CORE_DCACHE #ifdef __ARCH_SYNC_CORE_DCACHE
unsigned long barrier_mask __attribute__ ((__section__(".l2.bss"))); unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));
......
...@@ -803,7 +803,7 @@ static void bfin_spi_pump_transfers(unsigned long data) ...@@ -803,7 +803,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
drv_data->rx, drv_data->len_in_bytes); drv_data->rx, drv_data->len_in_bytes);
/* invalidate caches, if needed */ /* invalidate caches, if needed */
if (bfin_addr_dcachable((unsigned long) drv_data->rx)) if (bfin_addr_dcacheable((unsigned long) drv_data->rx))
invalidate_dcache_range((unsigned long) drv_data->rx, invalidate_dcache_range((unsigned long) drv_data->rx,
(unsigned long) (drv_data->rx + (unsigned long) (drv_data->rx +
drv_data->len_in_bytes)); drv_data->len_in_bytes));
...@@ -816,7 +816,7 @@ static void bfin_spi_pump_transfers(unsigned long data) ...@@ -816,7 +816,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
dev_dbg(&drv_data->pdev->dev, "doing DMA out.\n"); dev_dbg(&drv_data->pdev->dev, "doing DMA out.\n");
/* flush caches, if needed */ /* flush caches, if needed */
if (bfin_addr_dcachable((unsigned long) drv_data->tx)) if (bfin_addr_dcacheable((unsigned long) drv_data->tx))
flush_dcache_range((unsigned long) drv_data->tx, flush_dcache_range((unsigned long) drv_data->tx,
(unsigned long) (drv_data->tx + (unsigned long) (drv_data->tx +
drv_data->len_in_bytes)); drv_data->len_in_bytes));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment