Commit 854de764 authored by Russell King, committed by Catalin Marinas

[ARM] Handle HWCAP_VFP in VFP support code

Don't set HWCAP_VFP in the processor support file; not only does it
depend on the processor features, but it also depends on the support
code being present.  Therefore, only set it if the support code
detects that we have a VFP coprocessor attached.

Also, move the VFP handling of the coprocessor access register into
the VFP support code.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent e3b8b8e1
...@@ -354,12 +354,6 @@ static void __init setup_processor(void) ...@@ -354,12 +354,6 @@ static void __init setup_processor(void)
#ifndef CONFIG_ARM_THUMB #ifndef CONFIG_ARM_THUMB
elf_hwcap &= ~HWCAP_THUMB; elf_hwcap &= ~HWCAP_THUMB;
#endif #endif
#ifndef CONFIG_VFP
elf_hwcap &= ~HWCAP_VFP;
#endif
#ifndef CONFIG_IWMMXT
elf_hwcap &= ~HWCAP_IWMMXT;
#endif
cpu_proc_init(); cpu_proc_init();
} }
......
...@@ -480,7 +480,7 @@ __arm926_proc_info: ...@@ -480,7 +480,7 @@ __arm926_proc_info:
b __arm926_setup b __arm926_setup
.long cpu_arch_name .long cpu_arch_name
.long cpu_elf_name .long cpu_elf_name
.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_VFP|HWCAP_EDSP|HWCAP_JAVA .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
.long cpu_arm926_name .long cpu_arm926_name
.long arm926_processor_functions .long arm926_processor_functions
.long v4wbi_tlb_fns .long v4wbi_tlb_fns
......
...@@ -207,11 +207,6 @@ __v6_setup: ...@@ -207,11 +207,6 @@ __v6_setup:
#endif #endif
mcr p15, 0, r4, c2, c0, 1 @ load TTB1 mcr p15, 0, r4, c2, c0, 1 @ load TTB1
#endif /* CONFIG_MMU */ #endif /* CONFIG_MMU */
#ifdef CONFIG_VFP
mrc p15, 0, r0, c1, c0, 2
orr r0, r0, #(0xf << 20)
mcr p15, 0, r0, c1, c0, 2 @ Enable full access to VFP
#endif
adr r5, v6_crval adr r5, v6_crval
ldmia r5, {r5, r6} ldmia r5, {r5, r6}
mrc p15, 0, r0, c1, c0, 0 @ read control register mrc p15, 0, r0, c1, c0, 0 @ read control register
...@@ -273,7 +268,7 @@ __v6_proc_info: ...@@ -273,7 +268,7 @@ __v6_proc_info:
b __v6_setup b __v6_setup
.long cpu_arch_name .long cpu_arch_name
.long cpu_elf_name .long cpu_elf_name
.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_VFP|HWCAP_EDSP|HWCAP_JAVA .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
.long cpu_v6_name .long cpu_v6_name
.long v6_processor_functions .long v6_processor_functions
.long v6wbi_tlb_fns .long v6wbi_tlb_fns
......
...@@ -270,6 +270,17 @@ void VFP9_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) ...@@ -270,6 +270,17 @@ void VFP9_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
static int __init vfp_init(void) static int __init vfp_init(void)
{ {
unsigned int vfpsid; unsigned int vfpsid;
unsigned int cpu_arch = cpu_architecture();
u32 access = 0;
if (cpu_arch >= CPU_ARCH_ARMv6) {
access = get_copro_access();
/*
* Enable full access to VFP (cp10 and cp11)
*/
set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
}
/* /*
* First check that there is a VFP that we can use. * First check that there is a VFP that we can use.
...@@ -281,6 +292,12 @@ static int __init vfp_init(void) ...@@ -281,6 +292,12 @@ static int __init vfp_init(void)
printk(KERN_INFO "VFP support v0.3: "); printk(KERN_INFO "VFP support v0.3: ");
if (VFP_arch) { if (VFP_arch) {
printk("not present\n"); printk("not present\n");
/*
* Restore the copro access register.
*/
if (cpu_arch >= CPU_ARCH_ARMv6)
set_copro_access(access);
} else if (vfpsid & FPSID_NODOUBLE) { } else if (vfpsid & FPSID_NODOUBLE) {
printk("no double precision support\n"); printk("no double precision support\n");
} else { } else {
...@@ -291,9 +308,16 @@ static int __init vfp_init(void) ...@@ -291,9 +308,16 @@ static int __init vfp_init(void)
(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT, (vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT, (vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT); (vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);
vfp_vector = vfp_support_entry; vfp_vector = vfp_support_entry;
thread_register_notifier(&vfp_notifier_block); thread_register_notifier(&vfp_notifier_block);
/*
* We detected VFP, and the support code is
* in place; report VFP support to userspace.
*/
elf_hwcap |= HWCAP_VFP;
} }
return 0; return 0;
} }
......
...@@ -139,19 +139,36 @@ static inline int cpu_is_xsc3(void) ...@@ -139,19 +139,36 @@ static inline int cpu_is_xsc3(void)
#define cpu_is_xscale() 1 #define cpu_is_xscale() 1
#endif #endif
#define set_cr(x) \ static inline unsigned int get_cr(void)
__asm__ __volatile__( \ {
"mcr p15, 0, %0, c1, c0, 0 @ set CR" \ unsigned int val;
: : "r" (x) : "cc") asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc");
return val;
}
#define get_cr() \ static inline void set_cr(unsigned int val)
({ \ {
unsigned int __val; \ asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR"
__asm__ __volatile__( \ : : "r" (val) : "cc");
"mrc p15, 0, %0, c1, c0, 0 @ get CR" \ }
: "=r" (__val) : : "cc"); \
__val; \ #define CPACC_FULL(n) (3 << (n * 2))
}) #define CPACC_SVC(n) (1 << (n * 2))
#define CPACC_DISABLE(n) (0 << (n * 2))
/*
 * Read the Coprocessor Access Control Register (CP15 c1, c0, 2),
 * which gates access to coprocessors such as cp10/cp11 (VFP).
 */
static inline unsigned int get_copro_access(void)
{
	unsigned int cpacc;
	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
	    : "=r" (cpacc) : : "cc");
	return cpacc;
}
/*
 * Write the Coprocessor Access Control Register (CP15 c1, c0, 2).
 * Marked volatile so the compiler never discards the write.
 */
static inline void set_copro_access(unsigned int cpacc)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
		     : : "r" (cpacc) : "cc");
}
extern unsigned long cr_no_alignment; /* defined in entry-armv.S */ extern unsigned long cr_no_alignment; /* defined in entry-armv.S */
extern unsigned long cr_alignment; /* defined in entry-armv.S */ extern unsigned long cr_alignment; /* defined in entry-armv.S */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment