Commit ecba1060 authored by Paul Mundt

sh: Centralize the CPU cache initialization routines.

This provides a central point for CPU cache initialization routines.
This replaces the antiquated p3_cache_init() method, which the vast
majority of CPUs never cared about.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent e82da214
@@ -12,7 +12,6 @@
*
* See arch/sh/kernel/cpu/init.c:cache_init().
*/
#define p3_cache_init() do { } while (0)
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
@@ -78,5 +77,7 @@ void kunmap_coherent(void);
#define PG_dcache_dirty PG_arch_1
void cpu_cache_init(void);
#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHEFLUSH_H */
@@ -39,6 +39,4 @@
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#define flush_cache_sigtramp(vaddr) do { } while (0)
#define p3_cache_init() do { } while (0)
#endif /* __ASM_CPU_SH2_CACHEFLUSH_H */
@@ -30,5 +30,4 @@ void flush_icache_range(unsigned long start, unsigned long end);
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#define flush_cache_sigtramp(vaddr) do { } while (0)
#define p3_cache_init() do { } while (0)
#endif /* __ASM_CPU_SH2A_CACHEFLUSH_H */
@@ -32,8 +32,6 @@ void flush_icache_page(struct vm_area_struct *vma, struct page *page);
#define flush_cache_sigtramp(vaddr) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#define p3_cache_init() do { } while (0)
#else
#include <cpu-common/cpu/cacheflush.h>
#endif
......
@@ -35,7 +35,4 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
#define flush_icache_page(vma,pg) do { } while (0)
/* Initialization of P3 area for copy_user_page */
void p3_cache_init(void);
#endif /* __ASM_CPU_SH4_CACHEFLUSH_H */
@@ -25,7 +25,6 @@ extern void flush_icache_user_range(struct vm_area_struct *vma,
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_page(vma, page) do { } while (0)
void p3_cache_init(void);
#endif /* __ASSEMBLY__ */
......
@@ -94,7 +94,7 @@ static void __init emit_cache_params(void)
/*
* SH-4 has virtually indexed and physically tagged cache.
*/
void __init p3_cache_init(void)
void __init sh4_cache_init(void)
{
compute_alias(&boot_cpu_data.icache);
compute_alias(&boot_cpu_data.dcache);
......
@@ -23,7 +23,7 @@
/* Wired TLB entry for the D-cache */
static unsigned long long dtlb_cache_slot;
void __init p3_cache_init(void)
void __init cpu_cache_init(void)
{
/* Reserve a slot for dcache colouring in the DTLB */
dtlb_cache_slot = sh64_get_wired_dtlb_entry();
......
@@ -127,3 +127,14 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
__flush_wback_region((void *)addr, PAGE_SIZE);
}
}
/*
 * Centralized CPU cache initialization entry point, called from
 * mem_init() (this commit replaces the old per-CPU p3_cache_init()
 * hook with this single routine).
 */
void __init cpu_cache_init(void)
{
/* Only the SH-4 family variants need cache setup here; the
 * SH-2/SH-2A/SH-3 p3_cache_init() stubs were all empty. */
if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
/* Declared __weak so kernels built without the SH-4 cache
 * code still link; the call resolves to the real
 * sh4_cache_init() when that object is present.
 * NOTE(review): __weak on a declaration at a call site relies on
 * the linker providing a definition — confirm a stub/NULL-check
 * isn't needed when sh4 cache support is compiled out. */
extern void __weak sh4_cache_init(void);
sh4_cache_init();
}
}
@@ -230,7 +230,7 @@ void __init mem_init(void)
datasize >> 10,
initsize >> 10);
p3_cache_init();
cpu_cache_init();
/* Initialize the vDSO */
vsyscall_init();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment