Commit 2f0b1926 authored by Russell King

ARM: Avoid duplicated implementation for VIVT cache flushing

We had two copies of the wrapper code for VIVT cache flushing - one in
asm/cacheflush.h and one in arch/arm/mm/flush.c.  Reduce this down to
one common copy.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 29e55363
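
The pattern is easier to see outside the kernel sources. Below is a minimal standalone sketch (hypothetical names such as vivt_flush() and flush_cache(); only CONFIG_CPU_CACHE_VIPT is taken from the patch): the shared implementation is a static inline helper visible to every configuration, non-VIPT builds alias the public name to it with a macro, and VIPT builds declare the public name extern and let the out-of-line definition reuse the same helper.

#include <stdio.h>

/* The one shared copy, analogous to vivt_flush_cache_mm() in the header. */
static inline void vivt_flush(int mm)
{
	printf("VIVT flush for mm %d\n", mm);
}

#ifndef CONFIG_CPU_CACHE_VIPT
/* VIVT-only build: the public name is a plain macro alias. */
#define flush_cache(mm) vivt_flush(mm)
#else
/* VIPT-capable build: the public name is extern, defined out of line. */
extern void flush_cache(int mm);

/* Stand-in for cache_is_vivt(): pretend the runtime check reports VIVT. */
static int cache_is_vivt(void) { return 1; }

/* In the kernel this definition lives in a separate file, arch/arm/mm/flush.c. */
void flush_cache(int mm)
{
	if (cache_is_vivt()) {
		vivt_flush(mm);		/* still the single shared copy */
		return;
	}
	printf("VIPT-specific flush for mm %d\n", mm);
}
#endif

int main(void)
{
	flush_cache(42);	/* identical call site under either configuration */
	return 0;
}

Compiling the sketch plain (cc demo.c) takes the macro path; compiling with -DCONFIG_CPU_CACHE_VIPT takes the extern path. The call site does not change, which is what lets the kernel drop one of the two copies without touching callers.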
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -331,15 +331,15 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
  * Convert calls to our calling convention.
  */
 #define flush_cache_all()		__cpuc_flush_kern_all()
-#ifndef CONFIG_CPU_CACHE_VIPT
-static inline void flush_cache_mm(struct mm_struct *mm)
+
+static inline void vivt_flush_cache_mm(struct mm_struct *mm)
 {
 	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
 		__cpuc_flush_user_all();
 }
 
 static inline void
-flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
 	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
 		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
@@ -347,7 +347,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long
 }
 
 static inline void
-flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
+vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
 	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
 		unsigned long addr = user_addr & PAGE_MASK;
@@ -356,7 +356,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned l
 }
 
 static inline void
-flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 			 unsigned long uaddr, void *kaddr,
 			 unsigned long len, int write)
 {
@@ -365,6 +365,16 @@ flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 		__cpuc_coherent_kern_range(addr, addr + len);
 	}
 }
+
+#ifndef CONFIG_CPU_CACHE_VIPT
+#define flush_cache_mm(mm) \
+		vivt_flush_cache_mm(mm)
+#define flush_cache_range(vma,start,end) \
+		vivt_flush_cache_range(vma,start,end)
+#define flush_cache_page(vma,addr,pfn) \
+		vivt_flush_cache_page(vma,addr,pfn)
+#define flush_ptrace_access(vma,page,ua,ka,len,write) \
+		vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
 #else
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -41,8 +41,7 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 void flush_cache_mm(struct mm_struct *mm)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
-			__cpuc_flush_user_all();
+		vivt_flush_cache_mm(mm);
 		return;
 	}
@@ -59,9 +58,7 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
-			__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
-				vma->vm_flags);
+		vivt_flush_cache_range(vma, start, end);
 		return;
 	}
@@ -78,10 +75,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-			unsigned long addr = user_addr & PAGE_MASK;
-			__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
-		}
+		vivt_flush_cache_page(vma, user_addr, pfn);
 		return;
 	}
@@ -94,10 +88,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 			 unsigned long len, int write)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-			unsigned long addr = (unsigned long)kaddr;
-			__cpuc_coherent_kern_range(addr, addr + len);
-		}
+		vivt_flush_ptrace_access(vma, page, uaddr, kaddr, len, write);
 		return;
 	}
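
The net effect on the out-of-line functions can be read straight off the hunks above: each cache_is_vivt() branch collapses from an open-coded copy to a single call into the shared inline helper. For example, flush_cache_mm() in arch/arm/mm/flush.c now reads (reconstructed from the diff, VIPT handling elided):

void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);	/* the one common copy */
		return;
	}
	/* ... VIPT handling continues ... */
}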