Commit 7fbb2d3b authored by Paul Mundt

sh: consolidate flush_dcache_mmap_lock/unlock() definitions.

All of the flush_dcache_mmap_lock()/flush_dcache_mmap_unlock()
definitions are identical across all CPUs, so just provide them
generically in asm/cacheflush.h.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 0b445dca
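
For reference, the consolidated definitions are just the usual empty statement macros; a minimal sketch of what the generic asm/cacheflush.h ends up providing (mirroring the '+' hunk in the diff below):

/* Every SH CPU subtype used an identical no-op here, so the generic
 * header can supply the empty definitions for all of them. */
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

The do { } while (0) wrapper keeps each macro usable as a single C statement (for example inside an if/else body) even though it expands to nothing.
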
@@ -20,8 +20,6 @@
 #define flush_dcache_page(page)			do { } while (0)
 #define flush_icache_range(start, end)		do { } while (0)
 #define flush_icache_page(vma,pg)		do { } while (0)
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 #define flush_cache_sigtramp(vaddr)		do { } while (0)
 #define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
 #define __flush_wback_region(start, size)	do { (void)(start); } while (0)
@@ -71,6 +69,9 @@ extern void copy_from_user_page(struct vm_area_struct *vma,
 #define flush_cache_vmap(start, end)		flush_cache_all()
 #define flush_cache_vunmap(start, end)		flush_cache_all()
 
+#define flush_dcache_mmap_lock(mapping)		do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
+
 void kmap_coherent_init(void);
 void *kmap_coherent(struct page *page, unsigned long addr);
 void kunmap_coherent(void);
@@ -32,8 +32,6 @@
 #define flush_cache_range(vma, start, end)	do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
 #define flush_dcache_page(page)			do { } while (0)
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 #define flush_icache_range(start, end)		do { } while (0)
 #define flush_icache_page(vma,pg)		do { } while (0)
 #define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
@@ -23,8 +23,6 @@
 #define flush_cache_range(vma, start, end)	do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
 #define flush_dcache_page(page)			do { } while (0)
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 void flush_icache_range(unsigned long start, unsigned long end);
 #define flush_icache_page(vma,pg)		do { } while (0)
 #define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
@@ -25,9 +25,6 @@ void flush_dcache_page(struct page *pg);
 void flush_icache_range(unsigned long start, unsigned long end);
 void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-
 /* SH3 has unified cache so no special action needed here */
 #define flush_cache_sigtramp(vaddr)		do { } while (0)
 #define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
@@ -24,10 +24,6 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
 		      unsigned long pfn);
 void flush_dcache_page(struct page *pg);
-
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-
 void flush_icache_range(unsigned long start, unsigned long end);
 void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 			     unsigned long addr, int len);
@@ -20,10 +20,6 @@ extern void flush_icache_user_range(struct vm_area_struct *vma,
 				    int len);
 
 #define flush_cache_dup_mm(mm)	flush_cache_mm(mm)
-
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-
 #define flush_icache_page(vma, page)	do { } while (0)
 
 #endif /* __ASSEMBLY__ */