Commit 2c9b9c84 authored by Russell King

ARM: add size argument to __cpuc_flush_dcache_page

... and rename the function since it no longer operates on just
pages.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent ccaf5f05
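For readers of the diff below, here is a minimal sketch of the API change from a caller's point of view. Both calls are taken from the hunks in this commit; note that the new signature permits region sizes other than PAGE_SIZE, even though every caller converted here still flushes exactly one page:

	/* Before: flushed exactly one page starting at a kernel
	 * address; the size was implicit in the function name. */
	__cpuc_flush_dcache_page(page_address(page));

	/* After: the caller states the region size explicitly.  All
	 * callers converted in this commit pass PAGE_SIZE, but any
	 * kernel-virtual region is now expressible. */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);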
@@ -211,7 +211,7 @@ struct cpu_cache_fns {
 	void (*coherent_kern_range)(unsigned long, unsigned long);
 	void (*coherent_user_range)(unsigned long, unsigned long);
-	void (*flush_kern_dcache_page)(void *);
+	void (*flush_kern_dcache_area)(void *, size_t);
 
 	void (*dma_inv_range)(const void *, const void *);
 	void (*dma_clean_range)(const void *, const void *);
@@ -236,7 +236,7 @@ extern struct cpu_cache_fns cpu_cache;
 #define __cpuc_flush_user_range		cpu_cache.flush_user_range
 #define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
 #define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
-#define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page
+#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area
 
 /*
  * These are private to the dma-mapping API.  Do not use directly.
@@ -255,14 +255,14 @@ extern struct cpu_cache_fns cpu_cache;
 #define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
 #define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
 #define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
-#define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)
+#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
 
 extern void __cpuc_flush_kern_all(void);
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
 extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
 extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
-extern void __cpuc_flush_dcache_page(void *);
+extern void __cpuc_flush_dcache_area(void *, size_t);
 
 /*
  * These are private to the dma-mapping API.  Do not use directly.
@@ -448,7 +448,7 @@ static inline void flush_kernel_dcache_page(struct page *page)
 {
 	/* highmem pages are always flushed upon kunmap already */
 	if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
-		__cpuc_flush_dcache_page(page_address(page));
+		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
 
 #define flush_dcache_mmap_lock(mapping) \
...
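The header hunks above update both dispatch paths: under a multi-cache build, the macro resolves to a function pointer in the cpu_cache table, while single-cache builds paste the CPU-specific symbol name at compile time via __glue. A simplified sketch of the token-pasting path follows; the two-level ____glue helper shown is the kernel's usual paste idiom, reproduced from memory rather than from this diff:

	#define ____glue(name, fn)	name##fn
	#define __glue(name, fn)	____glue(name, fn)

	/* With _CACHE defined as, say, v7, this expands at preprocessing
	 * time to v7_flush_kern_dcache_area, binding the call directly to
	 * the per-CPU assembly routine with no pointer indirection. */
	#define __cpuc_flush_dcache_area __glue(_CACHE, _flush_kern_dcache_area)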
@@ -127,15 +127,16 @@ ENTRY(fa_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(kaddr)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure that the data held in the page kaddr is written back
  *	to the page in question.
 *
- *	- kaddr   - kernel address (guaranteed to be page aligned)
+ *	- addr	- kernel address
+ *	- size	- size of region
 */
-ENTRY(fa_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(fa_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -213,7 +214,7 @@ ENTRY(fa_cache_fns)
 	.long	fa_flush_user_cache_range
 	.long	fa_coherent_kern_range
 	.long	fa_coherent_user_range
-	.long	fa_flush_kern_dcache_page
+	.long	fa_flush_kern_dcache_area
 	.long	fa_dma_inv_range
 	.long	fa_dma_clean_range
 	.long	fa_dma_flush_range
...
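The fa routine above establishes the loop shape that repeats, with per-core variations, through every implementation in this commit: the end address is now computed from the size argument (add r1, r0, r1) instead of the hard-coded #PAGE_SZ. A C rendering of that loop, with hypothetical helper names standing in for the cp15 instructions in the assembly:

	/* Sketch of the common flush loop; clean_and_invalidate_dcache_line()
	 * and drain_write_buffer() are placeholders for the mcr
	 * instructions, not real kernel functions. */
	void flush_kern_dcache_area(void *addr, size_t size)
	{
		unsigned long p = (unsigned long)addr;
		unsigned long end = p + size;		/* was: p + PAGE_SZ */

		while (p < end) {
			clean_and_invalidate_dcache_line(p);	/* mcr p15, 0, p, c7, c14, 1 */
			p += CACHE_DLINESIZE;
		}
		drain_write_buffer();			/* mcr p15, 0, 0, c7, c10, 4 */
	}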
@@ -72,14 +72,15 @@ ENTRY(v3_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *page, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
 *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
 */
-ENTRY(v3_flush_kern_dcache_page)
+ENTRY(v3_flush_kern_dcache_area)
 	/* FALLTHROUGH */
 
 /*
@@ -129,7 +130,7 @@ ENTRY(v3_cache_fns)
 	.long	v3_flush_user_cache_range
 	.long	v3_coherent_kern_range
 	.long	v3_coherent_user_range
-	.long	v3_flush_kern_dcache_page
+	.long	v3_flush_kern_dcache_area
 	.long	v3_dma_inv_range
 	.long	v3_dma_clean_range
 	.long	v3_dma_flush_range
...
@@ -82,14 +82,15 @@ ENTRY(v4_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
 *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
 */
-ENTRY(v4_flush_kern_dcache_page)
+ENTRY(v4_flush_kern_dcache_area)
 	/* FALLTHROUGH */
 
 /*
@@ -141,7 +142,7 @@ ENTRY(v4_cache_fns)
 	.long	v4_flush_user_cache_range
 	.long	v4_coherent_kern_range
 	.long	v4_coherent_user_range
-	.long	v4_flush_kern_dcache_page
+	.long	v4_flush_kern_dcache_area
 	.long	v4_dma_inv_range
 	.long	v4_dma_clean_range
 	.long	v4_dma_flush_range
...
@@ -114,15 +114,16 @@ ENTRY(v4wb_flush_user_cache_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
 *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
 */
-ENTRY(v4wb_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(v4wb_flush_kern_dcache_area)
+	add	r1, r0, r1
 	/* fall through */
 
 /*
@@ -224,7 +225,7 @@ ENTRY(v4wb_cache_fns)
 	.long	v4wb_flush_user_cache_range
 	.long	v4wb_coherent_kern_range
 	.long	v4wb_coherent_user_range
-	.long	v4wb_flush_kern_dcache_page
+	.long	v4wb_flush_kern_dcache_area
 	.long	v4wb_dma_inv_range
 	.long	v4wb_dma_clean_range
 	.long	v4wb_dma_flush_range
...
@@ -117,17 +117,18 @@ ENTRY(v4wt_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
 *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
 */
-ENTRY(v4wt_flush_kern_dcache_page)
+ENTRY(v4wt_flush_kern_dcache_area)
 	mov	r2, #0
 	mcr	p15, 0, r2, c7, c5, 0		@ invalidate I cache
-	add	r1, r0, #PAGE_SZ
+	add	r1, r0, r1
 	/* fallthrough */
 
 /*
@@ -180,7 +181,7 @@ ENTRY(v4wt_cache_fns)
 	.long	v4wt_flush_user_cache_range
 	.long	v4wt_coherent_kern_range
 	.long	v4wt_coherent_user_range
-	.long	v4wt_flush_kern_dcache_page
+	.long	v4wt_flush_kern_dcache_area
 	.long	v4wt_dma_inv_range
 	.long	v4wt_dma_clean_range
 	.long	v4wt_dma_flush_range
...
@@ -159,15 +159,16 @@ ENDPROC(v6_coherent_user_range)
 ENDPROC(v6_coherent_kern_range)
 
 /*
- *	v6_flush_kern_dcache_page(kaddr)
+ *	v6_flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure that the data held in the page kaddr is written back
  *	to the page in question.
 *
- *	- kaddr   - kernel address (guaranteed to be page aligned)
+ *	- addr	- kernel address
+ *	- size	- region size
 */
-ENTRY(v6_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(v6_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
@@ -271,7 +272,7 @@ ENTRY(v6_cache_fns)
 	.long	v6_flush_user_cache_range
 	.long	v6_coherent_kern_range
 	.long	v6_coherent_user_range
-	.long	v6_flush_kern_dcache_page
+	.long	v6_flush_kern_dcache_area
 	.long	v6_dma_inv_range
 	.long	v6_dma_clean_range
 	.long	v6_dma_flush_range
...
@@ -186,16 +186,17 @@ ENDPROC(v7_coherent_kern_range)
 ENDPROC(v7_coherent_user_range)
 
 /*
- *	v7_flush_kern_dcache_page(kaddr)
+ *	v7_flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure that the data held in the page kaddr is written back
  *	to the page in question.
 *
- *	- kaddr   - kernel address (guaranteed to be page aligned)
+ *	- addr	- kernel address
+ *	- size	- region size
 */
-ENTRY(v7_flush_kern_dcache_page)
+ENTRY(v7_flush_kern_dcache_area)
 	dcache_line_size r2, r3
-	add	r1, r0, #PAGE_SZ
+	add	r1, r0, r1
 1:
 	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line / unified line
 	add	r0, r0, r2
@@ -203,7 +204,7 @@ ENTRY(v7_flush_kern_dcache_page)
 	blo	1b
 	dsb
 	mov	pc, lr
-ENDPROC(v7_flush_kern_dcache_page)
+ENDPROC(v7_flush_kern_dcache_area)
 
 /*
  *	v7_dma_inv_range(start,end)
@@ -279,7 +280,7 @@ ENTRY(v7_cache_fns)
 	.long	v7_flush_user_cache_range
 	.long	v7_coherent_kern_range
 	.long	v7_coherent_user_range
-	.long	v7_flush_kern_dcache_page
+	.long	v7_flush_kern_dcache_area
 	.long	v7_dma_inv_range
 	.long	v7_dma_clean_range
 	.long	v7_dma_flush_range
...
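One detail worth noting in the v7 hunk: unlike the older cores, which step by a fixed #CACHE_DLINESIZE, v7 reads the D-cache line size at run time via the dcache_line_size macro before entering the loop. A C sketch of that computation, assuming the ARMv7 cache type register layout (the macro body itself is not part of this diff, and read_ctr() is a hypothetical stand-in for the mrc instruction):

	/* CTR[19:16] holds log2(words per D-cache line); the line size
	 * in bytes is therefore 4 << CTR[19:16]. */
	unsigned int v7_dcache_line_size(void)
	{
		unsigned int ctr = read_ctr();	/* mrc p15, 0, <reg>, c0, c0, 1 */
		return 4 << ((ctr >> 16) & 0xf);
	}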
@@ -131,7 +131,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 */
 	if (addr)
 #endif
-		__cpuc_flush_dcache_page(addr);
+		__cpuc_flush_dcache_area(addr, PAGE_SIZE);
 
 	/*
 	 * If this is a page cache page, and we have an aliasing VIPT cache,
@@ -258,5 +258,5 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned l
 	 * in this mapping of the page.  FIXME: this is overkill
 	 * since we actually ask for a write-back and invalidate.
 	 */
-	__cpuc_flush_dcache_page(page_address(page));
+	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
@@ -79,7 +79,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
 
 	if (kvaddr >= (void *)FIXADDR_START) {
-		__cpuc_flush_dcache_page((void *)vaddr);
+		__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
 		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
...
@@ -61,7 +61,7 @@ void setup_mm_for_reboot(char mode)
 
 void flush_dcache_page(struct page *page)
 {
-	__cpuc_flush_dcache_page(page_address(page));
+	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
 EXPORT_SYMBOL(flush_dcache_page);
...
@@ -231,17 +231,18 @@ ENTRY(arm1020_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
 *
- *	- page	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
 */
-ENTRY(arm1020_flush_kern_dcache_page)
+ENTRY(arm1020_flush_kern_dcache_area)
 	mov	ip, #0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
-	add	r1, r0, #PAGE_SZ
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
 	add	r0, r0, #CACHE_DLINESIZE
@@ -335,7 +336,7 @@ ENTRY(arm1020_cache_fns)
 	.long	arm1020_flush_user_cache_range
 	.long	arm1020_coherent_kern_range
 	.long	arm1020_coherent_user_range
-	.long	arm1020_flush_kern_dcache_page
+	.long	arm1020_flush_kern_dcache_area
 	.long	arm1020_dma_inv_range
 	.long	arm1020_dma_clean_range
 	.long	arm1020_dma_flush_range
...
@@ -225,17 +225,18 @@ ENTRY(arm1020e_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
 *
- *	- page	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
 */
-ENTRY(arm1020e_flush_kern_dcache_page)
+ENTRY(arm1020e_flush_kern_dcache_area)
 	mov	ip, #0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
-	add	r1, r0, #PAGE_SZ
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -321,7 +322,7 @@ ENTRY(arm1020e_cache_fns)
 	.long	arm1020e_flush_user_cache_range
 	.long	arm1020e_coherent_kern_range
 	.long	arm1020e_coherent_user_range
-	.long	arm1020e_flush_kern_dcache_page
+	.long	arm1020e_flush_kern_dcache_area
 	.long	arm1020e_dma_inv_range
 	.long	arm1020e_dma_clean_range
 	.long	arm1020e_dma_flush_range
...
@@ -214,17 +214,18 @@ ENTRY(arm1022_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
 *
- *	- page	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
 */
-ENTRY(arm1022_flush_kern_dcache_page)
+ENTRY(arm1022_flush_kern_dcache_area)
 	mov	ip, #0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
-	add	r1, r0, #PAGE_SZ
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -310,7 +311,7 @@ ENTRY(arm1022_cache_fns)
 	.long	arm1022_flush_user_cache_range
 	.long	arm1022_coherent_kern_range
 	.long	arm1022_coherent_user_range
-	.long	arm1022_flush_kern_dcache_page
+	.long	arm1022_flush_kern_dcache_area
 	.long	arm1022_dma_inv_range
 	.long	arm1022_dma_clean_range
 	.long	arm1022_dma_flush_range
...
@@ -208,17 +208,18 @@ ENTRY(arm1026_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
 *
- *	- page	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
 */
-ENTRY(arm1026_flush_kern_dcache_page)
+ENTRY(arm1026_flush_kern_dcache_area)
 	mov	ip, #0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
-	add	r1, r0, #PAGE_SZ
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -304,7 +305,7 @@ ENTRY(arm1026_cache_fns)
 	.long	arm1026_flush_user_cache_range
 	.long	arm1026_coherent_kern_range
 	.long	arm1026_coherent_user_range
-	.long	arm1026_flush_kern_dcache_page
+	.long	arm1026_flush_kern_dcache_area
 	.long	arm1026_dma_inv_range
 	.long	arm1026_dma_clean_range
 	.long	arm1026_dma_flush_range
...
@@ -207,15 +207,16 @@ ENTRY(arm920_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
 *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
 */
-ENTRY(arm920_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(arm920_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -293,7 +294,7 @@ ENTRY(arm920_cache_fns)
 	.long	arm920_flush_user_cache_range
 	.long	arm920_coherent_kern_range
 	.long	arm920_coherent_user_range
-	.long	arm920_flush_kern_dcache_page
+	.long	arm920_flush_kern_dcache_area
 	.long	arm920_dma_inv_range
 	.long	arm920_dma_clean_range
 	.long	arm920_dma_flush_range
...
@@ -209,15 +209,16 @@ ENTRY(arm922_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
 *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
 */
-ENTRY(arm922_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(arm922_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -295,7 +296,7 @@ ENTRY(arm922_cache_fns)
 	.long	arm922_flush_user_cache_range
 	.long	arm922_coherent_kern_range
 	.long	arm922_coherent_user_range
-	.long	arm922_flush_kern_dcache_page
+	.long	arm922_flush_kern_dcache_area
 	.long	arm922_dma_inv_range
 	.long	arm922_dma_clean_range
 	.long	arm922_dma_flush_range
...
@@ -251,15 +251,16 @@ ENTRY(arm925_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
 *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
 */
-ENTRY(arm925_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(arm925_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -346,7 +347,7 @@ ENTRY(arm925_cache_fns)
 	.long	arm925_flush_user_cache_range
 	.long	arm925_coherent_kern_range
 	.long	arm925_coherent_user_range
-	.long	arm925_flush_kern_dcache_page
+	.long	arm925_flush_kern_dcache_area
 	.long	arm925_dma_inv_range
 	.long	arm925_dma_clean_range
 	.long	arm925_dma_flush_range
...
@@ -214,15 +214,16 @@ ENTRY(arm926_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
 *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
 */
-ENTRY(arm926_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(arm926_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -309,7 +310,7 @@ ENTRY(arm926_cache_fns)
 	.long	arm926_flush_user_cache_range
 	.long	arm926_coherent_kern_range
 	.long	arm926_coherent_user_range
-	.long	arm926_flush_kern_dcache_page
+	.long	arm926_flush_kern_dcache_area
 	.long	arm926_dma_inv_range
 	.long	arm926_dma_clean_range
 	.long	arm926_dma_flush_range
...
@@ -141,14 +141,15 @@ ENTRY(arm940_coherent_user_range)
 	/* FALLTHROUGH */
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
 *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
 */
-ENTRY(arm940_flush_kern_dcache_page)
+ENTRY(arm940_flush_kern_dcache_area)
 	mov	ip, #0
 	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
 1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
@@ -238,7 +239,7 @@ ENTRY(arm940_cache_fns)
 	.long	arm940_flush_user_cache_range
 	.long	arm940_coherent_kern_range
 	.long	arm940_coherent_user_range
-	.long	arm940_flush_kern_dcache_page
+	.long	arm940_flush_kern_dcache_area
 	.long	arm940_dma_inv_range
 	.long	arm940_dma_clean_range
 	.long	arm940_dma_flush_range
...
@@ -183,16 +183,17 @@ ENTRY(arm946_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
 *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  * (same as arm926)
 */
-ENTRY(arm946_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(arm946_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -280,7 +281,7 @@ ENTRY(arm946_cache_fns)
 	.long	arm946_flush_user_cache_range
 	.long	arm946_coherent_kern_range
 	.long	arm946_coherent_user_range
-	.long	arm946_flush_kern_dcache_page
+	.long	arm946_flush_kern_dcache_area
 	.long	arm946_dma_inv_range
 	.long	arm946_dma_clean_range
 	.long	arm946_dma_flush_range
...
@@ -226,16 +226,17 @@ ENTRY(feroceon_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
 *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
 */
 	.align	5
-ENTRY(feroceon_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(feroceon_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -246,7 +247,7 @@ ENTRY(feroceon_flush_kern_dcache_page)
 	mov	pc, lr
 
 	.align	5
-ENTRY(feroceon_range_flush_kern_dcache_page)
+ENTRY(feroceon_range_flush_kern_dcache_area)
 	mrs	r2, cpsr
 	add	r1, r0, #PAGE_SZ - CACHE_DLINESIZE	@ top addr is inclusive
 	orr	r3, r2, #PSR_I_BIT
@@ -372,7 +373,7 @@ ENTRY(feroceon_cache_fns)
 	.long	feroceon_flush_user_cache_range
 	.long	feroceon_coherent_kern_range
 	.long	feroceon_coherent_user_range
-	.long	feroceon_flush_kern_dcache_page
+	.long	feroceon_flush_kern_dcache_area
 	.long	feroceon_dma_inv_range
 	.long	feroceon_dma_clean_range
 	.long	feroceon_dma_flush_range
@@ -383,7 +384,7 @@ ENTRY(feroceon_range_cache_fns)
 	.long	feroceon_flush_user_cache_range
 	.long	feroceon_coherent_kern_range
 	.long	feroceon_coherent_user_range
-	.long	feroceon_range_flush_kern_dcache_page
+	.long	feroceon_range_flush_kern_dcache_area
 	.long	feroceon_range_dma_inv_range
 	.long	feroceon_range_dma_clean_range
 	.long	feroceon_range_dma_flush_range
...
@@ -186,15 +186,16 @@ ENTRY(mohawk_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
 *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
 */
-ENTRY(mohawk_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(mohawk_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -273,7 +274,7 @@ ENTRY(mohawk_cache_fns)
 	.long	mohawk_flush_user_cache_range
 	.long	mohawk_coherent_kern_range
 	.long	mohawk_coherent_user_range
-	.long	mohawk_flush_kern_dcache_page
+	.long	mohawk_flush_kern_dcache_area
 	.long	mohawk_dma_inv_range
 	.long	mohawk_dma_clean_range
 	.long	mohawk_dma_flush_range
...
@@ -27,7 +27,7 @@ EXPORT_SYMBOL(__cpuc_flush_kern_all);
 EXPORT_SYMBOL(__cpuc_flush_user_all);
 EXPORT_SYMBOL(__cpuc_flush_user_range);
 EXPORT_SYMBOL(__cpuc_coherent_kern_range);
-EXPORT_SYMBOL(__cpuc_flush_dcache_page);
+EXPORT_SYMBOL(__cpuc_flush_dcache_area);
 #else
 EXPORT_SYMBOL(cpu_cache);
 #endif
...
@@ -226,15 +226,16 @@ ENTRY(xsc3_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache.
 *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
 */
-ENTRY(xsc3_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(xsc3_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
 	add	r0, r0, #CACHELINESIZE
 	cmp	r0, r1
@@ -309,7 +310,7 @@ ENTRY(xsc3_cache_fns)
 	.long	xsc3_flush_user_cache_range
 	.long	xsc3_coherent_kern_range
 	.long	xsc3_coherent_user_range
-	.long	xsc3_flush_kern_dcache_page
+	.long	xsc3_flush_kern_dcache_area
 	.long	xsc3_dma_inv_range
 	.long	xsc3_dma_clean_range
 	.long	xsc3_dma_flush_range
...
@@ -284,15 +284,16 @@ ENTRY(xscale_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
 *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
 */
-ENTRY(xscale_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(xscale_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
 	add	r0, r0, #CACHELINESIZE
@@ -368,7 +369,7 @@ ENTRY(xscale_cache_fns)
 	.long	xscale_flush_user_cache_range
 	.long	xscale_coherent_kern_range
 	.long	xscale_coherent_user_range
-	.long	xscale_flush_kern_dcache_page
+	.long	xscale_flush_kern_dcache_area
 	.long	xscale_dma_inv_range
 	.long	xscale_dma_clean_range
 	.long	xscale_dma_flush_range
@@ -392,7 +393,7 @@ ENTRY(xscale_80200_A0_A1_cache_fns)
 	.long	xscale_flush_user_cache_range
 	.long	xscale_coherent_kern_range
 	.long	xscale_coherent_user_range
-	.long	xscale_flush_kern_dcache_page
+	.long	xscale_flush_kern_dcache_area
 	.long	xscale_dma_flush_range
 	.long	xscale_dma_clean_range
 	.long	xscale_dma_flush_range
...