Commit a018ef2b authored by Catalin Marinas

Add generic support for outer caches

The outer cache can be an L2 cache, as on the RealView/EB MPCore platform, or even an L3
or higher-level cache on ARMv7 cores. This patch adds generic support for
flushing the outer cache in the DMA operations.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 37e1b898
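For context (not part of this commit): the new outer_cache hooks are intended to be filled in at boot by a platform-specific outer (L2/L3) cache driver on platforms that select CONFIG_OUTER_CACHE. Below is a minimal sketch of such a registration, assuming hypothetical my_l2_* functions; only struct outer_cache_fns, the outer_cache variable and the outer_*_range() wrappers come from this patch.

/* Hypothetical platform L2 cache driver -- a sketch, not part of this patch. */
#include <linux/init.h>
#include <asm/cacheflush.h>

static void my_l2_inv_range(unsigned long start, unsigned long end)
{
	/* would invalidate the outer cache lines covering [start, end) by physical address */
}

static void my_l2_clean_range(unsigned long start, unsigned long end)
{
	/* would clean (write back) the outer cache lines covering [start, end) */
}

static void my_l2_flush_range(unsigned long start, unsigned long end)
{
	/* would clean and then invalidate the outer cache lines covering [start, end) */
}

static int __init my_l2_init(void)
{
	/* register the hooks; outer_*_range() in the DMA code then calls them */
	outer_cache.inv_range = my_l2_inv_range;
	outer_cache.clean_range = my_l2_clean_range;
	outer_cache.flush_range = my_l2_flush_range;
	return 0;
}
core_initcall(my_l2_init);

The platform would also need to select CONFIG_OUTER_CACHE so that the outer_cache definition and the non-empty inline wrappers added below are built; otherwise the wrappers compile to empty functions.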
@@ -334,6 +334,7 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
*/
ptr = (unsigned long)buf->ptr;
dmac_clean_range(ptr, ptr + size);
outer_clean_range(__pa(ptr), __pa(ptr) + size);
}
free_safe_buffer(device_info, buf);
}
@@ -88,6 +88,9 @@ struct cpu_user_fns cpu_user;
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache;
#endif

struct stack {
	u32 irq[3];
@@ -609,3 +609,6 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
	  Forget about fast user space cmpxchg support.
	  It is just not possible.

config OUTER_CACHE
	bool
	default n
@@ -208,6 +208,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		dmac_flush_range(kaddr, kaddr + size);
		outer_flush_range(__pa(kaddr), __pa(kaddr) + size);
	}
/*
@@ -485,15 +486,20 @@ void consistent_sync(void *vaddr, size_t size, int direction)
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(end - 1));

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		dmac_inv_range(start, end);
		outer_inv_range(__pa(start), __pa(end));
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		dmac_clean_range(start, end);
		outer_clean_range(__pa(start), __pa(end));
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		dmac_flush_range(start, end);
		outer_flush_range(__pa(start), __pa(end));
		break;
	default:
		BUG();
@@ -190,6 +190,12 @@ struct cpu_cache_fns {
	void (*dma_flush_range)(unsigned long, unsigned long);
};

struct outer_cache_fns {
	void (*inv_range)(unsigned long, unsigned long);
	void (*clean_range)(unsigned long, unsigned long);
	void (*flush_range)(unsigned long, unsigned long);
};
/*
* Select the calling method
*/
@@ -246,6 +252,37 @@ extern void dmac_flush_range(unsigned long, unsigned long);
#endif
#ifdef CONFIG_OUTER_CACHE
extern struct outer_cache_fns outer_cache;
static inline void outer_inv_range(unsigned long start, unsigned long end)
{
	if (outer_cache.inv_range)
		outer_cache.inv_range(start, end);
}

static inline void outer_clean_range(unsigned long start, unsigned long end)
{
	if (outer_cache.clean_range)
		outer_cache.clean_range(start, end);
}

static inline void outer_flush_range(unsigned long start, unsigned long end)
{
	if (outer_cache.flush_range)
		outer_cache.flush_range(start, end);
}

#else

static inline void outer_inv_range(unsigned long start, unsigned long end)
{ }
static inline void outer_clean_range(unsigned long start, unsigned long end)
{ }
static inline void outer_flush_range(unsigned long start, unsigned long end)
{ }

#endif
/*
* flush_cache_vmap() is used when creating mappings (eg, via vmap,
* vmalloc, ioremap etc) in kernel space for pages. Since the