Commit 98ed7d4b authored by Russell King, committed by Russell King

[ARM] dma-mapping: improve type-safeness of DMA translations

OMAP at least gets the return type(s) for the DMA translation functions
wrong, which can lead to subtle errors.  Avoid this by moving the DMA
translation functions to asm/dma-mapping.h, and converting them to
inline functions.

Fix the OMAP DMA translation macros to use the correct argument and
result types.

Also, remove the unnecessary casts in dmabounce.c.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 492c71dd
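
Why inline functions rather than macros: a macro's "return type" is simply whatever its expression happens to evaluate to, so a caller that stores the result in the wrong type gets no diagnostic, while a static inline function fixes both the argument and the result types in its prototype. The standalone sketch below is not kernel code; BUS_OFFSET, the helper names and the address arithmetic are invented purely to illustrate the point.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t dma_addr_t;		/* what the bus sees */

/* stand-in for a bus<->virtual offset; purely illustrative */
#define BUS_OFFSET 0x80000000u

/* Macro version: the "return type" is whatever the expression yields.
 * If a caller assigns the result to the wrong type, nothing complains. */
#define macro_dma_to_virt(addr)	((addr) - BUS_OFFSET)

/* Inline version: the prototype pins down both argument and result types,
 * so a caller that treats the result as an integer gets a diagnostic. */
static inline void *inline_dma_to_virt(dma_addr_t addr)
{
	return (void *)(uintptr_t)(addr - BUS_OFFSET);
}

int main(void)
{
	dma_addr_t handle = 0x80001000u;

	/* Compiles silently: the macro result is just an integer. */
	unsigned int wrong = macro_dma_to_virt(handle);

	/* With -Werror this line would fail: assigning 'void *' to 'unsigned int'. */
	/* unsigned int also_wrong = inline_dma_to_virt(handle); */

	void *ok = inline_dma_to_virt(handle);
	printf("%u %p\n", wrong, ok);
	return 0;
}
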
@@ -246,9 +246,9 @@ map_single(struct device *dev, void *ptr, size_t size,
 	}

 	dev_dbg(dev,
-		"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
-		__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
-		buf->safe, (void *) buf->safe_dma_addr);
+		"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+		buf->safe, buf->safe_dma_addr);

 	if ((dir == DMA_TO_DEVICE) ||
 	    (dir == DMA_BIDIRECTIONAL)) {
@@ -292,9 +292,9 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		BUG_ON(buf->size != size);

 		dev_dbg(dev,
-			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
-			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
-			buf->safe, (void *) buf->safe_dma_addr);
+			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+			buf->safe, buf->safe_dma_addr);

 		DO_STATS ( device_info->bounce_count++ );
@@ -355,9 +355,9 @@ sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		 */
 		dev_dbg(dev,
-			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
-			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
-			buf->safe, (void *) buf->safe_dma_addr);
+			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+			buf->safe, buf->safe_dma_addr);

 		DO_STATS ( device_info->bounce_count++ );
...
@@ -3,11 +3,48 @@
 #ifdef __KERNEL__

-#include <linux/mm.h> /* need struct page */
+#include <linux/mm_types.h>
 #include <linux/scatterlist.h>

 #include <asm-generic/dma-coherent.h>
+#include <asm/memory.h>
+
+/*
+ * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
+ * used internally by the DMA-mapping API to provide DMA addresses. They
+ * must not be used by drivers.
+ */
+#ifndef __arch_page_to_dma
+static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+{
+	return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
+}
+
+static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
+{
+	return (void *)__bus_to_virt(addr);
+}
+
+static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
+{
+	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
+}
+#else
+static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+{
+	return __arch_page_to_dma(dev, page);
+}
+
+static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
+{
+	return __arch_dma_to_virt(dev, addr);
+}
+
+static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
+{
+	return __arch_virt_to_dma(dev, addr);
+}
+#endif

 /*
  * DMA-consistent mapping functions.  These allocate/free a region of
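
For platforms that need their own translation (such as OMAP15xx with its local bus, touched later in this commit), the hook pattern is: the machine header defines __arch_page_to_dma/__arch_dma_to_virt/__arch_virt_to_dma before <asm/dma-mapping.h> is included, and the typed inline wrappers above forward to them, so the dma_addr_t/void * conversions live in exactly one place. A hypothetical override might look roughly like this; is_my_bus_device(), my_bus_to_virt() and virt_to_my_bus() are made-up names, not a real platform's API:

#define __arch_page_to_dma(dev, page)				\
	((dma_addr_t)(is_my_bus_device(dev) ?			\
		virt_to_my_bus(page_address(page)) :		\
		__virt_to_bus((unsigned long)page_address(page))))

#define __arch_dma_to_virt(dev, addr)				\
	((void *)(is_my_bus_device(dev) ?			\
		my_bus_to_virt(addr) :				\
		__bus_to_virt(addr)))

#define __arch_virt_to_dma(dev, addr)				\
	({ unsigned long __a = (unsigned long)(addr);		\
	   (dma_addr_t)(is_my_bus_device(dev) ?			\
		virt_to_my_bus(__a) : __virt_to_bus(__a)); })
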
@@ -169,7 +206,7 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 	if (!arch_is_coherent())
 		dma_cache_maint(cpu_addr, size, dir);

-	return virt_to_dma(dev, (unsigned long)cpu_addr);
+	return virt_to_dma(dev, cpu_addr);
 }
 #else
 extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
@@ -195,7 +232,7 @@ dma_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size,
 	     enum dma_data_direction dir)
 {
-	return dma_map_single(dev, page_address(page) + offset, size, (int)dir);
+	return dma_map_single(dev, page_address(page) + offset, size, dir);
 }

 /**
@@ -241,7 +278,7 @@ static inline void
 dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
 	       enum dma_data_direction dir)
 {
-	dma_unmap_single(dev, handle, size, (int)dir);
+	dma_unmap_single(dev, handle, size, dir);
 }

 /**
@@ -336,7 +373,7 @@ dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
 			enum dma_data_direction dir)
 {
 	if (!arch_is_coherent())
-		dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
+		dma_cache_maint(dma_to_virt(dev, handle), size, dir);
 }

 static inline void
@@ -344,7 +381,7 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
 			   enum dma_data_direction dir)
 {
 	if (!arch_is_coherent())
-		dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
+		dma_cache_maint(dma_to_virt(dev, handle), size, dir);
 }
 #else
 extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
...
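
The comment added to the new inline functions bears repeating: these translations are internal to the DMA-mapping implementation. A driver only ever handles the dma_addr_t values returned by the public API, along the lines of the minimal sketch below (hypothetical code, not part of this commit; error handling omitted):

#include <linux/dma-mapping.h>

static void example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	/* ... point the hardware at 'handle' and wait for completion ... */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	/* ... the CPU may now look at buf ... */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
}
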
@@ -313,20 +313,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
  */
 #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)

-/*
- * Optional device DMA address remapping.  Do _not_ use directly!
- * We should really eliminate virt_to_bus() here - it's deprecated.
- */
-#ifndef __arch_page_to_dma
-#define page_to_dma(dev, page)		((dma_addr_t)__virt_to_bus((unsigned long)page_address(page)))
-#define dma_to_virt(dev, addr)		((void *)__bus_to_virt(addr))
-#define virt_to_dma(dev, addr)		((dma_addr_t)__virt_to_bus((unsigned long)(addr)))
-#else
-#define page_to_dma(dev, page)		(__arch_page_to_dma(dev, page))
-#define dma_to_virt(dev, addr)		(__arch_dma_to_virt(dev, addr))
-#define virt_to_dma(dev, addr)		(__arch_virt_to_dma(dev, addr))
-#endif
-
 /*
  * Optional coherency support.  Currently used only by selected
  * Intel XSC3-based systems.
...
@@ -76,13 +76,14 @@
 					(dma_addr_t)virt_to_lbus(page_address(page)) : \
 					(dma_addr_t)__virt_to_bus(page_address(page));})

-#define __arch_dma_to_virt(dev, addr)	({is_lbus_device(dev) ? \
-					lbus_to_virt(addr) : \
-					__bus_to_virt(addr);})
-#define __arch_virt_to_dma(dev, addr)	({is_lbus_device(dev) ? \
-					virt_to_lbus(addr) : \
-					__virt_to_bus(addr);})
+#define __arch_dma_to_virt(dev, addr)	({ (void *) (is_lbus_device(dev) ? \
+					lbus_to_virt(addr) : \
+					__bus_to_virt(addr)); })
+#define __arch_virt_to_dma(dev, addr)	({ unsigned long __addr = (unsigned long)(addr); \
+					(dma_addr_t) (is_lbus_device(dev) ? \
+					virt_to_lbus(__addr) : \
+					__virt_to_bus(__addr)); })

 #endif	/* CONFIG_ARCH_OMAP15XX */
...
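
The OMAP fix works because a GCC statement expression takes the value and type of its last expression: casting inside the ({ ... }) pins the macro's result to void * or dma_addr_t, and normalising the incoming address to unsigned long first means virt_to_lbus()/__virt_to_bus() always see a plain integer. The standalone GCC-only sketch below (not kernel code; the 0x1000 offset is invented) shows the same idea:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t dma_addr_t;

/* Result type is whatever (x) + 0x1000 happens to be. */
#define OLD_TO_DMA(x)	({ (x) + 0x1000; })

/* Result type is always dma_addr_t, whatever was passed in. */
#define NEW_TO_DMA(x)	({ unsigned long __a = (unsigned long)(x); \
			   (dma_addr_t)(__a + 0x1000); })

int main(void)
{
	char *p = (char *)0x2000;

	/* OLD_TO_DMA(p) has type 'char *': pointer arithmetic, not a bus address. */
	printf("%p\n", (void *)OLD_TO_DMA(p));
	/* NEW_TO_DMA(p) is a dma_addr_t regardless of the argument's type. */
	printf("%#x\n", (unsigned int)NEW_TO_DMA(p));
	return 0;
}
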