Commit f67637ee authored by Ralf Baechle, committed by Linus Torvalds

[PATCH] Add struct dev pointer to dma_is_consistent()

dma_is_consistent() is ill-designed in that it does not have a struct
device pointer argument which makes proper support for systems that consist
of a mix of coherent and non-coherent DMA devices hard.  Change
dma_is_consistent to take a struct device pointer as first argument and fix
the sole caller to pass it.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Greg KH <greg@kroah.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 83b7b44e
...@@ -431,10 +431,10 @@ be identical to those passed in (and returned by ...@@ -431,10 +431,10 @@ be identical to those passed in (and returned by
dma_alloc_noncoherent()). dma_alloc_noncoherent()).
int int
dma_is_consistent(dma_addr_t dma_handle) dma_is_consistent(struct device *dev, dma_addr_t dma_handle)
returns true if the memory pointed to by the dma_handle is actually returns true if the device dev is performing consistent DMA on the memory
consistent. area pointed to by the dma_handle.
int int
dma_get_cache_alignment(void) dma_get_cache_alignment(void)
......
...@@ -190,7 +190,7 @@ int dma_supported(struct device *dev, u64 mask) ...@@ -190,7 +190,7 @@ int dma_supported(struct device *dev, u64 mask)
EXPORT_SYMBOL(dma_supported); EXPORT_SYMBOL(dma_supported);
int dma_is_consistent(dma_addr_t dma_addr) int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{ {
return 1; return 1;
} }
......
...@@ -197,7 +197,7 @@ int dma_supported(struct device *dev, u64 mask) ...@@ -197,7 +197,7 @@ int dma_supported(struct device *dev, u64 mask)
EXPORT_SYMBOL(dma_supported); EXPORT_SYMBOL(dma_supported);
int dma_is_consistent(dma_addr_t dma_addr) int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{ {
return 1; return 1;
} }
......
...@@ -363,7 +363,7 @@ int dma_supported(struct device *dev, u64 mask) ...@@ -363,7 +363,7 @@ int dma_supported(struct device *dev, u64 mask)
EXPORT_SYMBOL(dma_supported); EXPORT_SYMBOL(dma_supported);
int dma_is_consistent(dma_addr_t dma_addr) int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{ {
return 1; return 1;
} }
......
...@@ -299,7 +299,7 @@ int dma_supported(struct device *dev, u64 mask) ...@@ -299,7 +299,7 @@ int dma_supported(struct device *dev, u64 mask)
EXPORT_SYMBOL(dma_supported); EXPORT_SYMBOL(dma_supported);
int dma_is_consistent(dma_addr_t dma_addr) int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{ {
return 1; return 1;
} }
......
...@@ -313,7 +313,7 @@ NCR_700_detect(struct scsi_host_template *tpnt, ...@@ -313,7 +313,7 @@ NCR_700_detect(struct scsi_host_template *tpnt,
hostdata->status = memory + STATUS_OFFSET; hostdata->status = memory + STATUS_OFFSET;
/* all of these offsets are L1_CACHE_BYTES separated. It is fatal /* all of these offsets are L1_CACHE_BYTES separated. It is fatal
* if this isn't sufficient separation to avoid dma flushing issues */ * if this isn't sufficient separation to avoid dma flushing issues */
BUG_ON(!dma_is_consistent(pScript) && L1_CACHE_BYTES < dma_get_cache_alignment()); BUG_ON(!dma_is_consistent(hostdata->dev, pScript) && L1_CACHE_BYTES < dma_get_cache_alignment());
hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET); hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
hostdata->dev = dev; hostdata->dev = dev;
......
...@@ -51,7 +51,7 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, ...@@ -51,7 +51,7 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(dev) (1) #define dma_is_consistent(d, h) (1)
int dma_set_mask(struct device *dev, u64 mask); int dma_set_mask(struct device *dev, u64 mask);
......
...@@ -48,7 +48,7 @@ static inline int dma_get_cache_alignment(void) ...@@ -48,7 +48,7 @@ static inline int dma_get_cache_alignment(void)
return 32; return 32;
} }
static inline int dma_is_consistent(dma_addr_t handle) static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
{ {
return !!arch_is_coherent(); return !!arch_is_coherent();
} }
......
...@@ -307,7 +307,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, ...@@ -307,7 +307,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
static inline int dma_is_consistent(dma_addr_t dma_addr) static inline int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{ {
return 1; return 1;
} }
......
...@@ -156,7 +156,7 @@ dma_get_cache_alignment(void) ...@@ -156,7 +156,7 @@ dma_get_cache_alignment(void)
return (1 << INTERNODE_CACHE_SHIFT); return (1 << INTERNODE_CACHE_SHIFT);
} }
#define dma_is_consistent(d) (1) #define dma_is_consistent(d, h) (1)
static inline void static inline void
dma_cache_sync(void *vaddr, size_t size, dma_cache_sync(void *vaddr, size_t size,
......
...@@ -172,7 +172,7 @@ int dma_get_cache_alignment(void) ...@@ -172,7 +172,7 @@ int dma_get_cache_alignment(void)
return 1 << L1_CACHE_SHIFT; return 1 << L1_CACHE_SHIFT;
} }
#define dma_is_consistent(d) (1) #define dma_is_consistent(d, h) (1)
static inline static inline
void dma_cache_sync(void *vaddr, size_t size, void dma_cache_sync(void *vaddr, size_t size,
......
...@@ -266,7 +266,7 @@ dma_error(dma_addr_t dma_addr) ...@@ -266,7 +266,7 @@ dma_error(dma_addr_t dma_addr)
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d) (1) #define dma_is_consistent(d, h) (1)
static inline int static inline int
dma_get_cache_alignment(void) dma_get_cache_alignment(void)
......
...@@ -156,7 +156,7 @@ dma_get_cache_alignment(void) ...@@ -156,7 +156,7 @@ dma_get_cache_alignment(void)
return (1 << INTERNODE_CACHE_SHIFT); return (1 << INTERNODE_CACHE_SHIFT);
} }
#define dma_is_consistent(d) (1) #define dma_is_consistent(d, h) (1)
static inline void static inline void
dma_cache_sync(void *vaddr, size_t size, dma_cache_sync(void *vaddr, size_t size,
......
...@@ -59,6 +59,6 @@ dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir) ...@@ -59,6 +59,6 @@ dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
mb(); mb();
} }
#define dma_is_consistent(dma_handle) (1) /* all we do is coherent memory... */ #define dma_is_consistent(d, h) (1) /* all we do is coherent memory... */
#endif /* _ASM_IA64_DMA_MAPPING_H */ #endif /* _ASM_IA64_DMA_MAPPING_H */
...@@ -21,7 +21,7 @@ static inline int dma_get_cache_alignment(void) ...@@ -21,7 +21,7 @@ static inline int dma_get_cache_alignment(void)
return 1 << L1_CACHE_SHIFT; return 1 << L1_CACHE_SHIFT;
} }
static inline int dma_is_consistent(dma_addr_t dma_addr) static inline int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{ {
return 0; return 0;
} }
......
...@@ -63,7 +63,7 @@ dma_get_cache_alignment(void) ...@@ -63,7 +63,7 @@ dma_get_cache_alignment(void)
return 128; return 128;
} }
extern int dma_is_consistent(dma_addr_t dma_addr); extern int dma_is_consistent(struct device *dev, dma_addr_t dma_addr);
extern void dma_cache_sync(void *vaddr, size_t size, extern void dma_cache_sync(void *vaddr, size_t size,
enum dma_data_direction direction); enum dma_data_direction direction);
......
...@@ -191,7 +191,7 @@ dma_get_cache_alignment(void) ...@@ -191,7 +191,7 @@ dma_get_cache_alignment(void)
} }
static inline int static inline int
dma_is_consistent(dma_addr_t dma_addr) dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{ {
return (hppa_dma_ops->dma_sync_single_for_cpu == NULL); return (hppa_dma_ops->dma_sync_single_for_cpu == NULL);
} }
......
...@@ -342,9 +342,9 @@ static inline int dma_mapping_error(dma_addr_t dma_addr) ...@@ -342,9 +342,9 @@ static inline int dma_mapping_error(dma_addr_t dma_addr)
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE #ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d) (0) #define dma_is_consistent(d, h) (0)
#else #else
#define dma_is_consistent(d) (1) #define dma_is_consistent(d, h) (1)
#endif #endif
static inline int dma_get_cache_alignment(void) static inline int dma_get_cache_alignment(void)
......
...@@ -181,7 +181,7 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t siz ...@@ -181,7 +181,7 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t siz
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d) (1) #define dma_is_consistent(d, h) (1)
static inline int static inline int
dma_get_cache_alignment(void) dma_get_cache_alignment(void)
......
...@@ -94,7 +94,7 @@ dma_sync_sg(struct device *dev, struct scatterlist *sg, int nelems, ...@@ -94,7 +94,7 @@ dma_sync_sg(struct device *dev, struct scatterlist *sg, int nelems,
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d) (1) #define dma_is_consistent(d, h) (1)
static inline int static inline int
dma_get_cache_alignment(void) dma_get_cache_alignment(void)
......
...@@ -180,7 +180,7 @@ static inline int dma_get_cache_alignment(void) ...@@ -180,7 +180,7 @@ static inline int dma_get_cache_alignment(void)
return boot_cpu_data.x86_clflush_size; return boot_cpu_data.x86_clflush_size;
} }
#define dma_is_consistent(h) 1 #define dma_is_consistent(d, h) 1
extern int dma_set_mask(struct device *dev, u64 mask); extern int dma_set_mask(struct device *dev, u64 mask);
......
...@@ -170,7 +170,7 @@ dma_get_cache_alignment(void) ...@@ -170,7 +170,7 @@ dma_get_cache_alignment(void)
return L1_CACHE_BYTES; return L1_CACHE_BYTES;
} }
#define dma_is_consistent(d) (1) #define dma_is_consistent(d, h) (1)
static inline void static inline void
dma_cache_sync(void *vaddr, size_t size, dma_cache_sync(void *vaddr, size_t size,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment