Commit d1b51632 authored by FUJITA Tomonori, committed by Linus Torvalds

iommu sg merging: parisc: make iommu respect the segment size limits

This patch makes the parisc iommu respect the device's segment size limit
when merging scatter/gather (sg) lists.
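
For illustration only (not part of this commit), a minimal sketch of how a
driver advertises the limit that the coalescing code below now honors; the
foo_* names and the 64 KB value are hypothetical:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    /* Hypothetical probe routine: publish the device's DMA segment
     * size limit.  dma_get_max_seg_size() reads this value (falling
     * back to 64 KB when no limit has been set), so the coalescing
     * loop below stops before a merged segment would exceed it.
     */
    static struct device_dma_parameters foo_dma_parms;

    static int foo_probe(struct device *dev)
    {
    	dev->dma_parms = &foo_dma_parms;
    	return dma_set_max_seg_size(dev, 65536);	/* 64 KB cap */
    }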
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Acked-by: Jens Axboe <jens.axboe@oracle.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Grant Grundler <grundler@parisc-linux.org>
Cc: Matthew Wilcox <willy@debian.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent fde6a3c8
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -941,7 +941,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	** w/o this association, we wouldn't have coherent DMA!
 	** Access to the virtual address is what forces a two pass algorithm.
 	*/
-	coalesced = iommu_coalesce_chunks(ioc, sglist, nents, ccio_alloc_range);
+	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, ccio_alloc_range);
 	/*
 	** Program the I/O Pdir
...
--- a/drivers/parisc/iommu-helpers.h
+++ b/drivers/parisc/iommu-helpers.h
@@ -95,12 +95,14 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
  */
 static inline unsigned int
-iommu_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg, int nents,
+iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
+		      struct scatterlist *startsg, int nents,
 		      int (*iommu_alloc_range)(struct ioc *, size_t))
 {
 	struct scatterlist *contig_sg;	   /* contig chunk head */
 	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
 	unsigned int n_mappings = 0;
+	unsigned int max_seg_size = dma_get_max_seg_size(dev);
 
 	while (nents > 0) {
@@ -142,6 +144,9 @@ iommu_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg, int nents,
 			    IOVP_SIZE) > DMA_CHUNK_SIZE))
 			break;
 
+		if (startsg->length + dma_len > max_seg_size)
+			break;
+
 		/*
 		** Next see if we can append the next chunk (i.e.
 		** it must end on one page and begin on another
...
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -946,7 +946,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	** w/o this association, we wouldn't have coherent DMA!
 	** Access to the virtual address is what forces a two pass algorithm.
 	*/
-	coalesced = iommu_coalesce_chunks(ioc, sglist, nents, sba_alloc_range);
+	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);
 	/*
 	** Program the I/O Pdir
...