Commit a7375762 authored by Joerg Roedel, committed by Linus Torvalds

sparc64: rename iommu_num_pages function to iommu_nr_pages

This is a preparation patch for introducing a generic iommu_num_pages function.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Acked-by: David S. Miller <davem@davemloft.net>
Cc: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Muli Ben-Yehuda <muli@il.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bdab0ba3
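This commit only renames the sparc64 helper; the follow-up patch that introduces the shared helper is not shown here. As a rough, non-authoritative sketch of the kind of calculation such a helper performs, the fragment below counts how many IO pages a buffer spans. The name generic_iommu_num_pages, the parameter list, and the local DIV_ROUND_UP definition are illustrative assumptions, not the code that was actually merged.

/*
 * Hedged sketch only: count how many IO pages of size io_page_size are
 * touched by a buffer of len bytes starting at addr.  io_page_size is
 * assumed to be a power of two.  Not the implementation from the
 * follow-up patch.
 */
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

static unsigned long generic_iommu_num_pages(unsigned long addr,
                                             unsigned long len,
                                             unsigned long io_page_size)
{
        /* Offset within the first page plus the length gives the byte
         * span measured from a page-aligned start address. */
        unsigned long size = (addr & (io_page_size - 1)) + len;

        return DIV_ROUND_UP(size, io_page_size);
}

For example, with an 8192-byte IO page, a 100-byte buffer that begins 50 bytes before a page boundary crosses into a second page, and the calculation above returns 2.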
@@ -575,7 +575,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
         }
         /* Allocate iommu entries for that segment */
         paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
-        npages = iommu_num_pages(paddr, slen);
+        npages = iommu_nr_pages(paddr, slen);
         entry = iommu_range_alloc(dev, iommu, npages, &handle);
         /* Handle failure */
@@ -647,7 +647,7 @@ iommu_map_failed:
         iopte_t *base;
         vaddr = s->dma_address & IO_PAGE_MASK;
-        npages = iommu_num_pages(s->dma_address, s->dma_length);
+        npages = iommu_nr_pages(s->dma_address, s->dma_length);
         iommu_range_free(iommu, vaddr, npages);
         entry = (vaddr - iommu->page_table_map_base)
@@ -715,7 +715,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
         if (!len)
                 break;
-        npages = iommu_num_pages(dma_handle, len);
+        npages = iommu_nr_pages(dma_handle, len);
         iommu_range_free(iommu, dma_handle, npages);
         entry = ((dma_handle - iommu->page_table_map_base)
...
@@ -35,7 +35,7 @@
 #define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG))))
-static inline unsigned long iommu_num_pages(unsigned long vaddr,
+static inline unsigned long iommu_nr_pages(unsigned long vaddr,
                                             unsigned long slen)
 {
         unsigned long npages;
@@ -53,7 +53,7 @@ static inline int is_span_boundary(unsigned long entry,
                                    struct scatterlist *sg)
 {
         unsigned long paddr = SG_ENT_PHYS_ADDRESS(outs);
-        int nr = iommu_num_pages(paddr, outs->dma_length + sg->length);
+        int nr = iommu_nr_pages(paddr, outs->dma_length + sg->length);
         return iommu_is_span_boundary(entry, nr, shift, boundary_size);
 }
...
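The @@ -35,7 hunk above stops before the body of the renamed inline function, so only its signature is visible. As orientation only, a helper with that signature usually rounds the end of the range up and the start down to IO page boundaries, then shifts the difference down to a page count. The sketch below illustrates that pattern; the IO_PAGE_* definitions (an 8 KB IO page) and the sketch_iommu_nr_pages name are assumptions for this example, not text from the header being patched.

/* Hedged sketch of a page-count helper with the signature shown above.
 * The IO_PAGE_* values are illustrative assumptions (8 KB IO pages). */
#define IO_PAGE_SHIFT           13
#define IO_PAGE_SIZE            (1UL << IO_PAGE_SHIFT)
#define IO_PAGE_MASK            (~(IO_PAGE_SIZE - 1UL))
#define IO_PAGE_ALIGN(addr)     (((addr) + IO_PAGE_SIZE - 1UL) & IO_PAGE_MASK)

static inline unsigned long sketch_iommu_nr_pages(unsigned long vaddr,
                                                  unsigned long slen)
{
        /* Byte span from the start of the first IO page to the end of
         * the last one, converted to a number of IO pages. */
        unsigned long npages = IO_PAGE_ALIGN(vaddr + slen) - (vaddr & IO_PAGE_MASK);

        return npages >> IO_PAGE_SHIFT;
}

This fixed-page-size form and the parameterized sketch after the commit message compute the same count; a generic helper simply takes the IO page size as an argument instead of relying on per-architecture IO_PAGE_* constants, and renaming the sparc64 version frees the iommu_num_pages name for that shared helper.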
@@ -384,7 +384,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
         }
         /* Allocate iommu entries for that segment */
         paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
-        npages = iommu_num_pages(paddr, slen);
+        npages = iommu_nr_pages(paddr, slen);
         entry = iommu_range_alloc(dev, iommu, npages, &handle);
         /* Handle failure */
@@ -461,7 +461,7 @@ iommu_map_failed:
         unsigned long vaddr, npages;
         vaddr = s->dma_address & IO_PAGE_MASK;
-        npages = iommu_num_pages(s->dma_address, s->dma_length);
+        npages = iommu_nr_pages(s->dma_address, s->dma_length);
         iommu_range_free(iommu, vaddr, npages);
         /* XXX demap? XXX */
         s->dma_address = DMA_ERROR_CODE;
@@ -500,7 +500,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
         if (!len)
                 break;
-        npages = iommu_num_pages(dma_handle, len);
+        npages = iommu_nr_pages(dma_handle, len);
         iommu_range_free(iommu, dma_handle, npages);
         entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);