Commit 6a32fd4d authored by David S. Miller

[SPARC64]: Remove PGLIST_NENTS PCI IOMMU mapping limitation on SUN4V.

Use a batching queue system for IOMMU mapping setup, with a page-sized batch.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 04d74758
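
The batching idea can be summarized apart from the kernel specifics: physical page addresses are queued into a page-sized list and handed to the mapping call in one go, flushing automatically whenever the list fills and once more at the end. Below is a minimal, self-contained user-space sketch of that pattern; the names (struct batch, batch_add, batch_flush, map_pages, BATCH_NENTS) and the stand-in mapping function are illustrative only, not the kernel's actual interfaces, which appear in the diff that follows.

#include <stdio.h>

#define BATCH_NENTS 8   /* stand-in for PGLIST_NENTS (PAGE_SIZE / sizeof(u64)) */

struct batch {
        unsigned long entry;                    /* next IOTSB index to map        */
        unsigned long npages;                   /* pages queued so far            */
        unsigned long long pglist[BATCH_NENTS]; /* queued physical page addresses */
};

/* Stand-in for the hypervisor map call; it may map fewer pages than asked. */
static long map_pages(unsigned long entry, unsigned long npages,
                      const unsigned long long *pglist)
{
        (void) pglist;
        printf("map %lu pages starting at entry %lu\n", npages, entry);
        return (long) npages;
}

/* Push everything queued so far out to the mapping call. */
static long batch_flush(struct batch *b)
{
        const unsigned long long *pglist = b->pglist;

        while (b->npages != 0) {
                long num = map_pages(b->entry, b->npages, pglist);

                if (num < 0)
                        return -1;      /* caller unwinds its allocation */
                b->entry += num;
                b->npages -= num;
                pglist += num;
        }
        return 0;
}

/* Queue one physical page, flushing automatically when the batch fills. */
static long batch_add(struct batch *b, unsigned long long phys_page)
{
        b->pglist[b->npages++] = phys_page;
        if (b->npages == BATCH_NENTS)
                return batch_flush(b);
        return 0;
}

int main(void)
{
        struct batch b = { .entry = 0, .npages = 0, .pglist = { 0 } };
        unsigned long long page;

        /* Map 20 pages: two full batches of 8 plus a final partial flush. */
        for (page = 0; page < 20; page++)
                if (batch_add(&b, page * 4096ULL) < 0)
                        return 1;

        return batch_flush(&b) < 0 ? 1 : 0;
}

In the patch itself this queue lives in a per-CPU structure and is only manipulated with interrupts disabled, so no locking is needed around the batch state.
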
@@ -26,11 +26,86 @@
 #define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))
 
-struct sun4v_pglist {
-        u64 *pglist;
+struct pci_iommu_batch {
+        struct pci_dev *pdev;           /* Device mapping is for.       */
+        unsigned long  prot;            /* IOMMU page protections       */
+        unsigned long  entry;           /* Index into IOTSB.            */
+        u64            *pglist;         /* List of physical pages       */
+        unsigned long  npages;          /* Number of pages in list.     */
 };
 
-static DEFINE_PER_CPU(struct sun4v_pglist, iommu_pglists);
+static DEFINE_PER_CPU(struct pci_iommu_batch, pci_iommu_batch);
+
+/* Interrupts must be disabled.  */
+static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry)
+{
+        struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
+
+        p->pdev   = pdev;
+        p->prot   = prot;
+        p->entry  = entry;
+        p->npages = 0;
+}
+
+/* Interrupts must be disabled.  */
+static long pci_iommu_batch_flush(struct pci_iommu_batch *p)
+{
+        struct pcidev_cookie *pcp = p->pdev->sysdata;
+        unsigned long devhandle = pcp->pbm->devhandle;
+        unsigned long prot = p->prot;
+        unsigned long entry = p->entry;
+        u64 *pglist = p->pglist;
+        unsigned long npages = p->npages;
+
+        do {
+                long num;
+
+                num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
+                                          npages, prot, __pa(pglist));
+                if (unlikely(num < 0)) {
+                        if (printk_ratelimit())
+                                printk("pci_iommu_batch_flush: IOMMU map of "
+                                       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
+                                       "status %ld\n",
+                                       devhandle, HV_PCI_TSBID(0, entry),
+                                       npages, prot, __pa(pglist), num);
+                        return -1;
+                }
+
+                entry += num;
+                npages -= num;
+                pglist += num;
+        } while (npages != 0);
+
+        p->entry = entry;
+        p->npages = 0;
+
+        return 0;
+}
+
+/* Interrupts must be disabled.  */
+static inline long pci_iommu_batch_add(u64 phys_page)
+{
+        struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
+
+        BUG_ON(p->npages >= PGLIST_NENTS);
+
+        p->pglist[p->npages++] = phys_page;
+        if (p->npages == PGLIST_NENTS)
+                return pci_iommu_batch_flush(p);
+
+        return 0;
+}
+
+/* Interrupts must be disabled.  */
+static inline long pci_iommu_batch_end(void)
+{
+        struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);
+
+        BUG_ON(p->npages >= PGLIST_NENTS);
+
+        return pci_iommu_batch_flush(p);
+}
+
 static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
 {
@@ -86,65 +161,64 @@ static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr
         unsigned long flags, order, first_page, npages, n;
         void *ret;
         long entry;
-        u64 *pglist;
-        u32 devhandle;
-        int cpu;
 
         size = IO_PAGE_ALIGN(size);
         order = get_order(size);
-        if (order >= MAX_ORDER)
+        if (unlikely(order >= MAX_ORDER))
                 return NULL;
 
         npages = size >> IO_PAGE_SHIFT;
-        if (npages > PGLIST_NENTS)
-                return NULL;
 
         first_page = __get_free_pages(GFP_ATOMIC, order);
-        if (first_page == 0UL)
+        if (unlikely(first_page == 0UL))
                 return NULL;
 
         memset((char *)first_page, 0, PAGE_SIZE << order);
 
         pcp = pdev->sysdata;
-        devhandle = pcp->pbm->devhandle;
         iommu = pcp->pbm->iommu;
 
         spin_lock_irqsave(&iommu->lock, flags);
         entry = pci_arena_alloc(&iommu->arena, npages);
         spin_unlock_irqrestore(&iommu->lock, flags);
 
-        if (unlikely(entry < 0L)) {
-                free_pages(first_page, order);
-                return NULL;
-        }
+        if (unlikely(entry < 0L))
+                goto arena_alloc_fail;
 
         *dma_addrp = (iommu->page_table_map_base +
                       (entry << IO_PAGE_SHIFT));
         ret = (void *) first_page;
         first_page = __pa(first_page);
 
-        cpu = get_cpu();
-
-        pglist = __get_cpu_var(iommu_pglists).pglist;
-        for (n = 0; n < npages; n++)
-                pglist[n] = first_page + (n * PAGE_SIZE);
-
-        do {
-                unsigned long num;
-
-                num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
-                                          npages,
-                                          (HV_PCI_MAP_ATTR_READ |
-                                           HV_PCI_MAP_ATTR_WRITE),
-                                          __pa(pglist));
-                entry += num;
-                npages -= num;
-                pglist += num;
-        } while (npages != 0);
-
-        put_cpu();
+        local_irq_save(flags);
+
+        pci_iommu_batch_start(pdev,
+                              (HV_PCI_MAP_ATTR_READ |
+                               HV_PCI_MAP_ATTR_WRITE),
+                              entry);
+
+        for (n = 0; n < npages; n++) {
+                long err = pci_iommu_batch_add(first_page + (n * PAGE_SIZE));
+                if (unlikely(err < 0L))
+                        goto iommu_map_fail;
+        }
+
+        if (unlikely(pci_iommu_batch_end() < 0L))
+                goto iommu_map_fail;
+
+        local_irq_restore(flags);
 
         return ret;
+
+iommu_map_fail:
+        /* Interrupts are disabled.  */
+        spin_lock(&iommu->lock);
+        pci_arena_free(&iommu->arena, entry, npages);
+        spin_unlock_irqrestore(&iommu->lock, flags);
+
+arena_alloc_fail:
+        free_pages(first_page, order);
+        return NULL;
 }
 
 static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
@@ -186,15 +260,12 @@ static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz,
         struct pci_iommu *iommu;
         unsigned long flags, npages, oaddr;
         unsigned long i, base_paddr;
-        u32 devhandle, bus_addr, ret;
+        u32 bus_addr, ret;
         unsigned long prot;
         long entry;
-        u64 *pglist;
-        int cpu;
 
         pcp = pdev->sysdata;
         iommu = pcp->pbm->iommu;
-        devhandle = pcp->pbm->devhandle;
 
         if (unlikely(direction == PCI_DMA_NONE))
                 goto bad;
@@ -202,8 +273,6 @@ static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz,
         oaddr = (unsigned long)ptr;
         npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
         npages >>= IO_PAGE_SHIFT;
-        if (unlikely(npages > PGLIST_NENTS))
-                goto bad;
 
         spin_lock_irqsave(&iommu->lock, flags);
         entry = pci_arena_alloc(&iommu->arena, npages);
@@ -220,24 +289,19 @@ static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz,
         if (direction != PCI_DMA_TODEVICE)
                 prot |= HV_PCI_MAP_ATTR_WRITE;
 
-        cpu = get_cpu();
-
-        pglist = __get_cpu_var(iommu_pglists).pglist;
-        for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE)
-                pglist[i] = base_paddr;
-
-        do {
-                unsigned long num;
-
-                num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
-                                          npages, prot,
-                                          __pa(pglist));
-                entry += num;
-                npages -= num;
-                pglist += num;
-        } while (npages != 0);
-
-        put_cpu();
+        local_irq_save(flags);
+
+        pci_iommu_batch_start(pdev, prot, entry);
+
+        for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
+                long err = pci_iommu_batch_add(base_paddr);
+                if (unlikely(err < 0L))
+                        goto iommu_map_fail;
+        }
+        if (unlikely(pci_iommu_batch_end() < 0L))
+                goto iommu_map_fail;
+
+        local_irq_restore(flags);
 
         return ret;
@@ -245,6 +309,14 @@ bad:
         if (printk_ratelimit())
                 WARN_ON(1);
         return PCI_DMA_ERROR_CODE;
+
+iommu_map_fail:
+        /* Interrupts are disabled.  */
+        spin_lock(&iommu->lock);
+        pci_arena_free(&iommu->arena, entry, npages);
+        spin_unlock_irqrestore(&iommu->lock, flags);
+
+        return PCI_DMA_ERROR_CODE;
 }
 
 static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
@@ -289,18 +361,19 @@ static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_
 #define SG_ENT_PHYS_ADDRESS(SG) \
         (__pa(page_address((SG)->page)) + (SG)->offset)
 
-static inline void fill_sg(long entry, u32 devhandle,
+static inline long fill_sg(long entry, struct pci_dev *pdev,
                            struct scatterlist *sg,
                            int nused, int nelems, unsigned long prot)
 {
         struct scatterlist *dma_sg = sg;
         struct scatterlist *sg_end = sg + nelems;
-        int i, cpu, pglist_ent;
-        u64 *pglist;
+        unsigned long flags;
+        int i;
 
-        cpu = get_cpu();
-        pglist = __get_cpu_var(iommu_pglists).pglist;
-        pglist_ent = 0;
+        local_irq_save(flags);
+
+        pci_iommu_batch_start(pdev, prot, entry);
 
         for (i = 0; i < nused; i++) {
                 unsigned long pteval = ~0UL;
                 u32 dma_npages;
@@ -338,7 +411,12 @@ static inline void fill_sg(long entry, u32 devhandle,
                 pteval = (pteval & IOPTE_PAGE);
                 while (len > 0) {
-                        pglist[pglist_ent++] = pteval;
+                        long err;
+
+                        err = pci_iommu_batch_add(pteval);
+                        if (unlikely(err < 0L))
+                                goto iommu_map_failed;
+
                         pteval += IO_PAGE_SIZE;
                         len -= (IO_PAGE_SIZE - offset);
                         offset = 0;
@@ -366,18 +444,15 @@ static inline void fill_sg(long entry, u32 devhandle,
                 dma_sg++;
         }
 
-        BUG_ON(pglist_ent == 0);
-
-        do {
-                unsigned long num;
-
-                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
-                                            pglist_ent);
-                entry += num;
-                pglist_ent -= num;
-        } while (pglist_ent != 0);
-
-        put_cpu();
+        if (unlikely(pci_iommu_batch_end() < 0L))
+                goto iommu_map_failed;
+
+        local_irq_restore(flags);
+        return 0;
+
+iommu_map_failed:
+        local_irq_restore(flags);
+        return -1L;
 }
 
 static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
@@ -385,9 +460,9 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
         struct pcidev_cookie *pcp;
         struct pci_iommu *iommu;
         unsigned long flags, npages, prot;
-        u32 devhandle, dma_base;
+        u32 dma_base;
         struct scatterlist *sgtmp;
-        long entry;
+        long entry, err;
         int used;
 
         /* Fast path single entry scatterlists. */
@@ -404,7 +479,6 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
         pcp = pdev->sysdata;
         iommu = pcp->pbm->iommu;
-        devhandle = pcp->pbm->devhandle;
 
         if (unlikely(direction == PCI_DMA_NONE))
                 goto bad;
@@ -441,7 +515,9 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
         if (direction != PCI_DMA_TODEVICE)
                 prot |= HV_PCI_MAP_ATTR_WRITE;
 
-        fill_sg(entry, devhandle, sglist, used, nelems, prot);
+        err = fill_sg(entry, pdev, sglist, used, nelems, prot);
+        if (unlikely(err < 0L))
+                goto iommu_map_failed;
 
         return used;
@@ -449,6 +525,13 @@ bad:
         if (printk_ratelimit())
                 WARN_ON(1);
         return 0;
+
+iommu_map_failed:
+        spin_lock_irqsave(&iommu->lock, flags);
+        pci_arena_free(&iommu->arena, entry, npages);
+        spin_unlock_irqrestore(&iommu->lock, flags);
+
+        return 0;
 }
 
 static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
@@ -1011,13 +1094,13 @@ void sun4v_pci_init(int node, char *model_name)
                 }
         }
 
-        for (i = 0; i < NR_CPUS; i++) {
+        for_each_cpu(i) {
                 unsigned long page = get_zeroed_page(GFP_ATOMIC);
 
                 if (!page)
                         goto fatal_memory_error;
 
-                per_cpu(iommu_pglists, i).pglist = (u64 *) page;
+                per_cpu(pci_iommu_batch, i).pglist = (u64 *) page;
         }
 
         p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
@@ -6,7 +6,7 @@
 #ifndef _PCI_SUN4V_H
 #define _PCI_SUN4V_H
 
-extern unsigned long pci_sun4v_iommu_map(unsigned long devhandle,
+extern long pci_sun4v_iommu_map(unsigned long devhandle,
                                 unsigned long tsbid,
                                 unsigned long num_ttes,
                                 unsigned long io_attributes,
@@ -11,14 +11,19 @@
  * %o3: io_attributes
  * %o4: io_page_list phys address
  *
- * returns %o0: num ttes mapped
+ * returns %o0: -status if status was non-zero, else
+ *              %o0: num pages mapped
  */
         .globl  pci_sun4v_iommu_map
 pci_sun4v_iommu_map:
+        mov     %o5, %g1
         mov     HV_FAST_PCI_IOMMU_MAP, %o5
         ta      HV_FAST_TRAP
-        retl
+        brnz,pn %o0, 1f
+         sub    %g0, %o0, %o0
         mov     %o1, %o0
+1:      retl
+         nop
 
 /* %o0: devhandle
  * %o1: tsbid