Commit 46663448 authored by FUJITA Tomonori, committed by Linus Torvalds

iommu: parisc: make the IOMMUs respect the segment boundary limits

Make PARISC's two IOMMU implementations (SBA and CCIO) not allocate a memory
area that spans the low-level driver's (LLD's) segment boundary.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Kyle McMartin <kyle@parisc-linux.org>
Cc: Matthew Wilcox <matthew@wil.cx>
Cc: Grant Grundler <grundler@parisc-linux.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7c8cda62
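
Both drivers gain the same core check: before claiming a candidate slot in the
resource bitmap they ask iommu_is_span_boundary() (from the generic IOMMU helper
code that the new CONFIG_IOMMU_HELPER symbol pulls in) whether the resulting
mapping would cross the device's segment boundary. As a rough, self-contained
sketch of what that predicate computes (an illustrative reimplementation for
this note, not the kernel's lib/iommu-helper.c source):

    #include <assert.h>
    #include <stdio.h>

    /*
     * Illustrative stand-in for iommu_is_span_boundary(): returns non-zero
     * when a mapping of 'nr' pages starting at bitmap index 'index' (offset
     * by 'shift' pages, e.g. the IOVA base of the IOMMU) would cross a
     * 'boundary_size'-page boundary.  boundary_size must be a power of two.
     */
    static int is_span_boundary(unsigned int index, unsigned int nr,
                                unsigned long shift,
                                unsigned long boundary_size)
    {
            assert(boundary_size && !(boundary_size & (boundary_size - 1)));
            shift = (shift + index) & (boundary_size - 1);
            return shift + nr > boundary_size;
    }

    int main(void)
    {
            /* 16-page segment boundary: pages 14..17 would straddle it. */
            printf("%d\n", is_span_boundary(14, 4, 0, 16));  /* 1: crosses */
            printf("%d\n", is_span_boundary(12, 4, 0, 16));  /* 0: fits    */
            return 0;
    }

Because the boundary size is a power of two, the modulo reduces to a mask: a
span crosses the boundary exactly when the offset inside the current boundary
window plus the requested page count overflows the window.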
@@ -103,6 +103,11 @@ config IOMMU_SBA
 	depends on PCI_LBA
 	default PCI_LBA
 
+config IOMMU_HELPER
+	bool
+	depends on IOMMU_SBA || IOMMU_CCIO
+	default y
+
 #config PCI_EPIC
 #	bool "EPIC/SAGA PCI support"
 #	depends on PCI
...
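Note that CONFIG_IOMMU_HELPER defaults to y whenever either parisc IOMMU driver
is enabled; it is the symbol that (presumably via lib/Makefile) builds the
generic helper providing iommu_is_span_boundary(), which both ccio-dma.c and
sba_iommu.c start calling below.
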
@@ -43,6 +43,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/scatterlist.h>
+#include <linux/iommu-helper.h>
 
 #include <asm/byteorder.h>
 #include <asm/cache.h>		/* for L1_CACHE_BYTES */
@@ -302,13 +303,17 @@ static int ioc_count;
 */
 #define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size)  \
        for(; res_ptr < res_end; ++res_ptr) { \
-		if(0 == (*res_ptr & mask)) { \
+		int ret;\
+		unsigned int idx;\
+		idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \
+		ret = iommu_is_span_boundary(idx << 3, pages_needed, 0, boundary_size);\
+		if ((0 == (*res_ptr & mask)) && !ret) { \
 			*res_ptr |= mask; \
-			res_idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \
+			res_idx = idx;\
 			ioc->res_hint = res_idx + (size >> 3); \
 			goto resource_found; \
 		} \
 	}
 
 #define CCIO_FIND_FREE_MAPPING(ioa, res_idx, mask, size) \
        u##size *res_ptr = (u##size *)&((ioc)->res_map[ioa->res_hint & ~((size >> 3) - 1)]); \
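
Two details of the reworked CCIO_SEARCH_LOOP are worth noting: idx is a byte
offset into res_map, so idx << 3 converts it to a bit (i.e. IOVP page) index
before the span check, and the macro now also reads pages_needed and
boundary_size, which are not macro parameters but locals of the enclosing
ccio_alloc_range(), so the macro only expands correctly inside that function.
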
@@ -345,6 +350,7 @@ ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
 {
 	unsigned int pages_needed = size >> IOVP_SHIFT;
 	unsigned int res_idx;
+	unsigned long boundary_size;
 #ifdef CCIO_SEARCH_TIME
 	unsigned long cr_start = mfctl(16);
 #endif
@@ -360,6 +366,9 @@ ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
 	** ggg sacrifices another 710 to the computer gods.
 	*/
 
+	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, 1 << IOVP_SHIFT);
+	boundary_size >>= IOVP_SHIFT;
+
 	if (pages_needed <= 8) {
 		/*
 		 * LAN traffic will not thrash the TLB IFF the same NIC
...
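The two added lines turn the device's segment boundary mask, as reported by
dma_get_seg_boundary(), from bytes into a count of IOVP pages. A stand-alone
sketch of the arithmetic, assuming 4 KiB IOVP pages (IOVP_SHIFT == 12), the
0xffffffff mask typically seen when the LLD has set no tighter limit, and a
local ALIGN macro equivalent to the kernel's:

    #include <stdio.h>

    /* Same rounding the kernel's ALIGN() macro does (power-of-two 'a'). */
    #define ALIGN(x, a)  (((x) + ((a) - 1)) & ~((unsigned long long)(a) - 1))

    int main(void)
    {
            /* Assumed values: 4 KiB IOVP pages and a 0xffffffff segment
             * boundary mask (i.e. no LLD-imposed limit below 4 GiB). */
            const unsigned int IOVP_SHIFT = 12;
            unsigned long long seg_mask = 0xffffffffULL;

            unsigned long long boundary_size =
                    ALIGN(seg_mask + 1, 1ULL << IOVP_SHIFT) >> IOVP_SHIFT;

            /* Prints 0x100000: no mapping may cross a 4 GiB IOVA boundary. */
            printf("boundary_size = %#llx pages\n", boundary_size);
            return 0;
    }

With a smaller LLD limit, say a 0xffff mask, boundary_size becomes 16 pages and
the allocator below refuses any mapping that would cross a 64 KiB boundary.
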
@@ -29,6 +29,7 @@
 #include <linux/string.h>
 #include <linux/pci.h>
 #include <linux/scatterlist.h>
+#include <linux/iommu-helper.h>
 
 #include <asm/byteorder.h>
 #include <asm/io.h>
@@ -313,6 +314,12 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
 #define RESMAP_MASK(n)    (~0UL << (BITS_PER_LONG - (n)))
 #define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)
 
+unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
+			  unsigned int bitshiftcnt)
+{
+	return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
+		+ bitshiftcnt;
+}
 
 /**
  * sba_search_bitmap - find free space in IO PDIR resource bitmap
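
The new ptr_to_pide() helper centralises the pointer arithmetic both SBA search
loops previously open-coded: the byte offset of the current bitmap word from the
start of res_map, shifted left by 3 to get a bit number, plus the bit position
within that word, yields the PDIR entry index (pide). It is the same conversion
the CCIO macro performs with idx << 3. A trivial stand-alone illustration (the
struct here is a stub with only the field the arithmetic touches):

    #include <stdio.h>

    /* Minimal stand-in for struct ioc: only the resource bitmap matters. */
    struct ioc {
            unsigned long res_map[8];
    };

    /* Same arithmetic as the new helper: byte offset * 8 + bit offset. */
    static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
                                     unsigned int bitshiftcnt)
    {
            return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
                    + bitshiftcnt;
    }

    int main(void)
    {
            struct ioc ioc = { { 0 } };

            /* Third word of the bitmap, bit 5: on a 64-bit box this is
             * 2 * sizeof(unsigned long) * 8 + 5 = 133. */
            printf("pide = %lu\n", ptr_to_pide(&ioc, &ioc.res_map[2], 5));
            return 0;
    }
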
@@ -324,19 +331,36 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
  * Cool perf optimization: search for log2(size) bits at a time.
  */
 static SBA_INLINE unsigned long
-sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
+sba_search_bitmap(struct ioc *ioc, struct device *dev,
+		  unsigned long bits_wanted)
 {
 	unsigned long *res_ptr = ioc->res_hint;
 	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
-	unsigned long pide = ~0UL;
+	unsigned long pide = ~0UL, tpide;
+	unsigned long boundary_size;
+	unsigned long shift;
+	int ret;
+
+	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, 1 << IOVP_SHIFT);
+	boundary_size >>= IOVP_SHIFT;
+
+#if defined(ZX1_SUPPORT)
+	BUG_ON(ioc->ibase & ~IOVP_MASK);
+	shift = ioc->ibase >> IOVP_SHIFT;
+#else
+	shift = 0;
+#endif
 
 	if (bits_wanted > (BITS_PER_LONG/2)) {
 		/* Search word at a time - no mask needed */
 		for(; res_ptr < res_end; ++res_ptr) {
-			if (*res_ptr == 0) {
+			tpide = ptr_to_pide(ioc, res_ptr, 0);
+			ret = iommu_is_span_boundary(tpide, bits_wanted,
+						     shift,
+						     boundary_size);
+			if ((*res_ptr == 0) && !ret) {
 				*res_ptr = RESMAP_MASK(bits_wanted);
-				pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
-				pide <<= 3;	/* convert to bit address */
+				pide = tpide;
 				break;
 			}
 		}
@@ -365,11 +389,13 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
 		{
 			DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
 			WARN_ON(mask == 0);
-			if(((*res_ptr) & mask) == 0) {
+			tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
+			ret = iommu_is_span_boundary(tpide, bits_wanted,
+						     shift,
+						     boundary_size);
+			if ((((*res_ptr) & mask) == 0) && !ret) {
 				*res_ptr |= mask;     /* mark resources busy! */
-				pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
-				pide <<= 3;	/* convert to bit address */
-				pide += bitshiftcnt;
+				pide = tpide;
 				break;
 			}
 			mask >>= o;
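
Taken together, the search now skips slots that are free but whose resulting
pide would make the mapping straddle a segment boundary. The following is a
deliberately simplified, bit-granular userspace simulation of that behaviour
(the real sba_search_bitmap() scans word- or power-of-two-mask-sized chunks and
honours res_hint; all names below are local to the example):

    #include <stdio.h>
    #include <string.h>

    #define NR_PAGES	64

    /* Boundary check with the same semantics as iommu_is_span_boundary(). */
    static int is_span_boundary(unsigned long index, unsigned long nr,
                                unsigned long shift, unsigned long boundary)
    {
            shift = (shift + index) & (boundary - 1);
            return shift + nr > boundary;
    }

    /*
     * Simplified first fit over a page map: return the first run of 'nr'
     * free pages that does not cross 'boundary' pages, or ~0UL if none.
     */
    static unsigned long alloc_range(unsigned char *used, unsigned long nr,
                                     unsigned long shift, unsigned long boundary)
    {
            unsigned long start, i;

            for (start = 0; start + nr <= NR_PAGES; start++) {
                    if (is_span_boundary(start, nr, shift, boundary))
                            continue;
                    for (i = 0; i < nr && !used[start + i]; i++)
                            ;
                    if (i == nr) {
                            memset(used + start, 1, nr);
                            return start;
                    }
            }
            return ~0UL;
    }

    int main(void)
    {
            unsigned char used[NR_PAGES] = { 0 };

            memset(used, 1, 10);	/* pages 0..9 already mapped */

            /* 8 pages with a 16-page boundary: prints "pide = 16". */
            printf("pide = %lu\n", alloc_range(used, 8, 0, 16));
            return 0;
    }

Without the boundary check the first fit would start at page 10 and span the
16-page boundary; with it, the allocation is pushed out to page 16.
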
@@ -412,9 +438,9 @@ sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
 #endif
 	unsigned long pide;
 
-	pide = sba_search_bitmap(ioc, pages_needed);
+	pide = sba_search_bitmap(ioc, dev, pages_needed);
 	if (pide >= (ioc->res_size << 3)) {
-		pide = sba_search_bitmap(ioc, pages_needed);
+		pide = sba_search_bitmap(ioc, dev, pages_needed);
 		if (pide >= (ioc->res_size << 3))
 			panic("%s: I/O MMU @ %p is out of mapping resources\n",
 			      __FILE__, ioc->ioc_hpa);
...