Commit f76aec76 authored by Keshavamurthy, Anil S and committed by Linus Torvalds

intel-iommu: optimize sg map/unmap calls

This patch adds PageSelectiveInvalidation support, replacing the existing
DomainSelectiveInvalidation for intel_{map/unmap}_sg() calls, and also
enables mapping one big contiguous DMA virtual address range onto the
discontiguous physical addresses of an SG list for the SG map/unmap calls.

"Doamin selective invalidations" wipes out the IOMMU address translation
cache based on domain ID where as "Page selective invalidations" wipes out
the IOMMU address translation cache for that address mask range which is
more cache friendly when compared to Domain selective invalidations.

Here is how it is done.
1) changes to iova.c
	alloc_iova() now takes a bool size_aligned argument which,
	when set, returns an io virtual address that is
	naturally aligned to 2 ^ x, where x is the order
	of the size requested.

	Returning an io virtual address that is naturally
	aligned helps the IOMMU do "page selective
	invalidations", which are IOMMU cache friendly
	compared to "domain selective invalidations"
	(see the sketch after this list).

2) Changes to drivers/pci/intel-iommu.c
	Clean up the intel_{map/unmap}_{single/sg}() calls so that
	the s/g map/unmap calls are no longer dependent on
	intel_{map/unmap}_single().

	intel_map_sg() now computes the total DMA virtual address
	size required, allocates a size-aligned contiguous DMA virtual
	address range, and maps the discontiguous physical addresses
	into that allocated contiguous range.

	In the intel_unmap_sg() case, since the DMA virtual address
	range is contiguous and size-aligned, PageSelectiveInvalidation
	is used in place of the earlier DomainSelectiveInvalidation
	(see the sketch after this list).
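
To make the flow concrete, here is a minimal, hypothetical sketch of the
map-side path described in 1) and 2). It is not the actual intel-iommu.c code:
map_sg_sketch() and map_contig_range() are assumed names, the scatterlist
field usage reflects the kernel of that era, and error handling is omitted.

/*
 * Hypothetical sketch of the intel_map_sg() flow above, not the real
 * driver code: one size-aligned, contiguous IOVA range backs the whole
 * physically discontiguous sg list.  map_contig_range() stands in for
 * the driver's page-table mapping helper (the real code also passes its
 * IOMMU domain).
 */
extern void map_contig_range(u64 dma_addr, u64 phys_addr, unsigned long size);

static int map_sg_sketch(struct iova_domain *iovad, struct scatterlist *sglist,
			 int nelems, unsigned long limit_pfn)
{
	unsigned long total = 0, offset = 0;
	struct scatterlist *sg;
	struct iova *iova;
	int i;

	/* 1) total up the page-aligned size of every sg element */
	for (i = 0, sg = sglist; i < nelems; i++, sg++)
		total += PAGE_ALIGN(sg->offset + sg->length);

	/* 2) one allocation; size_aligned gives a naturally aligned pfn_lo */
	iova = alloc_iova(iovad, total >> PAGE_SHIFT, limit_pfn, 1);
	if (!iova)
		return 0;

	/* 3) map each discontiguous physical chunk at increasing offsets */
	for (i = 0, sg = sglist; i < nelems; i++, sg++) {
		unsigned long size = PAGE_ALIGN(sg->offset + sg->length);
		u64 dma_addr = ((u64)iova->pfn_lo << PAGE_SHIFT) + offset;

		map_contig_range(dma_addr, page_to_phys(sg->page), size);
		sg->dma_address = dma_addr + sg->offset;
		sg->dma_length = sg->length;
		offset += size;
	}
	return nelems;
}

Because the allocation is contiguous and size aligned, intel_unmap_sg() can
later tear down the whole [pfn_lo, pfn_hi] range with one page-selective
invalidation of order ilog2(rounded-up page count) instead of a
domain-selective flush.
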
Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: Greg KH <greg@kroah.com>
Cc: Ashok Raj <ashok.raj@intel.com>
Cc: Suresh B <suresh.b.siddha@intel.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 49a0429e
@@ -57,12 +57,28 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
 	iovad->cached32_node = rb_next(&free->node);
 }
 
-static int __alloc_iova_range(struct iova_domain *iovad,
-	unsigned long size, unsigned long limit_pfn, struct iova *new)
+/* Computes the padding size required, to make the
+ * the start address naturally aligned on its size
+ */
+static int
+iova_get_pad_size(int size, unsigned int limit_pfn)
+{
+	unsigned int pad_size = 0;
+	unsigned int order = ilog2(size);
+
+	if (order)
+		pad_size = (limit_pfn + 1) % (1 << order);
+
+	return pad_size;
+}
+
+static int __alloc_iova_range(struct iova_domain *iovad, unsigned long size,
+	unsigned long limit_pfn, struct iova *new, bool size_aligned)
 {
 	struct rb_node *curr = NULL;
 	unsigned long flags;
 	unsigned long saved_pfn;
+	unsigned int pad_size = 0;
 
 	/* Walk the tree backwards */
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
@@ -72,22 +88,32 @@ static int __alloc_iova_range(struct iova_domain *iovad,
 		struct iova *curr_iova = container_of(curr, struct iova, node);
 		if (limit_pfn < curr_iova->pfn_lo)
 			goto move_left;
-		if (limit_pfn < curr_iova->pfn_hi)
+		else if (limit_pfn < curr_iova->pfn_hi)
 			goto adjust_limit_pfn;
-		if ((curr_iova->pfn_hi + size) <= limit_pfn)
-			break;	/* found a free slot */
+		else {
+			if (size_aligned)
+				pad_size = iova_get_pad_size(size, limit_pfn);
+			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
+				break;	/* found a free slot */
+		}
 adjust_limit_pfn:
 		limit_pfn = curr_iova->pfn_lo - 1;
 move_left:
 		curr = rb_prev(curr);
 	}
 
-	if ((!curr) && !(IOVA_START_PFN + size <= limit_pfn)) {
-		spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-		return -ENOMEM;
+	if (!curr) {
+		if (size_aligned)
+			pad_size = iova_get_pad_size(size, limit_pfn);
+		if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
+			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+			return -ENOMEM;
+		}
 	}
 
-	new->pfn_hi = limit_pfn;
-	new->pfn_lo = limit_pfn - size + 1;
+	/* pfn_lo will point to size aligned address if size_aligned is set */
+	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
+	new->pfn_hi = new->pfn_lo + size - 1;
 
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	return 0;
@@ -119,12 +145,16 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
  * @iovad - iova domain in question
  * @size - size of page frames to allocate
  * @limit_pfn - max limit address
+ * @size_aligned - set if size_aligned address range is required
  * This function allocates an iova in the range limit_pfn to IOVA_START_PFN
- * looking from limit_pfn instead from IOVA_START_PFN.
+ * looking from limit_pfn instead from IOVA_START_PFN. If the size_aligned
+ * flag is set then the allocated address iova->pfn_lo will be naturally
+ * aligned on roundup_power_of_two(size).
  */
 struct iova *
 alloc_iova(struct iova_domain *iovad, unsigned long size,
-	unsigned long limit_pfn)
+	unsigned long limit_pfn,
+	bool size_aligned)
 {
 	unsigned long flags;
 	struct iova *new_iova;
@@ -134,8 +164,15 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 	if (!new_iova)
 		return NULL;
 
+	/* If size aligned is set then round the size to
+	 * to next power of two.
+	 */
+	if (size_aligned)
+		size = __roundup_pow_of_two(size);
+
 	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
-	ret = __alloc_iova_range(iovad, size, limit_pfn, new_iova);
+	ret = __alloc_iova_range(iovad, size, limit_pfn, new_iova,
+		size_aligned);
 
 	if (ret) {
 		spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
@@ -51,7 +51,8 @@ void free_iova_mem(struct iova *iova);
 void free_iova(struct iova_domain *iovad, unsigned long pfn);
 void __free_iova(struct iova_domain *iovad, struct iova *iova);
 struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
-	unsigned long limit_pfn);
+	unsigned long limit_pfn,
+	bool size_aligned);
 struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
 	unsigned long pfn_hi);
 void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);