Commit 206a73c1 authored by David Woodhouse

intel-iommu: Kill superfluous mapping_lock

Since we're using cmpxchg64() anyway (because that's the only way to do
an atomic 64-bit store on i386), we might as well ditch the extra
locking and just use cmpxchg64() to ensure that we don't add the page
twice.
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
parent c85994e4
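
The idiom the commit relies on: speculatively allocate the new page-table page, then publish it with a single atomic compare-and-swap on the 64-bit PTE; whoever loses the race frees their copy and uses the winner's entry, so no lock is needed. Below is a minimal userspace sketch of that pattern, with C11's atomic_compare_exchange_strong() standing in for the kernel's cmpxchg64(); all names are illustrative, not taken from the driver.

/* cmpxchg_sketch.c -- illustrative only, not the kernel code. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t pte_val;	/* stands in for pte->val, initially 0 */

/* Try to install 'pteval' into an empty (zero) entry. Returns the value
 * actually resident in the entry afterwards, so a caller that lost the
 * race can free its speculative page and use the winner's entry. */
static uint64_t install_pte(uint64_t pteval)
{
	uint64_t expected = 0;

	/* Succeeds only if the entry is still zero, exactly like
	 * cmpxchg64(&pte->val, 0ULL, pteval) in the patch. */
	if (atomic_compare_exchange_strong(&pte_val, &expected, pteval))
		return pteval;		/* we won: our page is now mapped */

	return expected;		/* we lost: 'expected' holds the winner's value */
}

int main(void)
{
	uint64_t mine = (0x1234ULL << 12) | 0x3;	/* fake pfn plus R/W bits */
	uint64_t used = install_pte(mine);

	if (used != mine)
		puts("lost the race: free our page, use the existing entry");
	else
		printf("installed pte 0x%llx\n", (unsigned long long)used);
	return 0;
}
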
@@ -267,7 +267,6 @@ struct dmar_domain {
 	struct iova_domain iovad;	/* iova's that belong to this domain */

 	struct dma_pte	*pgd;		/* virtual address */
-	spinlock_t	mapping_lock;	/* page table lock */
 	int		gaw;		/* max guest address width */

 	/* adjusted guest address width, 0 is level 2 30-bit */
@@ -701,13 +700,11 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 	struct dma_pte *parent, *pte = NULL;
 	int level = agaw_to_level(domain->agaw);
 	int offset;
-	unsigned long flags;

 	BUG_ON(!domain->pgd);
 	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
 	parent = domain->pgd;

-	spin_lock_irqsave(&domain->mapping_lock, flags);
 	while (level > 0) {
 		void *tmp_page;

@@ -721,11 +718,9 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,

 			tmp_page = alloc_pgtable_page();

-			if (!tmp_page) {
-				spin_unlock_irqrestore(&domain->mapping_lock,
-						       flags);
+			if (!tmp_page)
 				return NULL;
-			}
+
 			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
 			pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
 			if (cmpxchg64(&pte->val, 0ULL, pteval)) {
@@ -740,7 +735,6 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 		level--;
 	}

-	spin_unlock_irqrestore(&domain->mapping_lock, flags);
 	return pte;
 }

@@ -1375,7 +1369,6 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 	unsigned long sagaw;

 	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
-	spin_lock_init(&domain->mapping_lock);
 	spin_lock_init(&domain->iommu_lock);

 	domain_reserve_special_ranges(domain);
@@ -3336,7 +3329,6 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 	int adjust_width;

 	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
-	spin_lock_init(&domain->mapping_lock);
 	spin_lock_init(&domain->iommu_lock);

 	domain_reserve_special_ranges(domain);