Commit ff1649ff authored by Linus Torvalds

Merge git://git.infradead.org/~dwmw2/iommu-2.6.31

* git://git.infradead.org/~dwmw2/iommu-2.6.31:
  intel-iommu: Fix enabling snooping feature by mistake
  intel-iommu: Mask physical address to correct page size in intel_map_single()
  intel-iommu: Correct sglist size calculation.
parents da758dde c5b15255
...@@ -1505,7 +1505,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment, ...@@ -1505,7 +1505,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
} }
set_bit(num, iommu->domain_ids); set_bit(num, iommu->domain_ids);
set_bit(iommu->seq_id, &domain->iommu_bmp);
iommu->domains[num] = domain; iommu->domains[num] = domain;
id = num; id = num;
} }
...@@ -1648,6 +1647,14 @@ static int domain_context_mapped(struct pci_dev *pdev) ...@@ -1648,6 +1647,14 @@ static int domain_context_mapped(struct pci_dev *pdev)
tmp->devfn); tmp->devfn);
} }
/*
 * Return how many VTD pages [host_addr, host_addr + size) spans,
 * with the span first rounded out to MM page granularity.
 */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	/* Offset of host_addr within its MM page (low bits only). */
	unsigned long offset = host_addr & ~PAGE_MASK;

	/* Round the offset+size span up to a full MM page, then
	 * express it in (smaller or equal) VTD page units. */
	return PAGE_ALIGN(offset + size) >> VTD_PAGE_SHIFT;
}
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
struct scatterlist *sg, unsigned long phys_pfn, struct scatterlist *sg, unsigned long phys_pfn,
unsigned long nr_pages, int prot) unsigned long nr_pages, int prot)
...@@ -1675,7 +1682,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, ...@@ -1675,7 +1682,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
uint64_t tmp; uint64_t tmp;
if (!sg_res) { if (!sg_res) {
sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT; sg_res = aligned_nrpages(sg->offset, sg->length);
sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset; sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
sg->dma_length = sg->length; sg->dma_length = sg->length;
pteval = page_to_phys(sg_page(sg)) | prot; pteval = page_to_phys(sg_page(sg)) | prot;
...@@ -2415,14 +2422,6 @@ error: ...@@ -2415,14 +2422,6 @@ error:
return ret; return ret;
} }
/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr,
					    size_t size)
{
	/* Keep only the sub-page offset of host_addr
	 * (~PAGE_MASK selects the low, in-page bits). */
	host_addr &= ~PAGE_MASK;
	/* Round offset+size up to an MM page boundary, then convert
	 * the byte count into VTD-page units. */
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
/* This takes a number of _MM_ pages, not VTD pages */ /* This takes a number of _MM_ pages, not VTD pages */
static struct iova *intel_alloc_iova(struct device *dev, static struct iova *intel_alloc_iova(struct device *dev,
struct dmar_domain *domain, struct dmar_domain *domain,
...@@ -2551,6 +2550,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, ...@@ -2551,6 +2550,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
int prot = 0; int prot = 0;
int ret; int ret;
struct intel_iommu *iommu; struct intel_iommu *iommu;
unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
BUG_ON(dir == DMA_NONE); BUG_ON(dir == DMA_NONE);
...@@ -2585,7 +2585,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, ...@@ -2585,7 +2585,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
* is not a big problem * is not a big problem
*/ */
ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo), ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
paddr >> VTD_PAGE_SHIFT, size, prot); mm_to_dma_pfn(paddr_pfn), size, prot);
if (ret) if (ret)
goto error; goto error;
...@@ -2875,7 +2875,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne ...@@ -2875,7 +2875,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
start_vpfn = mm_to_dma_pfn(iova->pfn_lo); start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot); ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
if (unlikely(ret)) { if (unlikely(ret)) {
/* clear the page */ /* clear the page */
dma_pte_clear_range(domain, start_vpfn, dma_pte_clear_range(domain, start_vpfn,
...@@ -3408,6 +3408,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width) ...@@ -3408,6 +3408,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
domain->iommu_count = 0; domain->iommu_count = 0;
domain->iommu_coherency = 0; domain->iommu_coherency = 0;
domain->iommu_snooping = 0;
domain->max_addr = 0; domain->max_addr = 0;
/* always allocate the top pgd */ /* always allocate the top pgd */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment