Commit 77afa904 authored by Stephen Rothwell

Merge commit 'dwmw2-iommu/master'

parents ae735d7c 074835f0
@@ -46,7 +46,7 @@ void __init swiotlb_dma_init(void)

 void __init pci_swiotlb_init(void)
 {
-	if (!iommu_detected || iommu_pass_through) {
+	if (!iommu_detected) {
 #ifdef CONFIG_IA64_GENERIC
 		swiotlb = 1;
 		printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
...
@@ -46,8 +46,7 @@ void __init pci_swiotlb_init(void)
 {
 	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
 #ifdef CONFIG_X86_64
-	if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN) ||
-	    iommu_pass_through)
+	if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN))
 		swiotlb = 1;
 #endif
 	if (swiotlb_force)
...
@@ -570,9 +570,6 @@ int __init dmar_table_init(void)
 	printk(KERN_INFO PREFIX "No ATSR found\n");
 #endif

-#ifdef CONFIG_INTR_REMAP
-	parse_ioapics_under_ir();
-#endif
-
 	return 0;
 }

@@ -632,20 +629,31 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
 	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

+	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
+		/* Promote an attitude of violence to a BIOS engineer today */
+		WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
+		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+		     drhd->reg_base_addr,
+		     dmi_get_system_info(DMI_BIOS_VENDOR),
+		     dmi_get_system_info(DMI_BIOS_VERSION),
+		     dmi_get_system_info(DMI_PRODUCT_VERSION));
+		goto err_unmap;
+	}
+
 #ifdef CONFIG_DMAR
 	agaw = iommu_calculate_agaw(iommu);
 	if (agaw < 0) {
 		printk(KERN_ERR
 			"Cannot get a valid agaw for iommu (seq_id = %d)\n",
 			iommu->seq_id);
-		goto error;
+		goto err_unmap;
 	}
 	msagaw = iommu_calculate_max_sagaw(iommu);
 	if (msagaw < 0) {
 		printk(KERN_ERR
 			"Cannot get a valid max agaw for iommu (seq_id = %d)\n",
 			iommu->seq_id);
-		goto error;
+		goto err_unmap;
 	}
 #endif
 	iommu->agaw = agaw;

@@ -665,7 +673,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	}

 	ver = readl(iommu->reg + DMAR_VER_REG);
-	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
+	pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
 		(unsigned long long)drhd->reg_base_addr,
 		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
 		(unsigned long long)iommu->cap,

@@ -675,7 +683,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	drhd->iommu = iommu;
 	return 0;

-error:
+ err_unmap:
+	iounmap(iommu->reg);
+ error:
 	kfree(iommu);
 	return -1;
 }
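
Note: the new all-ones check above relies on a general MMIO property: a read from an address that no device decodes floats high and returns all ones, so CAP and ECAP both reading as ~0ULL strongly suggests the BIOS reported a bogus DMAR register base. A minimal compilable sketch of that heuristic, with a hypothetical helper name (the real check is inline in alloc_iommu above):

    #include <stdint.h>
    #include <stdbool.h>
    #include <assert.h>

    /* Hypothetical helper: two independent registers both reading ~0
     * almost certainly means the register window is not decoded by
     * real hardware. */
    static bool dmar_regs_bogus(uint64_t cap, uint64_t ecap)
    {
            return cap == (uint64_t)-1 && ecap == (uint64_t)-1;
    }

    int main(void)
    {
            assert(dmar_regs_bogus(~0ULL, ~0ULL));  /* unbacked window  */
            assert(!dmar_regs_bogus(0, ~0ULL));     /* one sane reg: ok */
            return 0;
    }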
@@ -1212,7 +1223,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
 				source_id, guest_addr);

 		fault_index++;
-		if (fault_index > cap_num_fault_regs(iommu->cap))
+		if (fault_index >= cap_num_fault_regs(iommu->cap))
 			fault_index = 0;
 		spin_lock_irqsave(&iommu->register_lock, flag);
 	}
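
Note: the `>` to `>=` change fixes an off-by-one in the fault-register ring: with N fault recording registers the valid indices are 0..N-1, so the index must wrap as soon as it reaches N. A worked example of the arithmetic, assuming a hypothetical count of 8 registers:

    #include <assert.h>

    #define NUM_FAULT_REGS 8  /* hypothetical; really cap_num_fault_regs(iommu->cap) */

    /* Advance the ring index. '>=' wraps at 8; the old '>' only wrapped
     * at 9, letting one iteration touch a nonexistent register 8. */
    static int next_fault_index(int fault_index)
    {
            fault_index++;
            if (fault_index >= NUM_FAULT_REGS)
                    fault_index = 0;
            return fault_index;
    }

    int main(void)
    {
            assert(next_fault_index(7) == 0);  /* with '>' this returned 8 */
            return 0;
    }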
@@ -1305,3 +1316,13 @@ int dmar_reenable_qi(struct intel_iommu *iommu)

 	return 0;
 }
+
+/*
+ * Check interrupt remapping support in DMAR table description.
+ */
+int dmar_ir_support(void)
+{
+	struct acpi_table_dmar *dmar;
+	dmar = (struct acpi_table_dmar *)dmar_tbl;
+	return dmar->flags & 0x1;
+}
@@ -37,6 +37,7 @@
 #include <linux/iommu.h>
 #include <linux/intel-iommu.h>
 #include <linux/sysdev.h>
+#include <linux/dmi.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
 #include "pci.h"

@@ -251,7 +252,8 @@ static inline int first_pte_in_page(struct dma_pte *pte)
  * 2. It maps to each iommu if successful.
  * 3. Each iommu mapps to this domain if successful.
  */
-struct dmar_domain *si_domain;
+static struct dmar_domain *si_domain;
+static int hw_pass_through = 1;

 /* devices under the same p2p bridge are owned in one domain */
 #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

@@ -1157,6 +1159,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 	pr_debug("Number of Domains supportd <%ld>\n", ndomains);
 	nlongs = BITS_TO_LONGS(ndomains);

+	spin_lock_init(&iommu->lock);
+
 	/* TBD: there might be 64K domains,
 	 * consider other allocation for future chip
 	 */

@@ -1169,12 +1173,9 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 			GFP_KERNEL);
 	if (!iommu->domains) {
 		printk(KERN_ERR "Allocating domain array failed\n");
-		kfree(iommu->domain_ids);
 		return -ENOMEM;
 	}

-	spin_lock_init(&iommu->lock);
-
 	/*
 	 * if Caching mode is set, then invalid translations are tagged
 	 * with domainid 0. Hence we need to pre-allocate it.

@@ -1194,6 +1195,7 @@ void free_dmar_iommu(struct intel_iommu *iommu)
 	int i;
 	unsigned long flags;

+	if ((iommu->domains) && (iommu->domain_ids)) {
 	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
 	for (; i < cap_ndoms(iommu->cap); ) {
 		domain = iommu->domains[i];

@@ -1211,6 +1213,7 @@ void free_dmar_iommu(struct intel_iommu *iommu)
 		i = find_next_bit(iommu->domain_ids,
 			cap_ndoms(iommu->cap), i+1);
 	}
+	}

 	if (iommu->gcmd & DMA_GCMD_TE)
 		iommu_disable_translation(iommu);

@@ -1309,7 +1312,6 @@ static void iommu_detach_domain(struct dmar_domain *domain,
 }

 static struct iova_domain reserved_iova_list;
-static struct lock_class_key reserved_alloc_key;
 static struct lock_class_key reserved_rbtree_key;

 static void dmar_init_reserved_ranges(void)

@@ -1320,8 +1322,6 @@ static void dmar_init_reserved_ranges(void)
 	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

-	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
-		&reserved_alloc_key);
 	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
 		&reserved_rbtree_key);
@@ -1958,13 +1958,34 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
 	struct dmar_domain *domain;
 	int ret;

+	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+	if (!domain)
+		return -ENOMEM;
+
+	/* For _hardware_ passthrough, don't bother. But for software
+	   passthrough, we do it anyway -- it may indicate a memory
+	   range which is reserved in E820, so which didn't get set
+	   up to start with in si_domain */
+	if (domain == si_domain && hw_pass_through) {
+		printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
+		       pci_name(pdev), start, end);
+		return 0;
+	}
+
 	printk(KERN_INFO
 	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
 	       pci_name(pdev), start, end);

-	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
-	if (!domain)
-		return -ENOMEM;
+	if (end >> agaw_to_width(domain->agaw)) {
+		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
+		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+		     agaw_to_width(domain->agaw),
+		     dmi_get_system_info(DMI_BIOS_VENDOR),
+		     dmi_get_system_info(DMI_BIOS_VERSION),
+		     dmi_get_system_info(DMI_PRODUCT_VERSION));
+		ret = -EIO;
+		goto error;
+	}

 	ret = iommu_domain_identity_map(domain, start, end);
 	if (ret)
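
Note: two things happen in this hunk. The domain lookup moves ahead of the new hardware-passthrough early return (software passthrough still maps RMRRs, since they can cover E820-reserved memory that si_domain never picked up), and a sanity check now rejects RMRRs beyond the domain's address width. The width test is plain shift arithmetic; a worked example with hypothetical numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* hypothetical: a 39-bit domain can address [0, 2^39 - 1] */
            int width = 39;
            uint64_t ok_end  = 0x7fffffffffULL;  /* 2^39 - 1: fits      */
            uint64_t bad_end = 0x8000000000ULL;  /* 2^39: one byte over */

            /* a nonzero shift result means the RMRR end is unreachable */
            printf("%d %d\n", (int)(ok_end >> width), (int)(bad_end >> width));
            /* prints "0 1" */
            return 0;
    }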
@@ -2016,23 +2037,6 @@ static inline void iommu_prepare_isa(void)
 }
 #endif /* !CONFIG_DMAR_FLPY_WA */

-/* Initialize each context entry as pass through.*/
-static int __init init_context_pass_through(void)
-{
-	struct pci_dev *pdev = NULL;
-	struct dmar_domain *domain;
-	int ret;
-
-	for_each_pci_dev(pdev) {
-		domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
-		ret = domain_context_mapping(domain, pdev,
-					     CONTEXT_TT_PASS_THROUGH);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
 static int md_domain_init(struct dmar_domain *domain, int guest_width);

 static int __init si_domain_work_fn(unsigned long start_pfn,

@@ -2047,7 +2051,7 @@ static int __init si_domain_work_fn(unsigned long start_pfn,
 }

-static int si_domain_init(void)
+static int __init si_domain_init(int hw)
 {
 	struct dmar_drhd_unit *drhd;
 	struct intel_iommu *iommu;

@@ -2074,6 +2078,9 @@ static int si_domain_init(void)
 	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;

+	if (hw)
+		return 0;
+
 	for_each_online_node(nid) {
 		work_with_active_regions(nid, si_domain_work_fn, &ret);
 		if (ret)

@@ -2100,15 +2107,23 @@ static int identity_mapping(struct pci_dev *pdev)
 }

 static int domain_add_dev_info(struct dmar_domain *domain,
-			       struct pci_dev *pdev)
+			       struct pci_dev *pdev,
+			       int translation)
 {
 	struct device_domain_info *info;
 	unsigned long flags;
+	int ret;

 	info = alloc_devinfo_mem();
 	if (!info)
 		return -ENOMEM;

+	ret = domain_context_mapping(domain, pdev, translation);
+	if (ret) {
+		free_devinfo_mem(info);
+		return ret;
+	}
+
 	info->segment = pci_domain_nr(pdev->bus);
 	info->bus = pdev->bus->number;
 	info->devfn = pdev->devfn;
@@ -2165,27 +2180,25 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
 	return 1;
 }

-static int iommu_prepare_static_identity_mapping(void)
+static int __init iommu_prepare_static_identity_mapping(int hw)
 {
 	struct pci_dev *pdev = NULL;
 	int ret;

-	ret = si_domain_init();
+	ret = si_domain_init(hw);
 	if (ret)
 		return -EFAULT;

 	for_each_pci_dev(pdev) {
 		if (iommu_should_identity_map(pdev, 1)) {
-			printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
-			       pci_name(pdev));
+			printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
+			       hw ? "hardware" : "software", pci_name(pdev));

-			ret = domain_context_mapping(si_domain, pdev,
+			ret = domain_add_dev_info(si_domain, pdev,
+						  hw ? CONTEXT_TT_PASS_THROUGH :
 						       CONTEXT_TT_MULTI_LEVEL);
 			if (ret)
 				return ret;
-			ret = domain_add_dev_info(si_domain, pdev);
-			if (ret)
-				return ret;
 		}
 	}
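
Note: this hunk is the structural point of the merge: hardware and software passthrough now share one code path through domain_add_dev_info() against si_domain, and only the context-entry translation type differs. A compilable sketch of the selection logic, with illustrative enum values (the real CONTEXT_TT_* encodings come from the VT-d context-entry format):

    #include <stdio.h>

    /* Illustrative values only; see the VT-d spec for real encodings. */
    enum context_tt { CONTEXT_TT_MULTI_LEVEL, CONTEXT_TT_PASS_THROUGH };

    /* Hardware passthrough: the IOMMU skips translation for the device.
     * Software passthrough: the same 1:1 view, but walked through
     * si_domain's page tables, so it works even when the hardware
     * lacks the ecap pass-through bit. */
    static enum context_tt identity_translation_type(int hw)
    {
            return hw ? CONTEXT_TT_PASS_THROUGH : CONTEXT_TT_MULTI_LEVEL;
    }

    int main(void)
    {
            printf("%d %d\n", identity_translation_type(1),
                   identity_translation_type(0));  /* prints "1 0" */
            return 0;
    }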
@@ -2199,14 +2212,6 @@ int __init init_dmars(void)
 	struct pci_dev *pdev;
 	struct intel_iommu *iommu;
 	int i, ret;
-	int pass_through = 1;
-
-	/*
-	 * In case pass through can not be enabled, iommu tries to use identity
-	 * mapping.
-	 */
-	if (iommu_pass_through)
-		iommu_identity_mapping = 1;

 	/*
 	 * for each drhd

@@ -2234,7 +2239,6 @@ int __init init_dmars(void)
 	deferred_flush = kzalloc(g_num_of_iommus *
 		sizeof(struct deferred_flush_tables), GFP_KERNEL);
 	if (!deferred_flush) {
-		kfree(g_iommus);
 		ret = -ENOMEM;
 		goto error;
 	}

@@ -2261,13 +2265,7 @@ int __init init_dmars(void)
 			goto error;
 		}
 		if (!ecap_pass_through(iommu->ecap))
-			pass_through = 0;
-	}
-	if (iommu_pass_through)
-		if (!pass_through) {
-			printk(KERN_INFO
-			       "Pass Through is not supported by hardware.\n");
-			iommu_pass_through = 0;
-		}
+			hw_pass_through = 0;
+	}

 	/*
@@ -2323,30 +2321,24 @@ int __init init_dmars(void)
 		}
 	}

-	/*
-	 * If pass through is set and enabled, context entries of all pci
-	 * devices are intialized by pass through translation type.
-	 */
-	if (iommu_pass_through) {
-		ret = init_context_pass_through();
-		if (ret) {
-			printk(KERN_ERR "IOMMU: Pass through init failed.\n");
-			iommu_pass_through = 0;
-		}
-	}
+	if (iommu_pass_through)
+		iommu_identity_mapping = 1;
+#ifdef CONFIG_DMAR_BROKEN_GFX_WA
+	else
+		iommu_identity_mapping = 2;
+#endif

 	/*
 	 * If pass through is not set or not enabled, setup context entries for
 	 * identity mappings for rmrr, gfx, and isa and may fall back to static
 	 * identity mapping if iommu_identity_mapping is set.
 	 */
-	if (!iommu_pass_through) {
-#ifdef CONFIG_DMAR_BROKEN_GFX_WA
-		if (!iommu_identity_mapping)
-			iommu_identity_mapping = 2;
-#endif
-		if (iommu_identity_mapping)
-			iommu_prepare_static_identity_mapping();
+	if (iommu_identity_mapping) {
+		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
+		if (ret) {
+			printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
+			goto error;
+		}
+	}

 	/*
 	 * For each rmrr
 	 *   for each dev attached to rmrr

@@ -2379,7 +2371,6 @@ int __init init_dmars(void)
 	}

 	iommu_prepare_isa();
-	}

 	/*
 	 * for each drhd
@@ -2454,8 +2445,7 @@ static struct iova *intel_alloc_iova(struct device *dev,
 	return iova;
 }

-static struct dmar_domain *
-get_valid_domain_for_dev(struct pci_dev *pdev)
+static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
 {
 	struct dmar_domain *domain;
 	int ret;

@@ -2483,6 +2473,18 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
 	return domain;
 }

+static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
+{
+	struct device_domain_info *info;
+
+	/* No lock here, assumes no domain exit in normal case */
+	info = dev->dev.archdata.iommu;
+	if (likely(info))
+		return info->domain;
+
+	return __get_valid_domain_for_dev(dev);
+}
+
 static int iommu_dummy(struct pci_dev *pdev)
 {
 	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
@@ -2525,10 +2527,10 @@ static int iommu_no_mapping(struct device *dev)
 		 */
 		if (iommu_should_identity_map(pdev, 0)) {
 			int ret;
-			ret = domain_add_dev_info(si_domain, pdev);
-			if (ret)
-				return 0;
-			ret = domain_context_mapping(si_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
+			ret = domain_add_dev_info(si_domain, pdev,
+						  hw_pass_through ?
+						  CONTEXT_TT_PASS_THROUGH :
+						  CONTEXT_TT_MULTI_LEVEL);
 			if (!ret) {
 				printk(KERN_INFO "64bit %s uses identity mapping\n",
 				       pci_name(pdev));
@@ -2733,12 +2735,6 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 	}
 }

-static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
-			       int dir)
-{
-	intel_unmap_page(dev, dev_addr, size, dir, NULL);
-}
-
 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
 				  dma_addr_t *dma_handle, gfp_t flags)
 {

@@ -2771,7 +2767,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	size = PAGE_ALIGN(size);
 	order = get_order(size);

-	intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
+	intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
 	free_pages((unsigned long)vaddr, order);
 }

@@ -2807,11 +2803,18 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	/* free page tables */
 	dma_pte_free_pagetable(domain, start_pfn, last_pfn);

-	iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
-			      (last_pfn - start_pfn + 1));
-
-	/* free iova */
-	__free_iova(&domain->iovad, iova);
+	if (intel_iommu_strict) {
+		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
+				      last_pfn - start_pfn + 1);
+		/* free iova */
+		__free_iova(&domain->iovad, iova);
+	} else {
+		add_unmap(domain, iova);
+		/*
+		 * queue up the release of the unmap to save the 1/6th of the
+		 * cpu used up by the iotlb flush operation...
+		 */
+	}
 }

 static int intel_nontranslate_map_sg(struct device *hddev,
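
Note: the scatterlist unmap path now honours intel_iommu_strict. Strict mode keeps the old flush-per-unmap behaviour; the default defers both the IOTLB flush and the IOVA release through add_unmap(), amortizing one expensive flush over many unmaps (the in-code comment cites roughly 1/6th of the CPU saved). A minimal sketch of the batching idea, assuming a single flat queue rather than the driver's per-IOMMU deferred_flush tables and timer:

    #include <stdio.h>

    #define HIGH_WATER_MARK 250  /* hypothetical batch size */

    struct deferred { unsigned long start_pfn, npages; };
    static struct deferred queue[HIGH_WATER_MARK];
    static int pending;

    static void flush_unmaps(void)  /* stand-in for the global IOTLB flush */
    {
            printf("one IOTLB flush retires %d unmaps\n", pending);
            pending = 0;  /* only now may the queued IOVAs be reused */
    }

    /* Instead of flushing per unmap, queue the range; one flush (at a
     * high-water mark here, also on a timer in the real driver) then
     * covers the whole batch. */
    static void add_unmap(unsigned long start_pfn, unsigned long npages)
    {
            queue[pending].start_pfn = start_pfn;
            queue[pending].npages = npages;
            if (++pending == HIGH_WATER_MARK)
                    flush_unmaps();
    }

    int main(void)
    {
            for (unsigned long i = 0; i < 1000; i++)
                    add_unmap(i * 16, 16);  /* prints four flush lines */
            return 0;
    }

The tradeoff is a window in which stale IOTLB entries can still translate unmapped addresses, which is exactly why the strict mode is kept for setups that need immediate revocation.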
@@ -3194,7 +3197,7 @@ int __init intel_iommu_init(void)
 	 * Check the need for DMA-remapping initialization now.
 	 * Above initialization will also be used by Interrupt-remapping.
 	 */
-	if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
+	if (no_iommu || swiotlb || dmar_disabled)
 		return -ENODEV;

 	iommu_init_mempool();

@@ -3214,14 +3217,7 @@ int __init intel_iommu_init(void)
 	init_timer(&unmap_timer);
 	force_iommu = 1;

-	if (!iommu_pass_through) {
-		printk(KERN_INFO
-		       "Multi-level page-table translation for DMAR.\n");
-		dma_ops = &intel_dma_ops;
-	} else
-		printk(KERN_INFO
-		       "DMAR: Pass through translation for DMAR.\n");
+	dma_ops = &intel_dma_ops;

 	init_iommu_sysfs();
@@ -3504,7 +3500,6 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 	struct intel_iommu *iommu;
 	int addr_width;
 	u64 end;
-	int ret;

 	/* normally pdev is not mapped */
 	if (unlikely(domain_context_mapped(pdev))) {

@@ -3536,12 +3531,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 		return -EFAULT;
 	}

-	ret = domain_add_dev_info(dmar_domain, pdev);
-	if (ret)
-		return ret;
-
-	ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
-	return ret;
+	return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
 }

 static void intel_iommu_detach_device(struct iommu_domain *domain,
...
@@ -603,6 +603,9 @@ int __init intr_remapping_supported(void)
 	if (disable_intremap)
 		return 0;

+	if (!dmar_ir_support())
+		return 0;
+
 	for_each_drhd_unit(drhd) {
 		struct intel_iommu *iommu = drhd->iommu;

@@ -618,6 +621,11 @@ int __init enable_intr_remapping(int eim)
 	struct dmar_drhd_unit *drhd;
 	int setup = 0;

+	if (parse_ioapics_under_ir() != 1) {
+		printk(KERN_INFO "Not enable interrupt remapping\n");
+		return -1;
+	}
+
 	for_each_drhd_unit(drhd) {
 		struct intel_iommu *iommu = drhd->iommu;
...
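
Note: taken together with the dmar.c changes above, interrupt-remapping setup now checks capability before touching hardware: intr_remapping_supported() consults the DMAR table flag via dmar_ir_support() (bit 0 of the ACPI DMAR flags) plus each unit's ecap, and enable_intr_remapping() insists that every I/O APIC sits under an interrupt-remapping unit before enabling anything. A sketch of the resulting ordering, with stub functions standing in for the kernel ones (assumption: same return conventions, 1 = yes/covered):

    #include <stdio.h>

    /* Stubs standing in for the kernel functions above. */
    static int intr_remapping_supported(void) { return 1; }
    static int parse_ioapics_under_ir(void)   { return 1; }
    static int enable_hw(int eim)             { printf("IR on, eim=%d\n", eim); return 0; }

    /* The ordering the two new checks enforce: capability first (cheap,
     * table-driven), IOAPIC topology second, hardware programming last. */
    static int setup_interrupt_remapping(int eim)
    {
            if (!intr_remapping_supported())   /* DMAR table flag + per-unit ecap */
                    return 0;                  /* fall back to legacy interrupts  */
            if (parse_ioapics_under_ir() != 1) /* every IOAPIC must be under IR   */
                    return -1;
            return enable_hw(eim);             /* hypothetical per-iommu step     */
    }

    int main(void) { return setup_interrupt_remapping(1); }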
@@ -22,7 +22,6 @@
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
 {
-	spin_lock_init(&iovad->iova_alloc_lock);
 	spin_lock_init(&iovad->iova_rbtree_lock);
 	iovad->rbroot = RB_ROOT;
 	iovad->cached32_node = NULL;

@@ -205,7 +204,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 		unsigned long limit_pfn,
 		bool size_aligned)
 {
-	unsigned long flags;
 	struct iova *new_iova;
 	int ret;

@@ -219,11 +217,9 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 	if (size_aligned)
 		size = __roundup_pow_of_two(size);

-	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
 	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
 			new_iova, size_aligned);

-	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
 	if (ret) {
 		free_iova_mem(new_iova);
 		return NULL;

@@ -381,8 +377,7 @@ reserve_iova(struct iova_domain *iovad,
 	struct iova *iova;
 	unsigned int overlap = 0;

-	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
-	spin_lock(&iovad->iova_rbtree_lock);
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
 	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
 		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
 			iova = container_of(node, struct iova, node);

@@ -402,8 +397,7 @@ reserve_iova(struct iova_domain *iovad,
 	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
 finish:

-	spin_unlock(&iovad->iova_rbtree_lock);
-	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	return iova;
 }

@@ -420,8 +414,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 	unsigned long flags;
 	struct rb_node *node;

-	spin_lock_irqsave(&from->iova_alloc_lock, flags);
-	spin_lock(&from->iova_rbtree_lock);
+	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
 	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
 		struct iova *iova = container_of(node, struct iova, node);
 		struct iova *new_iova;

@@ -430,6 +423,5 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 			printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
 			       iova->pfn_lo, iova->pfn_lo);
 	}
-	spin_unlock(&from->iova_rbtree_lock);
-	spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
+	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
 }
@@ -475,4 +475,4 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
 	else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
 		quirk_usb_handoff_xhci(pdev);
 }
-DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
+DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
@@ -360,4 +360,6 @@ extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,

 extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);

+extern int dmar_ir_support(void);
+
 #endif
...
@@ -28,7 +28,6 @@ struct iova {

 /* holds all the iova translations for a domain */
 struct iova_domain {
-	spinlock_t	iova_alloc_lock;/* Lock to protect iova allocation */
 	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
 	struct rb_root	rbroot;		/* iova domain rbtree root */
 	struct rb_node	*cached32_node; /* Save last alloced node */
...