Commit 77afa904 authored by Stephen Rothwell

Merge commit 'dwmw2-iommu/master'

parents ae735d7c 074835f0
@@ -46,7 +46,7 @@ void __init swiotlb_dma_init(void)
 void __init pci_swiotlb_init(void)
 {
-	if (!iommu_detected || iommu_pass_through) {
+	if (!iommu_detected) {
 #ifdef CONFIG_IA64_GENERIC
 		swiotlb = 1;
 		printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
@@ -46,9 +46,8 @@ void __init pci_swiotlb_init(void)
 {
 	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
 #ifdef CONFIG_X86_64
-	if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN) ||
-	    iommu_pass_through)
+	if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN))
 		swiotlb = 1;
 #endif
 	if (swiotlb_force)
 		swiotlb = 1;
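Both swiotlb hunks drop the iommu_pass_through condition, so pass-through mode no longer forces the bounce-buffer path on. As a hedged sketch (the wrapper function and its parameters are illustrative; the identifiers inside come from the hunk above), the resulting x86 policy is:

/* Hedged sketch of the post-change policy in pci_swiotlb_init() on x86;
 * not the literal kernel code. */
int want_swiotlb(int iommu_detected, int no_iommu, int swiotlb_force,
                 unsigned long max_pfn, unsigned long max_dma32_pfn)
{
        int swiotlb = 0;

        /* no hardware IOMMU, iommu=off not given, and RAM above 4GiB */
        if (!iommu_detected && !no_iommu && max_pfn > max_dma32_pfn)
                swiotlb = 1;
        /* swiotlb=force on the command line always wins */
        if (swiotlb_force)
                swiotlb = 1;
        return swiotlb;
}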
@@ -570,9 +570,6 @@ int __init dmar_table_init(void)
 	printk(KERN_INFO PREFIX "No ATSR found\n");
 #endif
 
-#ifdef CONFIG_INTR_REMAP
-	parse_ioapics_under_ir();
-#endif
 	return 0;
 }
@@ -632,20 +629,31 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
 	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
 
+	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
+		/* Promote an attitude of violence to a BIOS engineer today */
+		WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
+		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+		     drhd->reg_base_addr,
+		     dmi_get_system_info(DMI_BIOS_VENDOR),
+		     dmi_get_system_info(DMI_BIOS_VERSION),
+		     dmi_get_system_info(DMI_PRODUCT_VERSION));
+		goto err_unmap;
+	}
+
 #ifdef CONFIG_DMAR
 	agaw = iommu_calculate_agaw(iommu);
 	if (agaw < 0) {
 		printk(KERN_ERR
 			"Cannot get a valid agaw for iommu (seq_id = %d)\n",
 			iommu->seq_id);
-		goto error;
+		goto err_unmap;
 	}
 	msagaw = iommu_calculate_max_sagaw(iommu);
 	if (msagaw < 0) {
 		printk(KERN_ERR
 			"Cannot get a valid max agaw for iommu (seq_id = %d)\n",
 			iommu->seq_id);
-		goto error;
+		goto err_unmap;
 	}
 #endif
 	iommu->agaw = agaw;
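The new guard rejects firmware that advertises a DMAR unit at an address where nothing responds: reads of the capability registers through such a mapping come back as all ones, so any values parsed from them would be garbage. A tiny standalone check (illustrative, not kernel code) of why (uint64_t)-1 is that all-ones pattern:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        /* -1 converted to an unsigned 64-bit type is all ones, the value
         * a read from an unbacked MMIO range typically returns. */
        assert((uint64_t)-1 == UINT64_MAX);
        assert(UINT64_MAX == 0xffffffffffffffffULL);
        return 0;
}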
@@ -665,7 +673,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	}
 
 	ver = readl(iommu->reg + DMAR_VER_REG);
-	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
+	pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
 		(unsigned long long)drhd->reg_base_addr,
 		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
 		(unsigned long long)iommu->cap,
@@ -675,7 +683,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	drhd->iommu = iommu;
 	return 0;
 
-error:
+ err_unmap:
+	iounmap(iommu->reg);
+ error:
 	kfree(iommu);
 	return -1;
 }
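Failures before the registers are successfully mapped still take the old error label (free only), while later failures take the new err_unmap label, which unmaps and then falls through into error. A minimal user-space analogue of this staged-unwind idiom (all names illustrative; malloc/free stand in for kzalloc/kfree and ioremap/iounmap):

#include <stdlib.h>

/* Later failure points jump to earlier labels, and each label falls
 * through into the next, so resources are released in reverse order of
 * acquisition. */
static int setup(int fail_late)
{
        char *buf, *map;

        buf = malloc(64);                  /* like kzalloc(iommu) */
        if (!buf)
                return -1;

        map = malloc(4096);                /* like ioremap(reg_base) */
        if (!map)
                goto err_free;             /* nothing mapped: free only */

        if (fail_late)                     /* e.g. registers read all ones */
                goto err_unmap;            /* unmap, then fall into free */

        free(map);                         /* success path (kept in the
                                              real code; freed here only
                                              to avoid a leak) */
        free(buf);
        return 0;

err_unmap:
        free(map);                         /* like iounmap(iommu->reg) */
err_free:
        free(buf);                         /* like kfree(iommu) */
        return -1;
}

int main(void)
{
        return setup(0);
}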
@@ -1212,7 +1223,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
 				source_id, guest_addr);
 
 		fault_index++;
-		if (fault_index > cap_num_fault_regs(iommu->cap))
+		if (fault_index >= cap_num_fault_regs(iommu->cap))
 			fault_index = 0;
 		spin_lock_irqsave(&iommu->register_lock, flag);
 	}
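The fault-register index is a ring over N = cap_num_fault_regs() slots, with valid indices 0..N-1; the old > test let the index reach N itself before wrapping, touching one register past the end. A self-contained sketch of the corrected wrap:

/* With N slots the valid indices are 0..N-1.  ">= N" wraps at the
 * right place; this is "idx = (idx + 1) % N" spelled without a
 * division. */
unsigned int next_slot(unsigned int idx, unsigned int nslots)
{
        idx++;
        if (idx >= nslots)
                idx = 0;
        return idx;
}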
@@ -1305,3 +1316,13 @@ int dmar_reenable_qi(struct intel_iommu *iommu)
 
 	return 0;
 }
+
+/*
+ * Check interrupt remapping support in DMAR table description.
+ */
+int dmar_ir_support(void)
+{
+	struct acpi_table_dmar *dmar;
+	dmar = (struct acpi_table_dmar *)dmar_tbl;
+	return dmar->flags & 0x1;
+}
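dmar_ir_support() tests bit 0 of the DMAR ACPI table's flags field, which (per the VT-d specification) indicates that the platform supports interrupt remapping. A hedged sketch giving the magic 0x1 an illustrative name (the constant is not something this patch defines):

#include <stdio.h>

#define DMAR_FLAG_INTR_REMAP  (1 << 0)  /* hypothetical name for bit 0 */

int main(void)
{
        unsigned char flags = 0x1;      /* as read from the DMAR table */
        printf("intr remap supported: %d\n",
               !!(flags & DMAR_FLAG_INTR_REMAP));
        return 0;
}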
@@ -603,6 +603,9 @@ int __init intr_remapping_supported(void)
 	if (disable_intremap)
 		return 0;
 
+	if (!dmar_ir_support())
+		return 0;
+
 	for_each_drhd_unit(drhd) {
 		struct intel_iommu *iommu = drhd->iommu;
@@ -618,6 +621,11 @@ int __init enable_intr_remapping(int eim)
 	struct dmar_drhd_unit *drhd;
 	int setup = 0;
 
+	if (parse_ioapics_under_ir() != 1) {
+		printk(KERN_INFO "Not enable interrupt remapping\n");
+		return -1;
+	}
+
 	for_each_drhd_unit(drhd) {
 		struct intel_iommu *iommu = drhd->iommu;
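This is where the parse_ioapics_under_ir() call removed from dmar_table_init() above ends up: instead of running unconditionally at table-parse time, it now gates enable_intr_remapping(), which refuses to proceed unless every I/O APIC sits under an interrupt-remapping-capable unit. A hedged, self-contained sketch of the new gate (the stub is hard-wired to 1 for illustration; in the kernel the result depends on the DMAR scope entries):

#include <stdio.h>

/* Stub for the kernel's parse_ioapics_under_ir(): returns 1 only when
 * every I/O APIC is covered by an interrupt-remapping unit. */
static int parse_ioapics_under_ir(void)
{
        return 1;
}

/* Sketch of the new early-out at the top of enable_intr_remapping(). */
static int enable_intr_remapping_sketch(int eim)
{
        if (parse_ioapics_under_ir() != 1) {
                /* message wording as in the patch */
                printf("Not enable interrupt remapping\n");
                return -1;
        }
        /* ... program each DRHD unit, honouring the eim mode ... */
        (void)eim;
        return 0;
}

int main(void)
{
        return enable_intr_remapping_sketch(1) ? 1 : 0;
}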
@@ -22,7 +22,6 @@
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
 {
-	spin_lock_init(&iovad->iova_alloc_lock);
 	spin_lock_init(&iovad->iova_rbtree_lock);
 	iovad->rbroot = RB_ROOT;
 	iovad->cached32_node = NULL;
@@ -205,7 +204,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 	unsigned long limit_pfn,
 	bool size_aligned)
 {
-	unsigned long flags;
 	struct iova *new_iova;
 	int ret;
@@ -219,11 +217,9 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 	if (size_aligned)
 		size = __roundup_pow_of_two(size);
 
-	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
 	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
 			new_iova, size_aligned);
 
-	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
 	if (ret) {
 		free_iova_mem(new_iova);
 		return NULL;
@@ -381,8 +377,7 @@ reserve_iova(struct iova_domain *iovad,
 	struct iova *iova;
 	unsigned int overlap = 0;
 
-	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
-	spin_lock(&iovad->iova_rbtree_lock);
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
 	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
 		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
 			iova = container_of(node, struct iova, node);
@@ -402,8 +397,7 @@ reserve_iova(struct iova_domain *iovad,
 	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
 finish:
 
-	spin_unlock(&iovad->iova_rbtree_lock);
-	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	return iova;
 }
@@ -420,8 +414,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 	unsigned long flags;
 	struct rb_node *node;
 
-	spin_lock_irqsave(&from->iova_alloc_lock, flags);
-	spin_lock(&from->iova_rbtree_lock);
+	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
 	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
 		struct iova *iova = container_of(node, struct iova, node);
 		struct iova *new_iova;
@@ -430,6 +423,5 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 			printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
 				iova->pfn_lo, iova->pfn_lo);
 	}
-	spin_unlock(&from->iova_rbtree_lock);
-	spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
+	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
 }
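Across iova.c, every taker of iova_alloc_lock immediately took iova_rbtree_lock as well (or called a helper that does), so the outer lock serialized nothing the inner lock did not already cover; it is deleted, and the rbtree lock is promoted to the irqsave variant where it becomes outermost. A minimal user-space analogue of collapsing such a redundant lock pair (all names illustrative):

#include <pthread.h>

/* When an outer lock is only ever taken immediately around an inner
 * lock that already covers every access to the shared structure, the
 * pair collapses to one lock. */
static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static int tree_size;                   /* stands in for the iova rbtree */

void reserve_range(int n)
{
        /* before: lock(alloc_lock); lock(tree_lock); ... unlock both */
        pthread_mutex_lock(&tree_lock); /* after: one lock suffices */
        tree_size += n;                 /* walk/update the tree */
        pthread_mutex_unlock(&tree_lock);
}

int main(void)
{
        reserve_range(1);
        return 0;
}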
@@ -475,4 +475,4 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
 	else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
 		quirk_usb_handoff_xhci(pdev);
 }
-DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
+DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
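Switching the USB handoff quirk from the FINAL to the HEADER fixup stage makes it run while each device's config header is parsed, well before FINAL fixups, which run after resource assignment; presumably the point is to finish the BIOS/OS handoff before DMA remapping gets initialized. A hedged sketch of a header-stage quirk (the quirk itself is hypothetical; the macro and stage are kernel API of this era):

#include <linux/pci.h>

/* Hypothetical example: runs at header-fixup time (early, as the
 * config header is read), as opposed to FINAL (after resource
 * allocation). */
static void __devinit quirk_example_early(struct pci_dev *pdev)
{
        dev_info(&pdev->dev, "early (header-stage) quirk ran\n");
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_example_early);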
@@ -360,4 +360,6 @@ extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
 extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 
+extern int dmar_ir_support(void);
+
 #endif
@@ -28,7 +28,6 @@ struct iova {
 /* holds all the iova translations for a domain */
 struct iova_domain {
-	spinlock_t	iova_alloc_lock;/* Lock to protect iova allocation */
 	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
 	struct rb_root	rbroot;		/* iova domain rbtree root */
 	struct rb_node	*cached32_node; /* Save last alloced node */