Commit 77afa904 authored Sep 15, 2009 by Stephen Rothwell

Merge commit 'dwmw2-iommu/master'

parents ae735d7c 074835f0

Showing 9 changed files with 187 additions and 176 deletions:
arch/ia64/kernel/pci-swiotlb.c   +1   -1
arch/x86/kernel/pci-swiotlb.c    +2   -3
drivers/pci/dmar.c               +29  -8
drivers/pci/intel-iommu.c        +140 -150
drivers/pci/intr_remapping.c     +8   -0
drivers/pci/iova.c               +4   -12
drivers/usb/host/pci-quirks.c    +1   -1
include/linux/intel-iommu.h      +2   -0
include/linux/iova.h             +0   -1
arch/ia64/kernel/pci-swiotlb.c

@@ -46,7 +46,7 @@ void __init swiotlb_dma_init(void)
 
 void __init pci_swiotlb_init(void)
 {
-	if (!iommu_detected || iommu_pass_through) {
+	if (!iommu_detected) {
 #ifdef CONFIG_IA64_GENERIC
 		swiotlb = 1;
 		printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
arch/x86/kernel/pci-swiotlb.c

@@ -46,9 +46,8 @@ void __init pci_swiotlb_init(void)
 {
 	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
 #ifdef CONFIG_X86_64
-	if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN) ||
-	    iommu_pass_through)
-		swiotlb = 1;
+	if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN))
+		swiotlb = 1;
 #endif
 	if (swiotlb_force)
 		swiotlb = 1;
drivers/pci/dmar.c

@@ -570,9 +570,6 @@ int __init dmar_table_init(void)
 		printk(KERN_INFO PREFIX "No ATSR found\n");
 #endif
 
-#ifdef CONFIG_INTR_REMAP
-	parse_ioapics_under_ir();
-#endif
 	return 0;
 }

@@ -632,20 +629,31 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
 	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
 
+	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
+		/* Promote an attitude of violence to a BIOS engineer today */
+		WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
+		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+		     drhd->reg_base_addr,
+		     dmi_get_system_info(DMI_BIOS_VENDOR),
+		     dmi_get_system_info(DMI_BIOS_VERSION),
+		     dmi_get_system_info(DMI_PRODUCT_VERSION));
+		goto err_unmap;
+	}
+
 #ifdef CONFIG_DMAR
 	agaw = iommu_calculate_agaw(iommu);
 	if (agaw < 0) {
 		printk(KERN_ERR
 		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
 		       iommu->seq_id);
-		goto error;
+		goto err_unmap;
 	}
 	msagaw = iommu_calculate_max_sagaw(iommu);
 	if (msagaw < 0) {
 		printk(KERN_ERR
 			"Cannot get a valid max agaw for iommu (seq_id = %d)\n",
 			iommu->seq_id);
-		goto error;
+		goto err_unmap;
 	}
 #endif
 	iommu->agaw = agaw;

@@ -665,7 +673,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	}
 
 	ver = readl(iommu->reg + DMAR_VER_REG);
-	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
+	pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
 		(unsigned long long)drhd->reg_base_addr,
 		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
 		(unsigned long long)iommu->cap,

@@ -675,7 +683,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	drhd->iommu = iommu;
 	return 0;
 
-error:
+ err_unmap:
+	iounmap(iommu->reg);
+ error:
 	kfree(iommu);
 	return -1;
 }
@@ -1212,7 +1223,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
 				source_id, guest_addr);
 
 		fault_index++;
-		if (fault_index > cap_num_fault_regs(iommu->cap))
+		if (fault_index >= cap_num_fault_regs(iommu->cap))
 			fault_index = 0;
 		spin_lock_irqsave(&iommu->register_lock, flag);
 	}
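The hunk above is a one-character off-by-one fix: with ">" the fault index could reach cap_num_fault_regs() (one past the last fault register) before wrapping. A minimal userspace sketch of the corrected wraparound, with the register count assumed rather than read from hardware:

/* sketch only -- not kernel code; num_fault_regs is an assumed value */
#include <stdio.h>

int main(void)
{
	const int num_fault_regs = 4;   /* stand-in for cap_num_fault_regs() */
	int fault_index = 0;

	for (int event = 0; event < 6; event++) {
		printf("servicing fault register %d\n", fault_index);
		fault_index++;
		if (fault_index >= num_fault_regs)   /* the corrected comparison */
			fault_index = 0;
	}
	return 0;
}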
@@ -1305,3 +1316,13 @@ int dmar_reenable_qi(struct intel_iommu *iommu)
 
 	return 0;
 }
+
+/*
+ * Check interrupt remapping support in DMAR table description.
+ */
+int dmar_ir_support(void)
+{
+	struct acpi_table_dmar *dmar;
+	dmar = (struct acpi_table_dmar *)dmar_tbl;
+	return dmar->flags & 0x1;
+}
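The new dmar_ir_support() helper simply tests bit 0 of the ACPI DMAR table's flags field, which advertises interrupt remapping support. A hedged userspace sketch of the same bit test (the flags value below is an assumed example, not read from firmware):

/* sketch only -- illustrates the bit test dmar_ir_support() performs */
#include <stdio.h>

#define DMAR_FLAGS_INTR_REMAP 0x1   /* bit 0 of the DMAR flags field */

int main(void)
{
	unsigned char flags = 0x1;      /* assumed example value */

	printf("interrupt remapping %ssupported by DMAR table\n",
	       (flags & DMAR_FLAGS_INTR_REMAP) ? "" : "not ");
	return 0;
}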
drivers/pci/intel-iommu.c

@@ -37,6 +37,7 @@
 #include <linux/iommu.h>
 #include <linux/intel-iommu.h>
 #include <linux/sysdev.h>
+#include <linux/dmi.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
 #include "pci.h"

@@ -251,7 +252,8 @@ static inline int first_pte_in_page(struct dma_pte *pte)
  * 2. It maps to each iommu if successful.
  * 3. Each iommu mapps to this domain if successful.
  */
-struct dmar_domain *si_domain;
+static struct dmar_domain *si_domain;
+static int hw_pass_through = 1;
 
 /* devices under the same p2p bridge are owned in one domain */
 #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

@@ -1157,6 +1159,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 	pr_debug("Number of Domains supportd <%ld>\n", ndomains);
 	nlongs = BITS_TO_LONGS(ndomains);
 
+	spin_lock_init(&iommu->lock);
+
 	/* TBD: there might be 64K domains,
 	 * consider other allocation for future chip
 	 */

@@ -1169,12 +1173,9 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 			GFP_KERNEL);
 	if (!iommu->domains) {
 		printk(KERN_ERR "Allocating domain array failed\n");
-		kfree(iommu->domain_ids);
 		return -ENOMEM;
 	}
 
-	spin_lock_init(&iommu->lock);
-
 	/*
 	 * if Caching mode is set, then invalid translations are tagged
 	 * with domainid 0. Hence we need to pre-allocate it.

@@ -1194,22 +1195,24 @@ void free_dmar_iommu(struct intel_iommu *iommu)
 	int i;
 	unsigned long flags;
 
-	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
-	for (; i < cap_ndoms(iommu->cap); ) {
-		domain = iommu->domains[i];
-		clear_bit(i, iommu->domain_ids);
-
-		spin_lock_irqsave(&domain->iommu_lock, flags);
-		if (--domain->iommu_count == 0) {
-			if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
-				vm_domain_exit(domain);
-			else
-				domain_exit(domain);
-		}
-		spin_unlock_irqrestore(&domain->iommu_lock, flags);
+	if ((iommu->domains) && (iommu->domain_ids)) {
+		i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
+		for (; i < cap_ndoms(iommu->cap); ) {
+			domain = iommu->domains[i];
+			clear_bit(i, iommu->domain_ids);
+
+			spin_lock_irqsave(&domain->iommu_lock, flags);
+			if (--domain->iommu_count == 0) {
+				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
+					vm_domain_exit(domain);
+				else
+					domain_exit(domain);
+			}
+			spin_unlock_irqrestore(&domain->iommu_lock, flags);
 
-		i = find_next_bit(iommu->domain_ids,
-			cap_ndoms(iommu->cap), i+1);
+			i = find_next_bit(iommu->domain_ids,
+					  cap_ndoms(iommu->cap), i+1);
+		}
 	}
 
 	if (iommu->gcmd & DMA_GCMD_TE)

@@ -1309,7 +1312,6 @@ static void iommu_detach_domain(struct dmar_domain *domain,
 }
 
 static struct iova_domain reserved_iova_list;
-static struct lock_class_key reserved_alloc_key;
 static struct lock_class_key reserved_rbtree_key;
 
 static void dmar_init_reserved_ranges(void)

@@ -1320,8 +1322,6 @@ static void dmar_init_reserved_ranges(void)
 
 	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
 
-	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
-		&reserved_alloc_key);
 	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
 		&reserved_rbtree_key);

@@ -1958,14 +1958,35 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
 	struct dmar_domain *domain;
 	int ret;
 
-	printk(KERN_INFO
-	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
-	       pci_name(pdev), start, end);
-
 	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
 	if (!domain)
 		return -ENOMEM;
 
+	/* For _hardware_ passthrough, don't bother. But for software
+	   passthrough, we do it anyway -- it may indicate a memory
+	   range which is reserved in E820, so which didn't get set
+	   up to start with in si_domain */
+	if (domain == si_domain && hw_pass_through) {
+		printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
+		       pci_name(pdev), start, end);
+		return 0;
+	}
+
+	printk(KERN_INFO
+	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
+	       pci_name(pdev), start, end);
+
+	if (end >> agaw_to_width(domain->agaw)) {
+		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
+		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+		     agaw_to_width(domain->agaw),
+		     dmi_get_system_info(DMI_BIOS_VENDOR),
+		     dmi_get_system_info(DMI_BIOS_VERSION),
+		     dmi_get_system_info(DMI_PRODUCT_VERSION));
+		ret = -EIO;
+		goto error;
+	}
+
 	ret = iommu_domain_identity_map(domain, start, end);
 	if (ret)
 		goto error;

@@ -2016,23 +2037,6 @@ static inline void iommu_prepare_isa(void)
 }
 #endif /* !CONFIG_DMAR_FLPY_WA */
 
-/* Initialize each context entry as pass through.*/
-static int __init init_context_pass_through(void)
-{
-	struct pci_dev *pdev = NULL;
-	struct dmar_domain *domain;
-	int ret;
-
-	for_each_pci_dev(pdev) {
-		domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
-		ret = domain_context_mapping(domain, pdev,
-					     CONTEXT_TT_PASS_THROUGH);
-		if (ret)
-			return ret;
-	}
-	return 0;
-}
-
 static int md_domain_init(struct dmar_domain *domain, int guest_width);
 
 static int __init si_domain_work_fn(unsigned long start_pfn,

@@ -2047,7 +2051,7 @@ static int __init si_domain_work_fn(unsigned long start_pfn,
 }
 
-static int si_domain_init(void)
+static int __init si_domain_init(int hw)
 {
 	struct dmar_drhd_unit *drhd;
 	struct intel_iommu *iommu;

@@ -2074,6 +2078,9 @@ static int si_domain_init(void)
 
 	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
 
+	if (hw)
+		return 0;
+
 	for_each_online_node(nid) {
 		work_with_active_regions(nid, si_domain_work_fn, &ret);
 		if (ret)

@@ -2100,15 +2107,23 @@ static int identity_mapping(struct pci_dev *pdev)
 }
 
 static int domain_add_dev_info(struct dmar_domain *domain,
-			       struct pci_dev *pdev)
+			       struct pci_dev *pdev,
+			       int translation)
 {
 	struct device_domain_info *info;
 	unsigned long flags;
+	int ret;
 
 	info = alloc_devinfo_mem();
 	if (!info)
 		return -ENOMEM;
 
+	ret = domain_context_mapping(domain, pdev, translation);
+	if (ret) {
+		free_devinfo_mem(info);
+		return ret;
+	}
+
 	info->segment = pci_domain_nr(pdev->bus);
 	info->bus = pdev->bus->number;
 	info->devfn = pdev->devfn;
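The reworked domain_add_dev_info() above now performs the context mapping itself and unwinds the freshly allocated info structure when the mapping fails, so callers get a single call with a clean error path. A minimal sketch of that allocate-try-unwind ordering (all names below are hypothetical stand-ins, not the kernel's):

/* sketch only -- userspace analogue of the error-unwinding pattern */
#include <stdio.h>
#include <stdlib.h>

struct dev_info {
	int bus;                        /* stand-in for the bus/devfn fields */
};

/* stand-in for domain_context_mapping(); fails for a negative type */
static int context_map(int translation)
{
	return translation < 0 ? -1 : 0;
}

static int add_dev_info(int translation)
{
	struct dev_info *info = malloc(sizeof(*info));

	if (!info)
		return -1;              /* -ENOMEM in the kernel version */

	if (context_map(translation)) {
		free(info);             /* unwind: nothing was published yet */
		return -1;
	}

	info->bus = 0;                  /* fields are filled in only on success */
	printf("device info registered (bus %d)\n", info->bus);
	free(info);
	return 0;
}

int main(void)
{
	return add_dev_info(1) ? 1 : 0;
}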
@@ -2165,27 +2180,25 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
 	return 1;
 }
 
-static int iommu_prepare_static_identity_mapping(void)
+static int __init iommu_prepare_static_identity_mapping(int hw)
 {
 	struct pci_dev *pdev = NULL;
 	int ret;
 
-	ret = si_domain_init();
+	ret = si_domain_init(hw);
 	if (ret)
 		return -EFAULT;
 
 	for_each_pci_dev(pdev) {
 		if (iommu_should_identity_map(pdev, 1)) {
-			printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
-			       pci_name(pdev));
+			printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
+			       hw ? "hardware" : "software", pci_name(pdev));
 
-			ret = domain_context_mapping(si_domain, pdev,
-						     CONTEXT_TT_MULTI_LEVEL);
-			if (ret)
-				return ret;
-			ret = domain_add_dev_info(si_domain, pdev);
+			ret = domain_add_dev_info(si_domain, pdev,
+						  hw ? CONTEXT_TT_PASS_THROUGH :
+						       CONTEXT_TT_MULTI_LEVEL);
 			if (ret)
 				return ret;
 		}
 	}

@@ -2199,14 +2212,6 @@ int __init init_dmars(void)
 	struct pci_dev *pdev;
 	struct intel_iommu *iommu;
 	int i, ret;
-	int pass_through = 1;
-
-	/*
-	 * In case pass through can not be enabled, iommu tries to use identity
-	 * mapping.
-	 */
-	if (iommu_pass_through)
-		iommu_identity_mapping = 1;
 
 	/*
 	 * for each drhd

@@ -2234,7 +2239,6 @@ int __init init_dmars(void)
 		deferred_flush = kzalloc(g_num_of_iommus *
 			sizeof(struct deferred_flush_tables), GFP_KERNEL);
 	if (!deferred_flush) {
-		kfree(g_iommus);
 		ret = -ENOMEM;
 		goto error;
 	}

@@ -2261,14 +2265,8 @@ int __init init_dmars(void)
 			goto error;
 		}
 		if (!ecap_pass_through(iommu->ecap))
-			pass_through = 0;
+			hw_pass_through = 0;
 	}
-	if (iommu_pass_through)
-		if (!pass_through) {
-			printk(KERN_INFO
-			       "Pass Through is not supported by hardware.\n");
-			iommu_pass_through = 0;
-		}
 
 	/*
 	 * Start from the sane iommu hardware state.

@@ -2323,64 +2321,57 @@ int __init init_dmars(void)
 		}
 	}
 
+	if (iommu_pass_through)
+		iommu_identity_mapping = 1;
+#ifdef CONFIG_DMAR_BROKEN_GFX_WA
+	else
+		iommu_identity_mapping = 2;
+#endif
 	/*
-	 * If pass through is set and enabled, context entries of all pci
-	 * devices are intialized by pass through translation type.
+	 * If pass through is not set or not enabled, setup context entries for
+	 * identity mappings for rmrr, gfx, and isa and may fall back to static
+	 * identity mapping if iommu_identity_mapping is set.
 	 */
-	if (iommu_pass_through) {
-		ret = init_context_pass_through();
+	if (iommu_identity_mapping) {
+		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
 		if (ret) {
-			printk(KERN_ERR "IOMMU: Pass through init failed.\n");
-			iommu_pass_through = 0;
+			printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
+			goto error;
 		}
 	}
-
 	/*
-	 * If pass through is not set or not enabled, setup context entries for
-	 * identity mappings for rmrr, gfx, and isa and may fall back to static
-	 * identity mapping if iommu_identity_mapping is set.
+	 * For each rmrr
+	 *   for each dev attached to rmrr
+	 *   do
+	 *     locate drhd for dev, alloc domain for dev
+	 *     allocate free domain
+	 *     allocate page table entries for rmrr
+	 *     if context not allocated for bus
+	 *           allocate and init context
+	 *           set present in root table for this bus
+	 *     init context with domain, translation etc
+	 *    endfor
+	 * endfor
 	 */
-	if (!iommu_pass_through) {
-#ifdef CONFIG_DMAR_BROKEN_GFX_WA
-		if (!iommu_identity_mapping)
-			iommu_identity_mapping = 2;
-#endif
-		if (iommu_identity_mapping)
-			iommu_prepare_static_identity_mapping();
-		/*
-		 * For each rmrr
-		 *   for each dev attached to rmrr
-		 *   do
-		 *     locate drhd for dev, alloc domain for dev
-		 *     allocate free domain
-		 *     allocate page table entries for rmrr
-		 *     if context not allocated for bus
-		 *           allocate and init context
-		 *           set present in root table for this bus
-		 *     init context with domain, translation etc
-		 *    endfor
-		 * endfor
-		 */
-		printk(KERN_INFO "IOMMU: Setting RMRR:\n");
-		for_each_rmrr_units(rmrr) {
-			for (i = 0; i < rmrr->devices_cnt; i++) {
-				pdev = rmrr->devices[i];
-				/*
-				 * some BIOS lists non-exist devices in DMAR
-				 * table.
-				 */
-				if (!pdev)
-					continue;
-				ret = iommu_prepare_rmrr_dev(rmrr, pdev);
-				if (ret)
-					printk(KERN_ERR
-					       "IOMMU: mapping reserved region failed\n");
-			}
+	printk(KERN_INFO "IOMMU: Setting RMRR:\n");
+	for_each_rmrr_units(rmrr) {
+		for (i = 0; i < rmrr->devices_cnt; i++) {
+			pdev = rmrr->devices[i];
+			/*
+			 * some BIOS lists non-exist devices in DMAR
+			 * table.
+			 */
+			if (!pdev)
+				continue;
+			ret = iommu_prepare_rmrr_dev(rmrr, pdev);
+			if (ret)
+				printk(KERN_ERR
+				       "IOMMU: mapping reserved region failed\n");
 		}
-
-		iommu_prepare_isa();
 	}
 
+	iommu_prepare_isa();
+
 	/*
 	 * for each drhd
 	 *   enable fault log

@@ -2454,8 +2445,7 @@ static struct iova *intel_alloc_iova(struct device *dev,
 	return iova;
 }
 
-static struct dmar_domain *
-get_valid_domain_for_dev(struct pci_dev *pdev)
+static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
 {
 	struct dmar_domain *domain;
 	int ret;

@@ -2483,6 +2473,18 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
 	return domain;
 }
 
+static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
+{
+	struct device_domain_info *info;
+
+	/* No lock here, assumes no domain exit in normal case */
+	info = dev->dev.archdata.iommu;
+	if (likely(info))
+		return info->domain;
+
+	return __get_valid_domain_for_dev(dev);
+}
+
 static int iommu_dummy(struct pci_dev *pdev)
 {
 	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
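The split above gives get_valid_domain_for_dev() a lock-free fast path: the domain pointer cached in dev->dev.archdata.iommu is returned directly, and the expensive __get_valid_domain_for_dev() path only runs on the first lookup. A rough userspace analogue of the pattern (names are illustrative, not the kernel's):

/* sketch only -- cache a pointer once, skip the slow path afterwards */
#include <stdio.h>

struct domain { int id; };

static struct domain the_domain = { 42 };
static struct domain *cached;          /* plays the role of dev->dev.archdata.iommu */

static struct domain *slow_lookup(void)
{
	printf("slow path taken\n");
	cached = &the_domain;          /* populate the cache */
	return cached;
}

static struct domain *get_domain(void)
{
	if (cached)                    /* fast path: no locks, no allocation */
		return cached;
	return slow_lookup();
}

int main(void)
{
	get_domain();                  /* slow path once */
	get_domain();                  /* fast path afterwards */
	return 0;
}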
@@ -2525,10 +2527,10 @@ static int iommu_no_mapping(struct device *dev)
 		 */
 		if (iommu_should_identity_map(pdev, 0)) {
 			int ret;
-			ret = domain_add_dev_info(si_domain, pdev);
-			if (ret)
-				return 0;
-			ret = domain_context_mapping(si_domain, pdev,
-						     CONTEXT_TT_MULTI_LEVEL);
+			ret = domain_add_dev_info(si_domain, pdev,
+						  hw_pass_through ?
+						  CONTEXT_TT_PASS_THROUGH :
+						  CONTEXT_TT_MULTI_LEVEL);
 			if (!ret) {
 				printk(KERN_INFO "64bit %s uses identity mapping\n",
 				       pci_name(pdev));

@@ -2733,12 +2735,6 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 	}
 }
 
-static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
-			       size_t size, int dir)
-{
-	intel_unmap_page(dev, dev_addr, size, dir, NULL);
-}
-
 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
 				  dma_addr_t *dma_handle, gfp_t flags)
 {

@@ -2771,7 +2767,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
 
-	intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
+	intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
 	free_pages((unsigned long)vaddr, order);
 }

@@ -2807,11 +2803,18 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	/* free page tables */
 	dma_pte_free_pagetable(domain, start_pfn, last_pfn);
 
-	iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
-			      (last_pfn - start_pfn + 1));
-
-	/* free iova */
-	__free_iova(&domain->iovad, iova);
+	if (intel_iommu_strict) {
+		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
+				      last_pfn - start_pfn + 1);
+		/* free iova */
+		__free_iova(&domain->iovad, iova);
+	} else {
+		add_unmap(domain, iova);
+		/*
+		 * queue up the release of the unmap to save the 1/6th of the
+		 * cpu used up by the iotlb flush operation...
+		 */
+	}
 }
 
 static int intel_nontranslate_map_sg(struct device *hddev,
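With the hunk above, intel_unmap_sg() honours intel_iommu_strict: strict mode flushes the IOTLB on every unmap, while the default defers the release via add_unmap() and amortizes the flush over a batch. A small userspace sketch of that trade-off, with the batch size assumed:

/* sketch only -- strict vs. batched invalidation, not the kernel code */
#include <stdio.h>

#define BATCH 4

static int strict = 0;       /* stand-in for intel_iommu_strict */
static int pending;

static void flush_iotlb(void)
{
	printf("iotlb flush (%d pending)\n", pending);
	pending = 0;
}

static void unmap_range(void)
{
	if (strict) {
		pending = 1;
		flush_iotlb();        /* pay the flush cost on every unmap */
	} else if (++pending >= BATCH) {
		flush_iotlb();        /* amortize one flush over BATCH unmaps */
	}
}

int main(void)
{
	for (int i = 0; i < 8; i++)
		unmap_range();
	if (pending)
		flush_iotlb();        /* drain leftovers, as the timer would */
	return 0;
}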
@@ -3194,7 +3197,7 @@ int __init intel_iommu_init(void)
 	 * Check the need for DMA-remapping initialization now.
 	 * Above initialization will also be used by Interrupt-remapping.
 	 */
-	if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
+	if (no_iommu || swiotlb || dmar_disabled)
 		return -ENODEV;
 
 	iommu_init_mempool();

@@ -3214,14 +3217,7 @@ int __init intel_iommu_init(void)
 	init_timer(&unmap_timer);
 	force_iommu = 1;
-
-	if (!iommu_pass_through) {
-		printk(KERN_INFO
-		       "Multi-level page-table translation for DMAR.\n");
-		dma_ops = &intel_dma_ops;
-	} else
-		printk(KERN_INFO
-		       "DMAR: Pass through translation for DMAR.\n");
+	dma_ops = &intel_dma_ops;
 
 	init_iommu_sysfs();

@@ -3504,7 +3500,6 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 	struct intel_iommu *iommu;
 	int addr_width;
 	u64 end;
-	int ret;
 
 	/* normally pdev is not mapped */
 	if (unlikely(domain_context_mapped(pdev))) {

@@ -3536,12 +3531,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 		return -EFAULT;
 	}
 
-	ret = domain_add_dev_info(dmar_domain, pdev);
-	if (ret)
-		return ret;
-
-	ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
-	return ret;
+	return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
 }
 
 static void intel_iommu_detach_device(struct iommu_domain *domain,
drivers/pci/intr_remapping.c

@@ -603,6 +603,9 @@ int __init intr_remapping_supported(void)
 	if (disable_intremap)
 		return 0;
 
+	if (!dmar_ir_support())
+		return 0;
+
 	for_each_drhd_unit(drhd) {
 		struct intel_iommu *iommu = drhd->iommu;

@@ -618,6 +621,11 @@ int __init enable_intr_remapping(int eim)
 	struct dmar_drhd_unit *drhd;
 	int setup = 0;
 
+	if (parse_ioapics_under_ir() != 1) {
+		printk(KERN_INFO "Not enable interrupt remapping\n");
+		return -1;
+	}
+
 	for_each_drhd_unit(drhd) {
 		struct intel_iommu *iommu = drhd->iommu;
drivers/pci/iova.c

@@ -22,7 +22,6 @@
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
 {
-	spin_lock_init(&iovad->iova_alloc_lock);
 	spin_lock_init(&iovad->iova_rbtree_lock);
 	iovad->rbroot = RB_ROOT;
 	iovad->cached32_node = NULL;

@@ -205,7 +204,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 	unsigned long limit_pfn,
 	bool size_aligned)
 {
-	unsigned long flags;
 	struct iova *new_iova;
 	int ret;

@@ -219,11 +217,9 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 	if (size_aligned)
 		size = __roundup_pow_of_two(size);
 
-	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
 	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
 			new_iova, size_aligned);
 
-	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
 	if (ret) {
 		free_iova_mem(new_iova);
 		return NULL;

@@ -381,8 +377,7 @@ reserve_iova(struct iova_domain *iovad,
 	struct iova *iova;
 	unsigned int overlap = 0;
 
-	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
-	spin_lock(&iovad->iova_rbtree_lock);
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
 	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
 		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
 			iova = container_of(node, struct iova, node);

@@ -402,8 +397,7 @@ reserve_iova(struct iova_domain *iovad,
 	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
 finish:
 
-	spin_unlock(&iovad->iova_rbtree_lock);
-	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	return iova;
 }

@@ -420,8 +414,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 	unsigned long flags;
 	struct rb_node *node;
 
-	spin_lock_irqsave(&from->iova_alloc_lock, flags);
-	spin_lock(&from->iova_rbtree_lock);
+	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
 	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
 		struct iova *iova = container_of(node, struct iova, node);
 		struct iova *new_iova;

@@ -430,6 +423,5 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 			printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
 				iova->pfn_lo, iova->pfn_lo);
 	}
-	spin_unlock(&from->iova_rbtree_lock);
-	spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
+	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
 }
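The iova.c hunks collapse the nested two-lock scheme (iova_alloc_lock taken around iova_rbtree_lock) into the single rbtree lock, since every path ends up serialized on the rbtree anyway. A compact sketch of the resulting single-lock shape, using a pthread mutex in place of a kernel spinlock:

/* sketch only -- one lock now covers the whole search-and-insert */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rbtree_lock = PTHREAD_MUTEX_INITIALIZER;
static int reserved_ranges;

static void reserve_range(void)
{
	pthread_mutex_lock(&rbtree_lock);   /* formerly alloc_lock + rbtree_lock */
	reserved_ranges++;                  /* walk the tree, insert the node */
	pthread_mutex_unlock(&rbtree_lock);
}

int main(void)
{
	reserve_range();
	printf("%d range(s) reserved under a single lock\n", reserved_ranges);
	return 0;
}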
drivers/usb/host/pci-quirks.c

@@ -475,4 +475,4 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
 	else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
 		quirk_usb_handoff_xhci(pdev);
 }
-DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
+DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
include/linux/intel-iommu.h

@@ -360,4 +360,6 @@ extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
 extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 
+extern int dmar_ir_support(void);
+
 #endif
include/linux/iova.h

@@ -28,7 +28,6 @@ struct iova {
 /* holds all the iova translations for a domain */
 struct iova_domain {
-	spinlock_t	iova_alloc_lock;/* Lock to protect iova allocation */
 	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
 	struct rb_root	rbroot;		/* iova domain rbtree root */
 	struct rb_node	*cached32_node; /* Save last alloced node */