Commit f8cc5756 authored Oct 14, 2005 by Linus Torvalds

Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6

parents bf7c7dec b4d1b825
Showing 9 changed files with 215 additions and 392 deletions:

arch/sparc64/kernel/pci_iommu.c    +159  -204
arch/sparc64/kernel/pci_psycho.c     +5   -39
arch/sparc64/kernel/pci_sabre.c      +4   -35
arch/sparc64/kernel/pci_schizo.c     +3   -54
arch/sparc64/kernel/smp.c            +0    -7
arch/sparc64/mm/ultra.S              +0   -16
arch/sparc64/prom/misc.c             +0   -12
drivers/scsi/qlogicpti.c            +35    -4
include/asm-sparc64/pbm.h            +9   -21
arch/sparc64/kernel/pci_iommu.c

@@ -49,12 +49,6 @@ static void __iommu_flushall(struct pci_iommu *iommu)
 	/* Ensure completion of previous PIO writes. */
 	(void) pci_iommu_read(iommu->write_complete_reg);
-
-	/* Now update everyone's flush point. */
-	for (entry = 0; entry < PBM_NCLUSTERS; entry++) {
-		iommu->alloc_info[entry].flush =
-			iommu->alloc_info[entry].next;
-	}
 }

 #define IOPTE_CONSISTENT(CTX) \
@@ -80,120 +74,117 @@ static void inline iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
 	iopte_val(*iopte) = val;
 }

-void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize)
+/* Based largely upon the ppc64 iommu allocator. */
+static long pci_arena_alloc(struct pci_iommu *iommu, unsigned long npages)
 {
-	int i;
-
-	tsbsize /= sizeof(iopte_t);
-
-	for (i = 0; i < tsbsize; i++)
-		iopte_make_dummy(iommu, &iommu->page_table[i]);
-}
+	struct pci_iommu_arena *arena = &iommu->arena;
+	unsigned long n, i, start, end, limit;
+	int pass;

-static iopte_t *alloc_streaming_cluster(struct pci_iommu *iommu, unsigned long npages)
-{
-	iopte_t *iopte, *limit, *first;
-	unsigned long cnum, ent, flush_point;
-
-	cnum = 0;
-	while ((1UL << cnum) < npages)
-		cnum++;
-	iopte = (iommu->page_table + (cnum << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
-
-	if (cnum == 0)
-		limit = (iommu->page_table + iommu->lowest_consistent_map);
-	else
-		limit = (iopte + (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
-
-	iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
-	flush_point = iommu->alloc_info[cnum].flush;
-
-	first = iopte;
-	for (;;) {
-		if (IOPTE_IS_DUMMY(iommu, iopte)) {
-			if ((iopte + (1 << cnum)) >= limit)
-				ent = 0;
-			else
-				ent = ent + 1;
-			iommu->alloc_info[cnum].next = ent;
-			if (ent == flush_point)
-				__iommu_flushall(iommu);
-			break;
+	limit = arena->limit;
+	start = arena->hint;
+	pass = 0;
+
+again:
+	n = find_next_zero_bit(arena->map, limit, start);
+	end = n + npages;
+	if (unlikely(end >= limit)) {
+		if (likely(pass < 1)) {
+			limit = start;
+			start = 0;
+			__iommu_flushall(iommu);
+			pass++;
+			goto again;
+		} else {
+			/* Scanned the whole thing, give up. */
+			return -1;
 		}
-		iopte += (1 << cnum);
-		ent++;
-		if (iopte >= limit) {
-			iopte = (iommu->page_table + (cnum << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
-			ent = 0;
+	}
+
+	for (i = n; i < end; i++) {
+		if (test_bit(i, arena->map)) {
+			start = i + 1;
+			goto again;
 		}
-		if (ent == flush_point)
-			__iommu_flushall(iommu);
-		if (iopte == first)
-			goto bad;
 	}

-	/* I've got your streaming cluster right here buddy boy... */
-	return iopte;
+	for (i = n; i < end; i++)
+		__set_bit(i, arena->map);

-bad:
-	printk(KERN_EMERG "pci_iommu: alloc_streaming_cluster of npages(%ld) failed!\n",
-	       npages);
-	return NULL;
+	arena->hint = end;
+
+	return n;
 }

-static void free_streaming_cluster(struct pci_iommu *iommu, dma_addr_t base,
-				   unsigned long npages, unsigned long ctx)
+static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
 {
-	unsigned long cnum, ent;
+	unsigned long i;

-	cnum = 0;
-	while ((1UL << cnum) < npages)
-		cnum++;
+	for (i = base; i < (base + npages); i++)
+		__clear_bit(i, arena->map);
+}

-	ent = (base << (32 - IO_PAGE_SHIFT + PBM_LOGCLUSTERS - iommu->page_table_sz_bits))
-		>> (32 + PBM_LOGCLUSTERS + cnum - iommu->page_table_sz_bits);
+void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask)
+{
+	unsigned long i, tsbbase, order, sz, num_tsb_entries;
+
+	num_tsb_entries = tsbsize / sizeof(iopte_t);
+
+	/* Setup initial software IOMMU state. */
+	spin_lock_init(&iommu->lock);
+	iommu->ctx_lowest_free = 1;
+	iommu->page_table_map_base = dma_offset;
+	iommu->dma_addr_mask = dma_addr_mask;
+
+	/* Allocate and initialize the free area map. */
+	sz = num_tsb_entries / 8;
+	sz = (sz + 7UL) & ~7UL;
+	iommu->arena.map = kmalloc(sz, GFP_KERNEL);
+	if (!iommu->arena.map) {
+		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
+		prom_halt();
+	}
+	memset(iommu->arena.map, 0, sz);
+	iommu->arena.limit = num_tsb_entries;

-	/* If the global flush might not have caught this entry,
-	 * adjust the flush point such that we will flush before
-	 * ever trying to reuse it.
+	/* Allocate and initialize the dummy page which we
+	 * set inactive IO PTEs to point to.
 	 */
-#define between(X,Y,Z)	(((Z) - (Y)) >= ((X) - (Y)))
-	if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
-		iommu->alloc_info[cnum].flush = ent;
-#undef between
-}
+	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
+	if (!iommu->dummy_page) {
+		prom_printf("PCI_IOMMU: Error, gfp(dummy_page) failed.\n");
+		prom_halt();
+	}
+	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
+	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
+
+	/* Now allocate and setup the IOMMU page table itself. */
+	order = get_order(tsbsize);
+	tsbbase = __get_free_pages(GFP_KERNEL, order);
+	if (!tsbbase) {
+		prom_printf("PCI_IOMMU: Error, gfp(tsb) failed.\n");
+		prom_halt();
+	}
+	iommu->page_table = (iopte_t *)tsbbase;
+
+	for (i = 0; i < num_tsb_entries; i++)
+		iopte_make_dummy(iommu, &iommu->page_table[i]);
+}

-/* We allocate consistent mappings from the end of cluster zero. */
-static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long npages)
-{
-	iopte_t *iopte;
+static inline iopte_t *alloc_npages(struct pci_iommu *iommu, unsigned long npages)
+{
+	long entry;

-	iopte = iommu->page_table + (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS));
-	while (iopte > iommu->page_table) {
-		iopte--;
-		if (IOPTE_IS_DUMMY(iommu, iopte)) {
-			unsigned long tmp = npages;
+	entry = pci_arena_alloc(iommu, npages);
+	if (unlikely(entry < 0))
+		return NULL;

-			while (--tmp) {
-				iopte--;
-				if (!IOPTE_IS_DUMMY(iommu, iopte))
-					break;
-			}
-			if (tmp == 0) {
-				u32 entry = (iopte - iommu->page_table);
+	return iommu->page_table + entry;
+}

-				if (entry < iommu->lowest_consistent_map)
-					iommu->lowest_consistent_map = entry;
-				return iopte;
-			}
-		}
-	}
-	return NULL;
+static inline void free_npages(struct pci_iommu *iommu, dma_addr_t base, unsigned long npages)
+{
+	pci_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
 }

 static int iommu_alloc_ctx(struct pci_iommu *iommu)
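The hunk above replaces the power-of-2 cluster allocator with a plain bitmap arena: scan for a run of free bits starting at a hint, wrap around once (flushing the IOMMU before reusing freed entries) and only then give up. The following is a minimal, self-contained user-space sketch of the same technique, with a naive stand-in for find_next_zero_bit; all names here are illustrative, not the kernel's:

#include <stdio.h>
#include <string.h>

#define LIMIT 64                 /* total entries in the arena */

static unsigned char map[LIMIT]; /* one byte per bit, for clarity */
static unsigned long hint;       /* where the next search starts */

/* First free index at or after 'start'; returns 'limit' if none. */
static unsigned long find_next_zero(unsigned long limit, unsigned long start)
{
	unsigned long i;
	for (i = start; i < limit; i++)
		if (!map[i])
			return i;
	return limit;
}

/* Allocate 'npages' contiguous entries, retrying once from 0. */
static long arena_alloc(unsigned long npages)
{
	unsigned long n, i, start = hint, end, limit = LIMIT;
	int pass = 0;

again:
	n = find_next_zero(limit, start);
	end = n + npages;
	if (end >= limit) {
		if (pass < 1) {
			limit = start;	/* only rescan what was skipped */
			start = 0;
			pass++;		/* the kernel flushes the IOMMU here */
			goto again;
		}
		return -1;		/* scanned everything, give up */
	}
	for (i = n; i < end; i++)
		if (map[i]) {		/* run interrupted, resume after it */
			start = i + 1;
			goto again;
		}
	for (i = n; i < end; i++)
		map[i] = 1;
	hint = end;
	return n;
}

static void arena_free(unsigned long base, unsigned long npages)
{
	memset(&map[base], 0, npages);
}

int main(void)
{
	long a = arena_alloc(4), b = arena_alloc(8);
	printf("a=%ld b=%ld\n", a, b);		/* a=0 b=4 */
	arena_free(a, 4);
	printf("c=%ld\n", arena_alloc(2));	/* c=12: the hint advances past
						   freed slots, so reuse (and the
						   flush) is deferred until wrap */
	return 0;
}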
@@ -233,7 +224,7 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
 	struct pcidev_cookie *pcp;
 	struct pci_iommu *iommu;
 	iopte_t *iopte;
-	unsigned long flags, order, first_page, ctx;
+	unsigned long flags, order, first_page;
 	void *ret;
 	int npages;
@@ -251,9 +242,10 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
 	iommu = pcp->pbm->iommu;

 	spin_lock_irqsave(&iommu->lock, flags);
-	iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
-	if (iopte == NULL) {
-		spin_unlock_irqrestore(&iommu->lock, flags);
+	iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
+	spin_unlock_irqrestore(&iommu->lock, flags);
+
+	if (unlikely(iopte == NULL)) {
 		free_pages(first_page, order);
 		return NULL;
 	}
@@ -262,31 +254,15 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
 		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
 	ret = (void *) first_page;
 	npages = size >> IO_PAGE_SHIFT;
-	ctx = 0;
-	if (iommu->iommu_ctxflush)
-		ctx = iommu_alloc_ctx(iommu);
 	first_page = __pa(first_page);
 	while (npages--) {
-		iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
+		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
 				     IOPTE_WRITE |
 				     (first_page & IOPTE_PAGE));
 		iopte++;
 		first_page += IO_PAGE_SIZE;
 	}
-
-	{
-		int i;
-		u32 daddr = *dma_addrp;
-
-		npages = size >> IO_PAGE_SHIFT;
-		for (i = 0; i < npages; i++) {
-			pci_iommu_write(iommu->iommu_flush, daddr);
-			daddr += IO_PAGE_SIZE;
-		}
-	}
-
-	spin_unlock_irqrestore(&iommu->lock, flags);

 	return ret;
 }
@@ -296,7 +272,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_
 	struct pcidev_cookie *pcp;
 	struct pci_iommu *iommu;
 	iopte_t *iopte;
-	unsigned long flags, order, npages, i, ctx;
+	unsigned long flags, order, npages;

 	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
 	pcp = pdev->sysdata;
@@ -306,46 +282,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_
 	spin_lock_irqsave(&iommu->lock, flags);

-	if ((iopte - iommu->page_table) == iommu->lowest_consistent_map) {
-		iopte_t *walk = iopte + npages;
-		iopte_t *limit;
-
-		limit = (iommu->page_table +
-			 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
-		while (walk < limit) {
-			if (!IOPTE_IS_DUMMY(iommu, walk))
-				break;
-			walk++;
-		}
-		iommu->lowest_consistent_map =
-			(walk - iommu->page_table);
-	}
-
-	/* Data for consistent mappings cannot enter the streaming
-	 * buffers, so we only need to update the TSB.  We flush
-	 * the IOMMU here as well to prevent conflicts with the
-	 * streaming mapping deferred tlb flush scheme.
-	 */
-
-	ctx = 0;
-	if (iommu->iommu_ctxflush)
-		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
-
-	for (i = 0; i < npages; i++, iopte++)
-		iopte_make_dummy(iommu, iopte);
-
-	if (iommu->iommu_ctxflush) {
-		pci_iommu_write(iommu->iommu_ctxflush, ctx);
-	} else {
-		for (i = 0; i < npages; i++) {
-			u32 daddr = dvma + (i << IO_PAGE_SHIFT);
-
-			pci_iommu_write(iommu->iommu_flush, daddr);
-		}
-	}
-
-	iommu_free_ctx(iommu, ctx);
+	free_npages(iommu, dvma, npages);

 	spin_unlock_irqrestore(&iommu->lock, flags);
@@ -372,25 +309,27 @@ dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direct
 	iommu = pcp->pbm->iommu;
 	strbuf = &pcp->pbm->stc;

-	if (direction == PCI_DMA_NONE)
-		BUG();
+	if (unlikely(direction == PCI_DMA_NONE))
+		goto bad_no_ctx;

 	oaddr = (unsigned long)ptr;
 	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;

 	spin_lock_irqsave(&iommu->lock, flags);
+	base = alloc_npages(iommu, npages);
+	ctx = 0;
+	if (iommu->iommu_ctxflush)
+		ctx = iommu_alloc_ctx(iommu);
+	spin_unlock_irqrestore(&iommu->lock, flags);

-	base = alloc_streaming_cluster(iommu, npages);
-	if (base == NULL)
+	if (unlikely(!base))
 		goto bad;
+
 	bus_addr = (iommu->page_table_map_base +
 		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
 	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
 	base_paddr = __pa(oaddr & IO_PAGE_MASK);
-	ctx = 0;
-	if (iommu->iommu_ctxflush)
-		ctx = iommu_alloc_ctx(iommu);
 	if (strbuf->strbuf_enabled)
 		iopte_protection = IOPTE_STREAMING(ctx);
 	else
@@ -401,12 +340,13 @@ dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direct
 	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
 		iopte_val(*base) = iopte_protection | base_paddr;

-	spin_unlock_irqrestore(&iommu->lock, flags);
-
 	return ret;

 bad:
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	iommu_free_ctx(iommu, ctx);
+bad_no_ctx:
+	if (printk_ratelimit())
+		WARN_ON(1);
 	return PCI_DMA_ERROR_CODE;
 }
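Callers see the same entry point, but the failure mode changes: a bad direction now rate-limits a WARN_ON and returns PCI_DMA_ERROR_CODE instead of triggering BUG(). A hedged sketch of driver-side usage against the 2.6.14-era PCI DMA API (pdev, buf and len are placeholders; pci_dma_mapping_error() is assumed to be the generic error check of that era):

	dma_addr_t mapping;

	mapping = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(mapping))
		return -ENOMEM;	/* IOMMU arena exhausted, or bad direction */

	/* ... hand 'mapping' to the device and wait for the DMA ... */

	pci_unmap_single(pdev, mapping, len, PCI_DMA_TODEVICE);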
@@ -481,10 +421,13 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 	struct pci_iommu *iommu;
 	struct pci_strbuf *strbuf;
 	iopte_t *base;
-	unsigned long flags, npages, ctx;
+	unsigned long flags, npages, ctx, i;

-	if (direction == PCI_DMA_NONE)
-		BUG();
+	if (unlikely(direction == PCI_DMA_NONE)) {
+		if (printk_ratelimit())
+			WARN_ON(1);
+		return;
+	}

 	pcp = pdev->sysdata;
 	iommu = pcp->pbm->iommu;
@@ -510,13 +453,14 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 	/* Step 1: Kick data out of streaming buffers if necessary. */
 	if (strbuf->strbuf_enabled)
-		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx,
+				 npages, direction);

-	/* Step 2: Clear out first TSB entry. */
-	iopte_make_dummy(iommu, base);
+	/* Step 2: Clear out TSB entries. */
+	for (i = 0; i < npages; i++)
+		iopte_make_dummy(iommu, base + i);

-	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
-			       npages, ctx);
+	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);

 	iommu_free_ctx(iommu, ctx);
@@ -621,6 +565,8 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
 			pci_map_single(pdev,
 				       (page_address(sglist->page) + sglist->offset),
 				       sglist->length, direction);
+		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
+			return 0;
 		sglist->dma_length = sglist->length;
 		return 1;
 	}
@@ -629,21 +575,29 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
 	iommu = pcp->pbm->iommu;
 	strbuf = &pcp->pbm->stc;

-	if (direction == PCI_DMA_NONE)
-		BUG();
+	if (unlikely(direction == PCI_DMA_NONE))
+		goto bad_no_ctx;

 	/* Step 1: Prepare scatter list. */

 	npages = prepare_sg(sglist, nelems);

-	/* Step 2: Allocate a cluster. */
+	/* Step 2: Allocate a cluster and context, if necessary. */

 	spin_lock_irqsave(&iommu->lock, flags);

-	base = alloc_streaming_cluster(iommu, npages);
+	base = alloc_npages(iommu, npages);
+	ctx = 0;
+	if (iommu->iommu_ctxflush)
+		ctx = iommu_alloc_ctx(iommu);
+
+	spin_unlock_irqrestore(&iommu->lock, flags);
+
 	if (base == NULL)
 		goto bad;
-	dma_base = iommu->page_table_map_base + ((base - iommu->page_table) << IO_PAGE_SHIFT);
+
+	dma_base = iommu->page_table_map_base +
+		((base - iommu->page_table) << IO_PAGE_SHIFT);

 	/* Step 3: Normalize DMA addresses. */
 	used = nelems;
@@ -656,30 +610,28 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
 	}
 	used = nelems - used;

-	/* Step 4: Choose a context if necessary. */
-	ctx = 0;
-	if (iommu->iommu_ctxflush)
-		ctx = iommu_alloc_ctx(iommu);
-
-	/* Step 5: Create the mappings. */
+	/* Step 4: Create the mappings. */
 	if (strbuf->strbuf_enabled)
 		iopte_protection = IOPTE_STREAMING(ctx);
 	else
 		iopte_protection = IOPTE_CONSISTENT(ctx);
 	if (direction != PCI_DMA_TODEVICE)
 		iopte_protection |= IOPTE_WRITE;
-	fill_sg (base, sglist, used, nelems, iopte_protection);
+
+	fill_sg(base, sglist, used, nelems, iopte_protection);
+
 #ifdef VERIFY_SG
 	verify_sglist(sglist, nelems, base, npages);
 #endif

-	spin_unlock_irqrestore(&iommu->lock, flags);
-
 	return used;

 bad:
-	spin_unlock_irqrestore(&iommu->lock, flags);
-	return PCI_DMA_ERROR_CODE;
+	iommu_free_ctx(iommu, ctx);
+bad_no_ctx:
+	if (printk_ratelimit())
+		WARN_ON(1);
+	return 0;
 }

 /* Unmap a set of streaming mode DMA translations. */
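Note the error convention on the scatterlist path: pci_map_sg() now returns 0 on failure (after the rate-limited WARN_ON) rather than PCI_DMA_ERROR_CODE, which matches what callers of this API already test. A sketch of the caller side, hypothetical driver code rather than anything in this commit:

	int count = pci_map_sg(pdev, sglist, nelems, PCI_DMA_FROMDEVICE);

	if (count == 0)
		return -ENOMEM;	/* nothing was mapped */

	/* ... program 'count' coalesced DMA segments into the device ... */

	pci_unmap_sg(pdev, sglist, nelems, PCI_DMA_FROMDEVICE);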
@@ -692,8 +644,10 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 	unsigned long flags, ctx, i, npages;
 	u32 bus_addr;

-	if (direction == PCI_DMA_NONE)
-		BUG();
+	if (unlikely(direction == PCI_DMA_NONE)) {
+		if (printk_ratelimit())
+			WARN_ON(1);
+	}

 	pcp = pdev->sysdata;
 	iommu = pcp->pbm->iommu;
@@ -705,7 +659,8 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 		if (sglist[i].dma_length == 0)
 			break;
 	i--;
-	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;
+	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
+		  bus_addr) >> IO_PAGE_SHIFT;

 	base = iommu->page_table + ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
@@ -726,11 +681,11 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 	if (strbuf->strbuf_enabled)
 		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

-	/* Step 2: Clear out first TSB entry. */
-	iopte_make_dummy(iommu, base);
+	/* Step 2: Clear out the TSB entries. */
+	for (i = 0; i < npages; i++)
+		iopte_make_dummy(iommu, base + i);

-	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
-			       npages, ctx);
+	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);

 	iommu_free_ctx(iommu, ctx);
arch/sparc64/kernel/pci_psycho.c

@@ -1207,13 +1207,9 @@ static void psycho_scan_bus(struct pci_controller_info *p)
 static void psycho_iommu_init(struct pci_controller_info *p)
 {
 	struct pci_iommu *iommu = p->pbm_A.iommu;
-	unsigned long tsbbase, i;
+	unsigned long i;
 	u64 control;

-	/* Setup initial software IOMMU state. */
-	spin_lock_init(&iommu->lock);
-	iommu->ctx_lowest_free = 1;
-
 	/* Register addresses. */
 	iommu->iommu_control  = p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL;
 	iommu->iommu_tsbbase  = p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE;

@@ -1240,40 +1236,10 @@ static void psycho_iommu_init(struct pci_controller_info *p)
 	/* Leave diag mode enabled for full-flushing done
 	 * in pci_iommu.c
 	 */
+	pci_iommu_table_init(iommu, IO_TSB_SIZE, 0xc0000000, 0xffffffff);

-	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
-	if (!iommu->dummy_page) {
-		prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
-		prom_halt();
-	}
-	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
-	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
-
-	/* Using assumed page size 8K with 128K entries we need 1MB iommu page
-	 * table (128K ioptes * 8 bytes per iopte).  This is
-	 * page order 7 on UltraSparc.
-	 */
-	tsbbase = __get_free_pages(GFP_KERNEL, get_order(IO_TSB_SIZE));
-	if (!tsbbase) {
-		prom_printf("PSYCHO_IOMMU: Error, gfp(tsb) failed.\n");
-		prom_halt();
-	}
-	iommu->page_table = (iopte_t *)tsbbase;
-	iommu->page_table_sz_bits = 17;
-	iommu->page_table_map_base = 0xc0000000;
-	iommu->dma_addr_mask = 0xffffffff;
-	pci_iommu_table_init(iommu, IO_TSB_SIZE);
-
-	/* We start with no consistent mappings. */
-	iommu->lowest_consistent_map =
-		1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
-
-	for (i = 0; i < PBM_NCLUSTERS; i++) {
-		iommu->alloc_info[i].flush = 0;
-		iommu->alloc_info[i].next = 0;
-	}
-
-	psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE, __pa(tsbbase));
+	psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE,
+		     __pa(iommu->page_table));

 	control = psycho_read(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL);
 	control &= ~(PSYCHO_IOMMU_CTRL_TSBSZ | PSYCHO_IOMMU_CTRL_TBWSZ);

@@ -1281,7 +1247,7 @@ static void psycho_iommu_init(struct pci_controller_info *p)
 	psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL, control);

 	/* If necessary, hook us up for starfire IRQ translations. */
-	if(this_is_starfire)
+	if (this_is_starfire)
 		p->starfire_cookie = starfire_hookup(p->pbm_A.portid);
 	else
 		p->starfire_cookie = NULL;
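The Sabre and Schizo hunks below repeat the same consolidation: each controller driver now hands its TSB size, DVMA base, and DMA mask to the shared helper instead of open-coding dummy-page and page-table setup. The new entry point, as declared in the include/asm-sparc64/pbm.h hunk at the end of this diff:

extern void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize,
				 u32 dma_offset, u32 dma_addr_mask);

/* e.g. Psycho above: 0xc0000000 DVMA base, full 32-bit DMA mask. */
pci_iommu_table_init(iommu, IO_TSB_SIZE, 0xc0000000, 0xffffffff);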
arch/sparc64/kernel/pci_sabre.c

@@ -1267,13 +1267,9 @@ static void sabre_iommu_init(struct pci_controller_info *p,
 					    u32 dma_mask)
 {
 	struct pci_iommu *iommu = p->pbm_A.iommu;
-	unsigned long tsbbase, i, order;
+	unsigned long i;
 	u64 control;

-	/* Setup initial software IOMMU state. */
-	spin_lock_init(&iommu->lock);
-	iommu->ctx_lowest_free = 1;
-
 	/* Register addresses. */
 	iommu->iommu_control  = p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL;
 	iommu->iommu_tsbbase  = p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE;

@@ -1295,26 +1291,10 @@ static void sabre_iommu_init(struct pci_controller_info *p,
 	/* Leave diag mode enabled for full-flushing done
 	 * in pci_iommu.c
 	 */
+	pci_iommu_table_init(iommu, tsbsize * 1024 * 8, dvma_offset, dma_mask);

-	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
-	if (!iommu->dummy_page) {
-		prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
-		prom_halt();
-	}
-	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
-	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
-
-	tsbbase = __get_free_pages(GFP_KERNEL, order = get_order(tsbsize * 1024 * 8));
-	if (!tsbbase) {
-		prom_printf("SABRE_IOMMU: Error, gfp(tsb) failed.\n");
-		prom_halt();
-	}
-	iommu->page_table = (iopte_t *)tsbbase;
-	iommu->page_table_map_base = dvma_offset;
-	iommu->dma_addr_mask = dma_mask;
-	pci_iommu_table_init(iommu, PAGE_SIZE << order);
-
-	sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE, __pa(tsbbase));
+	sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE,
+		    __pa(iommu->page_table));

 	control = sabre_read(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL);
 	control &= ~(SABRE_IOMMUCTRL_TSBSZ | SABRE_IOMMUCTRL_TBWSZ);

@@ -1322,11 +1302,9 @@ static void sabre_iommu_init(struct pci_controller_info *p,
 	switch(tsbsize) {
 	case 64:
 		control |= SABRE_IOMMU_TSBSZ_64K;
-		iommu->page_table_sz_bits = 16;
 		break;
 	case 128:
 		control |= SABRE_IOMMU_TSBSZ_128K;
-		iommu->page_table_sz_bits = 17;
 		break;
 	default:
 		prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize);

@@ -1334,15 +1312,6 @@ static void sabre_iommu_init(struct pci_controller_info *p,
 		break;
 	}
 	sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control);
-
-	/* We start with no consistent mappings. */
-	iommu->lowest_consistent_map =
-		1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
-
-	for (i = 0; i < PBM_NCLUSTERS; i++) {
-		iommu->alloc_info[i].flush = 0;
-		iommu->alloc_info[i].next = 0;
-	}
 }

 static void pbm_register_toplevel_resources(struct pci_controller_info *p,
arch/sparc64/kernel/pci_schizo.c

@@ -1765,7 +1765,7 @@ static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm)
 static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
 {
 	struct pci_iommu *iommu = pbm->iommu;
-	unsigned long tsbbase, i, tagbase, database, order;
+	unsigned long i, tagbase, database;
 	u32 vdma[2], dma_mask;
 	u64 control;
 	int err, tsbsize;

@@ -1800,10 +1800,6 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
 		prom_halt();
 	};

-	/* Setup initial software IOMMU state. */
-	spin_lock_init(&iommu->lock);
-	iommu->ctx_lowest_free = 1;
-
 	/* Register addresses, SCHIZO has iommu ctx flushing. */
 	iommu->iommu_control  = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL;
 	iommu->iommu_tsbbase  = pbm->pbm_regs + SCHIZO_IOMMU_TSBBASE;

@@ -1832,56 +1828,9 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
 	/* Leave diag mode enabled for full-flushing done
 	 * in pci_iommu.c
 	 */
+	pci_iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask);

-	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
-	if (!iommu->dummy_page) {
-		prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
-		prom_halt();
-	}
-	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
-	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
-
-	/* Using assumed page size 8K with 128K entries we need 1MB iommu page
-	 * table (128K ioptes * 8 bytes per iopte).  This is
-	 * page order 7 on UltraSparc.
-	 */
-	order = get_order(tsbsize * 8 * 1024);
-	tsbbase = __get_free_pages(GFP_KERNEL, order);
-	if (!tsbbase) {
-		prom_printf("%s: Error, gfp(tsb) failed.\n", pbm->name);
-		prom_halt();
-	}
-
-	iommu->page_table = (iopte_t *)tsbbase;
-	iommu->page_table_map_base = vdma[0];
-	iommu->dma_addr_mask = dma_mask;
-	pci_iommu_table_init(iommu, PAGE_SIZE << order);
-
-	switch (tsbsize) {
-	case 64:
-		iommu->page_table_sz_bits = 16;
-		break;
-
-	case 128:
-		iommu->page_table_sz_bits = 17;
-		break;
-
-	default:
-		prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize);
-		prom_halt();
-		break;
-	};
-
-	/* We start with no consistent mappings. */
-	iommu->lowest_consistent_map =
-		1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
-
-	for (i = 0; i < PBM_NCLUSTERS; i++) {
-		iommu->alloc_info[i].flush = 0;
-		iommu->alloc_info[i].next = 0;
-	}
-
-	schizo_write(iommu->iommu_tsbbase, __pa(tsbbase));
+	schizo_write(iommu->iommu_tsbbase, __pa(iommu->page_table));

 	control = schizo_read(iommu->iommu_control);
 	control &= ~(SCHIZO_IOMMU_CTRL_TSBSZ | SCHIZO_IOMMU_CTRL_TBWSZ);
arch/sparc64/kernel/smp.c

@@ -1001,13 +1001,6 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
 	preempt_enable();
 }

-extern unsigned long xcall_promstop;
-
-void smp_promstop_others(void)
-{
-	smp_cross_call(&xcall_promstop, 0, 0, 0);
-}
-
 #define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
 #define prof_counter(__cpu) cpu_data(__cpu).counter
arch/sparc64/mm/ultra.S

@@ -453,22 +453,6 @@ xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
 	nop
 	nop

-	.globl xcall_promstop
-xcall_promstop:
-	rdpr	%pstate, %g2
-	wrpr	%g2, PSTATE_IG | PSTATE_AG, %pstate
-	rdpr	%pil, %g2
-	wrpr	%g0, 15, %pil
-	sethi	%hi(109f), %g7
-	b,pt	%xcc, etrap_irq
-109:	 or	%g7, %lo(109b), %g7
-	flushw
-	call	prom_stopself
-	 nop
-	/* We should not return, just spin if we do... */
-1:	b,a,pt	%xcc, 1b
-	 nop
-
 	.data

 errata32_hwbug:
arch/sparc64/prom/misc.c

@@ -68,19 +68,11 @@ void prom_cmdline(void)
 	local_irq_restore(flags);
 }

-#ifdef CONFIG_SMP
-extern void smp_promstop_others(void);
-#endif
-
 /* Drop into the prom, but completely terminate the program.
  * No chance of continuing.
  */
 void prom_halt(void)
 {
-#ifdef CONFIG_SMP
-	smp_promstop_others();
-	udelay(8000);
-#endif
 again:
 	p1275_cmd("exit", P1275_INOUT(0, 0));
 	goto again; /* PROM is out to get me -DaveM */

@@ -88,10 +80,6 @@ again:
 void prom_halt_power_off(void)
 {
-#ifdef CONFIG_SMP
-	smp_promstop_others();
-	udelay(8000);
-#endif
 	p1275_cmd("SUNW,power-off", P1275_INOUT(0, 0));

 	/* if nothing else helps, we just halt */
drivers/scsi/qlogicpti.c

@@ -1119,6 +1119,36 @@ static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int
 	host->sg_tablesize = QLOGICPTI_MAX_SG(num_free);
 }

+static unsigned int scsi_rbuf_get(struct scsi_cmnd *cmd, unsigned char **buf_out)
+{
+	unsigned char *buf;
+	unsigned int buflen;
+
+	if (cmd->use_sg) {
+		struct scatterlist *sg;
+
+		sg = (struct scatterlist *) cmd->request_buffer;
+		buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+		buflen = sg->length;
+	} else {
+		buf = cmd->request_buffer;
+		buflen = cmd->request_bufflen;
+	}
+
+	*buf_out = buf;
+	return buflen;
+}
+
+static void scsi_rbuf_put(struct scsi_cmnd *cmd, unsigned char *buf)
+{
+	if (cmd->use_sg) {
+		struct scatterlist *sg;
+
+		sg = (struct scatterlist *) cmd->request_buffer;
+		kunmap_atomic(buf - sg->offset, KM_IRQ0);
+	}
+}
+
 /*
  * Until we scan the entire bus with inquiries, go throught this fella...
  */

@@ -1145,11 +1175,9 @@ static void ourdone(struct scsi_cmnd *Cmnd)
 	int ok = host_byte(Cmnd->result) == DID_OK;
 	if (Cmnd->cmnd[0] == 0x12 && ok) {
 		unsigned char *iqd;
+		unsigned int iqd_len;

-		if (Cmnd->use_sg != 0)
-			BUG();
-
-		iqd = ((unsigned char *)Cmnd->buffer);
+		iqd_len = scsi_rbuf_get(Cmnd, &iqd);

 		/* tags handled in midlayer */
 		/* enable sync mode? */

@@ -1163,6 +1191,9 @@ static void ourdone(struct scsi_cmnd *Cmnd)
 		if (iqd[7] & 0x20) {
 			qpti->dev_param[tgt].device_flags |= 0x20;
 		}
+
+		scsi_rbuf_put(Cmnd, iqd);
+
 		qpti->sbits |= (1 << tgt);
 	} else if (!ok) {
 		qpti->sbits |= (1 << tgt);
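The new scsi_rbuf_get()/scsi_rbuf_put() helpers package the standard 2.6-era pattern for touching a scatterlist buffer that may live in highmem: map the page into a fixed atomic kmap slot, do the byte accesses, then unmap. Condensed to its core (same API as the code above; KM_IRQ0 is the slot used from completion context):

	struct scatterlist *sg = (struct scatterlist *) cmd->request_buffer;
	unsigned char *buf;

	buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
	/* ... inspect buf[0 .. sg->length - 1], e.g. INQUIRY data ... */
	kunmap_atomic(buf - sg->offset, KM_IRQ0);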
include/asm-sparc64/pbm.h

@@ -27,23 +27,27 @@
  * PCI bus.
  */

-#define PBM_LOGCLUSTERS 3
-#define PBM_NCLUSTERS (1 << PBM_LOGCLUSTERS)
-
 struct pci_controller_info;

 /* This contains the software state necessary to drive a PCI
  * controller's IOMMU.
  */
+struct pci_iommu_arena {
+	unsigned long	*map;
+	unsigned int	hint;
+	unsigned int	limit;
+};
+
 struct pci_iommu {
 	/* This protects the controller's IOMMU and all
 	 * streaming buffers underneath.
 	 */
 	spinlock_t	lock;

+	struct pci_iommu_arena	arena;
+
 	/* IOMMU page table, a linear array of ioptes. */
-	iopte_t *page_table;		/* The page table itself. */
-	int page_table_sz_bits;		/* log2 of ow many pages does it map? */
+	iopte_t		*page_table;

 	/* Base PCI memory space address where IOMMU mappings
 	 * begin.

@@ -62,12 +66,6 @@ struct pci_iommu {
 	 */
 	unsigned long	write_complete_reg;

-	/* The lowest used consistent mapping entry.  Since
-	 * we allocate consistent maps out of cluster 0 this
-	 * is relative to the beginning of closter 0.
-	 */
-	u32		lowest_consistent_map;
-
 	/* In order to deal with some buggy third-party PCI bridges that
 	 * do wrong prefetching, we never mark valid mappings as invalid.
 	 * Instead we point them at this dummy page.

@@ -75,16 +73,6 @@ struct pci_iommu {
 	unsigned long	dummy_page;
 	unsigned long	dummy_page_pa;

-	/* If PBM_NCLUSTERS is ever decreased to 4 or lower,
-	 * or if largest supported page_table_sz * 8K goes above
-	 * 2GB, you must increase the size of the type of
-	 * these counters.  You have been duly warned. -DaveM
-	 */
-	struct {
-		u16	next;
-		u16	flush;
-	} alloc_info[PBM_NCLUSTERS];
-
 	/* CTX allocation. */
 	unsigned long ctx_lowest_free;
 	unsigned long ctx_bitmap[IOMMU_NUM_CTXS / (sizeof(unsigned long) * 8)];

@@ -102,7 +90,7 @@ struct pci_iommu {
 	u32 dma_addr_mask;
 };

-extern void pci_iommu_table_init(struct pci_iommu *, int);
+extern void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask);

 /* This describes a PCI bus module's streaming buffer. */
 struct pci_strbuf {
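For scale, the arena fields above are sized from the page-table geometry in pci_iommu_table_init(), one bit per IOPTE. A worked example for the 1MB Psycho/Schizo TSB (assuming the 8-byte ioptes and 8K IO pages stated in the removed comments):

	num_tsb_entries = tsbsize / sizeof(iopte_t);	/* 1MB / 8 = 131072 entries    */
	sz = num_tsb_entries / 8;			/* 131072 bits -> 16KB bitmap  */
	sz = (sz + 7UL) & ~7UL;				/* already 8-byte aligned here */
	/* arena.limit = 131072, covering 1GB of DVMA space at 8K per page. */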