Commit 4c27bd33, authored Feb 11, 2010 by Ben Skeggs
drm/nv50: more efficient clearing of gpu page table entries

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>

parent 66b6ebac
Showing 1 changed file, with 44 additions and 24 deletions:

drivers/gpu/drm/nouveau/nouveau_mem.c (+44, -24)
@@ -291,31 +291,17 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
 	pages = size >> 16;
 
 	dev_priv->engine.instmem.prepare_access(dev, true);
-	if (flags & 0x80000000) {
-		while (pages--) {
-			struct nouveau_gpuobj *pt =
-				dev_priv->vm_vram_pt[virt >> 29];
-			unsigned pte = ((virt & 0x1fffffffULL) >> 16) << 1;
+	while (pages--) {
+		struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt[virt >> 29];
+		unsigned pte = ((virt & 0x1fffffffULL) >> 16) << 1;
+		unsigned offset_h = upper_32_bits(phys) & 0xff;
+		unsigned offset_l = lower_32_bits(phys);
 
-			nv_wo32(dev, pt, pte++, 0x00000000);
-			nv_wo32(dev, pt, pte++, 0x00000000);
+		nv_wo32(dev, pt, pte++, offset_l | 1);
+		nv_wo32(dev, pt, pte++, offset_h | flags);
 
-			virt += (1 << 16);
-		}
-	} else {
-		while (pages--) {
-			struct nouveau_gpuobj *pt =
-				dev_priv->vm_vram_pt[virt >> 29];
-			unsigned pte = ((virt & 0x1fffffffULL) >> 16) << 1;
-			unsigned offset_h = upper_32_bits(phys) & 0xff;
-			unsigned offset_l = lower_32_bits(phys);
-
-			nv_wo32(dev, pt, pte++, offset_l | 1);
-			nv_wo32(dev, pt, pte++, offset_h | flags);
-
-			phys += (1 << 16);
-			virt += (1 << 16);
-		}
+		phys += (1 << 16);
+		virt += (1 << 16);
 	}
 	dev_priv->engine.instmem.finish_access(dev);
 
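With the flag-driven unbind path removed above, nv50_mem_vm_bind_linear now always writes one mapping per 64 KiB page: each page gets a pair of 32-bit page-table words (hence the << 1 when computing pte), the low word holding the lower 32 bits of the physical address with bit 0 set, and the high word holding the top address bits masked to 8 bits plus the caller's flags. Unmapping no longer goes through this function at all; it is handled separately in the hunk below. A minimal user-space sketch of that packing, assuming stand-in upper32()/lower32() helpers and made-up phys/flags values in place of the kernel's upper_32_bits()/lower_32_bits() and a real mapping request:

/* Hedged sketch (not kernel code): packing the two PTE words the loop above
 * writes for one 64 KiB page.  upper32()/lower32() stand in for the kernel's
 * upper_32_bits()/lower_32_bits(); phys and flags are example values only. */
#include <stdint.h>
#include <stdio.h>

static uint32_t upper32(uint64_t v) { return (uint32_t)(v >> 32); }
static uint32_t lower32(uint64_t v) { return (uint32_t)v; }

int main(void)
{
        uint64_t phys  = 0x123450000ULL;   /* example physical address */
        uint32_t flags = 0x00007000;       /* example flags word */

        uint32_t pte_lo = lower32(phys) | 1;               /* low word: address low bits, bit 0 set */
        uint32_t pte_hi = (upper32(phys) & 0xff) | flags;  /* high word: top 8 address bits + flags */

        printf("pte words: 0x%08x 0x%08x\n", (unsigned)pte_lo, (unsigned)pte_hi);
        return 0;
}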
@@ -339,7 +325,41 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
 void
 nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
 {
-	nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0);
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *pgt;
+	unsigned pages, pte, end;
+
+	virt -= dev_priv->vm_vram_base;
+	pages = (size >> 16) << 1;
+
+	dev_priv->engine.instmem.prepare_access(dev, true);
+	while (pages) {
+		pgt = dev_priv->vm_vram_pt[virt >> 29];
+		pte = (virt & 0x1ffe0000ULL) >> 15;
+
+		end = pte + pages;
+		if (end > 16384)
+			end = 16384;
+		pages -= (end - pte);
+		virt += (end - pte) << 15;
+
+		while (pte < end)
+			nv_wo32(dev, pgt, pte++, 0);
+	}
+	dev_priv->engine.instmem.finish_access(dev);
+
+	nv_wr32(dev, 0x100c80, 0x00050001);
+	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+		return;
+	}
+
+	nv_wr32(dev, 0x100c80, 0x00000001);
+	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+	}
 }
 
 /*
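The rewritten nv50_mem_vm_unbind clears page-table words in runs rather than one page at a time. Each 64 KiB page occupies two 32-bit words, and each vm_vram_pt entry covers a 512 MiB slice of the address space (virt >> 29), so one table holds (512 MiB / 64 KiB) * 2 = 16384 words, which is the clamp applied to end. The loop therefore zeroes everything up to the next table boundary in one inner pass, then moves on. A small sketch of just that index arithmetic, assuming illustrative virt/size values and a WORDS_PER_PT name that does not appear in the kernel source:

/* Hedged sketch of the unbind index math only; no kernel structures involved. */
#include <stdint.h>
#include <stdio.h>

#define WORDS_PER_PT 16384u     /* (512 MiB / 64 KiB) pages * 2 words per page */

int main(void)
{
        uint64_t virt  = 0x1ffc0000ULL;         /* example offset, vm_vram_base already subtracted */
        uint32_t size  = 0x00200000;            /* 2 MiB, i.e. 32 large pages */
        unsigned pages = (size >> 16) << 1;     /* two 32-bit words per 64 KiB page */

        while (pages) {
                unsigned table = (unsigned)(virt >> 29);                   /* which page table */
                unsigned pte   = (unsigned)((virt & 0x1ffe0000ULL) >> 15); /* first word inside it */
                unsigned end   = pte + pages;

                if (end > WORDS_PER_PT)         /* stop at the table boundary */
                        end = WORDS_PER_PT;
                printf("clear pt[%u] words %u..%u\n", table, pte, end - 1);

                pages -= (end - pte);
                virt  += (uint64_t)(end - pte) << 15;
        }
        return 0;
}

With these example values the sketch prints two runs, the tail of table 0 (words 16376..16383) and the start of table 1 (words 0..55), mirroring how the kernel loop handles a range that crosses a page-table boundary.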