Commit 6cb8e1f7 authored by Jerome Glisse, committed by Dave Airlie

drm/radeon/kms: fix bo's fence association

Previously the code associated the fence with a bo before the fence was
emitted, and it also did not take the lock protecting ttm's sync_obj
member. Both of these flaws lead to possible races between different
code paths. This patch fixes this by associating the fence only once it
has been emitted, and by properly lock-protecting access to ttm's
sync_obj member.

Fixes:
https://bugs.freedesktop.org/show_bug.cgi?id=26438
and likely other similar bugs
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent e821767b
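
For context, a minimal sketch of the call ordering this patch establishes in the command-submission path. It is illustrative only: it assumes the surrounding radeon_cs_ioctl()/radeon_cs_parser_fini() structure (parser, rdev) and omits error handling, so it is not the literal driver code.

        int r;

        /* Validate the BO list first; no fence is attached at this point. */
        r = radeon_bo_list_validate(&parser.validated);
        if (!r) {
                /* Scheduling the IB is what actually emits parser.ib->fence. */
                r = radeon_ib_schedule(rdev, parser.ib);
                if (!r)
                        /* Only once emitted is the fence attached to each BO. */
                        radeon_bo_list_fence(&parser.validated, parser.ib->fence);
        }
        /* The list is unreserved in every case, success or error. */
        radeon_bo_list_unreserve(&parser.validated);
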
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -86,7 +86,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
                                                   &p->validated);
                 }
         }
-        return radeon_bo_list_validate(&p->validated, p->ib->fence);
+        return radeon_bo_list_validate(&p->validated);
 }
 
 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -189,12 +189,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 {
         unsigned i;
 
-        if (error && parser->ib) {
-                radeon_bo_list_unvalidate(&parser->validated,
-                                          parser->ib->fence);
-        } else {
-                radeon_bo_list_unreserve(&parser->validated);
+        if (!error && parser->ib) {
+                radeon_bo_list_fence(&parser->validated, parser->ib->fence);
         }
+        radeon_bo_list_unreserve(&parser->validated);
         for (i = 0; i < parser->nrelocs; i++) {
                 if (parser->relocs[i].gobj) {
                         mutex_lock(&parser->rdev->ddev->struct_mutex);
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -306,11 +306,10 @@ void radeon_bo_list_unreserve(struct list_head *head)
         }
 }
 
-int radeon_bo_list_validate(struct list_head *head, void *fence)
+int radeon_bo_list_validate(struct list_head *head)
 {
         struct radeon_bo_list *lobj;
         struct radeon_bo *bo;
-        struct radeon_fence *old_fence = NULL;
         int r;
 
         r = radeon_bo_list_reserve(head);
@@ -334,32 +333,27 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
                 }
                 lobj->gpu_offset = radeon_bo_gpu_offset(bo);
                 lobj->tiling_flags = bo->tiling_flags;
-                if (fence) {
-                        old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
-                        bo->tbo.sync_obj = radeon_fence_ref(fence);
-                        bo->tbo.sync_obj_arg = NULL;
-                }
-                if (old_fence) {
-                        radeon_fence_unref(&old_fence);
-                }
         }
         return 0;
 }
 
-void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
+void radeon_bo_list_fence(struct list_head *head, void *fence)
 {
         struct radeon_bo_list *lobj;
-        struct radeon_fence *old_fence;
-
-        if (fence)
-                list_for_each_entry(lobj, head, list) {
-                        old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
-                        if (old_fence == fence) {
-                                lobj->bo->tbo.sync_obj = NULL;
-                                radeon_fence_unref(&old_fence);
-                        }
+        struct radeon_bo *bo;
+        struct radeon_fence *old_fence = NULL;
+
+        list_for_each_entry(lobj, head, list) {
+                bo = lobj->bo;
+                spin_lock(&bo->tbo.lock);
+                old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
+                bo->tbo.sync_obj = radeon_fence_ref(fence);
+                bo->tbo.sync_obj_arg = NULL;
+                spin_unlock(&bo->tbo.lock);
+                if (old_fence) {
+                        radeon_fence_unref(&old_fence);
                 }
-                radeon_bo_list_unreserve(head);
+        }
 }
 
 int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -156,8 +156,8 @@ extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
                                       struct list_head *head);
 extern int radeon_bo_list_reserve(struct list_head *head);
 extern void radeon_bo_list_unreserve(struct list_head *head);
-extern int radeon_bo_list_validate(struct list_head *head, void *fence);
-extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
+extern int radeon_bo_list_validate(struct list_head *head);
+extern void radeon_bo_list_fence(struct list_head *head, void *fence);
 extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
                                 struct vm_area_struct *vma);
 extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,