Commit ecb114a1 authored by Dave Airlie

drm/radeon/kms: IB locking dumps out a lockdep ordering issue

We sometimes lock the IB then the ring, and sometimes the ring then
the IB. This is mostly due to the IB locking not being well defined
about what data in the structs it actually protects. Define what I
believe is the correct behaviour, and get rid of the lockdep ordering
warning.
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 42dea5dd
...@@ -538,8 +538,8 @@ int r600_vb_ib_get(struct radeon_device *rdev) ...@@ -538,8 +538,8 @@ int r600_vb_ib_get(struct radeon_device *rdev)
void r600_vb_ib_put(struct radeon_device *rdev) void r600_vb_ib_put(struct radeon_device *rdev)
{ {
mutex_lock(&rdev->ib_pool.mutex);
radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
mutex_lock(&rdev->ib_pool.mutex);
list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs); list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
mutex_unlock(&rdev->ib_pool.mutex); mutex_unlock(&rdev->ib_pool.mutex);
radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
......
...@@ -402,6 +402,10 @@ struct radeon_ib { ...@@ -402,6 +402,10 @@ struct radeon_ib {
uint32_t length_dw; uint32_t length_dw;
}; };
/*
* locking -
* mutex protects scheduled_ibs, ready, alloc_bm
*/
struct radeon_ib_pool { struct radeon_ib_pool {
struct mutex mutex; struct mutex mutex;
struct radeon_object *robj; struct radeon_object *robj;
......
...@@ -56,10 +56,12 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib) ...@@ -56,10 +56,12 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
set_bit(i, rdev->ib_pool.alloc_bm); set_bit(i, rdev->ib_pool.alloc_bm);
rdev->ib_pool.ibs[i].length_dw = 0; rdev->ib_pool.ibs[i].length_dw = 0;
*ib = &rdev->ib_pool.ibs[i]; *ib = &rdev->ib_pool.ibs[i];
mutex_unlock(&rdev->ib_pool.mutex);
goto out; goto out;
} }
if (list_empty(&rdev->ib_pool.scheduled_ibs)) { if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
/* we go do nothings here */ /* we go do nothings here */
mutex_unlock(&rdev->ib_pool.mutex);
DRM_ERROR("all IB allocated none scheduled.\n"); DRM_ERROR("all IB allocated none scheduled.\n");
r = -EINVAL; r = -EINVAL;
goto out; goto out;
...@@ -69,10 +71,13 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib) ...@@ -69,10 +71,13 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
struct radeon_ib, list); struct radeon_ib, list);
if (nib->fence == NULL) { if (nib->fence == NULL) {
/* we go do nothings here */ /* we go do nothings here */
mutex_unlock(&rdev->ib_pool.mutex);
DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx); DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
r = -EINVAL; r = -EINVAL;
goto out; goto out;
} }
mutex_unlock(&rdev->ib_pool.mutex);
r = radeon_fence_wait(nib->fence, false); r = radeon_fence_wait(nib->fence, false);
if (r) { if (r) {
DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx, DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
...@@ -81,12 +86,17 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib) ...@@ -81,12 +86,17 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
goto out; goto out;
} }
radeon_fence_unref(&nib->fence); radeon_fence_unref(&nib->fence);
nib->length_dw = 0; nib->length_dw = 0;
/* scheduled list is accessed here */
mutex_lock(&rdev->ib_pool.mutex);
list_del(&nib->list); list_del(&nib->list);
INIT_LIST_HEAD(&nib->list); INIT_LIST_HEAD(&nib->list);
mutex_unlock(&rdev->ib_pool.mutex);
*ib = nib; *ib = nib;
out: out:
mutex_unlock(&rdev->ib_pool.mutex);
if (r) { if (r) {
radeon_fence_unref(&fence); radeon_fence_unref(&fence);
} else { } else {
...@@ -110,9 +120,10 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) ...@@ -110,9 +120,10 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
return; return;
} }
list_del(&tmp->list); list_del(&tmp->list);
if (tmp->fence) { INIT_LIST_HEAD(&tmp->list);
if (tmp->fence)
radeon_fence_unref(&tmp->fence); radeon_fence_unref(&tmp->fence);
}
tmp->length_dw = 0; tmp->length_dw = 0;
clear_bit(tmp->idx, rdev->ib_pool.alloc_bm); clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
mutex_unlock(&rdev->ib_pool.mutex); mutex_unlock(&rdev->ib_pool.mutex);
...@@ -122,25 +133,24 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) ...@@ -122,25 +133,24 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{ {
int r = 0; int r = 0;
mutex_lock(&rdev->ib_pool.mutex);
if (!ib->length_dw || !rdev->cp.ready) { if (!ib->length_dw || !rdev->cp.ready) {
/* TODO: Nothings in the ib we should report. */ /* TODO: Nothings in the ib we should report. */
mutex_unlock(&rdev->ib_pool.mutex);
DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx); DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
return -EINVAL; return -EINVAL;
} }
/* 64 dwords should be enough for fence too */ /* 64 dwords should be enough for fence too */
r = radeon_ring_lock(rdev, 64); r = radeon_ring_lock(rdev, 64);
if (r) { if (r) {
DRM_ERROR("radeon: scheduling IB failled (%d).\n", r); DRM_ERROR("radeon: scheduling IB failled (%d).\n", r);
mutex_unlock(&rdev->ib_pool.mutex);
return r; return r;
} }
radeon_ring_ib_execute(rdev, ib); radeon_ring_ib_execute(rdev, ib);
radeon_fence_emit(rdev, ib->fence); radeon_fence_emit(rdev, ib->fence);
radeon_ring_unlock_commit(rdev); mutex_lock(&rdev->ib_pool.mutex);
list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs); list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
mutex_unlock(&rdev->ib_pool.mutex); mutex_unlock(&rdev->ib_pool.mutex);
radeon_ring_unlock_commit(rdev);
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment