Commit a77f1718 authored by Matt Turner, committed by Dave Airlie

drm/radeon/kms: use RADEON_GPU_PAGE_SIZE instead of 4096

Signed-off-by: Matt Turner <mattst88@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent dfdd6467
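
The change below is mechanical: RADEON_GPU_PAGE_SIZE is defined as 4096 (see the radeon.h hunk), so every substitution programs the exact same value while documenting that the call site means "one GPU page" rather than an arbitrary 4096. The distinction matters because the GPU page size is fixed at 4 KiB even when the CPU's PAGE_SIZE is larger; radeon_gart_init() below rejects the opposite case. A minimal standalone sketch of the relationship, with a hypothetical 16 KiB CPU page:

    #define RADEON_GPU_PAGE_SIZE 4096            /* GPU pages are always 4 KiB */

    /* When CPU pages are larger, one CPU page spans several
     * consecutive GPU pages in the GART. */
    unsigned long cpu_page_size = 16384;         /* assumed; stands in for PAGE_SIZE */
    unsigned long gpu_pages_per_cpu_page =
            cpu_page_size / RADEON_GPU_PAGE_SIZE;  /* = 4 */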
@@ -240,7 +240,7 @@ int r100_wb_init(struct radeon_device *rdev)
         int r;

         if (rdev->wb.wb_obj == NULL) {
-                r = radeon_object_create(rdev, NULL, 4096,
+                r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
                                          true,
                                          RADEON_GEM_DOMAIN_GTT,
                                          false, &rdev->wb.wb_obj);

@@ -113,7 +113,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
         tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
         WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
         WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
-        tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 4096;
+        tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - RADEON_GPU_PAGE_SIZE;
         WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
         WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
         WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);

@@ -1269,9 +1269,9 @@ int r600_cp_resume(struct radeon_device *rdev)
         /* Set ring buffer size */
         rb_bufsz = drm_order(rdev->cp.ring_size / 8);
 #ifdef __BIG_ENDIAN
         WREG32(CP_RB_CNTL, BUF_SWAP_32BIT | RB_NO_UPDATE |
-                (drm_order(4096/8) << 8) | rb_bufsz);
+                (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz);
 #else
-        WREG32(CP_RB_CNTL, RB_NO_UPDATE | (drm_order(4096/8) << 8) | rb_bufsz);
+        WREG32(CP_RB_CNTL, RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz);
 #endif
         WREG32(CP_SEM_WAIT_TIMER, 0x4);
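
The value programmed into CP_RB_CNTL is unchanged by the rename: drm_order() is a base-2 logarithm rounded up, so drm_order(4096/8) and drm_order(RADEON_GPU_PAGE_SIZE/8) both evaluate to drm_order(512) = 9. A userspace re-check of that arithmetic (order() is a stand-in re-implementation of the kernel's drm_order(), for illustration only):

    #include <assert.h>

    #define RADEON_GPU_PAGE_SIZE 4096

    /* log2 rounded up; matches drm_order() exactly for powers of two */
    static unsigned order(unsigned long size)
    {
            unsigned o = 0;
            while ((1UL << o) < size)
                    o++;
            return o;
    }

    int main(void)
    {
            assert(order(RADEON_GPU_PAGE_SIZE / 8) == order(4096 / 8));
            assert(order(512) == 9);  /* same field value before and after */
            return 0;
    }
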
@@ -1400,7 +1400,7 @@ int r600_wb_enable(struct radeon_device *rdev)
         int r;

         if (rdev->wb.wb_obj == NULL) {
-                r = radeon_object_create(rdev, NULL, 4096, true,
+                r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
                                 RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj);
                 if (r) {
                         dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r);
@@ -1450,8 +1450,8 @@ int r600_copy_blit(struct radeon_device *rdev,
                    uint64_t src_offset, uint64_t dst_offset,
                    unsigned num_pages, struct radeon_fence *fence)
 {
-        r600_blit_prepare_copy(rdev, num_pages * 4096);
-        r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * 4096);
+        r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+        r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
         r600_blit_done_copy(rdev, fence);
         return 0;
 }

@@ -276,6 +276,8 @@ union radeon_gart_table {
         struct radeon_gart_table_vram   vram;
 };

+#define RADEON_GPU_PAGE_SIZE 4096
+
 struct radeon_gart {
         dma_addr_t                      table_addr;
         unsigned                        num_gpu_pages;

@@ -63,7 +63,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
         if (r) {
                 goto out_cleanup;
         }
-        r = radeon_copy_dma(rdev, saddr, daddr, size / 4096, fence);
+        r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence);
         if (r) {
                 goto out_cleanup;
         }
@@ -88,7 +88,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
         if (r) {
                 goto out_cleanup;
         }
-        r = radeon_copy_blit(rdev, saddr, daddr, size / 4096, fence);
+        r = radeon_copy_blit(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence);
         if (r) {
                 goto out_cleanup;
         }

@@ -140,15 +140,15 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
                 WARN(1, "trying to unbind memory to unitialized GART !\n");
                 return;
         }
-        t = offset / 4096;
-        p = t / (PAGE_SIZE / 4096);
+        t = offset / RADEON_GPU_PAGE_SIZE;
+        p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
         for (i = 0; i < pages; i++, p++) {
                 if (rdev->gart.pages[p]) {
                         pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
                                        PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                         rdev->gart.pages[p] = NULL;
                         rdev->gart.pages_addr[p] = 0;
-                        for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) {
+                        for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
                                 radeon_gart_set_page(rdev, t, 0);
                         }
                 }
@@ -169,8 +169,8 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
                 DRM_ERROR("trying to bind memory to unitialized GART !\n");
                 return -EINVAL;
         }
-        t = offset / 4096;
-        p = t / (PAGE_SIZE / 4096);
+        t = offset / RADEON_GPU_PAGE_SIZE;
+        p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);

         for (i = 0; i < pages; i++, p++) {
                 /* we need to support large memory configurations */
@@ -185,9 +185,9 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
                 }
                 rdev->gart.pages[p] = pagelist[i];
                 page_base = rdev->gart.pages_addr[p];
-                for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) {
+                for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
                         radeon_gart_set_page(rdev, t, page_base);
-                        page_base += 4096;
+                        page_base += RADEON_GPU_PAGE_SIZE;
                 }
         }
         mb();
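
In radeon_gart_bind()/radeon_gart_unbind() above, t indexes GPU pages while p indexes CPU pages, and each bound CPU page fills PAGE_SIZE / RADEON_GPU_PAGE_SIZE consecutive GART entries. A standalone sketch of the same index arithmetic with hypothetical values (not driver code):

    #include <stdio.h>

    #define RADEON_GPU_PAGE_SIZE 4096

    int main(void)
    {
            unsigned long page_size = 16384;  /* assumed 16 KiB CPU page */
            unsigned long offset = 65536;     /* GART offset in bytes */

            unsigned long t = offset / RADEON_GPU_PAGE_SIZE;           /* GPU-page index = 16 */
            unsigned long p = t / (page_size / RADEON_GPU_PAGE_SIZE);  /* CPU-page index = 4 */

            printf("t=%lu p=%lu entries per cpu page=%lu\n",
                   t, p, page_size / RADEON_GPU_PAGE_SIZE);
            return 0;
    }
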
@@ -200,14 +200,14 @@ int radeon_gart_init(struct radeon_device *rdev)
         if (rdev->gart.pages) {
                 return 0;
         }
-        /* We need PAGE_SIZE >= 4096 */
-        if (PAGE_SIZE < 4096) {
+        /* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
+        if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
                 DRM_ERROR("Page size is smaller than GPU page size!\n");
                 return -EINVAL;
         }
         /* Compute table size */
         rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
-        rdev->gart.num_gpu_pages = rdev->mc.gtt_size / 4096;
+        rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
         DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
                  rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
         /* Allocate pages table */

@@ -42,7 +42,7 @@ void radeon_test_moves(struct radeon_device *rdev)
         /* Number of tests =
          * (Total GTT - IB pool - writeback page - ring buffer) / test size
          */
-        n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - 4096 -
+        n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
              rdev->cp.ring_size) / size;

         gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
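
The page subtracted in the sizing comment above is the single write-back page allocated by r100_wb_init()/r600_wb_enable() earlier in this patch, hence RADEON_GPU_PAGE_SIZE rather than PAGE_SIZE. Worked through with hypothetical sizes (and RADEON_IB_POOL_SIZE assumed to be 16):

    unsigned long gtt_size  = 64UL << 20;        /* 64 MiB GTT, assumed */
    unsigned long ib_pool   = 16UL * 64 * 1024;  /* IB pool = 1 MiB */
    unsigned long ring_size = 1UL << 20;         /* ring buffer, assumed */
    unsigned long size      = 1UL << 20;         /* per-test buffer size */

    unsigned long n = (gtt_size - ib_pool - RADEON_GPU_PAGE_SIZE -
                       ring_size) / size;        /* n = 61 tests */
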
@@ -102,7 +102,7 @@ void radeon_test_moves(struct radeon_device *rdev)
                         goto out_cleanup;
                 }

-                r = radeon_copy(rdev, gtt_addr, vram_addr, size / 4096, fence);
+                r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, fence);
                 if (r) {
                         DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
                         goto out_cleanup;
@@ -145,7 +145,7 @@ void radeon_test_moves(struct radeon_device *rdev)
                         goto out_cleanup;
                 }

-                r = radeon_copy(rdev, vram_addr, gtt_addr, size / 4096, fence);
+                r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, fence);
                 if (r) {
                         DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
                         goto out_cleanup;