Commit 9148bce4 authored by Linus Torvalds

Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm/ttm: export some functions useful to drivers using ttm
  drm/radeon/kms/avivo: fix typo in new_pll module description
  drm/radeon/kms: Convert radeon to new ttm_bo_init
  drm/ttm: Convert ttm_buffer_object_init to use ttm_placement
parents aad3bf04 d1ede145
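The core change in this merge is that ttm_buffer_object_init()/ttm_buffer_object_validate() become ttm_bo_init()/ttm_bo_validate() and take a struct ttm_placement instead of a flags word. The following is a hedged sketch (not part of this commit) of how a driver might call the new entry point, using only the fields and argument order visible in the diff below; the function name example_bo_init_vram is illustrative.

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

/* Example only: one acceptable placement (VRAM, write-combined or uncached),
 * reused as the busy fallback, mirroring the old radeon VRAM flags. */
static int example_bo_init_vram(struct ttm_bo_device *bdev,
                                struct ttm_buffer_object *bo,
                                unsigned long size)
{
        uint32_t placements[] = {
                TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED,
        };
        struct ttm_placement placement = {
                .fpfn = 0,              /* no page-frame range restriction */
                .lpfn = 0,
                .placement = placements,
                .num_placement = 1,
                .busy_placement = placements,
                .num_busy_placement = 1,
        };

        /* The old API passed a flags word here; the new API takes the
         * placement.  The remaining arguments follow the radeon call in this
         * merge: page_alignment = 0, buffer_start = 0, interruptible = false
         * for a kernel allocation, persistant_swap_storage = NULL,
         * acc_size = size, destroy = NULL (the object is then freed with
         * kfree(), as the ttm_bo_create kerneldoc in this diff notes). */
        return ttm_bo_init(bdev, bo, size, ttm_bo_type_kernel,
                           &placement, 0, 0, false, NULL, size, NULL);
}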
@@ -121,7 +121,7 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
 MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
 module_param_named(tv, radeon_tv, int, 0444);
 
-MODULE_PARM_DESC(r4xx_atom, "Select new PLL code for AVIVO chips");
+MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips");
 module_param_named(new_pll, radeon_new_pll, int, 0444);
 
 static int radeon_suspend(struct drm_device *dev, pm_message_t state)
@@ -56,25 +56,6 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
         kfree(bo);
 }
 
-static inline u32 radeon_ttm_flags_from_domain(u32 domain)
-{
-        u32 flags = 0;
-
-        if (domain & RADEON_GEM_DOMAIN_VRAM) {
-                flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
-        }
-        if (domain & RADEON_GEM_DOMAIN_GTT) {
-                flags |= TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
-        }
-        if (domain & RADEON_GEM_DOMAIN_CPU) {
-                flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
-        }
-        if (!flags) {
-                flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
-        }
-        return flags;
-}
-
 void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 {
         u32 c = 0;

@@ -100,7 +81,6 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
 {
         struct radeon_bo *bo;
         enum ttm_bo_type type;
-        u32 flags;
         int r;
 
         if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {

@@ -120,16 +100,16 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
         bo->surface_reg = -1;
         INIT_LIST_HEAD(&bo->list);
 
-        flags = radeon_ttm_flags_from_domain(domain);
+        radeon_ttm_placement_from_domain(bo, domain);
         /* Kernel allocation are uninterruptible */
-        r = ttm_buffer_object_init(&rdev->mman.bdev, &bo->tbo, size, type,
-                                   flags, 0, 0, !kernel, NULL, size,
-                                   &radeon_ttm_bo_destroy);
+        r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
+                        &bo->placement, 0, 0, !kernel, NULL, size,
+                        &radeon_ttm_bo_destroy);
         if (unlikely(r != 0)) {
                 if (r != -ERESTARTSYS)
                         dev_err(rdev->dev,
-                                "object_init failed for (%ld, 0x%08X)\n",
-                                size, flags);
+                                "object_init failed for (%lu, 0x%08X)\n",
+                                size, domain);
                 return r;
         }
         *bo_ptr = bo;

@@ -199,7 +179,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
         radeon_ttm_placement_from_domain(bo, domain);
         for (i = 0; i < bo->placement.num_placement; i++)
                 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-        r = ttm_buffer_object_validate(&bo->tbo, &bo->placement, false, false);
+        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
         if (likely(r == 0)) {
                 bo->pin_count = 1;
                 if (gpu_addr != NULL)

@@ -223,7 +203,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
                 return 0;
         for (i = 0; i < bo->placement.num_placement; i++)
                 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-        r = ttm_buffer_object_validate(&bo->tbo, &bo->placement, false, false);
+        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
         if (unlikely(r != 0))
                 dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
         return r;

@@ -336,8 +316,7 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
                         radeon_ttm_placement_from_domain(bo,
                                                         lobj->rdomain);
                 }
-                r = ttm_buffer_object_validate(&bo->tbo,
-                                               &bo->placement,
-                                               true, false);
+                r = ttm_bo_validate(&bo->tbo, &bo->placement,
+                                        true, false);
                 if (unlikely(r))
                         return r;
@@ -185,6 +185,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
         }
         return 0;
 }
+EXPORT_SYMBOL(ttm_bo_wait_unreserved);
 
 static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {

@@ -946,6 +947,7 @@ int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
         return wait_event_interruptible(bo->event_queue,
                                         atomic_read(&bo->cpu_writers) == 0);
 }
+EXPORT_SYMBOL(ttm_bo_wait_cpu);
 
 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                         struct ttm_placement *placement,

@@ -1002,7 +1004,7 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
         return -1;
 }
 
-int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
+int ttm_bo_validate(struct ttm_buffer_object *bo,
                         struct ttm_placement *placement,
                         bool interruptible, bool no_wait)
 {

@@ -1040,44 +1042,48 @@ int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
         }
         return 0;
 }
-EXPORT_SYMBOL(ttm_buffer_object_validate);
+EXPORT_SYMBOL(ttm_bo_validate);
 
-int
-ttm_bo_check_placement(struct ttm_buffer_object *bo,
-                       uint32_t set_flags, uint32_t clr_flags)
+int ttm_bo_check_placement(struct ttm_buffer_object *bo,
+                                struct ttm_placement *placement)
 {
-        uint32_t new_mask = set_flags | clr_flags;
-
-        if ((bo->type == ttm_bo_type_user) &&
-            (clr_flags & TTM_PL_FLAG_CACHED)) {
-                printk(KERN_ERR TTM_PFX
-                       "User buffers require cache-coherent memory.\n");
-                return -EINVAL;
-        }
-
-        if (!capable(CAP_SYS_ADMIN)) {
-                if (new_mask & TTM_PL_FLAG_NO_EVICT) {
-                        printk(KERN_ERR TTM_PFX "Need to be root to modify"
-                               " NO_EVICT status.\n");
+        int i;
+
+        if (placement->fpfn || placement->lpfn) {
+                if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
+                        printk(KERN_ERR TTM_PFX "Page number range to small "
+                                "Need %lu pages, range is [%u, %u]\n",
+                                bo->mem.num_pages, placement->fpfn,
+                                placement->lpfn);
                         return -EINVAL;
                 }
-
-                if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
-                    (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
-                        printk(KERN_ERR TTM_PFX
-                               "Incompatible memory specification"
-                               " for NO_EVICT buffer.\n");
-                        return -EINVAL;
+        }
+        for (i = 0; i < placement->num_placement; i++) {
+                if (!capable(CAP_SYS_ADMIN)) {
+                        if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
+                                printk(KERN_ERR TTM_PFX "Need to be root to "
+                                        "modify NO_EVICT status.\n");
+                                return -EINVAL;
+                        }
+                }
+        }
+        for (i = 0; i < placement->num_busy_placement; i++) {
+                if (!capable(CAP_SYS_ADMIN)) {
+                        if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
+                                printk(KERN_ERR TTM_PFX "Need to be root to "
+                                        "modify NO_EVICT status.\n");
+                                return -EINVAL;
+                        }
                 }
         }
-
         return 0;
 }
 
-int ttm_buffer_object_init(struct ttm_bo_device *bdev,
+int ttm_bo_init(struct ttm_bo_device *bdev,
                 struct ttm_buffer_object *bo,
                 unsigned long size,
                 enum ttm_bo_type type,
-                uint32_t flags,
+                struct ttm_placement *placement,
                 uint32_t page_alignment,
                 unsigned long buffer_start,
                 bool interruptible,

@@ -1085,10 +1091,8 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
                 size_t acc_size,
                 void (*destroy) (struct ttm_buffer_object *))
 {
-        int i, c, ret = 0;
+        int ret = 0;
         unsigned long num_pages;
-        uint32_t placements[8];
-        struct ttm_placement placement;
 
         size += buffer_start & ~PAGE_MASK;
         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

@@ -1123,38 +1127,21 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
         bo->acc_size = acc_size;
         atomic_inc(&bo->glob->bo_count);
 
-        ret = ttm_bo_check_placement(bo, flags, 0ULL);
+        ret = ttm_bo_check_placement(bo, placement);
         if (unlikely(ret != 0))
                 goto out_err;
 
-        /*
-         * If no caching attributes are set, accept any form of caching.
-         */
-
-        if ((flags & TTM_PL_MASK_CACHING) == 0)
-                flags |= TTM_PL_MASK_CACHING;
-
         /*
          * For ttm_bo_type_device buffers, allocate
          * address space from the device.
          */
-
         if (bo->type == ttm_bo_type_device) {
                 ret = ttm_bo_setup_vm(bo);
                 if (ret)
                         goto out_err;
         }
 
-        placement.fpfn = 0;
-        placement.lpfn = 0;
-        for (i = 0, c = 0; i <= TTM_PL_PRIV5; i++)
-                if (flags & (1 << i))
-                        placements[c++] = (flags & ~TTM_PL_MASK_MEM) | (1 << i);
-        placement.placement = placements;
-        placement.num_placement = c;
-        placement.busy_placement = placements;
-        placement.num_busy_placement = c;
-        ret = ttm_buffer_object_validate(bo, &placement, interruptible, false);
+        ret = ttm_bo_validate(bo, placement, interruptible, false);
         if (ret)
                 goto out_err;

@@ -1167,7 +1154,7 @@ out_err:
         return ret;
 }
-EXPORT_SYMBOL(ttm_buffer_object_init);
+EXPORT_SYMBOL(ttm_bo_init);
 
 static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
                         unsigned long num_pages)

@@ -1178,10 +1165,10 @@ static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
         return glob->ttm_bo_size + 2 * page_array_size;
 }
 
-int ttm_buffer_object_create(struct ttm_bo_device *bdev,
+int ttm_bo_create(struct ttm_bo_device *bdev,
                         unsigned long size,
                         enum ttm_bo_type type,
-                        uint32_t flags,
+                        struct ttm_placement *placement,
                         uint32_t page_alignment,
                         unsigned long buffer_start,
                         bool interruptible,

@@ -1205,9 +1192,8 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
                 return -ENOMEM;
         }
 
-        ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
-                                     page_alignment, buffer_start,
-                                     interruptible,
-                                     persistant_swap_storage, acc_size, NULL);
+        ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
+                                buffer_start, interruptible,
+                                persistant_swap_storage, acc_size, NULL);
         if (likely(ret == 0))
                 *p_bo = bo;

@@ -1743,12 +1729,14 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
         ttm_bo_unreserve(bo);
         return ret;
 }
+EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
 
 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
 {
         if (atomic_dec_and_test(&bo->cpu_writers))
                 wake_up_all(&bo->event_queue);
 }
+EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
 
 /**
  * A buffer object shrink method that tries to swap out the first
@@ -308,7 +308,7 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
 extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
                        bool interruptible, bool no_wait);
 /**
- * ttm_buffer_object_validate
+ * ttm_bo_validate
  *
  * @bo: The buffer object.
  * @placement: Proposed placement for the buffer object.

@@ -323,7 +323,7 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
  * -EBUSY if no_wait is true and buffer busy.
  * -ERESTARTSYS if interrupted by a signal.
  */
-extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
+extern int ttm_bo_validate(struct ttm_buffer_object *bo,
                                 struct ttm_placement *placement,
                                 bool interruptible, bool no_wait);

@@ -362,7 +362,7 @@ ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
 extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
 
 /**
- * ttm_buffer_object_init
+ * ttm_bo_init
  *
  * @bdev: Pointer to a ttm_bo_device struct.
 * @bo: Pointer to a ttm_buffer_object to be initialized.

@@ -393,11 +393,11 @@ extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
 */
-extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
+extern int ttm_bo_init(struct ttm_bo_device *bdev,
                         struct ttm_buffer_object *bo,
                         unsigned long size,
                         enum ttm_bo_type type,
-                        uint32_t flags,
+                        struct ttm_placement *placement,
                         uint32_t page_alignment,
                         unsigned long buffer_start,
                         bool interrubtible,

@@ -424,19 +424,18 @@ extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
 * GEM user interface.
 * @p_bo: On successful completion *p_bo points to the created object.
 *
- * This function allocates a ttm_buffer_object, and then calls
- * ttm_buffer_object_init on that object.
- * The destroy function is set to kfree().
+ * This function allocates a ttm_buffer_object, and then calls ttm_bo_init
+ * on that object. The destroy function is set to kfree().
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTARTSYS: Interrupted by signal while waiting for resources.
 */
-extern int ttm_buffer_object_create(struct ttm_bo_device *bdev,
+extern int ttm_bo_create(struct ttm_bo_device *bdev,
                                 unsigned long size,
                                 enum ttm_bo_type type,
-                                uint32_t flags,
+                                struct ttm_placement *placement,
                                 uint32_t page_alignment,
                                 unsigned long buffer_start,
                                 bool interruptible,

@@ -447,17 +446,15 @@ extern int ttm_buffer_object_create(struct ttm_bo_device *bdev,
 * ttm_bo_check_placement
 *
 * @bo: the buffer object.
- * @set_flags: placement flags to set.
- * @clr_flags: placement flags to clear.
+ * @placement: placements
 *
 * Performs minimal validity checking on an intended change of
 * placement flags.
 * Returns
 * -EINVAL: Intended change is invalid or not allowed.
 */
 extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
-                                uint32_t set_flags, uint32_t clr_flags);
+                                        struct ttm_placement *placement);
 
 /**
  * ttm_bo_init_mm