Commit a6b54f3f authored by Michel Dänzer, committed by airlied

drm: i915: Add ioctl for scheduling buffer swaps at vertical blanks.

This uses the core facility to schedule a driver callback that will be called
ASAP after the given vertical blank interrupt with the HW lock held.
Signed-off-by: Dave Airlie <airlied@linux.ie>
parent 049b3233
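For context, a minimal userspace sketch of how the new ioctl might be invoked, assuming libdrm's drmCommandWrite(), an already authenticated DRM file descriptor fd, and a hypothetical schedule_swap() helper; the DRM_I915_VBLANK_SWAP command index and the drm_i915_vblank_swap_t payload are the ones added by this commit:

#include <xf86drm.h>
#include "i915_drm.h"

/* Hypothetical helper, not part of this commit: ask the driver to blit the
 * drawable's back buffer to the front buffer once the given pipe's vblank
 * counter reaches target_sequence. */
static int schedule_swap(int fd, drm_drawable_t drawable, unsigned int pipe,
                         unsigned int target_sequence)
{
	drm_i915_vblank_swap_t swap;

	swap.drawable = drawable;        /* drawable whose cliprects are blitted */
	swap.pipe = pipe;                /* must be enabled via DRM_I915_SET_VBLANK_PIPE */
	swap.sequence = target_sequence; /* vblank count at which to swap */

	return drmCommandWrite(fd, DRM_I915_VBLANK_SWAP, &swap, sizeof(swap));
}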
@@ -162,6 +162,7 @@ static int i915_initialize(drm_device_t * dev,
dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
dev_priv->cpp = init->cpp;
dev_priv->back_offset = init->back_offset;
dev_priv->front_offset = init->front_offset;
dev_priv->current_page = 0;
@@ -782,6 +783,7 @@ drm_ioctl_desc_t i915_ioctls[] = {
[DRM_IOCTL_NR(DRM_I915_DESTROY_HEAP)] = { i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
[DRM_IOCTL_NR(DRM_I915_SET_VBLANK_PIPE)] = { i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
[DRM_IOCTL_NR(DRM_I915_GET_VBLANK_PIPE)] = { i915_vblank_pipe_get, DRM_AUTH },
[DRM_IOCTL_NR(DRM_I915_VBLANK_SWAP)] = {i915_vblank_swap, DRM_AUTH},
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
@@ -132,6 +132,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_DESTROY_HEAP 0x0c
#define DRM_I915_SET_VBLANK_PIPE 0x0d
#define DRM_I915_GET_VBLANK_PIPE 0x0e
#define DRM_I915_VBLANK_SWAP 0x0f
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -243,4 +244,12 @@ typedef struct drm_i915_vblank_pipe {
int pipe;
} drm_i915_vblank_pipe_t;
/* Schedule buffer swap at given vertical blank:
*/
typedef struct drm_i915_vblank_swap {
drm_drawable_t drawable;
unsigned int pipe;
unsigned int sequence;
} drm_i915_vblank_swap_t;
#endif /* _I915_DRM_H_ */
@@ -71,6 +71,13 @@ struct mem_block {
DRMFILE filp; /* 0: free, -1: heap, other: real files */
};
typedef struct _drm_i915_vbl_swap {
struct list_head head;
drm_drawable_t drw_id;
unsigned int pipe;
unsigned int sequence;
} drm_i915_vbl_swap_t;
typedef struct drm_i915_private {
drm_local_map_t *sarea;
drm_local_map_t *mmio_map;
@@ -83,6 +90,7 @@ typedef struct drm_i915_private {
dma_addr_t dma_status_page;
unsigned long counter;
unsigned int cpp;
int back_offset;
int front_offset;
int current_page;
@@ -98,6 +106,10 @@ typedef struct drm_i915_private {
struct mem_block *agp_heap;
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
spinlock_t swaps_lock;
drm_i915_vbl_swap_t vbl_swaps;
unsigned int swaps_pending;
} drm_i915_private_t;
extern drm_ioctl_desc_t i915_ioctls[];
@@ -124,6 +136,7 @@ extern void i915_driver_irq_postinstall(drm_device_t * dev);
extern void i915_driver_irq_uninstall(drm_device_t * dev);
extern int i915_vblank_pipe_set(DRM_IOCTL_ARGS);
extern int i915_vblank_pipe_get(DRM_IOCTL_ARGS);
extern int i915_vblank_swap(DRM_IOCTL_ARGS);
/* i915_mem.c */
extern int i915_mem_alloc(DRM_IOCTL_ARGS);
@@ -257,6 +270,10 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
#define MI_BATCH_BUFFER ((0x30<<23)|1)
#define MI_BATCH_BUFFER_START (0x31<<23)
#define MI_BATCH_BUFFER_END (0xA<<23)
@@ -37,6 +37,99 @@
#define MAX_NOPID ((u32)~0)
/**
* Emit blits for scheduled buffer swaps.
*
* This function will be called with the HW lock held.
*/
static void i915_vblank_tasklet(drm_device_t *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
struct list_head *list, *tmp;
DRM_DEBUG("\n");
spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
drm_i915_vbl_swap_t *vbl_swap =
list_entry(list, drm_i915_vbl_swap_t, head);
atomic_t *counter = vbl_swap->pipe ? &dev->vbl_received2 :
&dev->vbl_received;
if ((atomic_read(counter) - vbl_swap->sequence) <= (1<<23)) {
drm_drawable_info_t *drw;
spin_unlock(&dev_priv->swaps_lock);
spin_lock(&dev->drw_lock);
drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
if (drw) {
int i, num_rects = drw->num_rects;
drm_clip_rect_t *rect = drw->rects;
drm_i915_sarea_t *sarea_priv =
dev_priv->sarea_priv;
u32 cpp = dev_priv->cpp;
u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
XY_SRC_COPY_BLT_WRITE_ALPHA |
XY_SRC_COPY_BLT_WRITE_RGB)
: XY_SRC_COPY_BLT_CMD;
u32 pitchropcpp = (sarea_priv->pitch * cpp) |
(0xcc << 16) | (cpp << 23) |
(1 << 24);
RING_LOCALS;
i915_kernel_lost_context(dev);
BEGIN_LP_RING(6);
OUT_RING(GFX_OP_DRAWRECT_INFO);
OUT_RING(0);
OUT_RING(0);
OUT_RING(sarea_priv->width |
sarea_priv->height << 16);
OUT_RING(sarea_priv->width |
sarea_priv->height << 16);
OUT_RING(0);
ADVANCE_LP_RING();
sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
for (i = 0; i < num_rects; i++, rect++) {
BEGIN_LP_RING(8);
OUT_RING(cmd);
OUT_RING(pitchropcpp);
OUT_RING((rect->y1 << 16) | rect->x1);
OUT_RING((rect->y2 << 16) | rect->x2);
OUT_RING(sarea_priv->front_offset);
OUT_RING((rect->y1 << 16) | rect->x1);
OUT_RING(pitchropcpp & 0xffff);
OUT_RING(sarea_priv->back_offset);
ADVANCE_LP_RING();
}
}
spin_unlock(&dev->drw_lock);
spin_lock(&dev_priv->swaps_lock);
list_del(list);
drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
dev_priv->swaps_pending--;
}
}
spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
}
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
drm_device_t *dev = (drm_device_t *) arg;
@@ -72,6 +165,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
DRM_WAKEUP(&dev->vbl_queue);
drm_vbl_send_signals(dev);
drm_locked_tasklet(dev, i915_vblank_tasklet);
}
return IRQ_HANDLED;
@@ -271,6 +366,90 @@ int i915_vblank_pipe_get(DRM_IOCTL_ARGS)
return 0;
}
/**
* Schedule buffer swap at given vertical blank.
*/
int i915_vblank_swap(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_vblank_swap_t swap;
drm_i915_vbl_swap_t *vbl_swap;
unsigned long irqflags;
struct list_head *list;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __func__);
return DRM_ERR(EINVAL);
}
if (dev_priv->sarea_priv->rotation) {
DRM_DEBUG("Rotation not supported\n");
return DRM_ERR(EINVAL);
}
if (dev_priv->swaps_pending >= 100) {
DRM_DEBUG("Too many swaps queued\n");
return DRM_ERR(EBUSY);
}
DRM_COPY_FROM_USER_IOCTL(swap, (drm_i915_vblank_swap_t __user *) data,
sizeof(swap));
if (swap.pipe > 1 || !(dev_priv->vblank_pipe & (1 << swap.pipe))) {
DRM_ERROR("Invalid pipe %d\n", swap.pipe);
return DRM_ERR(EINVAL);
}
spin_lock_irqsave(&dev->drw_lock, irqflags);
if (!drm_get_drawable_info(dev, swap.drawable)) {
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
DRM_ERROR("Invalid drawable ID %d\n", swap.drawable);
return DRM_ERR(EINVAL);
}
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
list_for_each(list, &dev_priv->vbl_swaps.head) {
vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
if (vbl_swap->drw_id == swap.drawable &&
vbl_swap->pipe == swap.pipe &&
vbl_swap->sequence == swap.sequence) {
spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
DRM_DEBUG("Already scheduled\n");
return 0;
}
}
spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
if (!vbl_swap) {
DRM_ERROR("Failed to allocate memory to queue swap\n");
return DRM_ERR(ENOMEM);
}
DRM_DEBUG("\n");
vbl_swap->drw_id = swap.drawable;
vbl_swap->pipe = swap.pipe;
vbl_swap->sequence = swap.sequence;
spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
list_add_tail((struct list_head *)vbl_swap, &dev_priv->vbl_swaps.head);
dev_priv->swaps_pending++;
spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
return 0;
}
/* drm_dma.h hooks
*/
void i915_driver_irq_preinstall(drm_device_t * dev)
@@ -286,6 +465,10 @@ void i915_driver_irq_postinstall(drm_device_t * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
dev_priv->swaps_lock = SPIN_LOCK_UNLOCKED;
INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
dev_priv->swaps_pending = 0;
i915_enable_interrupt(dev);
DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
}
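A note on the sequence test in i915_vblank_tasklet() above: (atomic_read(counter) - vbl_swap->sequence) <= (1<<23) relies on unsigned subtraction wrapping modulo 2^32, so a queued swap is treated as due once the pipe's vblank counter is at or past the requested sequence by no more than 2^23 counts, even across a 32-bit counter wrap; a counter still behind the sequence yields a large unsigned difference and the swap stays queued. An illustrative fragment with made-up values, not part of this commit:

	unsigned int counter  = 0x00000002;  /* vblank count just after wrapping past 0xffffffff */
	unsigned int sequence = 0xfffffffe;  /* target requested before the wrap */

	/* 0x2 - 0xfffffffe wraps to 4, which is <= (1 << 23), so the target
	 * counts as reached and the queued blit would be emitted. */
	if ((counter - sequence) <= (1 << 23))
		emit_swap();  /* hypothetical stand-in for the blit emission */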