Commit d94adb34 authored by Xiang, Haihao

i965_drv_video: clean up batchbuffer interface

Signed-off-by: Xiang, Haihao <haihao.xiang@intel.com>
parent 94e00c8a
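
The change is mechanical but worth stating once: the BEGIN_BATCH/OUT_BATCH/OUT_RELOC/ADVANCE_BATCH macros and the intel_batchbuffer_* helpers now take the batchbuffer itself instead of the VADriverContextP, so each emission function looks the batchbuffer up once from the driver data and passes it explicitly. A minimal compilable sketch of the new shape, using stand-in types and a dummy command value rather than the driver's real definitions:

#include <stdio.h>

/* Stand-ins for the driver's types and macros; the real definitions
 * live in intel_driver.h and intel_batchbuffer.h. */
struct intel_batchbuffer { unsigned int dws[64]; int used; };
struct intel_driver_data { struct intel_batchbuffer *batch; };

#define BEGIN_BATCH(batch, n)   ((void)(n))        /* real macro reserves space */
#define OUT_BATCH(batch, dw)    ((batch)->dws[(batch)->used++] = (dw))
#define ADVANCE_BATCH(batch)    ((void)(batch))    /* real macro checks length */

static struct intel_driver_data *intel_driver_data_stub(void *ctx)
{
    static struct intel_batchbuffer bb;
    static struct intel_driver_data data = { &bb };
    (void)ctx;
    return &data;
}

/* The pattern every emission function in this commit now follows:
 * fetch the batch once from the driver data, then emit through it. */
static void pipeline_select(void *ctx)
{
    struct intel_driver_data *intel = intel_driver_data_stub(ctx);
    struct intel_batchbuffer *batch = intel->batch;

    BEGIN_BATCH(batch, 1);
    OUT_BATCH(batch, 0x12340000);   /* dummy stand-in for CMD_PIPELINE_SELECT */
    ADVANCE_BATCH(batch);
}

int main(void)
{
    pipeline_select(NULL);
    printf("emitted %d dword(s)\n", intel_driver_data_stub(NULL)->used);
    return 0;
}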
@@ -198,93 +198,113 @@ i965_avc_hw_scoreboard_states_setup(struct i965_avc_hw_scoreboard_context *avc_h
 static void
 i965_avc_hw_scoreboard_pipeline_select(VADriverContextP ctx)
 {
-    BEGIN_BATCH(ctx, 1);
-    OUT_BATCH(ctx, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
-    ADVANCE_BATCH(ctx);
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
+
+    BEGIN_BATCH(batch, 1);
+    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
+    ADVANCE_BATCH(batch);
 }

 static void
 i965_avc_hw_scoreboard_urb_layout(VADriverContextP ctx, struct i965_avc_hw_scoreboard_context *avc_hw_scoreboard_context)
 {
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
     struct i965_driver_data *i965 = i965_driver_data(ctx);
     unsigned int vfe_fence, cs_fence;

     vfe_fence = avc_hw_scoreboard_context->urb.cs_start;
     cs_fence = URB_SIZE((&i965->intel));

-    BEGIN_BATCH(ctx, 3);
-    OUT_BATCH(ctx, CMD_URB_FENCE | UF0_VFE_REALLOC | UF0_CS_REALLOC | 1);
-    OUT_BATCH(ctx, 0);
-    OUT_BATCH(ctx,
+    BEGIN_BATCH(batch, 3);
+    OUT_BATCH(batch, CMD_URB_FENCE | UF0_VFE_REALLOC | UF0_CS_REALLOC | 1);
+    OUT_BATCH(batch, 0);
+    OUT_BATCH(batch,
              (vfe_fence << UF2_VFE_FENCE_SHIFT) |      /* VFE_SIZE */
              (cs_fence << UF2_CS_FENCE_SHIFT));        /* CS_SIZE */
-    ADVANCE_BATCH(ctx);
+    ADVANCE_BATCH(batch);
 }

 static void
 i965_avc_hw_scoreboard_state_base_address(VADriverContextP ctx)
 {
-    BEGIN_BATCH(ctx, 8);
-    OUT_BATCH(ctx, CMD_STATE_BASE_ADDRESS | 6);
-    OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-    OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-    OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-    OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-    OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-    OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-    OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-    ADVANCE_BATCH(ctx);
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
+
+    BEGIN_BATCH(batch, 8);
+    OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 6);
+    OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+    OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+    OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+    OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+    OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+    OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+    OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+    ADVANCE_BATCH(batch);
 }

 static void
 i965_avc_hw_scoreboard_state_pointers(VADriverContextP ctx, struct i965_avc_hw_scoreboard_context *avc_hw_scoreboard_context)
 {
-    BEGIN_BATCH(ctx, 3);
-    OUT_BATCH(ctx, CMD_MEDIA_STATE_POINTERS | 1);
-    OUT_BATCH(ctx, 0);
-    OUT_RELOC(ctx, avc_hw_scoreboard_context->vfe_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
-    ADVANCE_BATCH(ctx);
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
+
+    BEGIN_BATCH(batch, 3);
+    OUT_BATCH(batch, CMD_MEDIA_STATE_POINTERS | 1);
+    OUT_BATCH(batch, 0);
+    OUT_RELOC(batch, avc_hw_scoreboard_context->vfe_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
+    ADVANCE_BATCH(batch);
 }

 static void
 i965_avc_hw_scoreboard_cs_urb_layout(VADriverContextP ctx, struct i965_avc_hw_scoreboard_context *avc_hw_scoreboard_context)
 {
-    BEGIN_BATCH(ctx, 2);
-    OUT_BATCH(ctx, CMD_CS_URB_STATE | 0);
-    OUT_BATCH(ctx,
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
+
+    BEGIN_BATCH(batch, 2);
+    OUT_BATCH(batch, CMD_CS_URB_STATE | 0);
+    OUT_BATCH(batch,
              ((avc_hw_scoreboard_context->urb.size_cs_entry - 1) << 4) |     /* URB Entry Allocation Size */
              (avc_hw_scoreboard_context->urb.num_cs_entries << 0));          /* Number of URB Entries */
-    ADVANCE_BATCH(ctx);
+    ADVANCE_BATCH(batch);
 }

 static void
 i965_avc_hw_scoreboard_constant_buffer(VADriverContextP ctx, struct i965_avc_hw_scoreboard_context *avc_hw_scoreboard_context)
 {
-    BEGIN_BATCH(ctx, 2);
-    OUT_BATCH(ctx, CMD_CONSTANT_BUFFER | (1 << 8) | (2 - 2));
-    OUT_RELOC(ctx, avc_hw_scoreboard_context->curbe.bo,
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
+
+    BEGIN_BATCH(batch, 2);
+    OUT_BATCH(batch, CMD_CONSTANT_BUFFER | (1 << 8) | (2 - 2));
+    OUT_RELOC(batch, avc_hw_scoreboard_context->curbe.bo,
               I915_GEM_DOMAIN_INSTRUCTION, 0,
               avc_hw_scoreboard_context->urb.size_cs_entry - 1);
-    ADVANCE_BATCH(ctx);
+    ADVANCE_BATCH(batch);
 }

 static void
 i965_avc_hw_scoreboard_objects(VADriverContextP ctx, struct i965_avc_hw_scoreboard_context *avc_hw_scoreboard_context)
 {
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
     int number_mb_cmds = 512;
     int starting_mb_number = avc_hw_scoreboard_context->inline_data.starting_mb_number;
     int i;

     for (i = 0; i < avc_hw_scoreboard_context->inline_data.num_mb_cmds / 512; i++) {
-        BEGIN_BATCH(ctx, 6);
-        OUT_BATCH(ctx, CMD_MEDIA_OBJECT | 4);
-        OUT_BATCH(ctx, 0);   /* interface descriptor offset: 0 */
-        OUT_BATCH(ctx, 0);   /* no indirect data */
-        OUT_BATCH(ctx, 0);
-        OUT_BATCH(ctx, ((number_mb_cmds << 16) |
+        BEGIN_BATCH(batch, 6);
+        OUT_BATCH(batch, CMD_MEDIA_OBJECT | 4);
+        OUT_BATCH(batch, 0);   /* interface descriptor offset: 0 */
+        OUT_BATCH(batch, 0);   /* no indirect data */
+        OUT_BATCH(batch, 0);
+        OUT_BATCH(batch, ((number_mb_cmds << 16) |
                           (starting_mb_number << 0)));
-        OUT_BATCH(ctx, avc_hw_scoreboard_context->inline_data.pic_width_in_mbs);
-        ADVANCE_BATCH(ctx);
+        OUT_BATCH(batch, avc_hw_scoreboard_context->inline_data.pic_width_in_mbs);
+        ADVANCE_BATCH(batch);

         starting_mb_number += 512;
     }
@@ -292,23 +312,26 @@ i965_avc_hw_scoreboard_objects(VADriverContextP ctx, struct i965_avc_hw_scoreboa
     number_mb_cmds = avc_hw_scoreboard_context->inline_data.num_mb_cmds % 512;

     if (number_mb_cmds) {
-        BEGIN_BATCH(ctx, 6);
-        OUT_BATCH(ctx, CMD_MEDIA_OBJECT | 4);
-        OUT_BATCH(ctx, 0);   /* interface descriptor offset: 0 */
-        OUT_BATCH(ctx, 0);   /* no indirect data */
-        OUT_BATCH(ctx, 0);
-        OUT_BATCH(ctx, ((number_mb_cmds << 16) |
+        BEGIN_BATCH(batch, 6);
+        OUT_BATCH(batch, CMD_MEDIA_OBJECT | 4);
+        OUT_BATCH(batch, 0);   /* interface descriptor offset: 0 */
+        OUT_BATCH(batch, 0);   /* no indirect data */
+        OUT_BATCH(batch, 0);
+        OUT_BATCH(batch, ((number_mb_cmds << 16) |
                           (starting_mb_number << 0)));
-        OUT_BATCH(ctx, avc_hw_scoreboard_context->inline_data.pic_width_in_mbs);
-        ADVANCE_BATCH(ctx);
+        OUT_BATCH(batch, avc_hw_scoreboard_context->inline_data.pic_width_in_mbs);
+        ADVANCE_BATCH(batch);
     }
 }

 static void
 i965_avc_hw_scoreboard_pipeline_setup(VADriverContextP ctx, struct i965_avc_hw_scoreboard_context *avc_hw_scoreboard_context)
 {
-    intel_batchbuffer_start_atomic(ctx, 0x1000);
-    intel_batchbuffer_emit_mi_flush(ctx);
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
+
+    intel_batchbuffer_start_atomic(batch, 0x1000);
+    intel_batchbuffer_emit_mi_flush(batch);
+
     i965_avc_hw_scoreboard_pipeline_select(ctx);
     i965_avc_hw_scoreboard_state_base_address(ctx);
     i965_avc_hw_scoreboard_state_pointers(ctx, avc_hw_scoreboard_context);
@@ -316,7 +339,7 @@ i965_avc_hw_scoreboard_pipeline_setup(VADriverContextP ctx, struct i965_avc_hw_s
     i965_avc_hw_scoreboard_cs_urb_layout(ctx, avc_hw_scoreboard_context);
     i965_avc_hw_scoreboard_constant_buffer(ctx, avc_hw_scoreboard_context);
     i965_avc_hw_scoreboard_objects(ctx, avc_hw_scoreboard_context);
-    intel_batchbuffer_end_atomic(ctx);
+    intel_batchbuffer_end_atomic(batch);
 }

 void
......
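
The objects function above splits the macroblock workload into MEDIA_OBJECT commands of at most 512 macroblocks each: a loop for the full chunks, then one command for the remainder. A standalone illustration of that chunking and of the inline-dword packing ((count << 16) | start), with a made-up workload size:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical workload; values chosen for illustration only. */
    int num_mb_cmds = 1300;
    int starting_mb_number = 0;
    int i, emitted = 0;

    for (i = 0; i < num_mb_cmds / 512; i++) {
        unsigned int dw = (512u << 16) | (unsigned int)starting_mb_number;
        printf("MEDIA_OBJECT inline DW 0x%08x: 512 MBs from %d\n",
               dw, starting_mb_number);
        starting_mb_number += 512;
        emitted += 512;
    }

    if (num_mb_cmds % 512) {
        int rem = num_mb_cmds % 512;
        unsigned int dw = ((unsigned int)rem << 16) | (unsigned int)starting_mb_number;
        printf("MEDIA_OBJECT inline DW 0x%08x: %d MBs from %d\n",
               dw, rem, starting_mb_number);
        emitted += rem;
    }

    assert(emitted == num_mb_cmds);   /* every macroblock covered exactly once */
    return 0;
}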
@@ -398,14 +398,19 @@ i965_avc_ildb_states_setup(VADriverContextP ctx,
 static void
 i965_avc_ildb_pipeline_select(VADriverContextP ctx)
 {
-    BEGIN_BATCH(ctx, 1);
-    OUT_BATCH(ctx, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
-    ADVANCE_BATCH(ctx);
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
+
+    BEGIN_BATCH(batch, 1);
+    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
+    ADVANCE_BATCH(batch);
 }

 static void
 i965_avc_ildb_urb_layout(VADriverContextP ctx, struct i965_h264_context *i965_h264_context)
 {
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
     struct i965_driver_data *i965 = i965_driver_data(ctx);
     struct i965_avc_ildb_context *avc_ildb_context = &i965_h264_context->avc_ildb_context;
@@ -414,119 +419,132 @@ i965_avc_ildb_urb_layout(VADriverContextP ctx, struct i965_h264_context *i965_h2
     vfe_fence = avc_ildb_context->urb.cs_start;
     cs_fence = URB_SIZE((&i965->intel));

-    BEGIN_BATCH(ctx, 3);
-    OUT_BATCH(ctx, CMD_URB_FENCE | UF0_VFE_REALLOC | UF0_CS_REALLOC | 1);
-    OUT_BATCH(ctx, 0);
-    OUT_BATCH(ctx,
+    BEGIN_BATCH(batch, 3);
+    OUT_BATCH(batch, CMD_URB_FENCE | UF0_VFE_REALLOC | UF0_CS_REALLOC | 1);
+    OUT_BATCH(batch, 0);
+    OUT_BATCH(batch,
              (vfe_fence << UF2_VFE_FENCE_SHIFT) |      /* VFE_SIZE */
              (cs_fence << UF2_CS_FENCE_SHIFT));        /* CS_SIZE */
-    ADVANCE_BATCH(ctx);
+    ADVANCE_BATCH(batch);
 }

 static void
 i965_avc_ildb_state_base_address(VADriverContextP ctx)
 {
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
     struct i965_driver_data *i965 = i965_driver_data(ctx);

     if (IS_IRONLAKE(i965->intel.device_id)) {
-        BEGIN_BATCH(ctx, 8);
-        OUT_BATCH(ctx, CMD_STATE_BASE_ADDRESS | 6);
-        OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-        OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-        OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-        OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-        OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-        OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-        OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-        ADVANCE_BATCH(ctx);
+        BEGIN_BATCH(batch, 8);
+        OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 6);
+        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+        ADVANCE_BATCH(batch);
     } else {
-        BEGIN_BATCH(ctx, 6);
-        OUT_BATCH(ctx, CMD_STATE_BASE_ADDRESS | 4);
-        OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-        OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-        OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-        OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-        OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-        ADVANCE_BATCH(ctx);
+        BEGIN_BATCH(batch, 6);
+        OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 4);
+        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+        ADVANCE_BATCH(batch);
     }
 }

 static void
 i965_avc_ildb_state_pointers(VADriverContextP ctx, struct i965_h264_context *i965_h264_context)
 {
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
     struct i965_avc_ildb_context *avc_ildb_context = &i965_h264_context->avc_ildb_context;

-    BEGIN_BATCH(ctx, 3);
-    OUT_BATCH(ctx, CMD_MEDIA_STATE_POINTERS | 1);
-    OUT_BATCH(ctx, 0);
-    OUT_RELOC(ctx, avc_ildb_context->vfe_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
-    ADVANCE_BATCH(ctx);
+    BEGIN_BATCH(batch, 3);
+    OUT_BATCH(batch, CMD_MEDIA_STATE_POINTERS | 1);
+    OUT_BATCH(batch, 0);
+    OUT_RELOC(batch, avc_ildb_context->vfe_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
+    ADVANCE_BATCH(batch);
 }

 static void
 i965_avc_ildb_cs_urb_layout(VADriverContextP ctx, struct i965_h264_context *i965_h264_context)
 {
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
     struct i965_avc_ildb_context *avc_ildb_context = &i965_h264_context->avc_ildb_context;

-    BEGIN_BATCH(ctx, 2);
-    OUT_BATCH(ctx, CMD_CS_URB_STATE | 0);
-    OUT_BATCH(ctx,
+    BEGIN_BATCH(batch, 2);
+    OUT_BATCH(batch, CMD_CS_URB_STATE | 0);
+    OUT_BATCH(batch,
              ((avc_ildb_context->urb.size_cs_entry - 1) << 4) |     /* URB Entry Allocation Size */
              (avc_ildb_context->urb.num_cs_entries << 0));          /* Number of URB Entries */
-    ADVANCE_BATCH(ctx);
+    ADVANCE_BATCH(batch);
 }

 static void
 i965_avc_ildb_constant_buffer(VADriverContextP ctx, struct i965_h264_context *i965_h264_context)
 {
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
     struct i965_avc_ildb_context *avc_ildb_context = &i965_h264_context->avc_ildb_context;

-    BEGIN_BATCH(ctx, 2);
-    OUT_BATCH(ctx, CMD_CONSTANT_BUFFER | (1 << 8) | (2 - 2));
-    OUT_RELOC(ctx, avc_ildb_context->curbe.bo,
+    BEGIN_BATCH(batch, 2);
+    OUT_BATCH(batch, CMD_CONSTANT_BUFFER | (1 << 8) | (2 - 2));
+    OUT_RELOC(batch, avc_ildb_context->curbe.bo,
              I915_GEM_DOMAIN_INSTRUCTION, 0,
              avc_ildb_context->urb.size_cs_entry - 1);
-    ADVANCE_BATCH(ctx);
+    ADVANCE_BATCH(batch);
 }

 static void
 i965_avc_ildb_objects(VADriverContextP ctx, struct i965_h264_context *i965_h264_context)
 {
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
     struct i965_avc_ildb_context *avc_ildb_context = &i965_h264_context->avc_ildb_context;

-    BEGIN_BATCH(ctx, 6);
-    OUT_BATCH(ctx, CMD_MEDIA_OBJECT | 4);
+    BEGIN_BATCH(batch, 6);
+    OUT_BATCH(batch, CMD_MEDIA_OBJECT | 4);

     switch (avc_ildb_context->picture_type) {
     case PICTURE_FRAME:
-        OUT_BATCH(ctx, AVC_ILDB_ROOT_Y_ILDB_FRAME);
+        OUT_BATCH(batch, AVC_ILDB_ROOT_Y_ILDB_FRAME);
         break;

     case PICTURE_FIELD:
-        OUT_BATCH(ctx, AVC_ILDB_ROOT_Y_ILDB_FIELD);
+        OUT_BATCH(batch, AVC_ILDB_ROOT_Y_ILDB_FIELD);
         break;

     case PICTURE_MBAFF:
-        OUT_BATCH(ctx, AVC_ILDB_ROOT_Y_ILDB_MBAFF);
+        OUT_BATCH(batch, AVC_ILDB_ROOT_Y_ILDB_MBAFF);
         break;

     default:
         assert(0);
-        OUT_BATCH(ctx, 0);
+        OUT_BATCH(batch, 0);
         break;
     }

-    OUT_BATCH(ctx, 0); /* no indirect data */
-    OUT_BATCH(ctx, 0);
-    OUT_BATCH(ctx, 0);
-    OUT_BATCH(ctx, 0);
-    ADVANCE_BATCH(ctx);
+    OUT_BATCH(batch, 0); /* no indirect data */
+    OUT_BATCH(batch, 0);
+    OUT_BATCH(batch, 0);
+    OUT_BATCH(batch, 0);
+    ADVANCE_BATCH(batch);
 }

 static void
 i965_avc_ildb_pipeline_setup(VADriverContextP ctx, struct i965_h264_context *i965_h264_context)
 {
-    intel_batchbuffer_emit_mi_flush(ctx);
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
+
+    intel_batchbuffer_emit_mi_flush(batch);
+
     i965_avc_ildb_pipeline_select(ctx);
     i965_avc_ildb_state_base_address(ctx);
     i965_avc_ildb_state_pointers(ctx, i965_h264_context);
......
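
A reading aid for the command headers throughout these hunks: the low bits of the first dword hold the packet length as total dwords minus 2, which is why the 8-dword CMD_STATE_BASE_ADDRESS on Ironlake carries `| 6`, the 6-dword form carries `| 4`, the 3-dword CMD_MEDIA_STATE_POINTERS carries `| 1`, and the 2-dword CMD_CS_URB_STATE carries `| 0`. A self-contained check of that arithmetic:

#include <assert.h>

/* DWord-count field = packet length in dwords minus 2, the usual GEN
 * command-streamer convention these hunks rely on. */
static unsigned int len_field(unsigned int total_dwords)
{
    return total_dwords - 2;
}

int main(void)
{
    assert(len_field(8) == 6);  /* BEGIN_BATCH(batch, 8) ... | 6 (Ironlake) */
    assert(len_field(6) == 4);  /* BEGIN_BATCH(batch, 6) ... | 4 (older)    */
    assert(len_field(3) == 1);  /* CMD_MEDIA_STATE_POINTERS | 1             */
    assert(len_field(2) == 0);  /* CMD_CS_URB_STATE | 0                     */
    return 0;
}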
@@ -57,8 +57,6 @@
 /* DW1 */
 # define CMD_CLEAR_PARAMS_DEPTH_CLEAR_VALID     (1 << 15)

-#define CMD_PIPE_CONTROL        CMD(3, 2, 0)
-
 /* for GEN6+ */
 #define GEN6_3DSTATE_SAMPLER_STATE_POINTERS     CMD(3, 0, 0x02)
 # define GEN6_3DSTATE_SAMPLER_STATE_MODIFY_PS   (1 << 12)
......
@@ -1550,13 +1550,15 @@ i965_QuerySurfaceStatus(VADriverContextP ctx,
                         VASurfaceID render_target,
                         VASurfaceStatus *status)        /* out */
 {
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
     struct i965_driver_data *i965 = i965_driver_data(ctx);
     struct object_surface *obj_surface = SURFACE(render_target);

     assert(obj_surface);

     /* Commit pending operations to the HW */
-    intel_batchbuffer_flush(ctx);
+    intel_batchbuffer_flush(batch);

     /* Usually GEM will handle synchronization with the graphics hardware */
 #if 0
@@ -2134,6 +2136,8 @@ i965_GetImage(VADriverContextP ctx,
               unsigned int height,
               VAImageID image)
 {
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
     struct i965_driver_data *i965 = i965_driver_data(ctx);
     struct i965_render_state *render_state = &i965->render_state;
@@ -2155,7 +2159,7 @@ i965_GetImage(VADriverContextP ctx,
         return VA_STATUS_ERROR_INVALID_PARAMETER;

     /* Commit pending operations to the HW */
-    intel_batchbuffer_flush(ctx);
+    intel_batchbuffer_flush(batch);

     VAStatus va_status;
     void *image_data = NULL;
......
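
Both vaQuerySurfaceStatus and vaGetImage now flush the explicit batchbuffer rather than going through the context, but the intent is unchanged: commands still sitting in the CPU-side batch must reach the kernel before the surface is inspected, or the reported status and image contents would ignore queued work. A stand-in sketch of that ordering (stub types, not the driver's API):

#include <stdbool.h>
#include <stdio.h>

/* Stub batchbuffer: 'pending' models commands not yet submitted. */
struct batchbuffer { bool pending; };

static void batch_flush(struct batchbuffer *b)
{
    if (b->pending) {
        /* the real driver submits via the execbuffer ioctl here */
        b->pending = false;
    }
}

static const char *surface_status(struct batchbuffer *b)
{
    batch_flush(b);                       /* commit pending operations first */
    return b->pending ? "rendering" : "ready";
}

int main(void)
{
    struct batchbuffer b = { .pending = true };
    printf("%s\n", surface_status(&b));   /* "ready" only because we flushed */
    return 0;
}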
@@ -46,95 +46,109 @@
 static void
 i965_media_pipeline_select(VADriverContextP ctx)
 {
-    BEGIN_BATCH(ctx, 1);
-    OUT_BATCH(ctx, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
-    ADVANCE_BATCH(ctx);
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
+
+    BEGIN_BATCH(batch, 1);
+    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
+    ADVANCE_BATCH(batch);
 }

 static void
 i965_media_urb_layout(VADriverContextP ctx, struct i965_media_context *media_context)
 {
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
     struct i965_driver_data *i965 = i965_driver_data(ctx);
     unsigned int vfe_fence, cs_fence;

     vfe_fence = media_context->urb.cs_start;
     cs_fence = URB_SIZE((&i965->intel));

-    BEGIN_BATCH(ctx, 3);
-    OUT_BATCH(ctx, CMD_URB_FENCE | UF0_VFE_REALLOC | UF0_CS_REALLOC | 1);
-    OUT_BATCH(ctx, 0);
-    OUT_BATCH(ctx,
+    BEGIN_BATCH(batch, 3);
+    OUT_BATCH(batch, CMD_URB_FENCE | UF0_VFE_REALLOC | UF0_CS_REALLOC | 1);
+    OUT_BATCH(batch, 0);
+    OUT_BATCH(batch,
              (vfe_fence << UF2_VFE_FENCE_SHIFT) |      /* VFE_SIZE */
              (cs_fence << UF2_CS_FENCE_SHIFT));        /* CS_SIZE */
-    ADVANCE_BATCH(ctx);
+    ADVANCE_BATCH(batch);
 }

 static void
 i965_media_state_base_address(VADriverContextP ctx, struct i965_media_context *media_context)
 {
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
     struct i965_driver_data *i965 = i965_driver_data(ctx);

     if (IS_IRONLAKE(i965->intel.device_id)) {
-        BEGIN_BATCH(ctx, 8);
-        OUT_BATCH(ctx, CMD_STATE_BASE_ADDRESS | 6);
-        OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-        OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
+        BEGIN_BATCH(batch, 8);
+        OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 6);
+        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);

         if (media_context->indirect_object.bo) {
-            OUT_RELOC(ctx, media_context->indirect_object.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
+            OUT_RELOC(batch, media_context->indirect_object.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
                       media_context->indirect_object.offset | BASE_ADDRESS_MODIFY);
         } else {
-            OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
+            OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
         }

-        OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-        OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-        OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-        OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-        ADVANCE_BATCH(ctx);
+        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+        ADVANCE_BATCH(batch);
     } else {
-        BEGIN_BATCH(ctx, 6);
-        OUT_BATCH(ctx, CMD_STATE_BASE_ADDRESS | 4);
-        OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-        OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
+        BEGIN_BATCH(batch, 6);
+        OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 4);
+        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);

         if (media_context->indirect_object.bo) {
-            OUT_RELOC(ctx, media_context->indirect_object.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
+            OUT_RELOC(batch, media_context->indirect_object.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
                       media_context->indirect_object.offset | BASE_ADDRESS_MODIFY);
         } else {
-            OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
+            OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
         }

-        OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-        OUT_BATCH(ctx, 0 | BASE_ADDRESS_MODIFY);
-        ADVANCE_BATCH(ctx);
+        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+        OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
+        ADVANCE_BATCH(batch);
     }
 }

 static void
 i965_media_state_pointers(VADriverContextP ctx, struct i965_media_context *media_context)
 {
-    BEGIN_BATCH(ctx, 3);
-    OUT_BATCH(ctx, CMD_MEDIA_STATE_POINTERS | 1);
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
+
+    BEGIN_BATCH(batch, 3);
+    OUT_BATCH(batch, CMD_MEDIA_STATE_POINTERS | 1);

     if (media_context->extended_state.enabled)
-        OUT_RELOC(ctx, media_context->extended_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
+        OUT_RELOC(batch, media_context->extended_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
     else
-        OUT_BATCH(ctx, 0);
+        OUT_BATCH(batch, 0);

-    OUT_RELOC(ctx, media_context->vfe_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
-    ADVANCE_BATCH(ctx);
+    OUT_RELOC(batch, media_context->vfe_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
+    ADVANCE_BATCH(batch);
 }

 static void
 i965_media_cs_urb_layout(VADriverContextP ctx, struct i965_media_context *media_context)
 {
-    BEGIN_BATCH(ctx, 2);
-    OUT_BATCH(ctx, CMD_CS_URB_STATE | 0);
-    OUT_BATCH(ctx,
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
+
+    BEGIN_BATCH(batch, 2);
+    OUT_BATCH(batch, CMD_CS_URB_STATE | 0);
+    OUT_BATCH(batch,
              ((media_context->urb.size_cs_entry - 1) << 4) |     /* URB Entry Allocation Size */
              (media_context->urb.num_cs_entries << 0));          /* Number of URB Entries */
-    ADVANCE_BATCH(ctx);
+    ADVANCE_BATCH(batch);
 }

 static void
@@ -148,26 +162,32 @@ i965_media_pipeline_state(VADriverContextP ctx, struct i965_media_context *media
 static void
 i965_media_constant_buffer(VADriverContextP ctx, struct decode_state *decode_state, struct i965_media_context *media_context)
 {
-    BEGIN_BATCH(ctx, 2);
-    OUT_BATCH(ctx, CMD_CONSTANT_BUFFER | (1 << 8) | (2 - 2));
-    OUT_RELOC(ctx, media_context->curbe.bo,
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
+
+    BEGIN_BATCH(batch, 2);
+    OUT_BATCH(batch, CMD_CONSTANT_BUFFER | (1 << 8) | (2 - 2));
+    OUT_RELOC(batch, media_context->curbe.bo,
              I915_GEM_DOMAIN_INSTRUCTION, 0,
              media_context->urb.size_cs_entry - 1);
-    ADVANCE_BATCH(ctx);
+    ADVANCE_BATCH(batch);
 }

 static void
 i965_media_depth_buffer(VADriverContextP ctx)
 {
-    BEGIN_BATCH(ctx, 6);
-    OUT_BATCH(ctx, CMD_DEPTH_BUFFER | 4);
-    OUT_BATCH(ctx, (I965_DEPTHFORMAT_D32_FLOAT << 18) |
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
+
+    BEGIN_BATCH(batch, 6);
+    OUT_BATCH(batch, CMD_DEPTH_BUFFER | 4);
+    OUT_BATCH(batch, (I965_DEPTHFORMAT_D32_FLOAT << 18) |
                      (I965_SURFACE_NULL << 29));
-    OUT_BATCH(ctx, 0);
-    OUT_BATCH(ctx, 0);
-    OUT_BATCH(ctx, 0);
-    OUT_BATCH(ctx, 0);
-    ADVANCE_BATCH(ctx);
+    OUT_BATCH(batch, 0);
+    OUT_BATCH(batch, 0);
+    OUT_BATCH(batch, 0);
+    OUT_BATCH(batch, 0);
+    ADVANCE_BATCH(batch);
 }

 static void
@@ -175,8 +195,11 @@ i965_media_pipeline_setup(VADriverContextP ctx,
                           struct decode_state *decode_state,
                           struct i965_media_context *media_context)
 {
-    intel_batchbuffer_start_atomic(ctx, 0x1000);
-    intel_batchbuffer_emit_mi_flush(ctx);                               /* step 1 */
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
+
+    intel_batchbuffer_start_atomic(batch, 0x1000);
+    intel_batchbuffer_emit_mi_flush(batch);                             /* step 1 */
     i965_media_depth_buffer(ctx);
     i965_media_pipeline_select(ctx);                                    /* step 2 */
     i965_media_urb_layout(ctx, media_context);                          /* step 3 */
@@ -184,7 +207,7 @@ i965_media_pipeline_setup(VADriverContextP ctx,
     i965_media_constant_buffer(ctx, decode_state, media_context);       /* step 5 */
     assert(media_context->media_objects);
     media_context->media_objects(ctx, decode_state, media_context);     /* step 6 */
-    intel_batchbuffer_end_atomic(ctx);
+    intel_batchbuffer_end_atomic(batch);
 }

 static void
......
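
i965_media_pipeline_setup brackets its six emission steps between intel_batchbuffer_start_atomic and intel_batchbuffer_end_atomic; the atomic pair reserves space (0x1000 bytes here) so the whole pipeline programming lands in one batch without an implicit flush splitting it. A stub sketch of the bracket discipline, with invented fields for illustration:

#include <assert.h>

/* Stub of the atomic-section discipline: start reserves space and
 * forbids implicit flushes until the matching end. Fields invented. */
struct batchbuffer { int atomic; unsigned int reserved; };

static void start_atomic(struct batchbuffer *b, unsigned int bytes)
{
    assert(!b->atomic);       /* no nesting */
    b->atomic = 1;
    b->reserved = bytes;      /* real code flushes first if space is short */
}

static void end_atomic(struct batchbuffer *b)
{
    assert(b->atomic);
    b->atomic = 0;
}

int main(void)
{
    struct batchbuffer b = { 0, 0 };

    start_atomic(&b, 0x1000);
    /* ... emit pipeline select, URB layout, state, objects ... */
    end_atomic(&b);
    return 0;
}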
@@ -710,6 +710,8 @@ i965_media_h264_objects(VADriverContextP ctx,
                         struct decode_state *decode_state,
                         struct i965_media_context *media_context)
 {
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
     struct i965_h264_context *i965_h264_context;
     unsigned int *object_command;
@@ -725,19 +727,19 @@ i965_media_h264_objects(VADriverContextP ctx,
     *object_command = MI_BATCH_BUFFER_END;
     dri_bo_unmap(i965_h264_context->avc_it_command_mb_info.bo);

-    BEGIN_BATCH(ctx, 2);
-    OUT_BATCH(ctx, MI_BATCH_BUFFER_START | (2 << 6));
-    OUT_RELOC(ctx, i965_h264_context->avc_it_command_mb_info.bo,
+    BEGIN_BATCH(batch, 2);
+    OUT_BATCH(batch, MI_BATCH_BUFFER_START | (2 << 6));
+    OUT_RELOC(batch, i965_h264_context->avc_it_command_mb_info.bo,
              I915_GEM_DOMAIN_COMMAND, 0,
              0);
-    ADVANCE_BATCH(ctx);
+    ADVANCE_BATCH(batch);

     /* Have to execute the batch buffer here because MI_BATCH_BUFFER_END
      * will cause control to pass back to the ring buffer
      */
-    intel_batchbuffer_end_atomic(ctx);
-    intel_batchbuffer_flush(ctx);
-    intel_batchbuffer_start_atomic(ctx, 0x1000);
+    intel_batchbuffer_end_atomic(batch);
+    intel_batchbuffer_flush(batch);
+    intel_batchbuffer_start_atomic(batch, 0x1000);
     i965_avc_ildb(ctx, decode_state, i965_h264_context);
 }
......
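
The H.264 path is the one place the atomic bracket has to be broken open: the macroblock command buffer is chained with MI_BATCH_BUFFER_START and already ends in MI_BATCH_BUFFER_END, which hands control back to the ring, so the code closes the atomic section, flushes, and reopens a fresh one before the deblocking (ILDB) pass. The control flow, reduced to a runnable stub with invented function names:

#include <stdio.h>

/* Invented stubs that mirror the driver's sequence above. */
static void end_atomic(void)   { puts("end atomic section"); }
static void flush(void)        { puts("flush: kernel executes the batch; the "
                                      "chained MI_BATCH_BUFFER_END returns "
                                      "control to the ring"); }
static void start_atomic(void) { puts("start new atomic section"); }

int main(void)
{
    puts("emit MI_BATCH_BUFFER_START -> avc_it_command_mb_info.bo");
    end_atomic();    /* close the bracket opened in pipeline_setup        */
    flush();         /* must execute here, before the ILDB pass           */
    start_atomic();  /* reopen so i965_avc_ildb() emits into a fresh batch */
    return 0;
}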
@@ -882,6 +882,8 @@ i965_media_mpeg2_objects(VADriverContextP ctx,
                          struct decode_state *decode_state,
                          struct i965_media_context *media_context)
 {
+    struct intel_driver_data *intel = intel_driver_data(ctx);
+    struct intel_batchbuffer *batch = intel->batch;
     int i, j;
     VASliceParameterBufferMPEG2 *slice_param;
@@ -893,20 +895,20 @@ i965_media_mpeg2_objects(VADriverContextP ctx,
         for (i = 0; i < decode_state->slice_params[j]->num_elements; i++) {
             assert(slice_param->slice_data_flag == VA_SLICE_DATA_FLAG_ALL);

-            BEGIN_BATCH(ctx, 6);
-            OUT_BATCH(ctx, CMD_MEDIA_OBJECT | 4);
-            OUT_BATCH(ctx, 0);
-            OUT_BATCH(ctx, slice_param->slice_data_size - (slice_param->macroblock_offset >> 3));
-            OUT_RELOC(ctx, decode_state->slice_datas[j]->bo,
+            BEGIN_BATCH(batch, 6);
+            OUT_BATCH(batch, CMD_MEDIA_OBJECT | 4);
+            OUT_BATCH(batch, 0);
+            OUT_BATCH(batch, slice_param->slice_data_size - (slice_param->macroblock_offset >> 3));
+            OUT_RELOC(batch, decode_state->slice_datas[j]->bo,
                      I915_GEM_DOMAIN_SAMPLER, 0,
                      slice_param->slice_data_offset + (slice_param->macroblock_offset >> 3));
-            OUT_BATCH(ctx,
+            OUT_BATCH(batch,
                      ((slice_param->slice_horizontal_position << 24) |
                       (slice_param->slice_vertical_position << 16) |
                       (127 << 8) |
                       (slice_param->macroblock_offset & 0x7)));
-            OUT_BATCH(ctx, slice_param->quantiser_scale_code << 24);
-            ADVANCE_BATCH(ctx);
+            OUT_BATCH(batch, slice_param->quantiser_scale_code << 24);
+            ADVANCE_BATCH(batch);

             slice_param++;
         }
     }
......
@@ -81,8 +81,7 @@ intel_driver_init(VADriverContextP ctx)
     intel->has_blt = has_blt;

     intel_memman_init(intel);
-    intel_batchbuffer_init(intel);
+    intel->batch = intel_batchbuffer_new(intel, I915_EXEC_RENDER);

     return True;
 }
@@ -92,8 +91,7 @@ intel_driver_terminate(VADriverContextP ctx)
     struct intel_driver_data *intel = intel_driver_data(ctx);

     intel_memman_terminate(intel);
-    intel_batchbuffer_terminate(intel);
+    intel_batchbuffer_free(intel->batch);

     pthread_mutex_destroy(&intel->ctxmutex);

     return True;
......
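
This hunk is the heart of the cleanup: the old module-style intel_batchbuffer_init/intel_batchbuffer_terminate calls managed the batchbuffers behind the driver's back, while the new code owns an explicit object, created for the render ring and freed at teardown. From these call sites the new interface plausibly has prototypes like the following; they are inferred, not copied from intel_batchbuffer.h, whose diff is not shown here:

struct intel_driver_data;
struct intel_batchbuffer;

/* Inferred prototypes (assumption): a constructor taking the driver data
 * and an execution-ring flag such as I915_EXEC_RENDER, plus a matching
 * destructor. */
struct intel_batchbuffer *intel_batchbuffer_new(struct intel_driver_data *intel,
                                                int flag);
void intel_batchbuffer_free(struct intel_batchbuffer *batch);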
@@ -22,6 +22,7 @@
 #define CMD_MI                                  (0x0 << 29)
 #define CMD_2D                                  (0x2 << 29)
+#define CMD_3D                                  (0x3 << 29)

 #define MI_NOOP                                 (CMD_MI | 0)
@@ -43,6 +44,21 @@
 #define BR13_565                        (0x1 << 24)
 #define BR13_8888                       (0x3 << 24)

+#define CMD_PIPE_CONTROL                    (CMD_3D | (3 << 27) | (2 << 24) | (0 << 16))
+#define CMD_PIPE_CONTROL_NOWRITE            (0 << 14)
+#define CMD_PIPE_CONTROL_WRITE_QWORD        (1 << 14)
+#define CMD_PIPE_CONTROL_WRITE_DEPTH        (2 << 14)
+#define CMD_PIPE_CONTROL_WRITE_TIME         (3 << 14)
+#define CMD_PIPE_CONTROL_DEPTH_STALL        (1 << 13)
+#define CMD_PIPE_CONTROL_WC_FLUSH           (1 << 12)
+#define CMD_PIPE_CONTROL_IS_FLUSH           (1 << 11)
+#define CMD_PIPE_CONTROL_TC_FLUSH           (1 << 10)
+#define CMD_PIPE_CONTROL_NOTIFY_ENABLE      (1 << 8)
+#define CMD_PIPE_CONTROL_GLOBAL_GTT         (1 << 2)
+#define CMD_PIPE_CONTROL_LOCAL_PGTT         (0 << 2)
+#define CMD_PIPE_CONTROL_DEPTH_CACHE_FLUSH  (1 << 0)
+
 struct intel_batchbuffer;

 #define ALIGN(i, n) (((i) + (n) - 1) & ~((n) - 1))
@@ -90,7 +106,6 @@ struct intel_driver_data
     int locked;

     struct intel_batchbuffer *batch;
-    struct intel_batchbuffer *batch_bcs;
     dri_bufmgr *bufmgr;

     unsigned int has_exec2  : 1; /* Flag: has execbuffer2? */
......
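
The PIPE_CONTROL macros that move into intel_driver.h pack independent fields into shared bit positions: a post-sync write mode in bits 15:14 (NOWRITE/QWORD/DEPTH/TIME), single-bit flush and stall controls below that, and the GTT-space select at bit 2. A standalone example OR-ing a plausible flush combination together; the macro values are copied from the hunk, while the chosen combination and its placement in the packet are illustrative only:

#include <stdio.h>

/* Values copied from the intel_driver.h hunk above. */
#define CMD_3D                            (0x3 << 29)
#define CMD_PIPE_CONTROL                  (CMD_3D | (3 << 27) | (2 << 24) | (0 << 16))
#define CMD_PIPE_CONTROL_WRITE_QWORD      (1 << 14)
#define CMD_PIPE_CONTROL_WC_FLUSH         (1 << 12)
#define CMD_PIPE_CONTROL_TC_FLUSH         (1 << 10)
#define CMD_PIPE_CONTROL_GLOBAL_GTT       (1 << 2)

int main(void)
{
    /* Example combination only; which dword of the packet each field
     * occupies varies by hardware generation, so treat this as flag
     * arithmetic rather than a complete packet. */
    unsigned int flags = CMD_PIPE_CONTROL |
                         CMD_PIPE_CONTROL_WRITE_QWORD |
                         CMD_PIPE_CONTROL_WC_FLUSH |
                         CMD_PIPE_CONTROL_TC_FLUSH |
                         CMD_PIPE_CONTROL_GLOBAL_GTT;

    printf("PIPE_CONTROL flags: 0x%08x\n", flags);
    return 0;
}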