Commit 3452cb67 authored by Xiang, Haihao

i965_drv_video: move batchbuffer to context

Signed-off-by: Xiang, Haihao <haihao.xiang@intel.com>
parent d94adb34
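In short, this commit stops routing all GPU commands through the single batchbuffer owned by intel_driver_data: each codec hw_context (H.264 decode, media decode, gen6 encode) now owns its own intel_batchbuffer, while the driver keeps a separate batch of its own (i965->batch) for rendering and post-processing. The sketch below is assembled from the hunks that follow; the emit_pipeline_select_old/new names are illustrative only and do not exist in the driver.

/* Before: every helper reached for the one driver-wide batchbuffer. */
static void emit_pipeline_select_old(VADriverContextP ctx)
{
    struct intel_driver_data *intel = intel_driver_data(ctx);
    struct intel_batchbuffer *batch = intel->batch;   /* shared by decode, encode and render */

    BEGIN_BATCH(batch, 1);
    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
    ADVANCE_BATCH(batch);
}

/* After: the caller passes its codec context and the helper emits into
 * that context's own batchbuffer. */
static void emit_pipeline_select_new(VADriverContextP ctx,
                                     struct i965_h264_context *i965_h264_context)
{
    struct intel_batchbuffer *batch = i965_h264_context->batch;

    BEGIN_BATCH(batch, 1);
    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
    ADVANCE_BATCH(batch);
}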
@@ -434,20 +434,18 @@ static VAStatus gen6_vme_vme_state_setup(VADriverContextP ctx,
return VA_STATUS_SUCCESS;
}
static void gen6_vme_pipeline_select(VADriverContextP ctx)
static void gen6_vme_pipeline_select(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
BEGIN_BATCH(batch, 1);
OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
ADVANCE_BATCH(batch);
}
static void gen6_vme_state_base_address(VADriverContextP ctx)
static void gen6_vme_state_base_address(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
BEGIN_BATCH(batch, 10);
@@ -474,8 +472,7 @@ static void gen6_vme_state_base_address(VADriverContextP ctx)
static void gen6_vme_vfe_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
struct gen6_vme_context *vme_context = &gen6_encoder_context->vme_context;
BEGIN_BATCH(batch, 8);
@@ -498,8 +495,7 @@ static void gen6_vme_vfe_state(VADriverContextP ctx, struct gen6_encoder_context
static void gen6_vme_curbe_load(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
struct gen6_vme_context *vme_context = &gen6_encoder_context->vme_context;
BEGIN_BATCH(batch, 4);
@@ -515,8 +511,7 @@ static void gen6_vme_curbe_load(VADriverContextP ctx, struct gen6_encoder_contex
static void gen6_vme_idrt(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
struct gen6_vme_context *vme_context = &gen6_encoder_context->vme_context;
BEGIN_BATCH(batch, 4);
@@ -532,11 +527,11 @@ static void gen6_vme_idrt(VADriverContextP ctx, struct gen6_encoder_context *gen
static int gen6_vme_media_object(VADriverContextP ctx,
struct encode_state *encode_state,
int mb_x, int mb_y,
int kernel)
int kernel,
struct gen6_encoder_context *gen6_encoder_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
struct object_surface *obj_surface = SURFACE(encode_state->current_render_target);
int mb_width = ALIGN(obj_surface->orig_width, 16) / 16;
int len_in_dowrds = 6 + 1;
@@ -617,8 +612,7 @@ static void gen6_vme_pipeline_programing(VADriverContextP ctx,
struct encode_state *encode_state,
struct gen6_encoder_context *gen6_encoder_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
VAEncSliceParameterBuffer *pSliceParameter = (VAEncSliceParameterBuffer *)encode_state->slice_params[0]->buffer;
VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param->buffer;
int is_intra = pSliceParameter->slice_flags.bits.is_intra;
@@ -642,10 +636,10 @@ static void gen6_vme_pipeline_programing(VADriverContextP ctx,
ADVANCE_BATCH(batch);
/*Step2: State command PIPELINE_SELECT*/
gen6_vme_pipeline_select(ctx);
gen6_vme_pipeline_select(ctx, gen6_encoder_context);
/*Step3: State commands configuring pipeline states*/
gen6_vme_state_base_address(ctx);
gen6_vme_state_base_address(ctx, gen6_encoder_context);
gen6_vme_vfe_state(ctx, gen6_encoder_context);
gen6_vme_curbe_load(ctx, gen6_encoder_context);
gen6_vme_idrt(ctx, gen6_encoder_context);
@@ -654,7 +648,7 @@ static void gen6_vme_pipeline_programing(VADriverContextP ctx,
}
/*Step4: Primitive commands*/
object_len_in_bytes = gen6_vme_media_object(ctx, encode_state, x, y, is_intra ? VME_INTRA_SHADER : VME_INTER_SHADER);
object_len_in_bytes = gen6_vme_media_object(ctx, encode_state, x, y, is_intra ? VME_INTRA_SHADER : VME_INTER_SHADER, gen6_encoder_context);
if (intel_batchbuffer_check_free_space(batch, object_len_in_bytes) == 0) {
assert(0);
@@ -693,8 +687,7 @@ static VAStatus gen6_vme_run(VADriverContextP ctx,
struct encode_state *encode_state,
struct gen6_encoder_context *gen6_encoder_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
intel_batchbuffer_flush(batch);
......
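Every gen6 VME helper above now takes a struct gen6_encoder_context * and emits into gen6_encoder_context->base.batch, and gen6_vme_run() flushes that same batch. The sketch below condenses the per-macroblock loop in gen6_vme_pipeline_programing() after the change; the loop bounds (width_in_mbs/height_in_mbs) are illustrative, and only the two calls shown in the hunks above are taken from the commit.

struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
int x, y, object_len_in_bytes;

for (y = 0; y < height_in_mbs; y++) {
    for (x = 0; x < width_in_mbs; x++) {
        /* Step 4: one MEDIA_OBJECT per macroblock, emitted into the encoder
         * context's batch via the new trailing argument. */
        object_len_in_bytes = gen6_vme_media_object(ctx, encode_state, x, y,
                                                    is_intra ? VME_INTRA_SHADER : VME_INTER_SHADER,
                                                    gen6_encoder_context);

        /* The free-space check now runs against this context's batch rather
         * than a driver-global one; the driver simply asserts if it fails. */
        if (intel_batchbuffer_check_free_space(batch, object_len_in_bytes) == 0)
            assert(0);
    }
}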
@@ -97,10 +97,13 @@ i965_avc_bsd_init_avc_bsd_surface(VADriverContextP ctx,
}
static void
i965_bsd_ind_obj_base_address(VADriverContextP ctx, struct decode_state *decode_state, int slice)
i965_bsd_ind_obj_base_address(VADriverContextP ctx,
struct decode_state *decode_state,
int slice,
struct i965_h264_context *i965_h264_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct intel_batchbuffer *batch = i965_h264_context->batch;
dri_bo *ind_bo = decode_state->slice_datas[slice]->bo;
@@ -118,8 +121,7 @@ i965_avc_bsd_img_state(VADriverContextP ctx,
struct decode_state *decode_state,
struct i965_h264_context *i965_h264_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct intel_batchbuffer *batch = i965_h264_context->batch;
int qm_present_flag;
int img_struct;
int mbaff_frame_flag;
@@ -209,10 +211,11 @@ i965_avc_bsd_img_state(VADriverContextP ctx,
}
static void
i965_avc_bsd_qm_state(VADriverContextP ctx, struct decode_state *decode_state)
i965_avc_bsd_qm_state(VADriverContextP ctx,
struct decode_state *decode_state,
struct i965_h264_context *i965_h264_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct intel_batchbuffer *batch = i965_h264_context->batch;
int cmd_len;
VAIQMatrixBufferH264 *iq_matrix;
VAPictureParameterBufferH264 *pic_param;
@@ -256,8 +259,7 @@ i965_avc_bsd_slice_state(VADriverContextP ctx,
VASliceParameterBufferH264 *slice_param,
struct i965_h264_context *i965_h264_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct intel_batchbuffer *batch = i965_h264_context->batch;
int present_flag, cmd_len, list, j;
struct {
unsigned char bottom_idc:1;
@@ -428,9 +430,8 @@ i965_avc_bsd_buf_base_state(VADriverContextP ctx,
VASliceParameterBufferH264 *slice_param,
struct i965_h264_context *i965_h264_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct intel_batchbuffer *batch = i965_h264_context->batch;
struct i965_avc_bsd_context *i965_avc_bsd_context;
int i, j;
VAPictureH264 *va_pic;
@@ -608,8 +609,7 @@ g4x_avc_bsd_object(VADriverContextP ctx,
int slice_index,
struct i965_h264_context *i965_h264_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct intel_batchbuffer *batch = i965_h264_context->batch;
int width_in_mbs = pic_param->picture_width_in_mbs_minus1 + 1;
int height_in_mbs = pic_param->picture_height_in_mbs_minus1 + 1; /* frame height */
@@ -738,8 +738,7 @@ ironlake_avc_bsd_object(VADriverContextP ctx,
int slice_index,
struct i965_h264_context *i965_h264_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct intel_batchbuffer *batch = i965_h264_context->batch;
int width_in_mbs = pic_param->picture_width_in_mbs_minus1 + 1;
int height_in_mbs = pic_param->picture_height_in_mbs_minus1 + 1; /* frame height */
@@ -1025,9 +1024,8 @@ i965_avc_bsd_frame_store_index(VADriverContextP ctx,
void
i965_avc_bsd_pipeline(VADriverContextP ctx, struct decode_state *decode_state, void *h264_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_h264_context *i965_h264_context = (struct i965_h264_context *)h264_context;
struct intel_batchbuffer *batch = i965_h264_context->batch;
VAPictureParameterBufferH264 *pic_param;
VASliceParameterBufferH264 *slice_param;
int i, j;
@@ -1064,13 +1062,13 @@ i965_avc_bsd_pipeline(VADriverContextP ctx, struct decode_state *decode_state, v
intel_batchbuffer_start_atomic_bcs(batch, 0x1000);
i965_avc_bsd_img_state(ctx, decode_state, i965_h264_context);
i965_avc_bsd_qm_state(ctx, decode_state);
i965_avc_bsd_qm_state(ctx, decode_state, i965_h264_context);
for (j = 0; j < decode_state->num_slice_params; j++) {
assert(decode_state->slice_params && decode_state->slice_params[j]->buffer);
slice_param = (VASliceParameterBufferH264 *)decode_state->slice_params[j]->buffer;
i965_bsd_ind_obj_base_address(ctx, decode_state, j);
i965_bsd_ind_obj_base_address(ctx, decode_state, j, i965_h264_context);
assert(decode_state->slice_params[j]->num_elements == 1); /* FIXME */
for (i = 0; i < decode_state->slice_params[j]->num_elements; i++) {
......
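The AVC BSD (bitstream decode) helpers follow the same pattern: they now receive the H.264 context and emit into i965_h264_context->batch. The sketch below condenses i965_avc_bsd_pipeline() after the change; buffer validation, the per-slice state emission, and the end of the atomic batch are elided, and only the calls visible in the hunks above are taken from the commit.

void
i965_avc_bsd_pipeline(VADriverContextP ctx, struct decode_state *decode_state, void *h264_context)
{
    struct i965_h264_context *i965_h264_context = (struct i965_h264_context *)h264_context;
    struct intel_batchbuffer *batch = i965_h264_context->batch;
    int j;

    /* all BSD commands go into this context's batch, started atomically */
    intel_batchbuffer_start_atomic_bcs(batch, 0x1000);

    i965_avc_bsd_img_state(ctx, decode_state, i965_h264_context);
    i965_avc_bsd_qm_state(ctx, decode_state, i965_h264_context);   /* now takes the context */

    for (j = 0; j < decode_state->num_slice_params; j++) {
        /* the indirect object base address is programmed on the same batch */
        i965_bsd_ind_obj_base_address(ctx, decode_state, j, i965_h264_context);
        /* ... per-slice state and AVC_BSD_OBJECT emission elided ... */
    }
    /* ... end of the atomic batch elided ... */
}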
@@ -396,10 +396,9 @@ i965_avc_ildb_states_setup(VADriverContextP ctx,
}
static void
i965_avc_ildb_pipeline_select(VADriverContextP ctx)
i965_avc_ildb_pipeline_select(VADriverContextP ctx, struct i965_h264_context *i965_h264_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct intel_batchbuffer *batch = i965_h264_context->batch;
BEGIN_BATCH(batch, 1);
OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
@@ -409,11 +408,9 @@ i965_avc_ildb_pipeline_select(VADriverContextP ctx)
static void
i965_avc_ildb_urb_layout(VADriverContextP ctx, struct i965_h264_context *i965_h264_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct i965_avc_ildb_context *avc_ildb_context = &i965_h264_context->avc_ildb_context;
struct intel_batchbuffer *batch = i965_h264_context->batch;
unsigned int vfe_fence, cs_fence;
vfe_fence = avc_ildb_context->urb.cs_start;
@@ -429,11 +426,10 @@ i965_avc_ildb_urb_layout(VADriverContextP ctx, struct i965_h264_context *i965_h2
}
static void
i965_avc_ildb_state_base_address(VADriverContextP ctx)
i965_avc_ildb_state_base_address(VADriverContextP ctx, struct i965_h264_context *i965_h264_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct intel_batchbuffer *batch = i965_h264_context->batch;
if (IS_IRONLAKE(i965->intel.device_id)) {
BEGIN_BATCH(batch, 8);
@@ -461,9 +457,8 @@ i965_avc_ildb_state_base_address(VADriverContextP ctx)
static void
i965_avc_ildb_state_pointers(VADriverContextP ctx, struct i965_h264_context *i965_h264_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_avc_ildb_context *avc_ildb_context = &i965_h264_context->avc_ildb_context;
struct intel_batchbuffer *batch = i965_h264_context->batch;
BEGIN_BATCH(batch, 3);
OUT_BATCH(batch, CMD_MEDIA_STATE_POINTERS | 1);
@@ -475,9 +470,8 @@ i965_avc_ildb_state_pointers(VADriverContextP ctx, struct i965_h264_context *i96
static void
i965_avc_ildb_cs_urb_layout(VADriverContextP ctx, struct i965_h264_context *i965_h264_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_avc_ildb_context *avc_ildb_context = &i965_h264_context->avc_ildb_context;
struct intel_batchbuffer *batch = i965_h264_context->batch;
BEGIN_BATCH(batch, 2);
OUT_BATCH(batch, CMD_CS_URB_STATE | 0);
@@ -490,9 +484,8 @@ i965_avc_ildb_cs_urb_layout(VADriverContextP ctx, struct i965_h264_context *i965
static void
i965_avc_ildb_constant_buffer(VADriverContextP ctx, struct i965_h264_context *i965_h264_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_avc_ildb_context *avc_ildb_context = &i965_h264_context->avc_ildb_context;
struct intel_batchbuffer *batch = i965_h264_context->batch;
BEGIN_BATCH(batch, 2);
OUT_BATCH(batch, CMD_CONSTANT_BUFFER | (1 << 8) | (2 - 2));
@@ -505,9 +498,8 @@ i965_avc_ildb_constant_buffer(VADriverContextP ctx, struct i965_h264_context *i9
static void
i965_avc_ildb_objects(VADriverContextP ctx, struct i965_h264_context *i965_h264_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_avc_ildb_context *avc_ildb_context = &i965_h264_context->avc_ildb_context;
struct intel_batchbuffer *batch = i965_h264_context->batch;
BEGIN_BATCH(batch, 6);
OUT_BATCH(batch, CMD_MEDIA_OBJECT | 4);
@@ -541,12 +533,11 @@ i965_avc_ildb_objects(VADriverContextP ctx, struct i965_h264_context *i965_h264_
static void
i965_avc_ildb_pipeline_setup(VADriverContextP ctx, struct i965_h264_context *i965_h264_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct intel_batchbuffer *batch = i965_h264_context->batch;
intel_batchbuffer_emit_mi_flush(batch);
i965_avc_ildb_pipeline_select(ctx);
i965_avc_ildb_state_base_address(ctx);
i965_avc_ildb_pipeline_select(ctx, i965_h264_context);
i965_avc_ildb_state_base_address(ctx, i965_h264_context);
i965_avc_ildb_state_pointers(ctx, i965_h264_context);
i965_avc_ildb_urb_layout(ctx, i965_h264_context);
i965_avc_ildb_cs_urb_layout(ctx, i965_h264_context);
......
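The in-loop deblocking (ILDB) path gets the same treatment: the whole state sequence is emitted into the H.264 context's batch, and the two helpers that previously took only ctx now receive the context as well. A condensed sketch of i965_avc_ildb_pipeline_setup() after the change, built from the hunks above; the steps after the CS URB layout are elided.

static void
i965_avc_ildb_pipeline_setup(VADriverContextP ctx, struct i965_h264_context *i965_h264_context)
{
    struct intel_batchbuffer *batch = i965_h264_context->batch;

    intel_batchbuffer_emit_mi_flush(batch);

    i965_avc_ildb_pipeline_select(ctx, i965_h264_context);      /* now takes the context */
    i965_avc_ildb_state_base_address(ctx, i965_h264_context);   /* now takes the context */
    i965_avc_ildb_state_pointers(ctx, i965_h264_context);
    i965_avc_ildb_urb_layout(ctx, i965_h264_context);
    i965_avc_ildb_cs_urb_layout(ctx, i965_h264_context);
    /* ... constant buffer and MEDIA_OBJECT emission follow on the same batch ... */
}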
@@ -1550,16 +1550,11 @@ i965_QuerySurfaceStatus(VADriverContextP ctx,
VASurfaceID render_target,
VASurfaceStatus *status) /* out */
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct object_surface *obj_surface = SURFACE(render_target);
assert(obj_surface);
/* Commit pending operations to the HW */
intel_batchbuffer_flush(batch);
/* Usually GEM will handle synchronization with the graphics hardware */
#if 0
if (obj_surface->bo) {
@@ -1654,6 +1649,8 @@ i965_Init(VADriverContextP ctx)
if (i965_render_init(ctx) == False)
return VA_STATUS_ERROR_UNKNOWN;
i965->batch = intel_batchbuffer_new(&i965->intel, I915_EXEC_RENDER);
return VA_STATUS_SUCCESS;
}
@@ -2136,8 +2133,6 @@ i965_GetImage(VADriverContextP ctx,
unsigned int height,
VAImageID image)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct i965_render_state *render_state = &i965->render_state;
@@ -2158,9 +2153,6 @@ i965_GetImage(VADriverContextP ctx,
y + height > obj_image->image.height)
return VA_STATUS_ERROR_INVALID_PARAMETER;
/* Commit pending operations to the HW */
intel_batchbuffer_flush(batch);
VAStatus va_status;
void *image_data = NULL;
@@ -2311,6 +2303,9 @@ i965_Terminate(VADriverContextP ctx)
{
struct i965_driver_data *i965 = i965_driver_data(ctx);
if (i965->batch)
intel_batchbuffer_free(i965->batch);
if (i965_render_terminate(ctx) == False)
return VA_STATUS_ERROR_UNKNOWN;
......
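At the driver level, the batch that used to live in intel_driver_data is replaced by one owned by i965_driver_data, created in i965_Init() and released in i965_Terminate(); it serves rendering and post-processing. Because every codec context now flushes its own batch, the defensive intel_batchbuffer_flush() calls in i965_QuerySurfaceStatus() and i965_GetImage() are removed. A sketch condensed from the hunks above; the local declarations and the elided setup/teardown are reconstructed, not verbatim.

VAStatus i965_Init(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);

    /* ... earlier subsystem init elided ... */
    if (i965_render_init(ctx) == False)
        return VA_STATUS_ERROR_UNKNOWN;

    /* driver-owned batch for render / post-processing */
    i965->batch = intel_batchbuffer_new(&i965->intel, I915_EXEC_RENDER);

    return VA_STATUS_SUCCESS;
}

VAStatus i965_Terminate(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);

    if (i965->batch)
        intel_batchbuffer_free(i965->batch);

    if (i965_render_terminate(ctx) == False)
        return VA_STATUS_ERROR_UNKNOWN;

    /* ... remaining teardown elided ... */
}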
@@ -116,6 +116,7 @@ struct hw_context
union codec_state *codec_state,
struct hw_context *hw_context);
void (*destroy)(void *);
struct intel_batchbuffer *batch;
};
struct object_context
@@ -214,8 +215,10 @@ struct i965_driver_data
struct object_heap buffer_heap;
struct object_heap image_heap;
struct object_heap subpic_heap;
struct i965_render_state render_state;
struct hw_codec_info *codec_info;
struct intel_batchbuffer *batch;
struct i965_render_state render_state;
void *pp_context;
};
......
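The two structure changes above carry the whole design: each hw_context gains the batchbuffer it emits into, and the driver keeps a separate batch of its own. An abridged view (unrelated members elided):

struct hw_context
{
    /* run() callback and earlier members elided */
    void (*destroy)(void *);
    struct intel_batchbuffer *batch;       /* per-context batchbuffer (new) */
};

struct i965_driver_data
{
    /* ... object heaps etc. elided ... */
    struct hw_codec_info *codec_info;
    struct intel_batchbuffer *batch;       /* driver-owned batch for render/PP (new) */
    struct i965_render_state render_state;
    void *pp_context;
};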
@@ -62,16 +62,20 @@ gen6_encoder_context_destroy(void *hw_context)
gen6_mfc_context_destroy(&gen6_encoder_context->mfc_context);
gen6_vme_context_destroy(&gen6_encoder_context->vme_context);
intel_batchbuffer_free(gen6_encoder_context->base.batch);
free(gen6_encoder_context);
}
struct hw_context *
gen6_enc_hw_context_init(VADriverContextP ctx, VAProfile profile)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct gen6_encoder_context *gen6_encoder_context = calloc(1, sizeof(struct gen6_encoder_context));
gen6_encoder_context->base.destroy = gen6_encoder_context_destroy;
gen6_encoder_context->base.run = gen6_encoder_end_picture;
gen6_encoder_context->base.batch = intel_batchbuffer_new(intel, I915_EXEC_RENDER);
gen6_vme_context_init(ctx, &gen6_encoder_context->vme_context);
gen6_mfc_context_init(ctx, &gen6_encoder_context->mfc_context);
......
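The gen6 encoder context now allocates its own batchbuffer on the render ring at init time and frees it in its destroy hook, instead of borrowing intel->batch. A condensed sketch built from the hunk above; the cast in the destroy hook, the "static void" qualifier, and the return of &gen6_encoder_context->base are reconstructed assumptions.

struct hw_context *
gen6_enc_hw_context_init(VADriverContextP ctx, VAProfile profile)
{
    struct intel_driver_data *intel = intel_driver_data(ctx);
    struct gen6_encoder_context *gen6_encoder_context =
        calloc(1, sizeof(struct gen6_encoder_context));

    gen6_encoder_context->base.destroy = gen6_encoder_context_destroy;
    gen6_encoder_context->base.run = gen6_encoder_end_picture;
    gen6_encoder_context->base.batch = intel_batchbuffer_new(intel, I915_EXEC_RENDER);  /* new */

    gen6_vme_context_init(ctx, &gen6_encoder_context->vme_context);
    gen6_mfc_context_init(ctx, &gen6_encoder_context->mfc_context);

    /* ... remaining setup elided ... */
    return &gen6_encoder_context->base;
}

static void
gen6_encoder_context_destroy(void *hw_context)
{
    struct gen6_encoder_context *gen6_encoder_context = (struct gen6_encoder_context *)hw_context;

    gen6_mfc_context_destroy(&gen6_encoder_context->mfc_context);
    gen6_vme_context_destroy(&gen6_encoder_context->vme_context);
    intel_batchbuffer_free(gen6_encoder_context->base.batch);   /* new: context owns its batch */
    free(gen6_encoder_context);
}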
@@ -44,10 +44,9 @@
#include "i965_media_h264.h"
static void
i965_media_pipeline_select(VADriverContextP ctx)
i965_media_pipeline_select(VADriverContextP ctx, struct i965_media_context *media_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct intel_batchbuffer *batch = media_context->base.batch;
BEGIN_BATCH(batch, 1);
OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
@@ -57,10 +56,8 @@ i965_media_pipeline_select(VADriverContextP ctx)
static void
i965_media_urb_layout(VADriverContextP ctx, struct i965_media_context *media_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct intel_batchbuffer *batch = media_context->base.batch;
unsigned int vfe_fence, cs_fence;
vfe_fence = media_context->urb.cs_start;
@@ -78,9 +75,8 @@ i965_media_urb_layout(VADriverContextP ctx, struct i965_media_context *media_con
static void
i965_media_state_base_address(VADriverContextP ctx, struct i965_media_context *media_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct intel_batchbuffer *batch = media_context->base.batch;
if (IS_IRONLAKE(i965->intel.device_id)) {
BEGIN_BATCH(batch, 8);
@@ -122,8 +118,7 @@ i965_media_state_base_address(VADriverContextP ctx, struct i965_media_context *m
static void
i965_media_state_pointers(VADriverContextP ctx, struct i965_media_context *media_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct intel_batchbuffer *batch = media_context->base.batch;
BEGIN_BATCH(batch, 3);
OUT_BATCH(batch, CMD_MEDIA_STATE_POINTERS | 1);
@@ -140,8 +135,7 @@ i965_media_state_pointers(VADriverContextP ctx, struct i965_media_context *media
static void
i965_media_cs_urb_layout(VADriverContextP ctx, struct i965_media_context *media_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct intel_batchbuffer *batch = media_context->base.batch;
BEGIN_BATCH(batch, 2);
OUT_BATCH(batch, CMD_CS_URB_STATE | 0);
@@ -162,8 +156,7 @@ i965_media_pipeline_state(VADriverContextP ctx, struct i965_media_context *media
static void
i965_media_constant_buffer(VADriverContextP ctx, struct decode_state *decode_state, struct i965_media_context *media_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct intel_batchbuffer *batch = media_context->base.batch;
BEGIN_BATCH(batch, 2);
OUT_BATCH(batch, CMD_CONSTANT_BUFFER | (1 << 8) | (2 - 2));
@@ -174,10 +167,9 @@ i965_media_constant_buffer(VADriverContextP ctx, struct decode_state *decode_sta
}
static void
i965_media_depth_buffer(VADriverContextP ctx)
i965_media_depth_buffer(VADriverContextP ctx, struct i965_media_context *media_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct intel_batchbuffer *batch = media_context->base.batch;
BEGIN_BATCH(batch, 6);
OUT_BATCH(batch, CMD_DEPTH_BUFFER | 4);
@@ -195,13 +187,12 @@ i965_media_pipeline_setup(VADriverContextP ctx,
struct decode_state *decode_state,
struct i965_media_context *media_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct intel_batchbuffer *batch = media_context->base.batch;
intel_batchbuffer_start_atomic(batch, 0x1000);
intel_batchbuffer_emit_mi_flush(batch); /* step 1 */
i965_media_depth_buffer(ctx);
i965_media_pipeline_select(ctx); /* step 2 */
intel_batchbuffer_emit_mi_flush(batch); /* step 1 */
i965_media_depth_buffer(ctx, media_context);
i965_media_pipeline_select(ctx, media_context); /* step 2 */
i965_media_urb_layout(ctx, media_context); /* step 3 */
i965_media_pipeline_state(ctx, media_context); /* step 4 */
i965_media_constant_buffer(ctx, decode_state, media_context); /* step 5 */
@@ -292,6 +283,7 @@ i965_media_decode_picture(VADriverContextP ctx,
assert(media_context->media_states_setup);
media_context->media_states_setup(ctx, decode_state, media_context);
i965_media_pipeline_setup(ctx, decode_state, media_context);
intel_batchbuffer_flush(hw_context->batch);
}
static void
@@ -326,16 +318,19 @@ i965_media_context_destroy(void *hw_context)
dri_bo_unreference(media_context->indirect_object.bo);
media_context->indirect_object.bo = NULL;
intel_batchbuffer_free(media_context->base.batch);
free(media_context);
}
struct hw_context *
g4x_dec_hw_context_init(VADriverContextP ctx, VAProfile profile)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct i965_media_context *media_context = calloc(1, sizeof(struct i965_media_context));
media_context->base.destroy = i965_media_context_destroy;
media_context->base.run = i965_media_decode_picture;
media_context->base.batch = intel_batchbuffer_new(intel, I915_EXEC_RENDER);
switch (profile) {
case VAProfileMPEG2Simple:
@@ -360,10 +355,12 @@ g4x_dec_hw_context_init(VADriverContextP ctx, VAProfile profile)
struct hw_context *
ironlake_dec_hw_context_init(VADriverContextP ctx, VAProfile profile)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct i965_media_context *media_context = calloc(1, sizeof(struct i965_media_context));
media_context->base.destroy = i965_media_context_destroy;
media_context->base.run = i965_media_decode_picture;
media_context->base.batch = intel_batchbuffer_new(intel, I915_EXEC_RENDER);
switch (profile) {
case VAProfileMPEG2Simple:
......
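The media (decode) contexts follow the same lifecycle: g4x_dec_hw_context_init() and ironlake_dec_hw_context_init() allocate the batch, i965_media_decode_picture() flushes it at the end of every decode call, and i965_media_context_destroy() frees it. The sketch below condenses this from the hunks above; the decode function is shown with a simplified signature and a _sketch suffix to mark it as a reconstruction, and the return statement is assumed.

struct hw_context *
g4x_dec_hw_context_init(VADriverContextP ctx, VAProfile profile)
{
    struct intel_driver_data *intel = intel_driver_data(ctx);
    struct i965_media_context *media_context = calloc(1, sizeof(struct i965_media_context));

    media_context->base.destroy = i965_media_context_destroy;
    media_context->base.run = i965_media_decode_picture;
    media_context->base.batch = intel_batchbuffer_new(intel, I915_EXEC_RENDER);  /* new */

    /* ... per-profile setup elided ... */
    return &media_context->base;
}

static void
i965_media_decode_picture_sketch(VADriverContextP ctx,
                                 struct decode_state *decode_state,
                                 struct hw_context *hw_context)
{
    struct i965_media_context *media_context = (struct i965_media_context *)hw_context;

    media_context->media_states_setup(ctx, decode_state, media_context);
    i965_media_pipeline_setup(ctx, decode_state, media_context);

    /* New: each decode submits its own batch instead of relying on a later
     * driver-wide flush (such as the one removed from i965_QuerySurfaceStatus). */
    intel_batchbuffer_flush(hw_context->batch);
}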
@@ -710,8 +710,7 @@ i965_media_h264_objects(VADriverContextP ctx,
struct decode_state *decode_state,
struct i965_media_context *media_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct intel_batchbuffer *batch = media_context->base.batch;
struct i965_h264_context *i965_h264_context;
unsigned int *object_command;
@@ -883,6 +882,8 @@ i965_media_h264_dec_context_init(VADriverContextP ctx, struct i965_media_context
i965_h264_context->fsid_list[i].frame_store_id = -1;
}
i965_h264_context->batch = media_context->base.batch;
media_context->private_context = i965_h264_context;
media_context->free_private_context = i965_media_h264_free_private_context;
......
@@ -66,6 +66,7 @@ struct i965_h264_context
} fsid_list[16];
struct i965_kernel avc_kernels[NUM_H264_AVC_KERNELS];
struct intel_batchbuffer *batch;
};
void i965_media_h264_decode_init(VADriverContextP ctx, struct decode_state *decode_state, struct i965_media_context *media_context);
......
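Note that the H.264 private context does not allocate a batch of its own: during i965_media_h264_dec_context_init() it borrows the media context's batch, so the BSD/ILDB helpers can reach it without walking back to the driver, while ownership (and the eventual intel_batchbuffer_free) stays with media_context->base. Abridged from the hunks above:

struct i965_h264_context
{
    /* ... frame store list, AVC kernels, etc. elided ... */
    struct intel_batchbuffer *batch;   /* borrowed from media_context->base.batch */
};

/* set once at context-init time; the media context remains the owner */
i965_h264_context->batch = media_context->base.batch;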
@@ -882,10 +882,9 @@ i965_media_mpeg2_objects(VADriverContextP ctx,
struct decode_state *decode_state,
struct i965_media_context *media_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
int i, j;
struct intel_batchbuffer *batch = media_context->base.batch;
VASliceParameterBufferMPEG2 *slice_param;
int i, j;
for (j = 0; j < decode_state->num_slice_params; j++) {
assert(decode_state->slice_params[j] && decode_state->slice_params[j]->buffer);
......
@@ -399,8 +399,8 @@ ironlake_pp_states_setup(VADriverContextP ctx)
static void
ironlake_pp_pipeline_select(VADriverContextP ctx)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct intel_batchbuffer *batch = i965->batch;
BEGIN_BATCH(batch, 1);
OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
@@ -410,8 +410,8 @@ ironlake_pp_pipeline_select(VADriverContextP ctx)
static void
ironlake_pp_urb_layout(VADriverContextP ctx, struct i965_post_processing_context *pp_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct intel_batchbuffer *batch = i965->batch;
unsigned int vfe_fence, cs_fence;
vfe_fence = pp_context->urb.cs_start;
@@ -429,8 +429,8 @@ ironlake_pp_urb_layout(VADriverContextP ctx, struct i965_post_processing_context
static void
ironlake_pp_state_base_address(VADriverContextP ctx)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct intel_batchbuffer *batch = i965->batch;
BEGIN_BATCH(batch, 8);
OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 6);
@@ -447,8 +447,8 @@ ironlake_pp_state_base_address(VADriverContextP ctx)
static void
ironlake_pp_state_pointers(VADriverContextP ctx, struct i965_post_processing_context *pp_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct intel_batchbuffer *batch = i965->batch;
BEGIN_BATCH(batch, 3);
OUT_BATCH(batch, CMD_MEDIA_STATE_POINTERS | 1);
@@ -460,8 +460,8 @@ ironlake_pp_state_pointers(VADriverContextP ctx, struct i965_post_processing_con
static void
ironlake_pp_cs_urb_layout(VADriverContextP ctx, struct i965_post_processing_context *pp_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct intel_batchbuffer *batch = i965->batch;
BEGIN_BATCH(batch, 2);
OUT_BATCH(batch, CMD_CS_URB_STATE | 0);
@@ -474,8 +474,8 @@ ironlake_pp_cs_urb_layout(VADriverContextP ctx, struct i965_post_processing_cont
static void
ironlake_pp_constant_buffer(VADriverContextP ctx, struct i965_post_processing_context *pp_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct intel_batchbuffer *batch = i965->batch;
BEGIN_BATCH(batch, 2);
OUT_BATCH(batch, CMD_CONSTANT_BUFFER | (1 << 8) | (2 - 2));
@@ -488,8 +488,8 @@ ironlake_pp_constant_buffer(VADriverContextP ctx, struct i965_post_processing_co
static void
ironlake_pp_object_walker(VADriverContextP ctx, struct i965_post_processing_context *pp_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct intel_batchbuffer *batch = i965->batch;
int x, x_steps, y, y_steps;
x_steps = pp_context->pp_x_steps(&pp_context->private_context);
@@ -517,9 +517,8 @@ ironlake_pp_object_walker(VADriverContextP ctx, struct i965_post_processing_cont
static void
ironlake_pp_pipeline_setup(VADriverContextP ctx)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct intel_batchbuffer *batch = i965->batch;
struct i965_post_processing_context *pp_context = i965->pp_context;
intel_batchbuffer_start_atomic(batch, 0x1000);
@@ -2068,8 +2067,8 @@ gen6_pp_states_setup(VADriverContextP ctx)
static void
gen6_pp_pipeline_select(VADriverContextP ctx)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct intel_batchbuffer *batch = i965->batch;
BEGIN_BATCH(batch, 1);
OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
@@ -2079,8 +2078,8 @@ gen6_pp_pipeline_select(VADriverContextP ctx)
static void
gen6_pp_state_base_address(VADriverContextP ctx)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct intel_batchbuffer *batch = i965->batch;
BEGIN_BATCH(batch, 10);
OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (10 - 2));
@@ -2099,8 +2098,8 @@ gen6_pp_state_base_address(VADriverContextP ctx)
static void
gen6_pp_vfe_state(VADriverContextP ctx, struct i965_post_processing_context *pp_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct intel_batchbuffer *batch = i965->batch;
BEGIN_BATCH(batch, 8);
OUT_BATCH(batch, CMD_MEDIA_VFE_STATE | (8 - 2));
@@ -2121,8 +2120,8 @@ gen6_pp_vfe_state(VADriverContextP ctx, struct i965_post_processing_context *pp_
static void
gen6_pp_curbe_load(VADriverContextP ctx, struct i965_post_processing_context *pp_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct intel_batchbuffer *batch = i965->batch;
assert(pp_context->urb.size_cs_entry * pp_context->urb.num_cs_entries * 512 <= pp_context->curbe.bo->size);
@@ -2141,8 +2140,8 @@ gen6_pp_curbe_load(VADriverContextP ctx, struct i965_post_processing_context *pp
static void
gen6_interface_descriptor_load(VADriverContextP ctx, struct i965_post_processing_context *pp_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct intel_batchbuffer *batch = i965->batch;
BEGIN_BATCH(batch, 4);
OUT_BATCH(batch, CMD_MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2));
@@ -2159,8 +2158,8 @@ gen6_interface_descriptor_load(VADriverContextP ctx, struct i965_post_processing
static void
gen6_pp_object_walker(VADriverContextP ctx, struct i965_post_processing_context *pp_context)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct intel_batchbuffer *batch = i965->batch;
int x, x_steps, y, y_steps;
x_steps = pp_context->pp_x_steps(&pp_context->private_context);
@@ -2190,9 +2189,8 @@ gen6_pp_object_walker(VADriverContextP ctx, struct i965_post_processing_context
static void
gen6_pp_pipeline_setup(VADriverContextP ctx)
{
struct intel_driver_data *intel = intel_driver_data(ctx);
struct intel_batchbuffer *batch = intel->batch;
struct i965_driver_data *i965 = i965_driver_data(ctx);
struct intel_batchbuffer *batch = i965->batch;
struct i965_post_processing_context *pp_context = i965->pp_context;
intel_batchbuffer_start_atomic(batch, 0x1000);
......
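Post-processing is not tied to a codec context, so it keeps using a driver-level batch, but that batch is now i965->batch (allocated in i965_Init()) rather than intel->batch. A condensed rendering of gen6_pp_pipeline_select() after the change, taken from the hunks above; the trailing ADVANCE_BATCH is assumed from the matching ironlake helper.

static void
gen6_pp_pipeline_select(VADriverContextP ctx)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = i965->batch;   /* was intel->batch */

    BEGIN_BATCH(batch, 1);
    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
    ADVANCE_BATCH(batch);
}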
@@ -81,7 +81,6 @@ intel_driver_init(VADriverContextP ctx)
intel->has_blt = has_blt;
intel_memman_init(intel);
intel->batch = intel_batchbuffer_new(intel, I915_EXEC_RENDER);
return True;
}
@@ -91,7 +90,6 @@ intel_driver_terminate(VADriverContextP ctx)
struct intel_driver_data *intel = intel_driver_data(ctx);
intel_memman_terminate(intel);
intel_batchbuffer_free(intel->batch);
pthread_mutex_destroy(&intel->ctxmutex);
return True;
......
@@ -105,7 +105,6 @@ struct intel_driver_data
pthread_mutex_t ctxmutex;
int locked;
struct intel_batchbuffer *batch;
dri_bufmgr *bufmgr;
unsigned int has_exec2 : 1; /* Flag: has execbuffer2? */
......
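Finally, the core intel driver no longer creates, frees, or exposes a batchbuffer at all; the batch member is dropped from struct intel_driver_data. A sketch condensed from the two hunks above; the Bool return type is assumed from the True returns shown, and unrelated setup is elided.

Bool intel_driver_init(VADriverContextP ctx)
{
    struct intel_driver_data *intel = intel_driver_data(ctx);

    /* ... bufmgr and feature detection elided ... */
    intel_memman_init(intel);
    /* the old intel->batch allocation is gone: batches now belong to the
     * codec contexts and to i965_driver_data */
    return True;
}

Bool intel_driver_terminate(VADriverContextP ctx)
{
    struct intel_driver_data *intel = intel_driver_data(ctx);

    intel_memman_terminate(intel);
    /* the matching intel_batchbuffer_free(intel->batch) is gone as well */
    pthread_mutex_destroy(&intel->ctxmutex);

    return True;
}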