i965: Add hardware context support.
With fixes and updates from Ben Widawsky and comments from Paul Berry.

v2: Use drm_intel_gem_context_destroy to destroy hardware context;
remove useless initialization of hw_ctx, both suggested by Eric.

Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Acked-by: Paul Berry <stereotype441@gmail.com>
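For context, the libdrm entry points this patch relies on follow a simple create/exec/destroy lifecycle. The sketch below is not part of the patch; it is a minimal standalone illustration assuming libdrm_intel >= 2.4.37 (the version required by the configure.ac change). The device node path, the trivial batch contents, and the minimal error handling are illustrative assumptions only.

/* Minimal sketch (not part of the patch) of the libdrm hardware-context
 * lifecycle that the driver changes below wire up.  Assumes
 * libdrm_intel >= 2.4.37; device path and batch contents are illustrative.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <intel_bufmgr.h>
#include <i915_drm.h>

#define MI_BATCH_BUFFER_END (0xA << 23)
#define MI_NOOP             0

int main(void)
{
   int fd = open("/dev/dri/card0", O_RDWR);   /* illustrative device node */
   if (fd < 0)
      return 1;

   drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);

   /* brwCreateContext() does this once per GL context; NULL means the
    * running kernel has no context support and the driver falls back to
    * re-emitting all state at the start of every batch.
    */
   drm_intel_context *hw_ctx = drm_intel_gem_context_create(bufmgr);

   /* A trivial batch: MI_BATCH_BUFFER_END padded to an even dword count. */
   uint32_t batch[2] = { MI_BATCH_BUFFER_END, MI_NOOP };
   drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "batch", 4096, 4096);
   drm_intel_bo_subdata(bo, 0, sizeof(batch), batch);

   int ret;
   if (hw_ctx == NULL) {
      /* No hardware contexts: plain execbuffer, as do_flush_locked() did
       * before this patch (and still does for blit batches). */
      ret = drm_intel_bo_mrb_exec(bo, sizeof(batch), NULL, 0, 0,
                                  I915_EXEC_RENDER);
   } else {
      /* Render batches run inside the hardware context, so most pipeline
       * state survives from one batch to the next. */
      ret = drm_intel_gem_bo_context_exec(bo, hw_ctx, sizeof(batch),
                                          I915_EXEC_RENDER);
   }
   printf("execbuffer returned %d\n", ret);

   drm_intel_bo_unreference(bo);
   if (hw_ctx)
      drm_intel_gem_context_destroy(hw_ctx);  /* as in brw_destroy_context() */
   drm_intel_bufmgr_destroy(bufmgr);
   close(fd);
   return 0;
}

Something like "cc demo.c $(pkg-config --cflags --libs libdrm_intel)" should build the sketch (demo.c is a hypothetical file name). Note that contexts apply only to the render ring here, which is why do_flush_locked() keeps the plain drm_intel_bo_mrb_exec() path for blit batches.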
@@ -33,7 +33,7 @@ USER_CXXFLAGS="$CXXFLAGS"
 dnl Versions for external dependencies
 LIBDRM_REQUIRED=2.4.24
 LIBDRM_RADEON_REQUIRED=2.4.31
-LIBDRM_INTEL_REQUIRED=2.4.34
+LIBDRM_INTEL_REQUIRED=2.4.37
 LIBDRM_NVVIEUX_REQUIRED=2.4.33
 LIBDRM_NOUVEAU_REQUIRED=2.4.33
 DRI2PROTO_REQUIRED=2.6
@@ -302,6 +302,7 @@ brwCreateContext(int api,
 
    brw->prim_restart.in_progress = false;
    brw->prim_restart.enable_cut_index = false;
+   intel->hw_ctx = drm_intel_gem_context_create(intel->bufmgr);
 
    brw_init_state( brw );
 
@@ -80,6 +80,8 @@ static void brw_destroy_context( struct intel_context *intel )
 
    free(brw->curbe.last_buf);
    free(brw->curbe.next_buf);
+
+   drm_intel_gem_context_destroy(intel->hw_ctx);
 }
 
 /**
@@ -166,11 +168,16 @@ static void brw_new_batch( struct intel_context *intel )
 {
    struct brw_context *brw = brw_context(&intel->ctx);
 
-   /* Mark all context state as needing to be re-emitted.
-    * This is probably not as severe as on 915, since almost all of our state
-    * is just in referenced buffers.
+   /* If the kernel supports hardware contexts, then most hardware state is
+    * preserved between batches; we only need to re-emit state that is required
+    * to be in every batch. Otherwise we need to re-emit all the state that
+    * would otherwise be stored in the context (which for all intents and
+    * purposes means everything).
     */
-   brw->state.dirty.brw |= BRW_NEW_CONTEXT | BRW_NEW_BATCH;
+   if (intel->hw_ctx == NULL)
+      brw->state.dirty.brw |= BRW_NEW_CONTEXT;
+
+   brw->state.dirty.brw |= BRW_NEW_BATCH;
 
    /* Assume that the last command before the start of our batch was a
    * primitive, for safety.
@@ -188,8 +188,13 @@ do_flush_locked(struct intel_context *intel)
    if (ret == 0) {
       if (unlikely(INTEL_DEBUG & DEBUG_AUB) && intel->vtbl.annotate_aub)
          intel->vtbl.annotate_aub(intel);
-      ret = drm_intel_bo_mrb_exec(batch->bo, 4*batch->used, NULL, 0, 0,
-                                  flags);
+      if (intel->hw_ctx == NULL || batch->is_blit) {
+         ret = drm_intel_bo_mrb_exec(batch->bo, 4 * batch->used, NULL, 0, 0,
+                                     flags);
+      } else {
+         ret = drm_intel_gem_bo_context_exec(batch->bo, intel->hw_ctx,
+                                             4 * batch->used, flags);
+      }
    }
 }
 
@@ -225,6 +225,8 @@ struct intel_context
 
    int urb_size;
 
+   drm_intel_context *hw_ctx;
+
    struct intel_batchbuffer batch;
 
    drm_intel_bo *first_post_swapbuffers_batch;