i965: Move intel_context::bufmgr to brw_context.
Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
Acked-by: Chris Forbes <chrisf@ijw.co.nz>
Acked-by: Paul Berry <stereotype441@gmail.com>
Acked-by: Anuj Phogat <anuj.phogat@gmail.com>
This commit is contained in:
@@ -321,7 +321,7 @@ brwCreateContext(int api,
|
||||
* This is required for transform feedback buffer offsets, query objects,
|
||||
* and also allows us to reduce how much state we have to emit.
|
||||
*/
|
||||
intel->hw_ctx = drm_intel_gem_context_create(intel->bufmgr);
|
||||
intel->hw_ctx = drm_intel_gem_context_create(brw->bufmgr);
|
||||
|
||||
if (!intel->hw_ctx) {
|
||||
fprintf(stderr, "Gen6+ requires Kernel 3.6 or later.\n");
|
||||
|
@@ -802,6 +802,8 @@ struct brw_context
|
||||
|
||||
} vtbl;
|
||||
|
||||
dri_bufmgr *bufmgr;
|
||||
|
||||
/**
|
||||
* Set if rendering has occured to the drawable's front buffer.
|
||||
*
|
||||
|
@@ -278,7 +278,7 @@ brw_upload_constant_buffer(struct brw_context *brw)
|
||||
/* Allocate a single page for CURBE entries for this batchbuffer.
|
||||
* They're generally around 64b.
|
||||
*/
|
||||
brw->curbe.curbe_bo = drm_intel_bo_alloc(brw->intel.bufmgr, "CURBE",
|
||||
brw->curbe.curbe_bo = drm_intel_bo_alloc(brw->bufmgr, "CURBE",
|
||||
4096, 1 << 6);
|
||||
brw->curbe.curbe_next_offset = 0;
|
||||
drm_intel_gem_bo_map_gtt(brw->curbe.curbe_bo);
|
||||
|
@@ -208,7 +208,6 @@ void
|
||||
brw_get_scratch_bo(struct brw_context *brw,
|
||||
drm_intel_bo **scratch_bo, int size)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
drm_intel_bo *old_bo = *scratch_bo;
|
||||
|
||||
if (old_bo && old_bo->size < size) {
|
||||
@@ -217,7 +216,7 @@ brw_get_scratch_bo(struct brw_context *brw,
|
||||
}
|
||||
|
||||
if (!old_bo) {
|
||||
*scratch_bo = drm_intel_bo_alloc(intel->bufmgr, "scratch bo", size, 4096);
|
||||
*scratch_bo = drm_intel_bo_alloc(brw->bufmgr, "scratch bo", size, 4096);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -239,10 +238,8 @@ void brwInitFragProgFuncs( struct dd_function_table *functions )
|
||||
void
|
||||
brw_init_shader_time(struct brw_context *brw)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
const int max_entries = 4096;
|
||||
brw->shader_time.bo = drm_intel_bo_alloc(intel->bufmgr, "shader time",
|
||||
brw->shader_time.bo = drm_intel_bo_alloc(brw->bufmgr, "shader time",
|
||||
max_entries * SHADER_TIME_STRIDE,
|
||||
4096);
|
||||
brw->shader_time.shader_programs = rzalloc_array(brw, struct gl_shader_program *,
|
||||
|
@@ -272,7 +272,7 @@ brw_begin_query(struct gl_context *ctx, struct gl_query_object *q)
|
||||
* the system was doing other work, such as running other applications.
|
||||
*/
|
||||
drm_intel_bo_unreference(query->bo);
|
||||
query->bo = drm_intel_bo_alloc(intel->bufmgr, "timer query", 4096, 4096);
|
||||
query->bo = drm_intel_bo_alloc(brw->bufmgr, "timer query", 4096, 4096);
|
||||
write_timestamp(brw, query->bo, 0);
|
||||
break;
|
||||
|
||||
@@ -420,6 +420,7 @@ static void brw_check_query(struct gl_context *ctx, struct gl_query_object *q)
|
||||
static void
|
||||
ensure_bo_has_space(struct gl_context *ctx, struct brw_query_object *query)
|
||||
{
|
||||
struct brw_context *brw = brw_context(ctx);
|
||||
struct intel_context *intel = intel_context(ctx);
|
||||
|
||||
assert(intel->gen < 6);
|
||||
@@ -434,7 +435,7 @@ ensure_bo_has_space(struct gl_context *ctx, struct brw_query_object *query)
|
||||
brw_queryobj_get_results(ctx, query);
|
||||
}
|
||||
|
||||
query->bo = drm_intel_bo_alloc(intel->bufmgr, "query", 4096, 1);
|
||||
query->bo = drm_intel_bo_alloc(brw->bufmgr, "query", 4096, 1);
|
||||
query->last_index = 0;
|
||||
}
|
||||
}
|
||||
@@ -517,13 +518,12 @@ static void
|
||||
brw_query_counter(struct gl_context *ctx, struct gl_query_object *q)
|
||||
{
|
||||
struct brw_context *brw = brw_context(ctx);
|
||||
struct intel_context *intel = intel_context(ctx);
|
||||
struct brw_query_object *query = (struct brw_query_object *) q;
|
||||
|
||||
assert(q->Target == GL_TIMESTAMP);
|
||||
|
||||
drm_intel_bo_unreference(query->bo);
|
||||
query->bo = drm_intel_bo_alloc(intel->bufmgr, "timestamp query", 4096, 4096);
|
||||
query->bo = drm_intel_bo_alloc(brw->bufmgr, "timestamp query", 4096, 4096);
|
||||
write_timestamp(brw, query->bo, 0);
|
||||
}
|
||||
|
||||
@@ -535,10 +535,10 @@ brw_query_counter(struct gl_context *ctx, struct gl_query_object *q)
|
||||
static uint64_t
|
||||
brw_get_timestamp(struct gl_context *ctx)
|
||||
{
|
||||
struct intel_context *intel = intel_context(ctx);
|
||||
struct brw_context *brw = brw_context(ctx);
|
||||
uint64_t result = 0;
|
||||
|
||||
drm_intel_reg_read(intel->bufmgr, TIMESTAMP, &result);
|
||||
drm_intel_reg_read(brw->bufmgr, TIMESTAMP, &result);
|
||||
|
||||
/* See logic in brw_queryobj_get_results() */
|
||||
result = result >> 32;
|
||||
|
@@ -168,10 +168,9 @@ static void
|
||||
brw_cache_new_bo(struct brw_cache *cache, uint32_t new_size)
|
||||
{
|
||||
struct brw_context *brw = cache->brw;
|
||||
struct intel_context *intel = &brw->intel;
|
||||
drm_intel_bo *new_bo;
|
||||
|
||||
new_bo = drm_intel_bo_alloc(intel->bufmgr, "program cache", new_size, 64);
|
||||
new_bo = drm_intel_bo_alloc(brw->bufmgr, "program cache", new_size, 64);
|
||||
|
||||
/* Copy any existing data that needs to be saved. */
|
||||
if (cache->next_offset != 0) {
|
||||
@@ -328,7 +327,6 @@ brw_upload_cache(struct brw_cache *cache,
|
||||
void
|
||||
brw_init_caches(struct brw_context *brw)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
struct brw_cache *cache = &brw->cache;
|
||||
|
||||
cache->brw = brw;
|
||||
@@ -338,7 +336,7 @@ brw_init_caches(struct brw_context *brw)
|
||||
cache->items =
|
||||
calloc(1, cache->size * sizeof(struct brw_cache_item *));
|
||||
|
||||
cache->bo = drm_intel_bo_alloc(intel->bufmgr,
|
||||
cache->bo = drm_intel_bo_alloc(brw->bufmgr,
|
||||
"program cache",
|
||||
4096, 64);
|
||||
|
||||
|
@@ -44,7 +44,6 @@
|
||||
static void
|
||||
brw_upload_vs_pull_constants(struct brw_context *brw)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
/* BRW_NEW_VERTEX_PROGRAM */
|
||||
struct brw_vertex_program *vp =
|
||||
(struct brw_vertex_program *) brw->vertex_program;
|
||||
@@ -69,7 +68,7 @@ brw_upload_vs_pull_constants(struct brw_context *brw)
|
||||
/* _NEW_PROGRAM_CONSTANTS */
|
||||
drm_intel_bo_unreference(brw->vs.const_bo);
|
||||
uint32_t size = brw->vs.prog_data->base.nr_pull_params * 4;
|
||||
brw->vs.const_bo = drm_intel_bo_alloc(intel->bufmgr, "vp_const_buffer",
|
||||
brw->vs.const_bo = drm_intel_bo_alloc(brw->bufmgr, "vp_const_buffer",
|
||||
size, 64);
|
||||
|
||||
drm_intel_gem_bo_map_gtt(brw->vs.const_bo);
|
||||
|
@@ -479,7 +479,7 @@ brw_upload_wm_pull_constants(struct brw_context *brw)
|
||||
}
|
||||
|
||||
drm_intel_bo_unreference(brw->wm.const_bo);
|
||||
brw->wm.const_bo = drm_intel_bo_alloc(intel->bufmgr, "WM const bo",
|
||||
brw->wm.const_bo = drm_intel_bo_alloc(brw->bufmgr, "WM const bo",
|
||||
size, 64);
|
||||
|
||||
/* _NEW_PROGRAM_CONSTANTS */
|
||||
|
@@ -249,12 +249,11 @@ static void
|
||||
gen6_begin_query(struct gl_context *ctx, struct gl_query_object *q)
|
||||
{
|
||||
struct brw_context *brw = brw_context(ctx);
|
||||
struct intel_context *intel = intel_context(ctx);
|
||||
struct brw_query_object *query = (struct brw_query_object *)q;
|
||||
|
||||
/* Since we're starting a new query, we need to throw away old results. */
|
||||
drm_intel_bo_unreference(query->bo);
|
||||
query->bo = drm_intel_bo_alloc(intel->bufmgr, "query results", 4096, 4096);
|
||||
query->bo = drm_intel_bo_alloc(brw->bufmgr, "query results", 4096, 4096);
|
||||
|
||||
switch (query->Base.Target) {
|
||||
case GL_TIME_ELAPSED:
|
||||
|
@@ -67,7 +67,7 @@ intel_batchbuffer_init(struct brw_context *brw)
|
||||
* the gen6 workaround because it involves actually writing to
|
||||
* the buffer, and the kernel doesn't let us write to the batch.
|
||||
*/
|
||||
intel->batch.workaround_bo = drm_intel_bo_alloc(intel->bufmgr,
|
||||
intel->batch.workaround_bo = drm_intel_bo_alloc(brw->bufmgr,
|
||||
"pipe_control workaround",
|
||||
4096, 4096);
|
||||
}
|
||||
@@ -90,7 +90,7 @@ intel_batchbuffer_reset(struct brw_context *brw)
|
||||
|
||||
clear_cache(brw);
|
||||
|
||||
intel->batch.bo = drm_intel_bo_alloc(intel->bufmgr, "batchbuffer",
|
||||
intel->batch.bo = drm_intel_bo_alloc(brw->bufmgr, "batchbuffer",
|
||||
BATCH_SZ, 4096);
|
||||
if (intel->has_llc) {
|
||||
drm_intel_bo_map(intel->batch.bo, true);
|
||||
|
@@ -49,8 +49,7 @@ static void
|
||||
intel_bufferobj_alloc_buffer(struct brw_context *brw,
|
||||
struct intel_buffer_object *intel_obj)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
|
||||
intel_obj->buffer = drm_intel_bo_alloc(brw->bufmgr, "bufferobj",
|
||||
intel_obj->Base.Size, 64);
|
||||
|
||||
/* the buffer might be bound as a uniform buffer, need to update it
|
||||
@@ -187,7 +186,7 @@ intel_bufferobj_subdata(struct gl_context * ctx,
|
||||
"glBufferSubData() to a busy buffer object.\n",
|
||||
(long)size);
|
||||
drm_intel_bo *temp_bo =
|
||||
drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);
|
||||
drm_intel_bo_alloc(brw->bufmgr, "subdata temp", size, 64);
|
||||
|
||||
drm_intel_bo_subdata(temp_bo, 0, size, data);
|
||||
|
||||
@@ -301,7 +300,7 @@ intel_bufferobj_map_range(struct gl_context * ctx,
|
||||
intel_obj->range_map_buffer = malloc(length);
|
||||
obj->Pointer = intel_obj->range_map_buffer;
|
||||
} else {
|
||||
intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
|
||||
intel_obj->range_map_bo = drm_intel_bo_alloc(brw->bufmgr,
|
||||
"range map",
|
||||
length, 64);
|
||||
if (!(access & GL_MAP_READ_BIT)) {
|
||||
@@ -338,7 +337,6 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
|
||||
struct gl_buffer_object *obj)
|
||||
{
|
||||
struct brw_context *brw = brw_context(ctx);
|
||||
struct intel_context *intel = intel_context(ctx);
|
||||
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
|
||||
drm_intel_bo *temp_bo;
|
||||
|
||||
@@ -351,7 +349,7 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
|
||||
if (length == 0)
|
||||
return;
|
||||
|
||||
temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);
|
||||
temp_bo = drm_intel_bo_alloc(brw->bufmgr, "range map flush", length, 64);
|
||||
|
||||
drm_intel_bo_subdata(temp_bo, 0, length, intel_obj->range_map_buffer);
|
||||
|
||||
@@ -451,7 +449,7 @@ static void wrap_buffers(struct brw_context *brw, GLuint size)
|
||||
if (size < INTEL_UPLOAD_SIZE)
|
||||
size = INTEL_UPLOAD_SIZE;
|
||||
|
||||
intel->upload.bo = drm_intel_bo_alloc(intel->bufmgr, "upload", size, 0);
|
||||
intel->upload.bo = drm_intel_bo_alloc(brw->bufmgr, "upload", size, 0);
|
||||
intel->upload.offset = 0;
|
||||
}
|
||||
|
||||
|
@@ -527,14 +527,14 @@ intelInitContext(struct brw_context *brw,
|
||||
*/
|
||||
intel->max_gtt_map_object_size = gtt_size / 4;
|
||||
|
||||
intel->bufmgr = intelScreen->bufmgr;
|
||||
brw->bufmgr = intelScreen->bufmgr;
|
||||
|
||||
bo_reuse_mode = driQueryOptioni(&brw->optionCache, "bo_reuse");
|
||||
switch (bo_reuse_mode) {
|
||||
case DRI_CONF_BO_REUSE_DISABLED:
|
||||
break;
|
||||
case DRI_CONF_BO_REUSE_ALL:
|
||||
intel_bufmgr_gem_enable_reuse(intel->bufmgr);
|
||||
intel_bufmgr_gem_enable_reuse(brw->bufmgr);
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -564,7 +564,7 @@ intelInitContext(struct brw_context *brw,
|
||||
|
||||
INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
|
||||
if (INTEL_DEBUG & DEBUG_BUFMGR)
|
||||
dri_bufmgr_set_debug(intel->bufmgr, true);
|
||||
dri_bufmgr_set_debug(brw->bufmgr, true);
|
||||
if ((INTEL_DEBUG & DEBUG_SHADER_TIME) && intel->gen < 7) {
|
||||
fprintf(stderr,
|
||||
"shader_time debugging requires gen7 (Ivybridge) or better.\n");
|
||||
@@ -574,7 +574,7 @@ intelInitContext(struct brw_context *brw,
|
||||
intel->perf_debug = true;
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_AUB)
|
||||
drm_intel_bufmgr_gem_set_aub_dump(intel->bufmgr, true);
|
||||
drm_intel_bufmgr_gem_set_aub_dump(brw->bufmgr, true);
|
||||
|
||||
intel_batchbuffer_init(brw);
|
||||
|
||||
|
@@ -115,8 +115,6 @@ struct intel_context
|
||||
|
||||
GLuint NewGLState;
|
||||
|
||||
dri_bufmgr *bufmgr;
|
||||
|
||||
/**
|
||||
* Generation number of the hardware: 2 is 8xx, 3 is 9xx pre-965, 4 is 965.
|
||||
*/
|
||||
|
@@ -148,7 +148,7 @@ intelInitExtensions(struct gl_context *ctx)
|
||||
ctx->Extensions.ARB_texture_multisample = true;
|
||||
|
||||
/* Test if the kernel has the ioctl. */
|
||||
if (drm_intel_reg_read(intel->bufmgr, TIMESTAMP, &dummy) == 0)
|
||||
if (drm_intel_reg_read(brw->bufmgr, TIMESTAMP, &dummy) == 0)
|
||||
ctx->Extensions.ARB_timer_query = true;
|
||||
}
|
||||
|
||||
|
Reference in New Issue
Block a user