i965/bufmgr: s/BO_ALLOC_FOR_RENDER/BO_ALLOC_BUSY/

"Alloc for render" is a terrible name for a flag because it means
basically nothing.  What the flag really does is allocate a busy BO
which someone theorized at one point in time would be more efficient if
you're planning to immediately render to it.  If the flag really means
"alloc a busy BO" we should just call it that.

Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Author:  Jason Ekstrand
Date:    2017-08-19 15:03:39 -07:00
Commit:  c366943ebf
Parent:  cadcd89278

4 changed files with 18 additions and 19 deletions
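
For readers who don't live in brw_bufmgr, here is a minimal sketch of the cache policy the diff's comments describe. It is not the Mesa implementation: every type and helper in it (toy_bo, toy_cache, toy_bo_wait_idle) is invented for illustration. A BO_ALLOC_BUSY request may reuse the most recently used cached buffer even though it is probably still busy on the GPU, since that buffer is also the one most likely to be hot in caches; any other request takes the least recently used buffer and waits for it to go idle; and BO_ALLOC_ZEROED is disallowed together with BUSY because zeroing requires an idle buffer.

/* Illustrative sketch only -- not Mesa code.  All types and helpers here
 * (toy_bo, toy_cache, toy_bo_wait_idle) are invented for this example.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#define BO_ALLOC_BUSY   (1<<0)
#define BO_ALLOC_ZEROED (1<<1)

struct toy_bo {
   void *map;      /* CPU mapping used for zeroing */
   size_t size;
   bool busy;      /* still referenced by unretired GPU work */
};

struct toy_cache {
   struct toy_bo *entries[16];  /* entries[0] = LRU ... entries[count-1] = MRU */
   int count;
};

static void
toy_bo_wait_idle(struct toy_bo *bo)
{
   /* Stand-in for waiting on the kernel; a real driver would block here. */
   bo->busy = false;
}

/* Pick a cached BO according to the allocation flags, or return NULL if the
 * bucket is empty and a fresh BO must be created instead.
 */
static struct toy_bo *
toy_cache_take(struct toy_cache *cache, unsigned flags)
{
   const bool busy_ok = flags & BO_ALLOC_BUSY;
   const bool zeroed = flags & BO_ALLOC_ZEROED;

   /* Zeroing requires waiting for idle, which defeats the point of BUSY. */
   assert(!(busy_ok && zeroed));

   if (cache->count == 0)
      return NULL;

   struct toy_bo *bo;
   if (busy_ok) {
      /* MRU end: likely still hot in GPU caches; busyness is acceptable. */
      bo = cache->entries[--cache->count];
   } else {
      /* LRU end: most likely already idle, so the wait is cheap. */
      bo = cache->entries[0];
      cache->count--;
      memmove(&cache->entries[0], &cache->entries[1],
              cache->count * sizeof(cache->entries[0]));
      toy_bo_wait_idle(bo);
      if (zeroed)
         memset(bo->map, 0, bo->size);
   }

   return bo;
}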

src/mesa/drivers/dri/i965/brw_bufmgr.c

@@ -256,20 +256,19 @@ bo_alloc_internal(struct brw_bufmgr *bufmgr,
    struct bo_cache_bucket *bucket;
    bool alloc_from_cache;
    uint64_t bo_size;
-   bool for_render = false;
+   bool busy = false;
    bool zeroed = false;

-   if (flags & BO_ALLOC_FOR_RENDER)
-      for_render = true;
+   if (flags & BO_ALLOC_BUSY)
+      busy = true;

    if (flags & BO_ALLOC_ZEROED)
       zeroed = true;

-   /* FOR_RENDER really means "I'm ok with a busy BO". This doesn't really
-    * jive with ZEROED as we have to wait for it to be idle before we can
-    * memset. Just disallow that combination.
+   /* BUSY does doesn't really jive with ZEROED as we have to wait for it to
+    * be idle before we can memset. Just disallow that combination.
     */
-   assert(!(for_render && zeroed));
+   assert(!(busy && zeroed));

    /* Round the allocated size up to a power of two number of pages. */
    bucket = bucket_for_size(bufmgr, size);
@@ -290,7 +289,7 @@ bo_alloc_internal(struct brw_bufmgr *bufmgr,
 retry:
    alloc_from_cache = false;
    if (bucket != NULL && !list_empty(&bucket->head)) {
-      if (for_render && !zeroed) {
+      if (busy && !zeroed) {
          /* Allocate new render-target BOs from the tail (MRU)
           * of the list, as it will likely be hot in the GPU
           * cache and in the aperture for us. If the caller

src/mesa/drivers/dri/i965/brw_bufmgr.h

@@ -145,7 +145,7 @@ struct brw_bo {
    bool cache_coherent;
 };

-#define BO_ALLOC_FOR_RENDER (1<<0)
+#define BO_ALLOC_BUSY (1<<0)
 #define BO_ALLOC_ZEROED (1<<1)

 /**

src/mesa/drivers/dri/i965/intel_mipmap_tree.c

@@ -620,7 +620,7 @@ make_separate_stencil_surface(struct brw_context *brw,
                                  mt->surf.samples, ISL_TILING_W_BIT,
                                  ISL_SURF_USAGE_STENCIL_BIT |
                                  ISL_SURF_USAGE_TEXTURE_BIT,
-                                 BO_ALLOC_FOR_RENDER, 0, NULL);
+                                 BO_ALLOC_BUSY, 0, NULL);

    if (!mt->stencil_mt)
       return false;
@@ -648,7 +648,7 @@ miptree_create(struct brw_context *brw,
                           ISL_TILING_W_BIT,
                           ISL_SURF_USAGE_STENCIL_BIT |
                           ISL_SURF_USAGE_TEXTURE_BIT,
-                          BO_ALLOC_FOR_RENDER,
+                          BO_ALLOC_BUSY,
                           0,
                           NULL);
@@ -666,7 +666,7 @@ miptree_create(struct brw_context *brw,
                        first_level, last_level,
                        width0, height0, depth0, num_samples, ISL_TILING_Y0_BIT,
                        ISL_SURF_USAGE_DEPTH_BIT | ISL_SURF_USAGE_TEXTURE_BIT,
-                       BO_ALLOC_FOR_RENDER, 0, NULL);
+                       BO_ALLOC_BUSY, 0, NULL);

       if (needs_separate_stencil(brw, mt, format) &&
           !make_separate_stencil_surface(brw, mt)) {
@@ -689,7 +689,7 @@ miptree_create(struct brw_context *brw,
    etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE;

    if (flags & MIPTREE_CREATE_BUSY)
-      alloc_flags |= BO_ALLOC_FOR_RENDER;
+      alloc_flags |= BO_ALLOC_BUSY;

    isl_tiling_flags_t tiling_flags = (flags & MIPTREE_CREATE_LINEAR) ?
       ISL_TILING_LINEAR_BIT : ISL_TILING_ANY_MASK;
@@ -773,7 +773,7 @@ intel_miptree_create_for_bo(struct brw_context *brw,
                                 brw->gen >= 6 ? depth_only_format : format,
                                 0, 0, width, height, depth, 1, ISL_TILING_Y0_BIT,
                                 ISL_SURF_USAGE_DEPTH_BIT | ISL_SURF_USAGE_TEXTURE_BIT,
-                                BO_ALLOC_FOR_RENDER, pitch, bo);
+                                BO_ALLOC_BUSY, pitch, bo);

       if (!mt)
          return NULL;
@@ -789,7 +789,7 @@ intel_miptree_create_for_bo(struct brw_context *brw,
                                 ISL_TILING_W_BIT,
                                 ISL_SURF_USAGE_STENCIL_BIT |
                                 ISL_SURF_USAGE_TEXTURE_BIT,
-                                BO_ALLOC_FOR_RENDER, pitch, bo);
+                                BO_ALLOC_BUSY, pitch, bo);

       if (!mt)
          return NULL;
@@ -1746,7 +1746,7 @@ intel_miptree_alloc_ccs(struct brw_context *brw,
     * fast-clear operation. In that case, being hot in caches more useful.
     */
    const uint32_t alloc_flags = mt->aux_usage == ISL_AUX_USAGE_CCS_E ?
-                                BO_ALLOC_ZEROED : BO_ALLOC_FOR_RENDER;
+                                BO_ALLOC_ZEROED : BO_ALLOC_BUSY;
    mt->mcs_buf = intel_alloc_aux_buffer(brw, "ccs-miptree",
                                         &temp_ccs_surf, alloc_flags, mt);
    if (!mt->mcs_buf) {
@@ -1811,7 +1811,7 @@ intel_miptree_alloc_hiz(struct brw_context *brw,
       isl_surf_get_hiz_surf(&brw->isl_dev, &mt->surf, &temp_hiz_surf);
    assert(ok);

-   const uint32_t alloc_flags = BO_ALLOC_FOR_RENDER;
+   const uint32_t alloc_flags = BO_ALLOC_BUSY;
    mt->hiz_buf = intel_alloc_aux_buffer(brw, "hiz-miptree",
                                         &temp_hiz_surf, alloc_flags, mt);
@@ -2967,7 +2967,7 @@ intel_update_r8stencil(struct brw_context *brw,
                                src->surf.samples,
                                ISL_TILING_Y0_BIT,
                                ISL_SURF_USAGE_TEXTURE_BIT,
-                               BO_ALLOC_FOR_RENDER, 0, NULL);
+                               BO_ALLOC_BUSY, 0, NULL);

    assert(mt->r8stencil_mt);
 }

src/mesa/drivers/dri/i965/intel_screen.c

@@ -2485,7 +2485,7 @@ intelAllocateBuffer(__DRIscreen *dri_screen,
                                           height,
                                           cpp,
                                           I915_TILING_X, &pitch,
-                                          BO_ALLOC_FOR_RENDER);
+                                          BO_ALLOC_BUSY);

    if (intelBuffer->bo == NULL) {
       free(intelBuffer);