i965/bufmgr: s/BO_ALLOC_FOR_RENDER/BO_ALLOC_BUSY/
"Alloc for render" is a terrible name for a flag because it means basically nothing. What the flag really does is allocate a busy BO, which someone theorized at one point in time would be more efficient if you're planning to immediately render to it. If the flag really means "alloc a busy BO", we should just call it that.

Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
This commit is contained in:
@@ -256,20 +256,19 @@ bo_alloc_internal(struct brw_bufmgr *bufmgr,
|
||||
struct bo_cache_bucket *bucket;
|
||||
bool alloc_from_cache;
|
||||
uint64_t bo_size;
|
||||
bool for_render = false;
|
||||
bool busy = false;
|
||||
bool zeroed = false;
|
||||
|
||||
if (flags & BO_ALLOC_FOR_RENDER)
|
||||
for_render = true;
|
||||
if (flags & BO_ALLOC_BUSY)
|
||||
busy = true;
|
||||
|
||||
if (flags & BO_ALLOC_ZEROED)
|
||||
zeroed = true;
|
||||
|
||||
/* FOR_RENDER really means "I'm ok with a busy BO". This doesn't really
|
||||
* jive with ZEROED as we have to wait for it to be idle before we can
|
||||
* memset. Just disallow that combination.
|
||||
/* BUSY doesn't really jive with ZEROED as we have to wait for it to
|
||||
* be idle before we can memset. Just disallow that combination.
|
||||
*/
|
||||
assert(!(for_render && zeroed));
|
||||
assert(!(busy && zeroed));
|
||||
|
||||
/* Round the allocated size up to a power of two number of pages. */
|
||||
bucket = bucket_for_size(bufmgr, size);
|
||||
@@ -290,7 +289,7 @@ bo_alloc_internal(struct brw_bufmgr *bufmgr,
|
||||
retry:
|
||||
alloc_from_cache = false;
|
||||
if (bucket != NULL && !list_empty(&bucket->head)) {
|
||||
if (for_render && !zeroed) {
|
||||
if (busy && !zeroed) {
|
||||
/* Allocate new render-target BOs from the tail (MRU)
|
||||
* of the list, as it will likely be hot in the GPU
|
||||
* cache and in the aperture for us. If the caller
|
||||
|
@@ -145,7 +145,7 @@ struct brw_bo {
|
||||
bool cache_coherent;
|
||||
};
|
||||
|
||||
#define BO_ALLOC_FOR_RENDER (1<<0)
|
||||
#define BO_ALLOC_BUSY (1<<0)
|
||||
#define BO_ALLOC_ZEROED (1<<1)
|
||||
|
||||
/**
|
||||
|
@@ -620,7 +620,7 @@ make_separate_stencil_surface(struct brw_context *brw,
|
||||
mt->surf.samples, ISL_TILING_W_BIT,
|
||||
ISL_SURF_USAGE_STENCIL_BIT |
|
||||
ISL_SURF_USAGE_TEXTURE_BIT,
|
||||
BO_ALLOC_FOR_RENDER, 0, NULL);
|
||||
BO_ALLOC_BUSY, 0, NULL);
|
||||
|
||||
if (!mt->stencil_mt)
|
||||
return false;
|
||||
@@ -648,7 +648,7 @@ miptree_create(struct brw_context *brw,
|
||||
ISL_TILING_W_BIT,
|
||||
ISL_SURF_USAGE_STENCIL_BIT |
|
||||
ISL_SURF_USAGE_TEXTURE_BIT,
|
||||
BO_ALLOC_FOR_RENDER,
|
||||
BO_ALLOC_BUSY,
|
||||
0,
|
||||
NULL);
|
||||
|
||||
@@ -666,7 +666,7 @@ miptree_create(struct brw_context *brw,
|
||||
first_level, last_level,
|
||||
width0, height0, depth0, num_samples, ISL_TILING_Y0_BIT,
|
||||
ISL_SURF_USAGE_DEPTH_BIT | ISL_SURF_USAGE_TEXTURE_BIT,
|
||||
BO_ALLOC_FOR_RENDER, 0, NULL);
|
||||
BO_ALLOC_BUSY, 0, NULL);
|
||||
|
||||
if (needs_separate_stencil(brw, mt, format) &&
|
||||
!make_separate_stencil_surface(brw, mt)) {
|
||||
@@ -689,7 +689,7 @@ miptree_create(struct brw_context *brw,
|
||||
etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE;
|
||||
|
||||
if (flags & MIPTREE_CREATE_BUSY)
|
||||
alloc_flags |= BO_ALLOC_FOR_RENDER;
|
||||
alloc_flags |= BO_ALLOC_BUSY;
|
||||
|
||||
isl_tiling_flags_t tiling_flags = (flags & MIPTREE_CREATE_LINEAR) ?
|
||||
ISL_TILING_LINEAR_BIT : ISL_TILING_ANY_MASK;
|
||||
@@ -773,7 +773,7 @@ intel_miptree_create_for_bo(struct brw_context *brw,
|
||||
brw->gen >= 6 ? depth_only_format : format,
|
||||
0, 0, width, height, depth, 1, ISL_TILING_Y0_BIT,
|
||||
ISL_SURF_USAGE_DEPTH_BIT | ISL_SURF_USAGE_TEXTURE_BIT,
|
||||
BO_ALLOC_FOR_RENDER, pitch, bo);
|
||||
BO_ALLOC_BUSY, pitch, bo);
|
||||
if (!mt)
|
||||
return NULL;
|
||||
|
||||
@@ -789,7 +789,7 @@ intel_miptree_create_for_bo(struct brw_context *brw,
|
||||
ISL_TILING_W_BIT,
|
||||
ISL_SURF_USAGE_STENCIL_BIT |
|
||||
ISL_SURF_USAGE_TEXTURE_BIT,
|
||||
BO_ALLOC_FOR_RENDER, pitch, bo);
|
||||
BO_ALLOC_BUSY, pitch, bo);
|
||||
if (!mt)
|
||||
return NULL;
|
||||
|
||||
@@ -1746,7 +1746,7 @@ intel_miptree_alloc_ccs(struct brw_context *brw,
|
||||
* fast-clear operation. In that case, being hot in caches more useful.
|
||||
*/
|
||||
const uint32_t alloc_flags = mt->aux_usage == ISL_AUX_USAGE_CCS_E ?
|
||||
BO_ALLOC_ZEROED : BO_ALLOC_FOR_RENDER;
|
||||
BO_ALLOC_ZEROED : BO_ALLOC_BUSY;
|
||||
mt->mcs_buf = intel_alloc_aux_buffer(brw, "ccs-miptree",
|
||||
&temp_ccs_surf, alloc_flags, mt);
|
||||
if (!mt->mcs_buf) {
|
||||
@@ -1811,7 +1811,7 @@ intel_miptree_alloc_hiz(struct brw_context *brw,
|
||||
isl_surf_get_hiz_surf(&brw->isl_dev, &mt->surf, &temp_hiz_surf);
|
||||
assert(ok);
|
||||
|
||||
const uint32_t alloc_flags = BO_ALLOC_FOR_RENDER;
|
||||
const uint32_t alloc_flags = BO_ALLOC_BUSY;
|
||||
mt->hiz_buf = intel_alloc_aux_buffer(brw, "hiz-miptree",
|
||||
&temp_hiz_surf, alloc_flags, mt);
|
||||
|
||||
@@ -2967,7 +2967,7 @@ intel_update_r8stencil(struct brw_context *brw,
|
||||
src->surf.samples,
|
||||
ISL_TILING_Y0_BIT,
|
||||
ISL_SURF_USAGE_TEXTURE_BIT,
|
||||
BO_ALLOC_FOR_RENDER, 0, NULL);
|
||||
BO_ALLOC_BUSY, 0, NULL);
|
||||
assert(mt->r8stencil_mt);
|
||||
}
|
||||
|
||||
|
@@ -2485,7 +2485,7 @@ intelAllocateBuffer(__DRIscreen *dri_screen,
|
||||
height,
|
||||
cpp,
|
||||
I915_TILING_X, &pitch,
|
||||
BO_ALLOC_FOR_RENDER);
|
||||
BO_ALLOC_BUSY);
|
||||
|
||||
if (intelBuffer->bo == NULL) {
|
||||
free(intelBuffer);
|
||||
|
Reference in New Issue
Block a user