radeonsi: rename TC_L2_dirty -> L2_cache_dirty

Reviewed-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/31193>
Author: Marek Olšák
Date: 2024-08-23 08:07:47 -04:00
Committed-by: Marge Bot
Parent: 0f063ed1e7
Commit: 7cc2fee378
6 changed files with 16 additions and 16 deletions


@@ -582,16 +582,16 @@ void si_barrier_after_internal_op(struct si_context *sctx, unsigned flags,
    if (num_buffers)
       sctx->flags |= SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE | SI_CONTEXT_PFP_SYNC_ME;
-   /* We must set TC_L2_dirty for buffers because:
+   /* We must set L2_cache_dirty for buffers because:
     * - GFX6,12: CP DMA doesn't use L2.
     * - GFX6-7,12: Index buffer reads don't use L2.
     * - GFX6-8,12: CP doesn't use L2.
     * - GFX6-8: CB/DB don't use L2.
     *
-    * TC_L2_dirty is checked explicitly when buffers are used in those cases to enforce coherency.
+    * L2_cache_dirty is checked explicitly when buffers are used in those cases to enforce coherency.
     */
    while (writable_buffers_mask)
-      si_resource(buffers[u_bit_scan(&writable_buffers_mask)].buffer)->TC_L2_dirty = true;
+      si_resource(buffers[u_bit_scan(&writable_buffers_mask)].buffer)->L2_cache_dirty = true;
    /* Make sure RBs see our DCC image stores if RBs and TCCs (L2 instances) are non-coherent. */
    if (sctx->gfx_level >= GFX10 && sctx->screen->info.tcc_rb_non_coherent) {

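The comment in the hunk above describes a lazy coherency protocol: an internal op that writes a buffer only through L2 marks it dirty, and the flush is deferred until a reader that bypasses L2 actually uses the buffer. A minimal, self-contained sketch of the "mark" half of that protocol (an illustrative model with made-up names, not radeonsi code):

#include <stdbool.h>

/* Illustrative stand-in for the relevant part of struct si_resource. */
struct buffer {
   bool l2_cache_dirty; /* data written through L2, not yet written back to memory */
};

/* Model of a write path that goes through L2 (e.g. an internal clear/copy op).
 * On generations where some readers bypass L2, the data is not visible to them
 * yet, so only record that fact; the write-back happens lazily at the first
 * L2-bypassing use. */
static void write_buffer_through_l2(struct buffer *buf)
{
   /* ... emit the GPU write ... */
   buf->l2_cache_dirty = true; /* corresponds to ->L2_cache_dirty = true above */
}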

@@ -171,7 +171,7 @@ bool si_alloc_resource(struct si_screen *sscreen, struct si_resource *res)
    radeon_bo_reference(sscreen->ws, &old_buf, NULL);
    util_range_set_empty(&res->valid_buffer_range);
-   res->TC_L2_dirty = false;
+   res->L2_cache_dirty = false;
    if (res->b.b.target != PIPE_BUFFER && !(res->b.b.flags & SI_RESOURCE_AUX_PLANE)) {
       /* The buffer is shared with other planes. */
@@ -567,7 +567,7 @@ static struct si_resource *si_alloc_buffer_struct(struct pipe_screen *screen,
    buf->buf = NULL;
    buf->bind_history = 0;
-   buf->TC_L2_dirty = false;
+   buf->L2_cache_dirty = false;
    util_range_init(&buf->valid_buffer_range);
    return buf;
 }


@@ -1214,10 +1214,10 @@ static void si_launch_grid(struct pipe_context *ctx, const struct pipe_grid_info
    if (info->indirect) {
       /* Indirect buffers are read through L2 on GFX9-GFX11, but not other hw. */
       if ((sctx->gfx_level <= GFX8 || sctx->gfx_level == GFX12) &&
-          si_resource(info->indirect)->TC_L2_dirty) {
+          si_resource(info->indirect)->L2_cache_dirty) {
          sctx->flags |= SI_CONTEXT_WB_L2 | SI_CONTEXT_PFP_SYNC_ME;
          si_mark_atom_dirty(sctx, &sctx->atoms.s.barrier);
-         si_resource(info->indirect)->TC_L2_dirty = false;
+         si_resource(info->indirect)->L2_cache_dirty = false;
       }
    }

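The si_launch_grid hunk above shows the matching "check" half of the protocol: when an L2-bypassing fetch (here the indirect-dispatch read on GFX6-8/GFX12) is about to consume a dirty buffer, the driver requests an L2 write-back plus a front-end sync and clears the flag so the flush is not repeated. A self-contained sketch of that pattern (hypothetical names; the callback stands in for the SI_CONTEXT_WB_L2 | SI_CONTEXT_PFP_SYNC_ME flush seen above):

#include <stdbool.h>

struct buffer {
   bool l2_cache_dirty;
};

/* Before a read path that bypasses L2, write L2 back once and clear the flag,
 * so later uses of the same (unmodified) buffer skip the flush. */
static void sync_before_non_l2_read(struct buffer *buf,
                                    void (*writeback_l2_and_sync)(void))
{
   if (buf->l2_cache_dirty) {
      writeback_l2_and_sync();     /* model of the WB_L2 + PFP sync request */
      buf->l2_cache_dirty = false; /* mirrors ->L2_cache_dirty = false above */
   }
}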

@@ -324,7 +324,7 @@ struct si_resource {
     * an index buffer. The reason is that VGT DMA index fetching doesn't
     * use L2.
     */
-   bool TC_L2_dirty;
+   bool L2_cache_dirty;
    /* Whether this resource is referenced by bindless handles. */
    bool texture_handle_allocated;


@@ -2123,7 +2123,7 @@ static void si_draw(struct pipe_context *ctx,
          /* GFX6-7 don't read index buffers through L2. */
          sctx->flags |= SI_CONTEXT_WB_L2 | SI_CONTEXT_PFP_SYNC_ME;
          si_mark_atom_dirty(sctx, &sctx->atoms.s.barrier);
-         si_resource(indexbuf)->TC_L2_dirty = false;
+         si_resource(indexbuf)->L2_cache_dirty = false;
       } else if (!IS_DRAW_VERTEX_STATE && info->has_user_indices) {
          unsigned start_offset;
@@ -2141,12 +2141,12 @@ static void si_draw(struct pipe_context *ctx,
          /* info->start will be added by the drawing code */
          index_offset -= start_offset;
       } else if ((GFX_VERSION <= GFX7 || GFX_VERSION == GFX12) &&
-                 si_resource(indexbuf)->TC_L2_dirty) {
+                 si_resource(indexbuf)->L2_cache_dirty) {
          /* GFX8-GFX11 reads index buffers through L2, so it doesn't
           * need this. */
          sctx->flags |= SI_CONTEXT_WB_L2 | SI_CONTEXT_PFP_SYNC_ME;
          si_mark_atom_dirty(sctx, &sctx->atoms.s.barrier);
-         si_resource(indexbuf)->TC_L2_dirty = false;
+         si_resource(indexbuf)->L2_cache_dirty = false;
       }
    }
@@ -2156,17 +2156,17 @@ static void si_draw(struct pipe_context *ctx,
    if (!IS_DRAW_VERTEX_STATE && indirect) {
       /* Indirect buffers use L2 on GFX9-GFX11, but not other hw. */
       if (GFX_VERSION <= GFX8 || GFX_VERSION == GFX12) {
-         if (indirect->buffer && si_resource(indirect->buffer)->TC_L2_dirty) {
+         if (indirect->buffer && si_resource(indirect->buffer)->L2_cache_dirty) {
             sctx->flags |= SI_CONTEXT_WB_L2 | SI_CONTEXT_PFP_SYNC_ME;
             si_mark_atom_dirty(sctx, &sctx->atoms.s.barrier);
-            si_resource(indirect->buffer)->TC_L2_dirty = false;
+            si_resource(indirect->buffer)->L2_cache_dirty = false;
          }
          if (indirect->indirect_draw_count &&
-             si_resource(indirect->indirect_draw_count)->TC_L2_dirty) {
+             si_resource(indirect->indirect_draw_count)->L2_cache_dirty) {
             sctx->flags |= SI_CONTEXT_WB_L2 | SI_CONTEXT_PFP_SYNC_ME;
             si_mark_atom_dirty(sctx, &sctx->atoms.s.barrier);
-            si_resource(indirect->indirect_draw_count)->TC_L2_dirty = false;
+            si_resource(indirect->indirect_draw_count)->L2_cache_dirty = false;
          }
       }
       total_direct_count = INT_MAX; /* just set something other than 0 to enable shader culling */

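The GFX_VERSION conditions repeated in the draw path encode which fetch paths bypass L2 on which hardware generations. Distilled only from the comments and checks visible in this commit (an illustrative summary with its own enum, not the driver's amd_gfx_level or any actual helper):

#include <stdbool.h>

enum gfx_level { GFX6 = 6, GFX7, GFX8, GFX9, GFX10, GFX11, GFX12 };

/* "GFX6-7 don't read index buffers through L2"; GFX12 bypasses it again. */
static bool index_fetch_bypasses_l2(enum gfx_level gfx)
{
   return gfx <= GFX7 || gfx == GFX12;
}

/* "Indirect buffers use L2 on GFX9-GFX11, but not other hw." */
static bool indirect_fetch_bypasses_l2(enum gfx_level gfx)
{
   return gfx <= GFX8 || gfx == GFX12;
}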

@@ -86,7 +86,7 @@ static void si_set_streamout_targets(struct pipe_context *ctx, unsigned num_targ
     */
    for (i = 0; i < old_num_targets; i++)
       if (sctx->streamout.targets[i])
-         si_resource(sctx->streamout.targets[i]->b.buffer)->TC_L2_dirty = true;
+         si_resource(sctx->streamout.targets[i]->b.buffer)->L2_cache_dirty = true;
    /* Invalidate the scalar cache in case a streamout buffer is
     * going to be used as a constant buffer.