radeonsi: NIR equivalent of si_create_clear_buffer_rmw_cs()

Replaced the existing internal TGSI compute shader, which clears
a read-modify-write buffer, with its NIR equivalent. The shader disassembly
generated by the new NIR variant is identical to the previous
implementation. These changes remove the additional conversion step
from TGSI to NIR for the shader at runtime. Tested on a Navi 23 card.

Reviewed-by: Mihai Preda <mhpreda@gmail.com>
Reviewed-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Reviewed-by: Marek Olšák <marek.olsak@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/15356>
This commit is contained in:
Ganesh Belgur Ramachandra
2022-03-11 12:27:34 -06:00
committed by Marge Bot
parent ff2b2bc568
commit 582e7f1599
4 changed files with 41 additions and 45 deletions

View File

@@ -243,7 +243,7 @@ void si_compute_clear_buffer_rmw(struct si_context *sctx, struct pipe_resource *
sctx->cs_user_data[1] = ~writebitmask;
if (!sctx->cs_clear_buffer_rmw)
sctx->cs_clear_buffer_rmw = si_create_clear_buffer_rmw_cs(&sctx->b);
sctx->cs_clear_buffer_rmw = si_create_clear_buffer_rmw_cs(sctx);
si_launch_grid_internal_ssbos(sctx, &info, sctx->cs_clear_buffer_rmw, flags, coher,
1, &sb, 0x1);

View File

@@ -1526,7 +1526,7 @@ void *si_get_blitter_vs(struct si_context *sctx, enum blitter_attrib_type type,
void *si_create_fixed_func_tcs(struct si_context *sctx);
void *si_create_dma_compute_shader(struct pipe_context *ctx, unsigned num_dwords_per_thread,
bool dst_stream_cache_policy, bool is_copy);
void *si_create_clear_buffer_rmw_cs(struct pipe_context *ctx);
void *si_create_clear_buffer_rmw_cs(struct si_context *sctx);
void *si_create_copy_image_compute_shader(struct pipe_context *ctx);
void *si_create_copy_image_compute_shader_1d_array(struct pipe_context *ctx);
void *si_create_dcc_decompress_cs(struct pipe_context *ctx);

View File

@@ -146,3 +146,42 @@ void *gfx9_create_clear_dcc_msaa_cs(struct si_context *sctx, struct si_texture *
return create_nir_cs(sctx, &b);
}
/* Create a compute shader that clears a buffer with a read-modify-write:
 * data = (data & inverted_writemask) | clear_value_masked. */
void *si_create_clear_buffer_rmw_cs(struct si_context *sctx)
{
   const nir_shader_compiler_options *options =
      sctx->b.screen->get_compiler_options(sctx->b.screen, PIPE_SHADER_IR_NIR, PIPE_SHADER_COMPUTE);

   /* One 64-thread workgroup; each thread operates on one vec4 of the buffer.
    * The clear value and the inverted writemask arrive through 2 user data
    * SGPR components (set up by the caller in sctx->cs_user_data).
    */
   nir_builder b =
      nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, options, "clear_buffer_rmw_cs");
   b.shader->info.workgroup_size[0] = 64;
   b.shader->info.workgroup_size[1] = 1;
   b.shader->info.workgroup_size[2] = 1;
   b.shader->info.cs.user_data_components_amd = 2;
   b.shader->info.num_ssbos = 1;

   /* Linear thread index: blockID * 64 + threadID. */
   nir_ssa_def *thread_index = get_global_ids(&b, 1);

   /* Scale to a byte offset: each thread loads/stores one vec4 (16 bytes). */
   nir_ssa_def *byte_offset = nir_ishl(&b, thread_index, nir_imm_int(&b, 4));

   nir_ssa_def *buf_index = nir_imm_int(&b, 0);
   nir_ssa_def *value = nir_load_ssbo(&b, 4, 32, buf_index, byte_offset, .align_mul = 4);

   /* User data SGPRs: .x = clear_value_masked, .y = inverted_writemask. */
   nir_ssa_def *sgprs = nir_load_user_data_amd(&b);

   /* value = (value & inverted_writemask) | clear_value_masked; */
   value = nir_iand(&b, value, nir_channel(&b, sgprs, 1));
   value = nir_ior(&b, value, nir_channel(&b, sgprs, 0));

   nir_store_ssbo(&b, value, buf_index, byte_offset,
                  .access = SI_COMPUTE_DST_CACHE_POLICY != L2_LRU ? ACCESS_STREAM_CACHE_POLICY : 0,
                  .align_mul = 4);

   return create_nir_cs(sctx, &b);
}

View File

@@ -214,49 +214,6 @@ void *si_create_dma_compute_shader(struct pipe_context *ctx, unsigned num_dwords
return cs;
}
/* Create a compute shader that clears a buffer with a read-modify-write:
 * DATA = (DATA & inverted_writemask) | clear_value_masked. */
void *si_create_clear_buffer_rmw_cs(struct pipe_context *ctx)
{
   /* TGSI template; the trailing %s optionally appends the stream cache
    * policy modifier to the STORE. */
   static const char *shader_templ = "COMP\n"
                                     "PROPERTY CS_FIXED_BLOCK_WIDTH 64\n"
                                     "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
                                     "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
                                     "PROPERTY CS_USER_DATA_COMPONENTS_AMD 2\n"
                                     "DCL SV[0], THREAD_ID\n"
                                     "DCL SV[1], BLOCK_ID\n"
                                     "DCL SV[2], CS_USER_DATA_AMD\n"
                                     "DCL BUFFER[0]\n"
                                     "DCL TEMP[0..1]\n"
                                     "IMM[0] UINT32 {64, 16, 0, 0}\n"
                                     /* Linear thread index: BLOCK_ID * 64 + THREAD_ID. */
                                     "UMAD TEMP[0].x, SV[1].xxxx, IMM[0].xxxx, SV[0].xxxx\n"
                                     /* Scale to a byte offset; one vec4 (16 bytes) per thread. */
                                     "UMUL TEMP[0].x, TEMP[0].xxxx, IMM[0].yyyy\n"
                                     "LOAD TEMP[1], BUFFER[0], TEMP[0].xxxx\n"
                                     /* Keep only the bits outside the writemask. */
                                     "AND TEMP[1], TEMP[1], SV[2].yyyy\n"
                                     /* Merge in the (pre-masked) clear value. */
                                     "OR TEMP[1], TEMP[1], SV[2].xxxx\n"
                                     "STORE BUFFER[0].xyzw, TEMP[0], TEMP[1]%s\n"
                                     "END\n";

   char code[2048];
   struct tgsi_token tokens[1024];
   struct pipe_compute_state state = {0};

   snprintf(code, sizeof(code), shader_templ,
            SI_COMPUTE_DST_CACHE_POLICY != L2_LRU ? ", STREAM_CACHE_POLICY" : "");

   if (!tgsi_text_translate(code, tokens, ARRAY_SIZE(tokens))) {
      assert(false);
      return NULL;
   }

   state.ir_type = PIPE_SHADER_IR_TGSI;
   state.prog = tokens;
   return ctx->create_compute_state(ctx, &state);
}
/* Create the compute shader that is used to collect the results.
*
* One compute grid with a single thread is launched for every query result