agx: Don't scalarize preambles in NIR

Scalarizing preambles in NIR isn't really necessary; we can do it more
efficiently in the backend. This makes the final NIR a lot less annoying to
read; the backend IR was already nice to read thanks to all the scalarized moves
being copy-propagated. Plus, this is a lot simpler.

No shader-db changes.

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21122>
Alyssa Rosenzweig
2023-02-04 12:03:21 -05:00
committed by Marge Bot
parent 7edd42cbc0
commit bfa7ec0aa0
2 changed files with 21 additions and 64 deletions
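
Before the diff itself, a minimal standalone sketch of the offset arithmetic that
"scalarizing in the backend" boils down to: preamble offsets are counted in 16-bit
uniform slots, so a component of bit size B advances the offset by B / 16 slots
(at least one). The base, bit size, and component count below are made-up example
values, not taken from this commit.

    #include <stdio.h>

    /* Illustrative sketch of the per-component offset math used when the
     * backend splits a vector load_preamble/store_preamble itself. Offsets
     * count 16-bit uniform slots, so a component of bit size B occupies
     * B / 16 slots (at least one). The concrete numbers are made up. */
    int main(void)
    {
       unsigned base = 8;       /* nir_intrinsic_base() of the intrinsic */
       unsigned bit_size = 32;  /* component bit size */
       unsigned components = 4; /* a vec4 */
       unsigned stride = bit_size / 16 > 1 ? bit_size / 16 : 1;

       for (unsigned i = 0; i < components; ++i)
          printf("component %u -> uniform slot %u\n", i, base + i * stride);

       return 0;
    }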

View File

@@ -561,18 +561,32 @@ static agx_instr *
 agx_emit_load_preamble(agx_builder *b, agx_index dst,
                        nir_intrinsic_instr *instr)
 {
-   assert(nir_dest_num_components(instr->dest) == 1 && "already scalarized");
-
-   return agx_mov_to(b, dst, agx_uniform(nir_intrinsic_base(instr), dst.size));
+   agx_index srcs[4] = {agx_null()};
+   unsigned dim = nir_dest_num_components(instr->dest);
+   assert(dim <= ARRAY_SIZE(srcs) && "shouldn't see larger vectors");
+
+   unsigned base = nir_intrinsic_base(instr);
+   unsigned stride = agx_size_align_16(dst.size);
+
+   for (unsigned i = 0; i < dim; ++i)
+      srcs[i] = agx_uniform(base + i * stride, dst.size);
+
+   return agx_emit_collect_to(b, dst, dim, srcs);
 }
 
 static agx_instr *
 agx_emit_store_preamble(agx_builder *b, nir_intrinsic_instr *instr)
 {
-   assert(nir_src_num_components(instr->src[0]) == 1 && "already scalarized");
+   agx_index vec = agx_src_index(&instr->src[0]);
+   unsigned base = nir_intrinsic_base(instr);
+   unsigned stride = agx_size_align_16(vec.size);
 
-   agx_index value = agx_src_index(&instr->src[0]);
-   agx_index offset = agx_immediate(nir_intrinsic_base(instr));
-
-   return agx_uniform_store(b, value, offset);
+   for (unsigned i = 0; i < nir_src_num_components(instr->src[0]); ++i) {
+      agx_uniform_store(b, agx_extract_nir_src(b, instr->src[0], i),
+                        agx_immediate(base + i * stride));
+   }
+
+   return NULL;
 }
 
 static enum agx_dim

View File

@@ -25,51 +25,6 @@
#include "compiler/nir/nir_builder.h"
#include "agx_compiler.h"
static bool
nir_scalarize_preamble(struct nir_builder *b, nir_instr *instr,
UNUSED void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_load_preamble &&
intr->intrinsic != nir_intrinsic_store_preamble)
return false;
bool is_load = (intr->intrinsic == nir_intrinsic_load_preamble);
nir_ssa_def *v = is_load
? &intr->dest.ssa
: nir_ssa_for_src(b, intr->src[0],
nir_src_num_components(intr->src[0]));
if (v->num_components == 1)
return false;
/* Scalarize */
b->cursor = nir_before_instr(&intr->instr);
unsigned stride = MAX2(v->bit_size / 16, 1);
unsigned base = nir_intrinsic_base(intr);
if (is_load) {
nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
for (unsigned i = 0; i < v->num_components; ++i)
comps[i] =
nir_load_preamble(b, 1, v->bit_size, .base = base + (i * stride));
nir_ssa_def_rewrite_uses(v, nir_vec(b, comps, v->num_components));
} else {
for (unsigned i = 0; i < v->num_components; ++i)
nir_store_preamble(b, nir_channel(b, v, i),
.base = base + (i * stride));
nir_instr_remove(instr);
}
return true;
}
static void
def_size(nir_ssa_def *def, unsigned *size, unsigned *align)
{
@@ -155,17 +110,5 @@ static const nir_opt_preamble_options preamble_options = {
 bool
 agx_nir_opt_preamble(nir_shader *nir, unsigned *preamble_size)
 {
-   bool progress = nir_opt_preamble(nir, &preamble_options, preamble_size);
-
-   /* If nir_opt_preamble made progress, the shader now has
-    * load_preamble/store_preamble intrinsics in it. These need to be
-    * scalarized for the backend to process them appropriately.
-    */
-   if (progress) {
-      nir_shader_instructions_pass(
-         nir, nir_scalarize_preamble,
-         nir_metadata_block_index | nir_metadata_dominance, NULL);
-   }
-
-   return progress;
+   return nir_opt_preamble(nir, &preamble_options, preamble_size);
 }
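
To close the loop, a hedged usage sketch of the simplified entry point. The wrapper
name below is hypothetical, the declaration is assumed to be available through
agx_compiler.h (the pass's own source includes it), and only agx_nir_opt_preamble's
signature is taken from the hunk above; this fragment would only build inside the
Mesa tree.

    #include "agx_compiler.h"

    /* Hypothetical caller (not part of this commit): running the preamble
     * optimization is now a single call. Any vector load_preamble or
     * store_preamble intrinsics it leaves in the shader are expanded per
     * component by the backend emitters shown in the first hunk, so no
     * extra NIR scalarization pass runs here. */
    bool
    example_run_preamble_opt(nir_shader *nir, unsigned *preamble_size)
    {
       return agx_nir_opt_preamble(nir, preamble_size);
    }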