agx: Don't prefix pseudo-ops

It's not really buying us anything and it clutters the IR.

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/18804>
This commit is contained in:
Alyssa Rosenzweig
2022-09-22 22:35:39 -04:00
committed by Marge Bot
parent 40f0ac2082
commit c2bc8c1384
7 changed files with 17 additions and 17 deletions

View File

@@ -148,7 +148,7 @@ agx_emit_combine_to(agx_builder *b, agx_index dst, unsigned nr_srcs,
if (nr_srcs == 1)
return agx_mov_to(b, dst, srcs[0]);
agx_instr *I = agx_p_combine_to(b, dst, nr_srcs);
agx_instr *I = agx_combine_to(b, dst, nr_srcs);
agx_foreach_src(I, s)
I->src[s] = srcs[s];
@@ -214,7 +214,7 @@ agx_emit_split(agx_builder *b, agx_index *dests, agx_index vec, unsigned n)
}
/* Emit the split */
agx_p_split_to(b, dests[0], dests[1], dests[2], dests[3], vec);
agx_split_to(b, dests[0], dests[1], dests[2], dests[3], vec);
}
static void
@@ -255,7 +255,7 @@ agx_umul_high_to(agx_builder *b, agx_index dst, agx_index P, agx_index Q)
agx_index product = agx_temp(b->shader, P.size + 1);
agx_imad_to(b, product, agx_abs(P), agx_abs(Q), agx_zero(), 0);
return agx_p_split_to(b, agx_null(), dst, agx_null(), agx_null(), product);
return agx_split_to(b, agx_null(), dst, agx_null(), agx_null(), product);
}
static agx_index
@@ -1097,7 +1097,7 @@ agx_emit_tex(agx_builder *b, nir_tex_instr *instr)
/* We explicitly don't care about the split cache for this */
lod = agx_temp(b->shader, AGX_SIZE_32);
agx_instr *I = agx_p_combine_to(b, lod, 2 * n);
agx_instr *I = agx_combine_to(b, lod, 2 * n);
for (unsigned i = 0; i < n; ++i) {
I->src[(2 * i) + 0] = agx_emit_extract(b, index, i);
@@ -1156,7 +1156,7 @@ static void
agx_emit_logical_end(agx_builder *b)
{
if (!b->shader->current_block->unconditional_jumps)
agx_p_logical_end(b);
agx_logical_end(b);
}
/* NIR loops are treated as a pair of AGX loops:

View File

@@ -655,7 +655,7 @@ agx_after_block_logical(agx_block *block)
{
/* Search for a logical_end */
agx_foreach_instr_in_block_rev(block, I) {
if (I->op == AGX_OPCODE_P_LOGICAL_END)
if (I->op == AGX_OPCODE_LOGICAL_END)
return agx_before_instr(I);
}

View File

@@ -263,10 +263,10 @@ op("and", _, srcs = 2)
op("or", _, srcs = 2)
# Indicates the logical end of the block, before final branches/control flow
op("p_logical_end", _, dests = 0, srcs = 0, can_eliminate = False)
op("logical_end", _, dests = 0, srcs = 0, can_eliminate = False)
op("p_combine", _, srcs = VARIABLE)
op("p_split", _, srcs = 1, dests = 4)
op("combine", _, srcs = VARIABLE)
op("split", _, srcs = 1, dests = 4)
# Phis are special-cased in the IR as they (uniquely) can take an unbounded
# number of sources.

View File

@@ -196,7 +196,7 @@ agx_optimizer_forward(agx_context *ctx)
/* Inline immediates if we can. TODO: systematic */
if (I->op != AGX_OPCODE_ST_VARY && I->op != AGX_OPCODE_ST_TILE &&
I->op != AGX_OPCODE_P_COMBINE && I->op != AGX_OPCODE_TEXTURE_SAMPLE &&
I->op != AGX_OPCODE_COMBINE && I->op != AGX_OPCODE_TEXTURE_SAMPLE &&
I->op != AGX_OPCODE_TEXTURE_LOAD)
agx_optimizer_inline_imm(defs, I, info.nr_srcs, info.is_float);
}

View File

@@ -45,7 +45,7 @@ agx_write_registers(agx_instr *I, unsigned d)
case AGX_OPCODE_LDCF:
return 6;
case AGX_OPCODE_P_COMBINE:
case AGX_OPCODE_COMBINE:
return I->nr_srcs * size;
default:
return size;
@@ -120,7 +120,7 @@ agx_ra_assign_local(agx_block *block, uint8_t *ssa_to_reg, uint8_t *ncomps)
/* Optimization: if a split contains the last use of a vector, the split
* can be removed by assigning the destinations overlapping the source.
*/
if (I->op == AGX_OPCODE_P_SPLIT && I->src[0].kill) {
if (I->op == AGX_OPCODE_SPLIT && I->src[0].kill) {
unsigned reg = ssa_to_reg[I->src[0].value];
unsigned length = ncomps[I->src[0].value];
unsigned width = agx_size_align_16(agx_split_width(I));
@@ -306,7 +306,7 @@ agx_ra(agx_context *ctx)
/* Lower away RA pseudo-instructions */
agx_builder b = agx_init_builder(ctx, agx_after_instr(ins));
if (ins->op == AGX_OPCODE_P_COMBINE) {
if (ins->op == AGX_OPCODE_COMBINE) {
unsigned base = agx_index_to_reg(ssa_to_reg, ins->dest[0]);
unsigned width = agx_size_align_16(ins->dest[0].size);
@@ -328,7 +328,7 @@ agx_ra(agx_context *ctx)
agx_emit_parallel_copies(&b, copies, n);
agx_remove_instruction(ins);
continue;
} else if (ins->op == AGX_OPCODE_P_SPLIT) {
} else if (ins->op == AGX_OPCODE_SPLIT) {
unsigned base = agx_index_to_reg(ssa_to_reg, ins->src[0]);
unsigned width = agx_size_align_16(agx_split_width(ins));
@@ -363,7 +363,7 @@ agx_ra(agx_context *ctx)
/* Phi nodes can be removed now */
agx_foreach_instr_global_safe(ctx, I) {
if (I->op == AGX_OPCODE_PHI || I->op == AGX_OPCODE_P_LOGICAL_END)
if (I->op == AGX_OPCODE_PHI || I->op == AGX_OPCODE_LOGICAL_END)
agx_remove_instruction(I);
/* Remove identity moves */

View File

@@ -66,7 +66,7 @@ agx_validate_block_form(agx_block *block)
state = AGX_BLOCK_STATE_BODY;
break;
case AGX_OPCODE_P_LOGICAL_END:
case AGX_OPCODE_LOGICAL_END:
agx_validate_assert(state != AGX_BLOCK_STATE_CF);
state = AGX_BLOCK_STATE_CF;
break;

View File

@@ -131,7 +131,7 @@ TEST_F(Optimizer, Copyprop)
TEST_F(Optimizer, InlineHazards)
{
NEGCASE32({
agx_instr *I = agx_p_combine_to(b, out, 4);
agx_instr *I = agx_combine_to(b, out, 4);
I->src[0] = agx_mov_imm(b, AGX_SIZE_32, 0);
I->src[1] = wy;
I->src[2] = wz;