agx: Rename "combine" to "collect"

For consistency with ir3 and bifrost.
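
For illustration only (not part of this patch), a builder call site changes in name but not in behaviour. The sketch below mirrors the agx_vec2 helper touched by the diff and assumes the usual agx_builder API; the header name and the example function are assumptions, not code from this commit:

    #include "agx_compiler.h" /* AGX IR types and builder helpers (assumed header) */

    /* Sketch: gather two scalars into a vector with the renamed pseudo-op.
     * Before this commit the call was agx_emit_combine_to(); the semantics
     * (lowered to moves after register allocation) are unchanged. */
    static agx_index
    example_vec2(agx_builder *b, agx_index s0, agx_index s1)
    {
       agx_index dst = agx_temp(b->shader, s0.size);
       agx_index srcs[2] = { s0, s1 };

       agx_emit_collect_to(b, dst, 2, srcs);
       return dst;
    }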

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/18804>
Alyssa Rosenzweig, 2022-09-22 22:35:46 -04:00
committed by Marge Bot
parent 82e8e709cb
commit 1dcaade3e2
5 changed files with 19 additions and 19 deletions

@@ -114,13 +114,13 @@ agx_emit_extract(agx_builder *b, agx_index vec, unsigned channel)
    agx_index *components = _mesa_hash_table_u64_search(b->shader->allocated_vec,
                                                        agx_index_to_key(vec));

-   assert(components != NULL && "missing agx_emit_combine_to");
+   assert(components != NULL && "missing agx_emit_collect_to");

    return components[channel];
 }

 static void
-agx_cache_combine(agx_builder *b, agx_index dst, unsigned nr_srcs,
+agx_cache_collect(agx_builder *b, agx_index dst, unsigned nr_srcs,
                   agx_index *srcs)
 {
    /* Lifetime of a hash table entry has to be at least as long as the table */
@@ -135,20 +135,20 @@ agx_cache_combine(agx_builder *b, agx_index dst, unsigned nr_srcs,

 /*
  * Combine multiple scalars into a vector destination. This corresponds to
- * p_combine, lowered to moves (a shuffle in general) after register allocation.
+ * collect, lowered to moves (a shuffle in general) after register allocation.
  *
  * To optimize vector extractions, we record the individual channels
  */
 static agx_instr *
-agx_emit_combine_to(agx_builder *b, agx_index dst, unsigned nr_srcs,
+agx_emit_collect_to(agx_builder *b, agx_index dst, unsigned nr_srcs,
                     agx_index *srcs)
 {
-   agx_cache_combine(b, dst, nr_srcs, srcs);
+   agx_cache_collect(b, dst, nr_srcs, srcs);

    if (nr_srcs == 1)
       return agx_mov_to(b, dst, srcs[0]);

-   agx_instr *I = agx_combine_to(b, dst, nr_srcs);
+   agx_instr *I = agx_collect_to(b, dst, nr_srcs);

    agx_foreach_src(I, s)
       I->src[s] = srcs[s];
@@ -161,7 +161,7 @@ agx_vec4(agx_builder *b, agx_index s0, agx_index s1, agx_index s2, agx_index s3)
 {
    agx_index dst = agx_temp(b->shader, s0.size);
    agx_index idx[4] = { s0, s1, s2, s3 };
-   agx_emit_combine_to(b, dst, 4, idx);
+   agx_emit_collect_to(b, dst, 4, idx);

    return dst;
 }
@@ -170,7 +170,7 @@ agx_vec2(agx_builder *b, agx_index s0, agx_index s1)
 {
    agx_index dst = agx_temp(b->shader, s0.size);
    agx_index idx[2] = { s0, s1 };
-   agx_emit_combine_to(b, dst, 2, idx);
+   agx_emit_collect_to(b, dst, 2, idx);

    return dst;
 }
@@ -221,7 +221,7 @@ agx_emit_cached_split(agx_builder *b, agx_index vec, unsigned n)
 {
    agx_index dests[4] = { agx_null(), agx_null(), agx_null(), agx_null() };
    agx_emit_split(b, dests, vec, n);
-   agx_cache_combine(b, vec, n, dests);
+   agx_cache_collect(b, vec, n, dests);
 }

 static void
@@ -359,7 +359,7 @@ agx_emit_load_attr(agx_builder *b, agx_index dest, nir_intrinsic_instr *instr)
    for (unsigned i = actual_comps; i < instr->num_components; ++i)
       dests[i] = default_value[i];

-   agx_emit_combine_to(b, dest, instr->num_components, dests);
+   agx_emit_collect_to(b, dest, instr->num_components, dests);
 }

 static void
@@ -393,7 +393,7 @@ agx_emit_load_vary_flat(agx_builder *b, agx_index dest, nir_intrinsic_instr *ins
       cf.value++;
    }

-   agx_emit_combine_to(b, dest, components, dests);
+   agx_emit_collect_to(b, dest, components, dests);
 }

 static void
@@ -585,7 +585,7 @@ agx_emit_load_frag_coord(agx_builder *b, agx_index dst, nir_intrinsic_instr *ins
       }
    }

-   agx_emit_combine_to(b, dst, 4, dests);
+   agx_emit_collect_to(b, dst, 4, dests);
 }

 static agx_instr *
@@ -970,7 +970,7 @@ agx_emit_alu(agx_builder *b, nir_alu_instr *instr)
    case nir_op_vec4:
    {
       agx_index idx[] = { s0, s1, s2, s3 };
-      return agx_emit_combine_to(b, dst, srcs, idx);
+      return agx_emit_collect_to(b, dst, srcs, idx);
    }

    case nir_op_vec8:
@@ -1099,7 +1099,7 @@ agx_emit_tex(agx_builder *b, nir_tex_instr *instr)

          /* We explicitly don't cache about the split cache for this */
          lod = agx_temp(b->shader, AGX_SIZE_32);
-         agx_instr *I = agx_combine_to(b, lod, 2 * n);
+         agx_instr *I = agx_collect_to(b, lod, 2 * n);

          for (unsigned i = 0; i < n; ++i) {
             I->src[(2 * i) + 0] = agx_emit_extract(b, index, i);

@@ -265,7 +265,7 @@ op("or", _, srcs = 2)
 # Indicates the logical end of the block, before final branches/control flow
 op("logical_end", _, dests = 0, srcs = 0, can_eliminate = False)

-op("combine", _, srcs = VARIABLE)
+op("collect", _, srcs = VARIABLE)
 op("split", _, srcs = 1, dests = VARIABLE)
 op("phi", _, srcs = VARIABLE)

@@ -196,7 +196,7 @@ agx_optimizer_forward(agx_context *ctx)

       /* Inline immediates if we can. TODO: systematic */
       if (I->op != AGX_OPCODE_ST_VARY && I->op != AGX_OPCODE_ST_TILE &&
-          I->op != AGX_OPCODE_COMBINE && I->op != AGX_OPCODE_TEXTURE_SAMPLE &&
+          I->op != AGX_OPCODE_COLLECT && I->op != AGX_OPCODE_TEXTURE_SAMPLE &&
           I->op != AGX_OPCODE_TEXTURE_LOAD)
          agx_optimizer_inline_imm(defs, I, info.nr_srcs, info.is_float);
    }

@@ -45,7 +45,7 @@ agx_write_registers(agx_instr *I, unsigned d)
    case AGX_OPCODE_LDCF:
       return 6;
-   case AGX_OPCODE_COMBINE:
+   case AGX_OPCODE_COLLECT:
       return I->nr_srcs * size;
    default:
       return size;
@@ -306,7 +306,7 @@ agx_ra(agx_context *ctx)
       /* Lower away RA pseudo-instructions */
       agx_builder b = agx_init_builder(ctx, agx_after_instr(ins));

-      if (ins->op == AGX_OPCODE_COMBINE) {
+      if (ins->op == AGX_OPCODE_COLLECT) {
          unsigned base = agx_index_to_reg(ssa_to_reg, ins->dest[0]);
          unsigned width = agx_size_align_16(ins->dest[0].size);

@@ -131,7 +131,7 @@ TEST_F(Optimizer, Copyprop)
 TEST_F(Optimizer, InlineHazards)
 {
    NEGCASE32({
-      agx_instr *I = agx_combine_to(b, out, 4);
+      agx_instr *I = agx_collect_to(b, out, 4);
       I->src[0] = agx_mov_imm(b, AGX_SIZE_32, 0);
       I->src[1] = wy;
       I->src[2] = wz;