agx: Ensure that the optimizer sees legitimate SSA

Expecting it to keep unused definitions around is wishful thinking. Add
an "anchoring" unit_test instruction to consume the results so they
don't have to be precoloured registers.

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/18687>
Author:    Alyssa Rosenzweig
Date:      2022-09-10 17:31:07 -04:00
Committer: Marge Bot
Commit:    640fd089a2 (parent 52467c2d1e)

2 changed files with 50 additions and 34 deletions
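
Concretely, each test case now builds both its "input" and "expected"
programs along these lines (a sketch of what the reworked CASE32 macro
expands to, per the diff below; agx_fadd_to stands in for an arbitrary
instruction under test):

   {
      /* SSA temporary instead of a precoloured hardware register */
      UNUSED agx_index out = agx_temp(b->shader, AGX_SIZE_32);

      /* the instruction under test writes the temporary... */
      agx_fadd_to(b, out, wx, wy);

      /* ...and the anchoring unit_test instruction consumes it, so the
       * definition has a real use and survives agx_dce() */
      agx_unit_test(b, out);
   }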

src/asahi/compiler/agx_opcodes.py

@@ -272,3 +272,5 @@ op("p_split", _, srcs = 1, dests = 4)
 # Phis are special-cased in the IR as they (uniquely) can take an unbounded
 # number of sources.
 op("phi", _, srcs = 0)
+
+op("unit_test", _, dests = 0, srcs = 1, can_eliminate = False)

src/asahi/compiler/test/test-optimizer.cpp

@@ -32,8 +32,20 @@ agx_optimize_and_dce(agx_context *ctx)
    agx_dce(ctx);
 }
 
-#define CASE(instr, expected) INSTRUCTION_CASE(instr, expected, agx_optimize_and_dce)
-#define NEGCASE(instr) CASE(instr, instr)
+#define CASE(instr, expected, size) INSTRUCTION_CASE(\
+   { UNUSED agx_index out = agx_temp(b->shader, AGX_SIZE_ ## size); \
+     instr; agx_unit_test(b, out); }, \
+   { UNUSED agx_index out = agx_temp(b->shader, AGX_SIZE_ ## size); \
+     expected; agx_unit_test(b, out); }, \
+   agx_optimize_and_dce)
+
+#define NEGCASE(instr, size) CASE(instr, instr, size)
+
+#define CASE16(instr, expected) CASE(instr, expected, 16)
+#define CASE32(instr, expected) CASE(instr, expected, 32)
+
+#define NEGCASE16(instr) NEGCASE(instr, 16)
+#define NEGCASE32(instr) NEGCASE(instr, 32)
 
 static inline agx_index
 agx_fmov(agx_builder *b, agx_index s0)
@@ -53,6 +65,8 @@ protected:
       wz = agx_register(4, AGX_SIZE_32);
 
       hx = agx_register(0, AGX_SIZE_16);
+      hy = agx_register(1, AGX_SIZE_16);
+      hz = agx_register(2, AGX_SIZE_16);
    }
 
    ~Optimizer() {
@@ -61,63 +75,63 @@ protected:
 
    void *mem_ctx;
 
-   agx_index wx, wy, wz, hx;
+   agx_index wx, wy, wz, hx, hy, hz;
 };
 
 TEST_F(Optimizer, FloatCopyprop)
 {
-   CASE(agx_fadd_to(b, wz, agx_abs(agx_fmov(b, wx)), wy),
-        agx_fadd_to(b, wz, agx_abs(wx), wy));
+   CASE32(agx_fadd_to(b, out, agx_abs(agx_fmov(b, wx)), wy),
+          agx_fadd_to(b, out, agx_abs(wx), wy));
 
-   CASE(agx_fadd_to(b, wz, agx_neg(agx_fmov(b, wx)), wy),
-        agx_fadd_to(b, wz, agx_neg(wx), wy));
+   CASE32(agx_fadd_to(b, out, agx_neg(agx_fmov(b, wx)), wy),
+          agx_fadd_to(b, out, agx_neg(wx), wy));
 }
 
 TEST_F(Optimizer, FusedFABSNEG)
 {
-   CASE(agx_fadd_to(b, wz, agx_fmov(b, agx_abs(wx)), wy),
-        agx_fadd_to(b, wz, agx_abs(wx), wy));
+   CASE32(agx_fadd_to(b, out, agx_fmov(b, agx_abs(wx)), wy),
+          agx_fadd_to(b, out, agx_abs(wx), wy));
 
-   CASE(agx_fmul_to(b, wz, wx, agx_fmov(b, agx_neg(agx_abs(wx)))),
-        agx_fmul_to(b, wz, wx, agx_neg(agx_abs(wx))));
+   CASE32(agx_fmul_to(b, out, wx, agx_fmov(b, agx_neg(agx_abs(wx)))),
+          agx_fmul_to(b, out, wx, agx_neg(agx_abs(wx))));
 }
 
 TEST_F(Optimizer, FusedFabsAbsorb)
 {
-   CASE(agx_fadd_to(b, wz, agx_abs(agx_fmov(b, agx_abs(wx))), wy),
-        agx_fadd_to(b, wz, agx_abs(wx), wy));
+   CASE32(agx_fadd_to(b, out, agx_abs(agx_fmov(b, agx_abs(wx))), wy),
+          agx_fadd_to(b, out, agx_abs(wx), wy));
 }
 
 TEST_F(Optimizer, FusedFnegCancel)
 {
-   CASE(agx_fmul_to(b, wz, wx, agx_neg(agx_fmov(b, agx_neg(wx)))),
-        agx_fmul_to(b, wz, wx, wx));
+   CASE32(agx_fmul_to(b, out, wx, agx_neg(agx_fmov(b, agx_neg(wx)))),
+          agx_fmul_to(b, out, wx, wx));
 
-   CASE(agx_fmul_to(b, wz, wx, agx_neg(agx_fmov(b, agx_neg(agx_abs(wx))))),
-        agx_fmul_to(b, wz, wx, agx_abs(wx)));
+   CASE32(agx_fmul_to(b, out, wx, agx_neg(agx_fmov(b, agx_neg(agx_abs(wx))))),
+          agx_fmul_to(b, out, wx, agx_abs(wx)));
 }
 
 TEST_F(Optimizer, FmulFsatF2F16)
 {
-   CASE({
+   CASE16({
       agx_index tmp = agx_temp(b->shader, AGX_SIZE_32);
       agx_fmov_to(b, tmp, agx_fmul(b, wx, wy))->saturate = true;
-      agx_fmov_to(b, hx, tmp);
+      agx_fmov_to(b, out, tmp);
    }, {
-      agx_fmul_to(b, hx, wx, wy)->saturate = true;
+      agx_fmul_to(b, out, wx, wy)->saturate = true;
    });
 }
 
 TEST_F(Optimizer, Copyprop)
 {
-   CASE(agx_fmul_to(b, wz, wx, agx_mov(b, wy)), agx_fmul_to(b, wz, wx, wy));
-   CASE(agx_fmul_to(b, wz, agx_mov(b, wx), agx_mov(b, wy)), agx_fmul_to(b, wz, wx, wy));
+   CASE32(agx_fmul_to(b, out, wx, agx_mov(b, wy)), agx_fmul_to(b, out, wx, wy));
+   CASE32(agx_fmul_to(b, out, agx_mov(b, wx), agx_mov(b, wy)), agx_fmul_to(b, out, wx, wy));
 }
 
 TEST_F(Optimizer, InlineHazards)
 {
-   NEGCASE({
-      agx_instr *I = agx_p_combine_to(b, wx, 4);
+   NEGCASE32({
+      agx_instr *I = agx_p_combine_to(b, out, 4);
       I->src[0] = agx_mov_imm(b, AGX_SIZE_32, 0);
       I->src[1] = wy;
       I->src[2] = wz;
@@ -127,27 +141,27 @@ TEST_F(Optimizer, InlineHazards)
 
 TEST_F(Optimizer, CopypropRespectsAbsNeg)
 {
-   CASE(agx_fadd_to(b, wz, agx_abs(agx_mov(b, wx)), wy),
-        agx_fadd_to(b, wz, agx_abs(wx), wy));
+   CASE32(agx_fadd_to(b, out, agx_abs(agx_mov(b, wx)), wy),
+          agx_fadd_to(b, out, agx_abs(wx), wy));
 
-   CASE(agx_fadd_to(b, wz, agx_neg(agx_mov(b, wx)), wy),
-        agx_fadd_to(b, wz, agx_neg(wx), wy));
+   CASE32(agx_fadd_to(b, out, agx_neg(agx_mov(b, wx)), wy),
+          agx_fadd_to(b, out, agx_neg(wx), wy));
 
-   CASE(agx_fadd_to(b, wz, agx_neg(agx_abs(agx_mov(b, wx))), wy),
-        agx_fadd_to(b, wz, agx_neg(agx_abs(wx)), wy));
+   CASE32(agx_fadd_to(b, out, agx_neg(agx_abs(agx_mov(b, wx))), wy),
+          agx_fadd_to(b, out, agx_neg(agx_abs(wx)), wy));
 }
 
 TEST_F(Optimizer, IntCopyprop)
 {
-   CASE(agx_xor_to(b, wz, agx_mov(b, wx), wy),
-        agx_xor_to(b, wz, wx, wy));
+   CASE32(agx_xor_to(b, out, agx_mov(b, wx), wy),
+          agx_xor_to(b, out, wx, wy));
 }
 
 TEST_F(Optimizer, IntCopypropDoesntConvert)
 {
-   NEGCASE({
+   NEGCASE32({
       agx_index cvt = agx_temp(b->shader, AGX_SIZE_32);
       agx_mov_to(b, cvt, hx);
-      agx_xor_to(b, wz, cvt, wy);
+      agx_xor_to(b, out, cvt, wy);
    });
 }