pan/midgard: Optimize comparisons with similar operations

Optimizes comparisons by removing the invert flag on operands when we can
prove the comparison yields the same result without the inverts.

Reviewed-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Tested-by: Marge Bot <https://gitlab.freedesktop.org/mesa/mesa/merge_requests/3036>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/merge_requests/3036>
commit 3e1e4ad13d
parent 8e9e94d084
Author: Afonso Bordado
Date: 2019-12-10 13:18:00 +00:00
Committed by: Marge Bot
4 changed files with 92 additions and 1 deletion
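
Concretely, the new pass looks for an integer comparison whose two sources are
both produced by invert-flagged ALU ops, drops the inverts, and flips the
argument order for the ordered comparisons. An illustrative before/after in
pseudocode (`inot` here stands in for any ALU op carrying the invert flag, not
a specific Midgard opcode):

    /* Before: both comparison sources are bitwise-inverted */
    t0 = inot a
    t1 = inot b
    r0 = ilt t0, t1

    /* After: inverts dropped; ilt/ile/ult/ule also swap their arguments */
    r0 = ilt b, a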

@@ -121,7 +121,7 @@ typedef struct midgard_instruction {
         uint16_t mask;
 
         /* For ALU ops only: set to true to invert (bitwise NOT) the
-         * destination of an integer-out op. Not imeplemented in hardware but
+         * destination of an integer-out op. Not implemented in hardware but
          * allows more optimizations */
 
         bool invert;
@@ -704,5 +704,6 @@ bool midgard_opt_fuse_src_invert(compiler_context *ctx, midgard_block *block);
 bool midgard_opt_fuse_dest_invert(compiler_context *ctx, midgard_block *block);
 bool midgard_opt_csel_invert(compiler_context *ctx, midgard_block *block);
 bool midgard_opt_promote_fmov(compiler_context *ctx, midgard_block *block);
+bool midgard_opt_drop_cmp_invert(compiler_context *ctx, midgard_block *block);
 
 #endif

@@ -69,6 +69,19 @@
         op == TEXTURE_OP_DFDY \
 )
 
+#define OP_IS_UNSIGNED_CMP(op) ( \
+        op == midgard_alu_op_ult || \
+        op == midgard_alu_op_ule \
+)
+
+#define OP_IS_INTEGER_CMP(op) ( \
+        op == midgard_alu_op_ieq || \
+        op == midgard_alu_op_ine || \
+        op == midgard_alu_op_ilt || \
+        op == midgard_alu_op_ile || \
+        OP_IS_UNSIGNED_CMP(op) \
+)
+
 /* ALU control words are single bit fields with a lot of space */
 
 #define ALU_ENAB_VEC_MUL (1 << 17)

@@ -2583,6 +2583,7 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl
                         progress |= midgard_opt_fuse_src_invert(ctx, block);
                         progress |= midgard_opt_fuse_dest_invert(ctx, block);
                         progress |= midgard_opt_csel_invert(ctx, block);
+                        progress |= midgard_opt_drop_cmp_invert(ctx, block);
                 }
         } while (progress);

@@ -299,3 +299,79 @@ midgard_opt_csel_invert(compiler_context *ctx, midgard_block *block)
 
         return progress;
 }
+
+/* Returns the invert flag of the instruction writing `node` */
+
+static bool
+mir_is_inverted(compiler_context *ctx, unsigned node)
+{
+        mir_foreach_instr_global(ctx, ins) {
+                if (ins->compact_branch) continue;
+                if (ins->dest != node) continue;
+
+                return ins->invert;
+        }
+
+        unreachable("Invalid node passed");
+}
+
+/* Optimizes comparisons which invert both arguments
+ *
+ * ieq(not(a), not(b)) = ieq(a, b)
+ * ine(not(a), not(b)) = ine(a, b)
+ *
+ * This also applies to ilt and ile if we flip the argument order:
+ * Proofs below provided by Alyssa Rosenzweig
+ *
+ * not(x) = -(x + 1)
+ *
+ * ( not(A) <= not(B) ) <=> ( -(A+1) <= -(B+1) )
+ *                      <=> ( A+1 >= B+1 )
+ *                      <=> ( B <= A )
+ *
+ * On unsigned comparisons (ult / ule) we can perform the same optimization
+ * with the additional restriction that the source registers must
+ * have the same size.
+ *
+ * TODO: We may not need them to be of the same size, if we can
+ * prove that they are the same after sext/zext
+ *
+ * not(x) = 2^n - x - 1
+ *
+ * ( not(A) <= not(B) ) <=> ( 2^n - A - 1 <= 2^n - B - 1 )
+ *                      <=> ( -A <= -B )
+ *                      <=> ( B <= A )
+ */
+
+bool
+midgard_opt_drop_cmp_invert(compiler_context *ctx, midgard_block *block)
+{
+        bool progress = false;
+
+        mir_foreach_instr_in_block_safe(block, ins) {
+                if (ins->type != TAG_ALU_4) continue;
+                if (!OP_IS_INTEGER_CMP(ins->alu.op)) continue;
+
+                /* Both sources must be SSA values with a single use, so that
+                 * stripping their inverts cannot affect any other reader */
+                if ((ins->src[0] & IS_REG) || (ins->src[1] & IS_REG)) continue;
+                if (!mir_single_use(ctx, ins->src[0]) || !mir_single_use(ctx, ins->src[1])) continue;
+
+                bool a_inverted = mir_is_inverted(ctx, ins->src[0]);
+                bool b_inverted = mir_is_inverted(ctx, ins->src[1]);
+
+                if (!a_inverted || !b_inverted) continue;
+
+                /* The unsigned proof relies on both sources having the same
+                 * bit width n */
+                if (OP_IS_UNSIGNED_CMP(ins->alu.op) && mir_srcsize(ins, 0) != mir_srcsize(ins, 1)) continue;
+
+                mir_strip_inverted(ctx, ins->src[0]);
+                mir_strip_inverted(ctx, ins->src[1]);
+
+                /* ieq/ine are symmetric; the ordered comparisons need their
+                 * arguments flipped, per the proofs above */
+                if (ins->alu.op != midgard_alu_op_ieq && ins->alu.op != midgard_alu_op_ine)
+                        mir_flip(ins);
+
+                progress |= true;
+        }
+
+        return progress;
+}
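
The proofs in the comment above can also be spot-checked mechanically. Below
is a minimal standalone C sketch, separate from the commit itself, that
exhaustively verifies the identities for all pairs of 8-bit operands; the same
argument goes through for any fixed width n:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Standalone sanity check, not part of Mesa: verifies the identities that
 * justify dropping the invert flag on comparison sources, for every pair
 * of 8-bit values. */
int
main(void)
{
        for (int i = 0; i < 256; ++i) {
                for (int j = 0; j < 256; ++j) {
                        int8_t a = (int8_t) i, b = (int8_t) j;
                        uint8_t ua = (uint8_t) i, ub = (uint8_t) j;

                        /* ieq / ine: the inverts cancel outright */
                        assert((a == b) == ((int8_t) ~a == (int8_t) ~b));
                        assert((a != b) == ((int8_t) ~a != (int8_t) ~b));

                        /* ilt / ile: drop the inverts, flip the order */
                        assert(((int8_t) ~a <  (int8_t) ~b) == (b <  a));
                        assert(((int8_t) ~a <= (int8_t) ~b) == (b <= a));

                        /* ult / ule: same, given equal source sizes */
                        assert(((uint8_t) ~ua <  (uint8_t) ~ub) == (ub <  ua));
                        assert(((uint8_t) ~ua <= (uint8_t) ~ub) == (ub <= ua));
                }
        }

        puts("comparison-invert identities hold for all 8-bit pairs");
        return 0;
}

Built with assertions enabled, this prints the success message; any
counterexample to the proofs would trip an assert.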