agx: Use pseudo ops for mov/not/and/xor/or

Rather than using builder magic (implicitly lowered on emit), add actual pseudo
operations (explicitly lowered before encoding). In theory this is slower, but I
doubt it matters in practice. This makes the instruction aliases first-class for
IR printing and machine inspection, which will make optimization passes easier
to write.

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/16268>
Alyssa Rosenzweig
2022-04-12 18:06:16 -04:00
parent 3d8c2f2693
commit 7d38bcb7ee
7 changed files with 142 additions and 36 deletions
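
A sketch of the difference, using only code that appears in the diffs below:
the removed builder helper lowered "mov" into a hardware bitop at emit time,
while the new pass rewrites a first-class mov instruction in place just before
encoding.

/* Old builder magic for mov (the removed UNOP_BITOP(mov, MOV) expansion):
 * the alias is lowered implicitly at emit time, so the IR never contains a
 * "mov" and printing or pattern-matching on it is impossible. */
static inline agx_instr *
agx_mov_to(agx_builder *b, agx_index dst0, agx_index src0)
{
   return agx_bitop_to(b, dst0, src0, agx_zero(), AGX_BITOP_MOV);
}

/* New flow: agx_mov_to() emits a real AGX_OPCODE_MOV instruction (declared in
 * agx_opcodes.py below), which survives IR printing and the optimizer.  Only
 * in agx_lower_pseudo(), run right before agx_pack_binary(), is it rewritten:
 *
 *    I->op = AGX_OPCODE_BITOP;
 *    I->src[1] = agx_zero();
 *    I->truth_table = AGX_BITOP_MOV;
 */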

@@ -121,30 +121,6 @@ enum agx_bitop_table {
   AGX_BITOP_OR  = 0xE
};

-#define UNOP_BITOP(name, table) \
-static inline agx_instr * \
-agx_## name ##_to(agx_builder *b, agx_index dst0, agx_index src0) \
-{ \
-   return agx_bitop_to(b, dst0, src0, agx_zero(), AGX_BITOP_ ## table); \
-}
-
-#define BINOP_BITOP(name, table) \
-static inline agx_instr * \
-agx_## name ##_to(agx_builder *b, agx_index dst0, agx_index src0, agx_index src1) \
-{ \
-   return agx_bitop_to(b, dst0, src0, src1, AGX_BITOP_ ## table); \
-}
-
-UNOP_BITOP(mov, MOV)
-UNOP_BITOP(not, NOT)
-BINOP_BITOP(and, AND)
-BINOP_BITOP(xor, XOR)
-BINOP_BITOP(or, OR)
-
-#undef UNOP_BITOP
-#undef BINOP_BITOP
-
static inline agx_instr *
agx_fmov_to(agx_builder *b, agx_index dst0, agx_index src0)
{
@@ -171,14 +147,6 @@ agx_ushr(agx_builder *b, agx_index s0, agx_index s1)
   return tmp;
}

-static inline agx_index
-agx_mov(agx_builder *b, enum agx_size size, agx_index s0)
-{
-   agx_index tmp = agx_temp(b->shader, size);
-   agx_mov_to(b, tmp, s0);
-   return tmp;
-}
-
#endif
"""

@@ -846,12 +846,16 @@ agx_emit_tex(agx_builder *b, nir_tex_instr *instr)
      layer = agx_convert(b, agx_immediate(AGX_CONVERT_F_TO_U32), layer,
                          AGX_ROUND_RTZ);

-     layer = agx_mov(b, AGX_SIZE_16, layer);
-     layer = agx_icmpsel(b, layer, d1, layer, d1, AGX_ICOND_ULT);
-     layer = agx_mov(b, AGX_SIZE_32, layer);
-     channels[nr - 1] = layer;
+     agx_index layer16 = agx_temp(b->shader, AGX_SIZE_16);
+     agx_mov_to(b, layer16, layer);
+     layer = agx_icmpsel(b, layer16, d1, layer16, d1, AGX_ICOND_ULT);
+     agx_index layer32 = agx_temp(b->shader, AGX_SIZE_32);
+     agx_mov_to(b, layer32, layer);
+     channels[nr - 1] = layer32;

      coords = agx_p_combine(b, channels[0], channels[1], channels[2], channels[3]);
   } else {
      coords = index;
@@ -1641,6 +1645,8 @@ agx_compile_shader_nir(nir_shader *nir,
   if (agx_debug & AGX_DBG_SHADERS && !skip_internal)
      agx_print_shader(ctx, stdout);

+  agx_lower_pseudo(ctx);
+
   agx_pack_binary(ctx, binary);

   if ((agx_debug & AGX_DBG_SHADERDB) && !skip_internal)

@@ -661,6 +661,7 @@ void agx_print_instr(agx_instr *I, FILE *fp);
void agx_print_block(agx_block *block, FILE *fp);
void agx_print_shader(agx_context *ctx, FILE *fp);
void agx_optimizer(agx_context *ctx);
+void agx_lower_pseudo(agx_context *ctx);
void agx_dce(agx_context *ctx);
void agx_ra(agx_context *ctx);
void agx_pack_binary(agx_context *ctx, struct util_dynarray *emission);

@@ -0,0 +1,61 @@
/*
* Copyright (C) 2022 Alyssa Rosenzweig <alyssa@rosenzweig.io>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "agx_compiler.h"
#include "agx_builder.h"
/* Lower pseudo instructions created during optimization. */
static void
agx_lower_to_unary_bitop(agx_instr *I, enum agx_bitop_table table)
{
I->op = AGX_OPCODE_BITOP;
I->src[1] = agx_zero();
I->truth_table = table;
}
static void
agx_lower_to_binary_bitop(agx_instr *I, enum agx_bitop_table table)
{
I->op = AGX_OPCODE_BITOP;
I->truth_table = table;
}
void
agx_lower_pseudo(agx_context *ctx)
{
agx_foreach_instr_global(ctx, I) {
switch (I->op) {
/* Various instructions are implemented as bitwise truth tables */
case AGX_OPCODE_MOV: agx_lower_to_unary_bitop(I, AGX_BITOP_MOV); break;
case AGX_OPCODE_NOT: agx_lower_to_unary_bitop(I, AGX_BITOP_NOT); break;
case AGX_OPCODE_AND: agx_lower_to_binary_bitop(I, AGX_BITOP_AND); break;
case AGX_OPCODE_XOR: agx_lower_to_binary_bitop(I, AGX_BITOP_XOR); break;
case AGX_OPCODE_OR: agx_lower_to_binary_bitop(I, AGX_BITOP_OR); break;
default:
break;
}
}
}

@@ -245,5 +245,12 @@ op("stop", (0x88, 0xFFFF, 2, _), dests = 0, can_eliminate = False)
op("trap", (0x08, 0xFFFF, 2, _), dests = 0, can_eliminate = False) op("trap", (0x08, 0xFFFF, 2, _), dests = 0, can_eliminate = False)
op("writeout", (0x48, 0xFF, 4, _), dests = 0, imms = [WRITEOUT], can_eliminate = False) op("writeout", (0x48, 0xFF, 4, _), dests = 0, imms = [WRITEOUT], can_eliminate = False)
# Convenient aliases.
op("mov", _, srcs = 1)
op("not", _, srcs = 1)
op("xor", _, srcs = 2)
op("and", _, srcs = 2)
op("or", _, srcs = 2)
op("p_combine", _, srcs = 4) op("p_combine", _, srcs = 4)
op("p_extract", _, srcs = 1, imms = [COMPONENT]) op("p_extract", _, srcs = 1, imms = [COMPONENT])

@@ -23,6 +23,7 @@ libasahi_agx_files = files(
  'agx_compile.c',
  'agx_dce.c',
  'agx_liveness.c',
+ 'agx_lower_pseudo.c',
  'agx_pack.c',
  'agx_print.c',
  'agx_optimizer.c',
@@ -84,6 +85,7 @@ if with_tests
    'agx_tests',
    files(
      'test/test-optimizer.cpp',
+     'test/test-lower-pseudo.cpp',
    ),
    c_args : [c_msvc_compat_args, no_override_init_args],
    gnu_symbol_visibility : 'hidden',

@@ -0,0 +1,61 @@
/*
* Copyright (C) 2021 Collabora, Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "agx_test.h"
#include <gtest/gtest.h>
#define CASE(instr, expected) INSTRUCTION_CASE(instr, expected, agx_lower_pseudo)
#define NEGCASE(instr) CASE(instr, instr)
class LowerPseudo : public testing::Test {
protected:
LowerPseudo() {
mem_ctx = ralloc_context(NULL);
wx = agx_register(0, AGX_SIZE_32);
wy = agx_register(2, AGX_SIZE_32);
wz = agx_register(4, AGX_SIZE_32);
}
~LowerPseudo() {
ralloc_free(mem_ctx);
}
void *mem_ctx;
agx_index wx, wy, wz;
};
TEST_F(LowerPseudo, Move) {
CASE(agx_mov_to(b, wx, wy), agx_bitop_to(b, wx, wy, agx_zero(), 0xA));
}
TEST_F(LowerPseudo, Not) {
CASE(agx_not_to(b, wx, wy), agx_bitop_to(b, wx, wy, agx_zero(), 0x5));
}
TEST_F(LowerPseudo, BinaryBitwise) {
CASE(agx_and_to(b, wx, wy, wz), agx_bitop_to(b, wx, wy, wz, 0x8));
CASE(agx_xor_to(b, wx, wy, wz), agx_bitop_to(b, wx, wy, wz, 0x6));
CASE(agx_or_to(b, wx, wy, wz), agx_bitop_to(b, wx, wy, wz, 0xE));
}
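
For reference, the magic numbers in these tests are the 4-bit truth tables the
hardware bitop instruction consumes. Below is a minimal standalone sketch that
checks the five tables behave as mov/not/and/xor/or. The bit ordering (bit
((s1 << 1) | s0) of the table selects the result) is an assumption that is
consistent with the AGX_BITOP_* values above, not something stated by this
commit, and the apply_bitop() helper is purely illustrative.

/* Standalone check of the 4-bit truth tables: compile with any C compiler. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Apply a 4-bit truth table bitwise to two 32-bit sources.  For each bit
 * position, the pair (s1, s0) forms a 2-bit index into the table. */
static uint32_t
apply_bitop(uint8_t table, uint32_t s0, uint32_t s1)
{
   uint32_t out = 0;

   for (unsigned bit = 0; bit < 32; ++bit) {
      unsigned idx = (((s1 >> bit) & 1) << 1) | ((s0 >> bit) & 1);
      out |= (uint32_t)((table >> idx) & 1) << bit;
   }

   return out;
}

int
main(void)
{
   uint32_t x = 0xDEADBEEF, y = 0x12345678;

   assert(apply_bitop(0xA, x, 0) == x);        /* MOV: copies src0 (src1 zeroed) */
   assert(apply_bitop(0x5, x, 0) == ~x);       /* NOT: inverts src0              */
   assert(apply_bitop(0x8, x, y) == (x & y));  /* AND                            */
   assert(apply_bitop(0x6, x, y) == (x ^ y));  /* XOR                            */
   assert(apply_bitop(0xE, x, y) == (x | y));  /* OR                             */

   puts("bitop truth tables behave as mov/not/and/xor/or");
   return 0;
}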