agx: Translate phi nodes

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/16268>
Author: Alyssa Rosenzweig
Date:   2022-04-12 21:40:23 -04:00
commit 8ff0a29baf (parent 4791dc9125)

2 files changed, 72 insertions(+), 1 deletion(-)

--- a/src/asahi/compiler/agx_compile.c
+++ b/src/asahi/compiler/agx_compile.c

@@ -1036,6 +1036,55 @@ agx_emit_jump(agx_builder *b, nir_jump_instr *instr)
    ctx->current_block->unconditional_jumps = true;
 }
 
+static void
+agx_emit_phi(agx_builder *b, nir_phi_instr *instr)
+{
+   agx_instr *I = agx_phi_to(b, agx_dest_index(&instr->dest));
+
+   /* Deferred */
+   I->phi = instr;
+}
+
+/* Look up the AGX block corresponding to a given NIR block. Used when
+ * translating phi nodes after emitting all blocks.
+ */
+static agx_block *
+agx_from_nir_block(agx_context *ctx, nir_block *block)
+{
+   return ctx->indexed_nir_blocks[block->index];
+}
+
+static void
+agx_emit_phi_deferred(agx_context *ctx, agx_block *block, agx_instr *I)
+{
+   nir_phi_instr *phi = I->phi;
+
+   /* Guaranteed by lower_phis_to_scalar */
+   assert(phi->dest.ssa.num_components == 1);
+
+   I->nr_srcs = exec_list_length(&phi->srcs);
+   I->src = rzalloc_array(I, agx_index, I->nr_srcs);
+
+   nir_foreach_phi_src(src, phi) {
+      agx_block *pred = agx_from_nir_block(ctx, src->pred);
+      unsigned i = agx_predecessor_index(block, pred);
+      assert(i < I->nr_srcs);
+
+      I->src[i] = agx_src_index(&src->src);
+   }
+}
+
+static void
+agx_emit_phis_deferred(agx_context *ctx)
+{
+   agx_foreach_block(ctx, block) {
+      agx_foreach_instr_in_block(block, I) {
+         if (I->op == AGX_OPCODE_PHI)
+            agx_emit_phi_deferred(ctx, block, I);
+      }
+   }
+}
+
 static void
 agx_emit_instr(agx_builder *b, struct nir_instr *instr)
 {
@@ -1060,6 +1109,10 @@ agx_emit_instr(agx_builder *b, struct nir_instr *instr)
       agx_emit_jump(b, nir_instr_as_jump(instr));
       break;
 
+   case nir_instr_type_phi:
+      agx_emit_phi(b, nir_instr_as_phi(instr));
+      break;
+
    default:
       unreachable("should've been lowered");
    }
@@ -1089,6 +1142,8 @@ emit_block(agx_context *ctx, nir_block *block)
    list_addtail(&blk->link, &ctx->blocks);
    list_inithead(&blk->instructions);
 
+   ctx->indexed_nir_blocks[block->index] = blk;
+
    agx_builder _b = agx_init_builder(ctx, agx_after_block(blk));
 
    nir_foreach_instr(instr, block) {
@@ -1677,6 +1732,11 @@ agx_compile_shader_nir(nir_shader *nir,
       if (!func->impl)
          continue;
 
+      nir_index_blocks(func->impl);
+      ctx->indexed_nir_blocks =
+         rzalloc_array(ctx, agx_block *, func->impl->num_blocks);
+
       /* TODO: Handle phi nodes instead of just convert_from_ssa and yolo'ing
        * the mapping of nir_register to hardware registers and guaranteeing bad
        * performance and breaking spilling... */
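
Two details above are load-bearing: nir_index_blocks gives every nir_block a dense index into the new indexed_nir_blocks array, and rzalloc_array zero-fills it, so a slot reads back NULL until emit_block stores the matching agx_block. A hypothetical defensive variant of agx_from_nir_block (not in this patch) that checks the invariant:

/* Hypothetical checked lookup; the patch's agx_from_nir_block can omit
 * the assert because phi resolution runs only after emit_cf_list has
 * visited every block. */
static agx_block *
agx_from_nir_block_checked(agx_context *ctx, nir_block *block)
{
   agx_block *blk = ctx->indexed_nir_blocks[block->index];
   assert(blk != NULL && "phi sources resolve after all blocks exist");
   return blk;
}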
@@ -1699,6 +1759,7 @@ agx_compile_shader_nir(nir_shader *nir,
       ctx->max_register = nir_regalloc;
       ctx->alloc += func->impl->ssa_alloc;
       emit_cf_list(ctx, &func->impl->body);
+      agx_emit_phis_deferred(ctx);
 
       break; /* TODO: Multi-function shaders */
    }
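
The payoff of the two-pass scheme: emission order no longer matters for phi sources. Pass 1 (agx_emit_phi) just stashes the NIR phi on a placeholder instruction; pass 2 (agx_emit_phis_deferred) runs once every agx_block exists, so a loop-header phi whose value arrives along the back edge from a later block resolves cleanly. A minimal self-contained sketch of the same pattern, using a hypothetical mini-IR rather than the Mesa types:

#include <assert.h>

#define MAX_PREDS 4

/* Hypothetical mini-IR, standing in for agx_block / agx_instr. */
typedef struct mini_block {
   struct mini_block *preds[MAX_PREDS];
   unsigned nr_preds;
} mini_block;

typedef struct {
   mini_block *block;              /* block containing the phi */
   mini_block *in_pred[MAX_PREDS]; /* stashed by pass 1 */
   int in_val[MAX_PREDS];
   unsigned nr_in;
   int src[MAX_PREDS];             /* filled by pass 2 */
} mini_phi;

/* Counterpart of agx_predecessor_index: a predecessor's position in
 * its successor's predecessor list. */
static unsigned
pred_index(const mini_block *block, const mini_block *pred)
{
   for (unsigned i = 0; i < block->nr_preds; ++i) {
      if (block->preds[i] == pred)
         return i;
   }
   assert(!"not a predecessor");
   return ~0u;
}

/* Pass 2: every block exists by now, so each incoming value can be
 * placed at the index of the predecessor it flows from. */
static void
fill_phi_deferred(mini_phi *phi)
{
   for (unsigned i = 0; i < phi->nr_in; ++i) {
      unsigned s = pred_index(phi->block, phi->in_pred[i]);
      phi->src[s] = phi->in_val[i];
   }
}

The key invariant, mirrored from agx_emit_phi_deferred, is that a phi's i-th source carries the value flowing in from the block's i-th predecessor, regardless of the order NIR lists the sources in.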

--- a/src/asahi/compiler/agx_compiler.h
+++ b/src/asahi/compiler/agx_compiler.h

@@ -286,7 +286,16 @@ struct agx_block;
 typedef struct {
    /* Must be first */
    struct list_head link;
 
-   agx_index *src;
+   /* The sources list.
+    *
+    * As a special case to work around ordering issues when translating
+    * phis, if nr_srcs == 0 and the opcode is PHI, holds a pointer to the
+    * NIR phi node.
+    */
+   union {
+      agx_index *src;
+      nir_phi_instr *phi;
+   };
 
    enum agx_opcode op;
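
Because src and phi share storage, nr_srcs acts as the implicit union tag: a phi fresh out of pass 1 has nr_srcs == 0 with the phi pointer live, and agx_emit_phi_deferred switches the live member to src when it allocates the source array. A hypothetical helper (not part of this patch) that spells out the discipline:

/* Hypothetical: true while I->phi is the live union member, i.e. after
 * agx_emit_phi has run but before agx_emit_phi_deferred fills sources. */
static inline bool
agx_phi_is_deferred(const agx_instr *I)
{
   return I->op == AGX_OPCODE_PHI && I->nr_srcs == 0;
}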
@@ -414,6 +423,7 @@ typedef struct {
    agx_block *continue_block;
    agx_block *break_block;
    agx_block *after_block;
+   agx_block **indexed_nir_blocks;
 
    /* During instruction selection, map from vector agx_index to its scalar
     * components, populated by a split. */