pan/mdg: eliminate references to ins->alu.op
In an effort to simplify MIR by not prepacking instructions, this commit removes references to `ins->alu.op` so that the `ins->alu` field can later be removed from midgard_instruction. Every place that was using `ins->alu.op` now uses the generic `ins->op` field instead; the `alu.op` field is then reconstructed right before emission. The new field is generic and can contain opcodes for ALU, texture or load/store instructions. It should be used in conjunction with `ins->type`, just like the prepacked `op` field it replaces.

Signed-off-by: Italo Nicola <italonicola@collabora.com>
Reviewed-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5933>
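For context, the heart of the change is visible in the emit_alu_bundle hunks below: the packer no longer trusts a prepacked ins->alu.op, but copies the prepacked ALU word and patches the generic ins->op in at the last moment. A minimal sketch of that pattern, assuming the midgard_instruction and midgard_vector_alu types from compiler.h; pack_vector_alu() is a hypothetical helper for illustration, not the real emit path:

#include <string.h>

/* Sketch: reconstruct alu.op from the generic op field only at emission
 * time. Everything else in the prepacked ALU word stays untouched. */
static size_t
pack_vector_alu(void *dest, const midgard_instruction *ins)
{
        midgard_vector_alu source_alu = ins->alu; /* prepacked modes/outmod */
        source_alu.op = ins->op;                  /* generic op; meaningful
                                                     since ins->type is
                                                     TAG_ALU_4 */
        memcpy(dest, &source_alu, sizeof(source_alu));
        return sizeof(source_alu);
}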
@@ -175,6 +175,9 @@ typedef struct midgard_instruction {
         unsigned nr_dependencies;
         BITSET_WORD *dependents;
 
+        /* Use this in conjunction with `type` */
+        unsigned op;
+
         union {
                 midgard_load_store_word load_store;
                 midgard_vector_alu alu;
@@ -550,8 +553,8 @@ v_mov(unsigned src, unsigned dest)
                 .swizzle = SWIZZLE_IDENTITY,
                 .dest = dest,
                 .dest_type = nir_type_uint32,
+                .op = midgard_alu_op_imov,
                 .alu = {
-                        .op = midgard_alu_op_imov,
                         .reg_mode = midgard_reg_mode_32,
                         .outmod = midgard_outmod_int_wrap
                 },
@@ -1220,7 +1220,6 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
         ins.mask = mask_of(nr_components);
 
         midgard_vector_alu alu = {
-                .op = op,
                 .reg_mode = reg_mode,
                 .outmod = outmod,
         };
@@ -1234,6 +1233,8 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
 
         ins.alu = alu;
 
+        ins.op = op;
+
         /* Late fixup for emulated instructions */
 
         if (instr->op == nir_op_b2f32 || instr->op == nir_op_b2i32) {
@@ -2468,7 +2469,7 @@ embedded_to_inline_constant(compiler_context *ctx, midgard_block *block)
                  * restrictions. So, if possible we try to flip the arguments
                  * in that case */
 
-                int op = ins->alu.op;
+                int op = ins->op;
 
                 if (ins->src[0] == SSA_FIXED_REGISTER(REGISTER_CONSTANT) &&
                                 alu_opcode_props[op].props & OP_COMMUTES) {
@@ -2520,7 +2521,7 @@ embedded_to_inline_constant(compiler_context *ctx, midgard_block *block)
                 uint32_t value = is_16 ? cons->u16[component] : cons->u32[component];
 
                 bool is_vector = false;
-                unsigned mask = effective_writemask(ins->alu.op, ins->mask);
+                unsigned mask = effective_writemask(ins->op, ins->mask);
 
                 for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c) {
                         /* We only care if this component is actually used */
@@ -2580,8 +2581,8 @@ midgard_legalize_invert(compiler_context *ctx, midgard_block *block)
         mir_foreach_instr_in_block(block, ins) {
                 if (ins->type != TAG_ALU_4) continue;
 
-                if (ins->alu.op != midgard_alu_op_iand &&
-                    ins->alu.op != midgard_alu_op_ior) continue;
+                if (ins->op != midgard_alu_op_iand &&
+                    ins->op != midgard_alu_op_ior) continue;
 
                 if (ins->src_invert[1] || !ins->src_invert[0]) continue;
@@ -46,7 +46,7 @@ mir_get_imod(bool shift, nir_alu_type T, bool half, bool scalar)
 static unsigned
 mir_pack_mod(midgard_instruction *ins, unsigned i, bool scalar)
 {
-        bool integer = midgard_is_integer_op(ins->alu.op);
+        bool integer = midgard_is_integer_op(ins->op);
         unsigned base_size = (8 << ins->alu.reg_mode);
         unsigned sz = nir_alu_type_get_type_size(ins->src_types[i]);
         bool half = (sz == (base_size >> 1));
@@ -278,7 +278,7 @@ mir_pack_swizzle(unsigned mask, unsigned *swizzle,
 static void
 mir_pack_vector_srcs(midgard_instruction *ins)
 {
-        bool channeled = GET_CHANNEL_COUNT(alu_opcode_props[ins->alu.op].props);
+        bool channeled = GET_CHANNEL_COUNT(alu_opcode_props[ins->op].props);
 
         midgard_reg_mode mode = ins->alu.reg_mode;
         unsigned base_size = (8 << mode);
@@ -441,15 +441,15 @@ mir_lower_inverts(midgard_instruction *ins)
                 ins->src_invert[2]
         };
 
-        switch (ins->alu.op) {
+        switch (ins->op) {
         case midgard_alu_op_iand:
                 /* a & ~b = iandnot(a, b) */
                 /* ~a & ~b = ~(a | b) = inor(a, b) */
 
                 if (inv[0] && inv[1])
-                        ins->alu.op = midgard_alu_op_inor;
+                        ins->op = midgard_alu_op_inor;
                 else if (inv[1])
-                        ins->alu.op = midgard_alu_op_iandnot;
+                        ins->op = midgard_alu_op_iandnot;
 
                 break;
         case midgard_alu_op_ior:
@@ -457,9 +457,9 @@ mir_lower_inverts(midgard_instruction *ins)
                 /* ~a | ~b = ~(a & b) = inand(a, b) */
 
                 if (inv[0] && inv[1])
-                        ins->alu.op = midgard_alu_op_inand;
+                        ins->op = midgard_alu_op_inand;
                 else if (inv[1])
-                        ins->alu.op = midgard_alu_op_iornot;
+                        ins->op = midgard_alu_op_iornot;
 
                 break;
 
@@ -468,7 +468,7 @@ mir_lower_inverts(midgard_instruction *ins)
                 /* ~a ^ ~b = a ^ b */
 
                 if (inv[0] ^ inv[1])
-                        ins->alu.op = midgard_alu_op_inxor;
+                        ins->op = midgard_alu_op_inxor;
 
                 break;
 
@@ -482,9 +482,9 @@ mir_lower_inverts(midgard_instruction *ins)
 static void
 mir_lower_roundmode(midgard_instruction *ins)
 {
-        if (alu_opcode_props[ins->alu.op].props & MIDGARD_ROUNDS) {
+        if (alu_opcode_props[ins->op].props & MIDGARD_ROUNDS) {
                 assert(ins->roundmode <= 0x3);
-                ins->alu.op += ins->roundmode;
+                ins->op += ins->roundmode;
         }
 }
 
@@ -518,6 +518,8 @@ emit_alu_bundle(compiler_context *ctx,
         unsigned size = 0;
         void *source = NULL;
 
+        midgard_vector_alu source_alu;
+
         /* In case we demote to a scalar */
         midgard_scalar_alu scalarized;
 
@@ -530,7 +532,9 @@ emit_alu_bundle(compiler_context *ctx,
                         mir_pack_mask_alu(ins);
                         mir_pack_vector_srcs(ins);
                         size = sizeof(midgard_vector_alu);
-                        source = &ins->alu;
+                        source_alu = ins->alu;
+                        source_alu.op = ins->op;
+                        source = &source_alu;
                 } else if (ins->unit == ALU_ENAB_BR_COMPACT) {
                         size = sizeof(midgard_branch_cond);
                         source = &ins->br_compact;
@@ -539,7 +543,9 @@ emit_alu_bundle(compiler_context *ctx,
                         source = &ins->branch_extended;
                 } else {
                         size = sizeof(midgard_scalar_alu);
-                        scalarized = vector_to_scalar_alu(ins->alu, ins);
+                        source_alu = ins->alu;
+                        source_alu.op = ins->op;
+                        scalarized = vector_to_scalar_alu(source_alu, ins);
                         source = &scalarized;
                 }
 
@@ -34,7 +34,7 @@ midgard_opt_copy_prop_reg(compiler_context *ctx, midgard_block *block)
 
         mir_foreach_instr_in_block_safe(block, ins) {
                 if (ins->type != TAG_ALU_4) continue;
-                if (!OP_IS_MOVE(ins->alu.op)) continue;
+                if (!OP_IS_MOVE(ins->op)) continue;
                 if (ins->is_pack) continue;
 
                 unsigned from = ins->src[1];
@@ -68,7 +68,7 @@ midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block)
 
         mir_foreach_instr_in_block_safe(block, ins) {
                 if (ins->type != TAG_ALU_4) continue;
-                if (!OP_IS_MOVE(ins->alu.op)) continue;
+                if (!OP_IS_MOVE(ins->op)) continue;
                 if (ins->is_pack) continue;
 
                 unsigned from = ins->src[1];
@@ -138,7 +138,7 @@ midgard_opt_dead_move_eliminate(compiler_context *ctx, midgard_block *block)
         mir_foreach_instr_in_block_safe(block, ins) {
                 if (ins->type != TAG_ALU_4) continue;
                 if (ins->compact_branch) continue;
-                if (!OP_IS_MOVE(ins->alu.op)) continue;
+                if (!OP_IS_MOVE(ins->op)) continue;
 
                 /* Check if it's overwritten in this block before being read */
                 bool overwritten = false;
@@ -55,7 +55,7 @@ midgard_opt_combine_projection(compiler_context *ctx, midgard_block *block)
         mir_foreach_instr_in_block_safe(block, ins) {
                 /* First search for fmul */
                 if (ins->type != TAG_ALU_4) continue;
-                if (ins->alu.op != midgard_alu_op_fmul) continue;
+                if (ins->op != midgard_alu_op_fmul) continue;
 
                 /* TODO: Flip */
 
@@ -83,7 +83,7 @@ midgard_opt_combine_projection(compiler_context *ctx, midgard_block *block)
 
                         frcp_found =
                                 (sub->type == TAG_ALU_4) &&
-                                (sub->alu.op == midgard_alu_op_frcp);
+                                (sub->op == midgard_alu_op_frcp);
                         break;
                 }
 
@@ -244,7 +244,7 @@ mir_print_embedded_constant(midgard_instruction *ins, unsigned src_idx)
                 src = vector_alu_from_unsigned(ins->alu.src2);
 
         unsigned *swizzle = ins->swizzle[src_idx];
-        unsigned comp_mask = effective_writemask(ins->alu.op, ins->mask);
+        unsigned comp_mask = effective_writemask(ins->op, ins->mask);
         unsigned num_comp = util_bitcount(comp_mask);
         unsigned max_comp = mir_components_for_type(ins->dest_type);
         bool first = true;
@@ -265,7 +265,7 @@ mir_print_embedded_constant(midgard_instruction *ins, unsigned src_idx)
 
                 mir_print_constant_component(stdout, &ins->constants,
                                              swizzle[comp], ins->alu.reg_mode,
-                                             src.half, src.mod, ins->alu.op);
+                                             src.half, src.mod, ins->op);
         }
 
         if (num_comp > 1)
@@ -328,7 +328,7 @@ mir_print_instruction(midgard_instruction *ins)
 
         switch (ins->type) {
         case TAG_ALU_4: {
-                midgard_alu_op op = ins->alu.op;
+                midgard_alu_op op = ins->op;
                 const char *name = alu_opcode_props[op].name;
 
                 if (ins->unit)
@@ -492,7 +492,7 @@ allocate_registers(compiler_context *ctx, bool *spilled)
                  * want to muck with the conditional itself, so just force
                  * alignment for now */
 
-                if (ins->type == TAG_ALU_4 && OP_IS_CSEL_V(ins->alu.op)) {
+                if (ins->type == TAG_ALU_4 && OP_IS_CSEL_V(ins->op)) {
                         min_alignment[dest] = 4; /* 1 << 4= 16-byte = vec4 */
 
                         /* LCRA assumes bound >= alignment */
@@ -681,7 +681,7 @@ install_registers_instr(
                 mir_set_bytemask(ins, mir_bytemask(ins) << dest.offset);
 
                 unsigned dest_offset =
-                        GET_CHANNEL_COUNT(alu_opcode_props[ins->alu.op].props) ? 0 :
+                        GET_CHANNEL_COUNT(alu_opcode_props[ins->op].props) ? 0 :
                         dest.offset;
 
                 offset_swizzle(ins->swizzle[0], src1.offset, src1.shift, dest.shift, dest_offset);
@@ -520,7 +520,7 @@ mir_pipeline_count(midgard_instruction *ins)
 static bool
 mir_is_add_2(midgard_instruction *ins)
 {
-        if (ins->alu.op != midgard_alu_op_fadd)
+        if (ins->op != midgard_alu_op_fadd)
                 return false;
 
         if (ins->src[0] != ins->src[1])
@@ -548,7 +548,7 @@ mir_adjust_unit(midgard_instruction *ins, unsigned unit)
 {
         /* FADD x, x = FMUL x, #2 */
         if (mir_is_add_2(ins) && (unit & (UNITS_MUL | UNIT_VLUT))) {
-                ins->alu.op = midgard_alu_op_fmul;
+                ins->op = midgard_alu_op_fmul;
 
                 ins->src[1] = ~0;
                 ins->src_abs[1] = false;
@@ -562,7 +562,7 @@ mir_adjust_unit(midgard_instruction *ins, unsigned unit)
 static unsigned
 mir_has_unit(midgard_instruction *ins, unsigned unit)
 {
-        if (alu_opcode_props[ins->alu.op].props & unit)
+        if (alu_opcode_props[ins->op].props & unit)
                 return true;
 
         /* FADD x, x can run on any adder or any multiplier */
@@ -658,8 +658,8 @@ mir_choose_instruction(
 
         BITSET_FOREACH_SET(i, worklist, count) {
                 bool is_move = alu &&
-                        (instructions[i]->alu.op == midgard_alu_op_imov ||
-                         instructions[i]->alu.op == midgard_alu_op_fmov);
+                        (instructions[i]->op == midgard_alu_op_imov ||
+                         instructions[i]->op == midgard_alu_op_fmov);
 
                 if ((max_active - i) >= max_distance)
                         continue;
@@ -698,7 +698,7 @@ mir_choose_instruction(
                 if (ldst && mir_pipeline_count(instructions[i]) + predicate->pipeline_count > 2)
                         continue;
 
-                bool conditional = alu && !branch && OP_IS_CSEL(instructions[i]->alu.op);
+                bool conditional = alu && !branch && OP_IS_CSEL(instructions[i]->op);
                 conditional |= (branch && instructions[i]->branch.conditional);
 
                 if (conditional && no_cond)
@@ -830,13 +830,13 @@ mir_comparison_mobile(
                         return ~0;
 
                 /* If it would itself require a condition, that's recursive */
-                if (OP_IS_CSEL(instructions[i]->alu.op))
+                if (OP_IS_CSEL(instructions[i]->op))
                         return ~0;
 
                 /* We'll need to rewrite to .w but that doesn't work for vector
                  * ops that don't replicate (ball/bany), so bail there */
 
-                if (GET_CHANNEL_COUNT(alu_opcode_props[instructions[i]->alu.op].props))
+                if (GET_CHANNEL_COUNT(alu_opcode_props[instructions[i]->op].props))
                         return ~0;
 
                 /* Ensure it will fit with constants */
@@ -908,7 +908,7 @@ mir_schedule_condition(compiler_context *ctx,
         unsigned condition_index = branch ? 0 : 2;
 
         /* csel_v is vector; otherwise, conditions are scalar */
-        bool vector = !branch && OP_IS_CSEL_V(last->alu.op);
+        bool vector = !branch && OP_IS_CSEL_V(last->op);
 
         /* Grab the conditional instruction */
 
@@ -1160,7 +1160,7 @@ mir_schedule_alu(
                 *vadd = v_mov(~0, make_compiler_temp(ctx));
 
                 if (!ctx->is_blend) {
-                        vadd->alu.op = midgard_alu_op_iadd;
+                        vadd->op = midgard_alu_op_iadd;
                         vadd->src[0] = SSA_FIXED_REGISTER(31);
                         vadd->src_types[0] = nir_type_uint32;
 
@@ -1206,8 +1206,8 @@ mir_schedule_alu(
         mir_update_worklist(worklist, len, instructions, vadd);
         mir_update_worklist(worklist, len, instructions, smul);
 
-        bool vadd_csel = vadd && OP_IS_CSEL(vadd->alu.op);
-        bool smul_csel = smul && OP_IS_CSEL(smul->alu.op);
+        bool vadd_csel = vadd && OP_IS_CSEL(vadd->op);
+        bool smul_csel = smul && OP_IS_CSEL(smul->op);
 
         if (vadd_csel || smul_csel) {
                 midgard_instruction *ins = vadd_csel ? vadd : smul;
@@ -116,7 +116,7 @@ mir_single_use(compiler_context *ctx, unsigned value)
 bool
 mir_nontrivial_mod(midgard_instruction *ins, unsigned i, bool check_swizzle)
 {
-        bool is_int = midgard_is_integer_op(ins->alu.op);
+        bool is_int = midgard_is_integer_op(ins->op);
 
         if (is_int) {
                 if (ins->src_shift[i]) return true;
@@ -140,7 +140,7 @@ mir_nontrivial_mod(midgard_instruction *ins, unsigned i, bool check_swizzle)
 bool
 mir_nontrivial_outmod(midgard_instruction *ins)
 {
-        bool is_int = midgard_is_integer_op(ins->alu.op);
+        bool is_int = midgard_is_integer_op(ins->op);
         unsigned mod = ins->alu.outmod;
 
         if (ins->dest_type != ins->src_types[1])
@@ -286,7 +286,7 @@ mir_bytemask_of_read_components_index(midgard_instruction *ins, unsigned i)
 
         /* Handle dot products and things */
         if (ins->type == TAG_ALU_4 && !ins->compact_branch) {
-                unsigned props = alu_opcode_props[ins->alu.op].props;
+                unsigned props = alu_opcode_props[ins->op].props;
 
                 unsigned channel_override = GET_CHANNEL_COUNT(props);
 
@@ -335,7 +335,7 @@ mir_bundle_for_op(compiler_context *ctx, midgard_instruction ins)
         };
 
         if (bundle.tag == TAG_ALU_4) {
-                assert(OP_IS_MOVE(u->alu.op));
+                assert(OP_IS_MOVE(u->op));
                 u->unit = UNIT_VMUL;
 
                 size_t bytes_emitted = sizeof(uint32_t) + sizeof(midgard_reg_info) + sizeof(midgard_vector_alu);