nir,ntt,a2xx,lima: Stop using nir_dest directly
We want to get rid of nir_dest, so back-ends need to stop storing it in
structs and passing it through helpers.

Acked-by: Emma Anholt <emma@anholt.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24674>
Committed by: Marge Bot
Parent: b30da1b281
Commit: 9b4677981f
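The change is mechanical and the same in every back-end touched here: helpers that used to take a `nir_dest *` now take the `nir_def *` directly (callers pass `&instr->dest.ssa`), and queries such as `nir_dest_num_components()` / `nir_dest_bit_size()` become plain field reads on the def. A minimal sketch of the new-style helper follows; `backend_ctx` and `backend_emit` are hypothetical stand-ins for a driver's own types, not anything in this commit:

```c
#include "nir.h"
#include "nir_legacy.h"

struct backend_ctx;                                        /* hypothetical driver state */
void backend_emit(struct backend_ctx *ctx, nir_legacy_dest *dest,
                  unsigned num_components, unsigned bit_size);  /* hypothetical */

/* New-style helper: takes the SSA def itself; callers pass &instr->dest.ssa. */
static void
emit_dest(struct backend_ctx *ctx, nir_def *def)
{
   unsigned ncomp = def->num_components;   /* was nir_dest_num_components(*dest) */
   unsigned bits = def->bit_size;          /* was nir_dest_bit_size(*dest) */

   /* nir_legacy_chase_dest() now takes a nir_def * as well (see the header hunk). */
   nir_legacy_dest chased = nir_legacy_chase_dest(def);

   backend_emit(ctx, &chased, ncomp, bits);
}
```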
@@ -122,9 +122,9 @@ nir_legacy_chase_alu_src(const nir_alu_src *src, bool fuse_fabs)
 }
 
 static nir_legacy_alu_dest
-chase_alu_dest_helper(nir_dest *dest)
+chase_alu_dest_helper(nir_def *def)
 {
-   nir_intrinsic_instr *store = nir_store_reg_for_def(&dest->ssa);
+   nir_intrinsic_instr *store = nir_store_reg_for_def(def);
 
    if (store) {
       bool indirect = (store->intrinsic == nir_intrinsic_store_reg_indirect);
@@ -141,8 +141,8 @@ chase_alu_dest_helper(nir_dest *dest)
    } else {
       return (nir_legacy_alu_dest){
          .dest.is_ssa = true,
-         .dest.ssa = &dest->ssa,
-         .write_mask = nir_component_mask(dest->ssa.num_components),
+         .dest.ssa = def,
+         .write_mask = nir_component_mask(def->num_components),
       };
    }
 }
@@ -218,10 +218,8 @@ chase_fsat(nir_def **def)
 }
 
 nir_legacy_alu_dest
-nir_legacy_chase_alu_dest(nir_dest *dest)
+nir_legacy_chase_alu_dest(nir_def *def)
 {
-   nir_def *def = &dest->ssa;
-
    /* Try SSA fsat. No users support 64-bit modifiers. */
    if (chase_fsat(&def)) {
       return (nir_legacy_alu_dest){
@@ -231,7 +229,7 @@ nir_legacy_chase_alu_dest(nir_dest *dest)
          .write_mask = nir_component_mask(def->num_components),
       };
    } else {
-      return chase_alu_dest_helper(dest);
+      return chase_alu_dest_helper(def);
    }
 }
 
@@ -244,12 +242,11 @@ nir_legacy_chase_src(const nir_src *src)
 }
 
 nir_legacy_dest
-nir_legacy_chase_dest(nir_dest *dest)
+nir_legacy_chase_dest(nir_def *def)
 {
-   nir_legacy_alu_dest alu_dest = chase_alu_dest_helper(dest);
+   nir_legacy_alu_dest alu_dest = chase_alu_dest_helper(def);
    assert(!alu_dest.fsat);
-   assert(alu_dest.write_mask ==
-          nir_component_mask(nir_dest_num_components(*dest)));
+   assert(alu_dest.write_mask == nir_component_mask(def->num_components));
 
    return alu_dest.dest;
 }
@@ -310,7 +307,7 @@ fuse_mods_with_registers(nir_builder *b, nir_instr *instr, void *fuse_fabs_)
          }
       }
 
-      nir_legacy_alu_dest dest = nir_legacy_chase_alu_dest(&alu->dest.dest);
+      nir_legacy_alu_dest dest = nir_legacy_chase_alu_dest(&alu->dest.dest.ssa);
       if (dest.fsat) {
          nir_intrinsic_instr *store = nir_store_reg_for_def(dest.dest.ssa);
 
@@ -75,12 +75,12 @@ void nir_legacy_trivialize(nir_shader *s, bool fuse_fabs);
 
 /* Reconstruct a legacy source/destination (including registers) */
 nir_legacy_src nir_legacy_chase_src(const nir_src *src);
-nir_legacy_dest nir_legacy_chase_dest(nir_dest *dest);
+nir_legacy_dest nir_legacy_chase_dest(nir_def *def);
 
 /* Reconstruct a legacy ALU source/destination (including float modifiers) */
 nir_legacy_alu_src nir_legacy_chase_alu_src(const nir_alu_src *src,
                                             bool fuse_fabs);
-nir_legacy_alu_dest nir_legacy_chase_alu_dest(nir_dest *dest);
+nir_legacy_alu_dest nir_legacy_chase_alu_dest(nir_def *def);
 
 /* Check if a source modifier folds. If so, it may be skipped during instruction
  * selection, avoiding the need for backend dead code elimination.
@@ -1352,16 +1352,16 @@ ntt_get_chased_dest(struct ntt_compile *c, nir_legacy_dest *dest)
 }
 
 static struct ureg_dst
-ntt_get_dest(struct ntt_compile *c, nir_dest *dest)
+ntt_get_dest(struct ntt_compile *c, nir_def *def)
 {
-   nir_legacy_dest chased = nir_legacy_chase_dest(dest);
+   nir_legacy_dest chased = nir_legacy_chase_dest(def);
    return ntt_get_chased_dest(c, &chased);
 }
 
 static struct ureg_dst
-ntt_get_alu_dest(struct ntt_compile *c, nir_dest *dest)
+ntt_get_alu_dest(struct ntt_compile *c, nir_def *def)
 {
-   nir_legacy_alu_dest chased = nir_legacy_chase_alu_dest(dest);
+   nir_legacy_alu_dest chased = nir_legacy_chase_alu_dest(def);
    struct ureg_dst dst = ntt_get_chased_dest(c, &chased.dest);
 
    if (chased.fsat)
@@ -1371,7 +1371,7 @@ ntt_get_alu_dest(struct ntt_compile *c, nir_dest *dest)
    if (chased.dest.is_ssa)
       return dst;
 
-   int dst_64 = nir_dest_bit_size(*dest) == 64;
+   int dst_64 = def->bit_size == 64;
    unsigned write_mask = chased.write_mask;
 
    if (dst_64)
@@ -1401,9 +1401,9 @@ ntt_store_def(struct ntt_compile *c, nir_def *def, struct ureg_src src)
 }
 
 static void
-ntt_store(struct ntt_compile *c, nir_dest *dest, struct ureg_src src)
+ntt_store(struct ntt_compile *c, nir_def *def, struct ureg_src src)
 {
-   nir_legacy_dest chased = nir_legacy_chase_dest(dest);
+   nir_legacy_dest chased = nir_legacy_chase_dest(def);
 
    if (chased.is_ssa)
       ntt_store_def(c, chased.ssa, src);
@@ -1458,7 +1458,7 @@ ntt_emit_alu(struct ntt_compile *c, nir_alu_instr *instr)
    for (; i < ARRAY_SIZE(src); i++)
       src[i] = ureg_src_undef();
 
-   dst = ntt_get_alu_dest(c, &instr->dest.dest);
+   dst = ntt_get_alu_dest(c, &instr->dest.dest.ssa);
 
    static enum tgsi_opcode op_map[][2] = {
       [nir_op_mov] = { TGSI_OPCODE_MOV, TGSI_OPCODE_MOV },
@@ -1923,14 +1923,14 @@ ntt_emit_load_ubo(struct ntt_compile *c, nir_intrinsic_instr *instr)
       src = ntt_shift_by_frac(src, start_component,
                               instr->num_components * bit_size / 32);
 
-      ntt_store(c, &instr->dest, src);
+      ntt_store(c, &instr->dest.ssa, src);
    } else {
       /* PIPE_CAP_LOAD_CONSTBUF: Not necessarily vec4 aligned, emit a
        * TGSI_OPCODE_LOAD instruction from the const file.
        */
      struct ntt_insn *insn =
         ntt_insn(c, TGSI_OPCODE_LOAD,
-                 ntt_get_dest(c, &instr->dest),
+                 ntt_get_dest(c, &instr->dest.ssa),
                  src, ntt_get_src(c, instr->src[1]),
                  ureg_src_undef(), ureg_src_undef());
      insn->is_mem = true;
@@ -2112,7 +2112,7 @@ ntt_emit_mem(struct ntt_compile *c, nir_intrinsic_instr *instr,
         write_mask = ntt_64bit_write_mask(write_mask);
      dst = ureg_writemask(dst, write_mask);
   } else {
-      dst = ntt_get_dest(c, &instr->dest);
+      dst = ntt_get_dest(c, &instr->dest.ssa);
   }
 
   struct ntt_insn *insn = ntt_insn(c, opcode, dst, src[0], src[1], src[2], src[3]);
@@ -2157,7 +2157,7 @@ ntt_emit_image_load_store(struct ntt_compile *c, nir_intrinsic_instr *instr)
      dst = ureg_dst(resource);
   } else {
      srcs[num_src++] = resource;
-      dst = ntt_get_dest(c, &instr->dest);
+      dst = ntt_get_dest(c, &instr->dest.ssa);
   }
   struct ureg_dst opcode_dst = dst;
 
@@ -2269,13 +2269,13 @@ ntt_emit_load_input(struct ntt_compile *c, nir_intrinsic_instr *instr)
   switch (instr->intrinsic) {
   case nir_intrinsic_load_input:
      input = ntt_ureg_src_indirect(c, input, instr->src[0], 0);
-      ntt_store(c, &instr->dest, input);
+      ntt_store(c, &instr->dest.ssa, input);
      break;
 
   case nir_intrinsic_load_per_vertex_input:
      input = ntt_ureg_src_indirect(c, input, instr->src[1], 0);
      input = ntt_ureg_src_dimension_indirect(c, input, instr->src[0]);
-      ntt_store(c, &instr->dest, input);
+      ntt_store(c, &instr->dest.ssa, input);
      break;
 
   case nir_intrinsic_load_interpolated_input: {
@@ -2290,7 +2290,7 @@ ntt_emit_load_input(struct ntt_compile *c, nir_intrinsic_instr *instr)
         /* For these, we know that the barycentric load matches the
          * interpolation on the input declaration, so we can use it directly.
          */
-         ntt_store(c, &instr->dest, input);
+         ntt_store(c, &instr->dest.ssa, input);
         break;
 
      case nir_intrinsic_load_barycentric_centroid:
@@ -2299,21 +2299,21 @@ ntt_emit_load_input(struct ntt_compile *c, nir_intrinsic_instr *instr)
          * input.
          */
         if (c->centroid_inputs & (1ull << nir_intrinsic_base(instr))) {
-            ntt_store(c, &instr->dest, input);
+            ntt_store(c, &instr->dest.ssa, input);
         } else {
-            ntt_INTERP_CENTROID(c, ntt_get_dest(c, &instr->dest), input);
+            ntt_INTERP_CENTROID(c, ntt_get_dest(c, &instr->dest.ssa), input);
         }
         break;
 
      case nir_intrinsic_load_barycentric_at_sample:
         /* We stored the sample in the fake "bary" dest. */
-         ntt_INTERP_SAMPLE(c, ntt_get_dest(c, &instr->dest), input,
+         ntt_INTERP_SAMPLE(c, ntt_get_dest(c, &instr->dest.ssa), input,
                           ntt_get_src(c, instr->src[0]));
         break;
 
      case nir_intrinsic_load_barycentric_at_offset:
         /* We stored the offset in the fake "bary" dest. */
-         ntt_INTERP_OFFSET(c, ntt_get_dest(c, &instr->dest), input,
+         ntt_INTERP_OFFSET(c, ntt_get_dest(c, &instr->dest.ssa), input,
                           ntt_get_src(c, instr->src[0]));
         break;
 
@@ -2383,7 +2383,7 @@ ntt_emit_load_output(struct ntt_compile *c, nir_intrinsic_instr *instr)
      out = ntt_ureg_dst_indirect(c, out, instr->src[0]);
   }
 
-   struct ureg_dst dst = ntt_get_dest(c, &instr->dest);
+   struct ureg_dst dst = ntt_get_dest(c, &instr->dest.ssa);
   struct ureg_src out_src = ureg_src(out);
 
   /* Don't swizzling unavailable channels of the output in the writemasked-out
@@ -2426,7 +2426,7 @@ ntt_emit_load_sysval(struct ntt_compile *c, nir_intrinsic_instr *instr)
      switch (instr->intrinsic) {
      case nir_intrinsic_load_vertex_id:
      case nir_intrinsic_load_instance_id:
-         ntt_U2F(c, ntt_get_dest(c, &instr->dest), sv);
+         ntt_U2F(c, ntt_get_dest(c, &instr->dest.ssa), sv);
         return;
 
      default:
@@ -2434,7 +2434,7 @@ ntt_emit_load_sysval(struct ntt_compile *c, nir_intrinsic_instr *instr)
      }
   }
 
-   ntt_store(c, &instr->dest, sv);
+   ntt_store(c, &instr->dest.ssa, sv);
 }
 
 static void
@@ -2563,26 +2563,26 @@ ntt_emit_intrinsic(struct ntt_compile *c, nir_intrinsic_instr *instr)
   }
 
   case nir_intrinsic_is_helper_invocation:
-      ntt_READ_HELPER(c, ntt_get_dest(c, &instr->dest));
+      ntt_READ_HELPER(c, ntt_get_dest(c, &instr->dest.ssa));
      break;
 
   case nir_intrinsic_vote_all:
-      ntt_VOTE_ALL(c, ntt_get_dest(c, &instr->dest), ntt_get_src(c,instr->src[0]));
+      ntt_VOTE_ALL(c, ntt_get_dest(c, &instr->dest.ssa), ntt_get_src(c,instr->src[0]));
      return;
   case nir_intrinsic_vote_any:
-      ntt_VOTE_ANY(c, ntt_get_dest(c, &instr->dest), ntt_get_src(c, instr->src[0]));
+      ntt_VOTE_ANY(c, ntt_get_dest(c, &instr->dest.ssa), ntt_get_src(c, instr->src[0]));
      return;
   case nir_intrinsic_vote_ieq:
-      ntt_VOTE_EQ(c, ntt_get_dest(c, &instr->dest), ntt_get_src(c, instr->src[0]));
+      ntt_VOTE_EQ(c, ntt_get_dest(c, &instr->dest.ssa), ntt_get_src(c, instr->src[0]));
      return;
   case nir_intrinsic_ballot:
-      ntt_BALLOT(c, ntt_get_dest(c, &instr->dest), ntt_get_src(c, instr->src[0]));
+      ntt_BALLOT(c, ntt_get_dest(c, &instr->dest.ssa), ntt_get_src(c, instr->src[0]));
      return;
   case nir_intrinsic_read_first_invocation:
-      ntt_READ_FIRST(c, ntt_get_dest(c, &instr->dest), ntt_get_src(c, instr->src[0]));
+      ntt_READ_FIRST(c, ntt_get_dest(c, &instr->dest.ssa), ntt_get_src(c, instr->src[0]));
      return;
   case nir_intrinsic_read_invocation:
-      ntt_READ_INVOC(c, ntt_get_dest(c, &instr->dest), ntt_get_src(c, instr->src[0]), ntt_get_src(c, instr->src[1]));
+      ntt_READ_INVOC(c, ntt_get_dest(c, &instr->dest.ssa), ntt_get_src(c, instr->src[0]), ntt_get_src(c, instr->src[1]));
      return;
 
   case nir_intrinsic_load_ssbo:
@@ -2654,11 +2654,11 @@ ntt_emit_intrinsic(struct ntt_compile *c, nir_intrinsic_instr *instr)
      break;
   case nir_intrinsic_load_barycentric_at_sample:
   case nir_intrinsic_load_barycentric_at_offset:
-      ntt_store(c, &instr->dest, ntt_get_src(c, instr->src[0]));
+      ntt_store(c, &instr->dest.ssa, ntt_get_src(c, instr->src[0]));
      break;
 
   case nir_intrinsic_shader_clock:
-      ntt_CLOCK(c, ntt_get_dest(c, &instr->dest));
+      ntt_CLOCK(c, ntt_get_dest(c, &instr->dest.ssa));
      break;
 
   case nir_intrinsic_decl_reg:
@@ -2714,7 +2714,7 @@ ntt_push_tex_arg(struct ntt_compile *c,
 static void
 ntt_emit_texture(struct ntt_compile *c, nir_tex_instr *instr)
 {
-   struct ureg_dst dst = ntt_get_dest(c, &instr->dest);
+   struct ureg_dst dst = ntt_get_dest(c, &instr->dest.ssa);
   enum tgsi_texture_type target = tgsi_texture_type_from_sampler_dim(instr->sampler_dim, instr->is_array, instr->is_shadow);
   unsigned tex_opcode;
 
@@ -285,9 +285,9 @@ set_legacy_index(struct ir2_context *ctx, nir_legacy_dest dst,
 }
 
 static void
-set_index(struct ir2_context *ctx, nir_dest *dst, struct ir2_instr *instr)
+set_index(struct ir2_context *ctx, nir_def *def, struct ir2_instr *instr)
 {
-   set_legacy_index(ctx, nir_legacy_chase_dest(dst), instr);
+   set_legacy_index(ctx, nir_legacy_chase_dest(def), instr);
 }
 
 static struct ir2_instr *
@@ -381,23 +381,23 @@ instr_create_alu_reg(struct ir2_context *ctx, nir_op opcode, uint8_t write_mask,
 }
 
 static struct ir2_instr *
-instr_create_alu_dest(struct ir2_context *ctx, nir_op opcode, nir_dest *dst)
+instr_create_alu_dest(struct ir2_context *ctx, nir_op opcode, nir_def *def)
 {
   struct ir2_instr *instr;
-   instr = instr_create_alu(ctx, opcode, nir_dest_num_components(*dst));
-   set_index(ctx, dst, instr);
+   instr = instr_create_alu(ctx, opcode, def->num_components);
+   set_index(ctx, def, instr);
   return instr;
 }
 
 static struct ir2_instr *
-ir2_instr_create_fetch(struct ir2_context *ctx, nir_dest *dst,
+ir2_instr_create_fetch(struct ir2_context *ctx, nir_def *def,
                       instr_fetch_opc_t opc)
 {
   struct ir2_instr *instr = ir2_instr_create(ctx, IR2_FETCH);
   instr->fetch.opc = opc;
   instr->src_count = 1;
-   instr->ssa.ncomp = nir_dest_num_components(*dst);
-   set_index(ctx, dst, instr);
+   instr->ssa.ncomp = def->num_components;
+   set_index(ctx, def, instr);
   return instr;
 }
 
@@ -419,7 +419,7 @@ static void
 emit_alu(struct ir2_context *ctx, nir_alu_instr *alu)
 {
   const nir_op_info *info = &nir_op_infos[alu->op];
-   nir_dest *dst = &alu->dest.dest;
+   nir_def *def = &alu->dest.dest.ssa;
   struct ir2_instr *instr;
   struct ir2_src tmp;
   unsigned ncomp;
@@ -433,11 +433,12 @@ emit_alu(struct ir2_context *ctx, nir_alu_instr *alu)
      return;
 
   /* get the number of dst components */
-   ncomp = dst->ssa.num_components;
+   ncomp = def->num_components;
 
   instr = instr_create_alu(ctx, alu->op, ncomp);
 
-   nir_legacy_alu_dest legacy_dest = nir_legacy_chase_alu_dest(&alu->dest.dest);
+   nir_legacy_alu_dest legacy_dest =
+      nir_legacy_chase_alu_dest(&alu->dest.dest.ssa);
   set_legacy_index(ctx, legacy_dest.dest, instr);
   instr->alu.saturate = legacy_dest.fsat;
   instr->alu.write_mask = legacy_dest.write_mask;
@@ -512,13 +513,13 @@ emit_alu(struct ir2_context *ctx, nir_alu_instr *alu)
 }
 
 static void
-load_input(struct ir2_context *ctx, nir_dest *dst, unsigned idx)
+load_input(struct ir2_context *ctx, nir_def *def, unsigned idx)
 {
   struct ir2_instr *instr;
   int slot = -1;
 
   if (ctx->so->type == MESA_SHADER_VERTEX) {
-      instr = ir2_instr_create_fetch(ctx, dst, 0);
+      instr = ir2_instr_create_fetch(ctx, def, 0);
      instr->src[0] = ir2_src(0, 0, IR2_SRC_INPUT);
      instr->fetch.vtx.const_idx = 20 + (idx / 3);
      instr->fetch.vtx.const_idx_sel = idx % 3;
@@ -554,11 +555,11 @@ load_input(struct ir2_context *ctx, nir_dest *dst, unsigned idx)
      instr->src[0] = ir2_src(ctx->f->fragcoord, IR2_SWIZZLE_Y, IR2_SRC_INPUT);
 
      unsigned reg_idx = instr->reg - ctx->reg; /* XXX */
-      instr = instr_create_alu_dest(ctx, nir_op_mov, dst);
+      instr = instr_create_alu_dest(ctx, nir_op_mov, def);
      instr->src[0] = ir2_src(reg_idx, 0, IR2_SRC_REG);
      break;
   default:
-      instr = instr_create_alu_dest(ctx, nir_op_mov, dst);
+      instr = instr_create_alu_dest(ctx, nir_op_mov, def);
      instr->src[0] = ir2_src(idx, 0, IR2_SRC_INPUT);
      break;
   }
@@ -629,7 +630,7 @@ emit_intrinsic(struct ir2_context *ctx, nir_intrinsic_instr *intr)
      break;
 
   case nir_intrinsic_load_input:
-      load_input(ctx, &intr->dest, nir_intrinsic_base(intr));
+      load_input(ctx, &intr->dest.ssa, nir_intrinsic_base(intr));
      break;
   case nir_intrinsic_store_output:
      store_output(ctx, intr->src[0], output_slot(ctx, intr),
@@ -640,7 +641,7 @@ emit_intrinsic(struct ir2_context *ctx, nir_intrinsic_instr *intr)
      assert(const_offset); /* TODO can be false in ES2? */
      idx = nir_intrinsic_base(intr);
      idx += (uint32_t)const_offset[0].f32;
-      instr = instr_create_alu_dest(ctx, nir_op_mov, &intr->dest);
+      instr = instr_create_alu_dest(ctx, nir_op_mov, &intr->dest.ssa);
      instr->src[0] = ir2_src(idx, 0, IR2_SRC_CONST);
      break;
   case nir_intrinsic_discard:
@@ -667,7 +668,7 @@ emit_intrinsic(struct ir2_context *ctx, nir_intrinsic_instr *intr)
      struct ir2_instr *tmp = instr_create_alu(ctx, nir_op_frcp, 1);
      tmp->src[0] = ir2_src(ctx->f->inputs_count, 0, IR2_SRC_INPUT);
 
-      instr = instr_create_alu_dest(ctx, nir_op_sge, &intr->dest);
+      instr = instr_create_alu_dest(ctx, nir_op_sge, &intr->dest.ssa);
      instr->src[0] = ir2_src(tmp->idx, 0, IR2_SRC_SSA);
      instr->src[1] = ir2_zero(ctx);
      break;
@@ -675,7 +676,7 @@ emit_intrinsic(struct ir2_context *ctx, nir_intrinsic_instr *intr)
      /* param.zw (note: abs might be needed like fragcoord in param.xy?) */
      ctx->so->need_param = true;
 
-      instr = instr_create_alu_dest(ctx, nir_op_mov, &intr->dest);
+      instr = instr_create_alu_dest(ctx, nir_op_mov, &intr->dest.ssa);
      instr->src[0] =
         ir2_src(ctx->f->inputs_count, IR2_SWIZZLE_ZW, IR2_SRC_INPUT);
      break;
@@ -768,7 +769,7 @@ emit_tex(struct ir2_context *ctx, nir_tex_instr *tex)
      /* TODO: lod/bias transformed by src_coord.z ? */
   }
 
-   instr = ir2_instr_create_fetch(ctx, &tex->dest, TEX_FETCH);
+   instr = ir2_instr_create_fetch(ctx, &tex->dest.ssa, TEX_FETCH);
   instr->src[0] = src_coord;
   instr->src[0].swizzle = is_cube ? IR2_SWIZZLE_YXW : 0;
   instr->fetch.tex.is_cube = is_cube;
@@ -824,8 +825,7 @@ emit_undef(struct ir2_context *ctx, nir_undef_instr *undef)
 
   struct ir2_instr *instr;
 
-   instr = instr_create_alu_dest(
-      ctx, nir_op_mov, &(nir_dest){.ssa = undef->def});
+   instr = instr_create_alu_dest(ctx, nir_op_mov, &undef->def);
   instr->src[0] = ir2_src(0, 0, IR2_SRC_CONST);
 }
 
@@ -159,14 +159,14 @@ static int nir_to_ppir_opcodes[nir_num_opcodes] = {
 static bool ppir_emit_alu(ppir_block *block, nir_instr *ni)
 {
   nir_alu_instr *instr = nir_instr_as_alu(ni);
-   nir_dest *dst = &instr->dest.dest;
+   nir_def *def = &instr->dest.dest.ssa;
   int op = nir_to_ppir_opcodes[instr->op];
 
   if (op == ppir_op_unsupported) {
      ppir_error("unsupported nir_op: %s\n", nir_op_infos[instr->op].name);
      return false;
   }
-   nir_legacy_alu_dest legacy_dest = nir_legacy_chase_alu_dest(dst);
+   nir_legacy_alu_dest legacy_dest = nir_legacy_chase_alu_dest(def);
 
   /* Don't try to translate folded fsat since their source won't be valid */
   if (instr->op == nir_op_fsat && nir_legacy_fsat_folds(instr))
@@ -180,7 +180,7 @@ static bool ppir_emit_alu(ppir_block *block, nir_instr *ni)
      nir_alu_src *ns = &instr->src[0];
      ppir_node *parent = block->comp->var_nodes[ns->src.ssa->index];
      assert(parent);
-      block->comp->var_nodes[dst->ssa.index] = parent;
+      block->comp->var_nodes[def->index] = parent;
      return true;
   }
 
@@ -291,7 +291,7 @@ static bool ppir_emit_intrinsic(ppir_block *block, nir_instr *ni)
      return true;
 
   case nir_intrinsic_load_reg: {
-      nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->dest);
+      nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->dest.ssa);
      lnode = ppir_node_create_dest(block, ppir_op_dummy, &legacy_dest, mask);
      return true;
   }
@@ -299,7 +299,7 @@ static bool ppir_emit_intrinsic(ppir_block *block, nir_instr *ni)
   case nir_intrinsic_load_input: {
      mask = u_bit_consecutive(0, instr->num_components);
 
-      nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->dest);
+      nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->dest.ssa);
      lnode = ppir_node_create_dest(block, ppir_op_load_varying, &legacy_dest, mask);
      if (!lnode)
         return false;
@@ -338,7 +338,7 @@ static bool ppir_emit_intrinsic(ppir_block *block, nir_instr *ni)
         break;
      }
 
-      nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->dest);
+      nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->dest.ssa);
      lnode = ppir_node_create_dest(block, op, &legacy_dest, mask);
      if (!lnode)
        return false;
@@ -351,7 +351,7 @@ static bool ppir_emit_intrinsic(ppir_block *block, nir_instr *ni)
   case nir_intrinsic_load_uniform: {
      mask = u_bit_consecutive(0, instr->num_components);
 
-      nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->dest);
+      nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->dest.ssa);
      lnode = ppir_node_create_dest(block, ppir_op_load_uniform, &legacy_dest, mask);
      if (!lnode)
        return false;
@@ -517,7 +517,7 @@ static bool ppir_emit_tex(ppir_block *block, nir_instr *ni)
   unsigned mask = 0;
   mask = u_bit_consecutive(0, nir_tex_instr_dest_size(instr));
 
-   nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->dest);
+   nir_legacy_dest legacy_dest = nir_legacy_chase_dest(&instr->dest.ssa);
   node = ppir_node_create_dest(block, ppir_op_load_texture, &legacy_dest, mask);
   if (!node)
     return false;