nir: Get rid of nir_dest_num_components()
We could add a nir_def_num_components() helper, but we use ssa.num_components about 3x as often as nir_dest_num_components() today, so that's a major Coccinelle refactor anyway and this doesn't make it much worse. Most of this commit was generated by the following semantic patch:

    @@
    expression D;
    @@

    <...
    -nir_dest_num_components(D)
    +D.ssa.num_components
    ...>

Some manual fixup was needed, especially in cpp files where Coccinelle tends to give up the moment it sees any interesting C++.

Acked-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24674>
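In practice the rewrite is mechanical; a minimal before/after sketch of what the semantic patch does to each call site (illustrative C, not a line from this diff):

    /* Before: query the vector width through the nir_dest helper. */
    unsigned n = nir_dest_num_components(intr->dest);

    /* After: read the component count straight off the SSA def. */
    unsigned n = intr->dest.ssa.num_components;

Assuming the patch is saved to a file such as dest_num_components.cocci (a hypothetical name), it can be applied with Coccinelle's spatch tool, e.g. spatch --sp-file dest_num_components.cocci --in-place --dir src/.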
commit 9d81f13a75
parent 80a1836d8b
committed by Marge Bot
@@ -519,7 +519,7 @@ agx_emit_local_load_pixel(agx_builder *b, agx_index dest,
    b->shader->did_writeout = true;
    b->shader->out->reads_tib = true;
 
-   unsigned nr_comps = nir_dest_num_components(instr->dest);
+   unsigned nr_comps = instr->dest.ssa.num_components;
    agx_ld_tile_to(b, dest, agx_src_index(&instr->src[0]),
                   agx_format_for_pipe(nir_intrinsic_format(instr)),
                   BITFIELD_MASK(nr_comps), nir_intrinsic_base(instr));

@@ -539,9 +539,8 @@ agx_emit_load(agx_builder *b, agx_index dest, nir_intrinsic_instr *instr)
       offset = agx_abs(offset);
 
    agx_device_load_to(b, dest, addr, offset, fmt,
-                      BITFIELD_MASK(nir_dest_num_components(instr->dest)),
-                      shift, 0);
-   agx_emit_cached_split(b, dest, nir_dest_num_components(instr->dest));
+                      BITFIELD_MASK(instr->dest.ssa.num_components), shift, 0);
+   agx_emit_cached_split(b, dest, instr->dest.ssa.num_components);
 }
 
 static void

@@ -567,7 +566,7 @@ agx_emit_load_preamble(agx_builder *b, agx_index dst,
                        nir_intrinsic_instr *instr)
 {
    agx_index srcs[4] = {agx_null()};
-   unsigned dim = nir_dest_num_components(instr->dest);
+   unsigned dim = instr->dest.ssa.num_components;
    assert(dim <= ARRAY_SIZE(srcs) && "shouldn't see larger vectors");
 
    unsigned base = nir_intrinsic_base(instr);

@@ -643,7 +642,7 @@ static agx_instr *
 agx_load_compute_dimension(agx_builder *b, agx_index dst,
                            nir_intrinsic_instr *instr, enum agx_sr base)
 {
-   unsigned dim = nir_dest_num_components(instr->dest);
+   unsigned dim = instr->dest.ssa.num_components;
    unsigned size = instr->dest.ssa.bit_size;
    assert(size == 16 || size == 32);
 

@@ -740,7 +739,7 @@ agx_emit_local_load(agx_builder *b, agx_index dst, nir_intrinsic_instr *instr)
    assert(base.size == AGX_SIZE_16);
 
    enum agx_format format = format_for_bitsize(instr->dest.ssa.bit_size);
-   unsigned nr = nir_dest_num_components(instr->dest);
+   unsigned nr = instr->dest.ssa.num_components;
    unsigned mask = BITFIELD_MASK(nr);
 
    agx_local_load_to(b, dst, base, index, format, mask);

@@ -1228,7 +1227,7 @@ agx_emit_alu(agx_builder *b, nir_alu_instr *instr)
    unsigned srcs = nir_op_infos[instr->op].num_inputs;
    unsigned sz = instr->dest.dest.ssa.bit_size;
    unsigned src_sz = srcs ? nir_src_bit_size(instr->src[0].src) : 0;
-   ASSERTED unsigned comps = nir_dest_num_components(instr->dest.dest);
+   ASSERTED unsigned comps = instr->dest.dest.ssa.num_components;
 
    assert(comps == 1 || nir_op_is_vec(instr->op));
    assert(sz == 1 ||
@@ -311,7 +311,7 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data)
 
    nir_def *repl = NULL;
    bool has_dest = (intr->intrinsic != nir_intrinsic_store_global);
-   unsigned num_components = has_dest ? nir_dest_num_components(intr->dest) : 0;
+   unsigned num_components = has_dest ? intr->dest.ssa.num_components : 0;
    unsigned bit_size = has_dest ? intr->dest.ssa.bit_size : 0;
 
    if (intr->intrinsic == nir_intrinsic_load_global) {

@@ -147,11 +147,11 @@ lower(nir_builder *b, nir_instr *instr, void *data)
 
    /* Each component is loaded separated */
    nir_def *values[NIR_MAX_VEC_COMPONENTS] = {NULL};
-   for (unsigned i = 0; i < nir_dest_num_components(intr->dest); ++i) {
+   for (unsigned i = 0; i < intr->dest.ssa.num_components; ++i) {
       values[i] = interpolate_channel(b, intr, i);
    }
 
-   return nir_vec(b, values, nir_dest_num_components(intr->dest));
+   return nir_vec(b, values, intr->dest.ssa.num_components);
 }
 
 bool
@@ -140,7 +140,7 @@ agx_txs(nir_builder *b, nir_tex_instr *tex)
       height = depth;
 
    /* How we finish depends on the size of the result */
-   unsigned nr_comps = nir_dest_num_components(tex->dest);
+   unsigned nr_comps = tex->dest.ssa.num_components;
    assert(nr_comps <= 3);
 
    /* Adjust for LOD, do not adjust array size */

@@ -742,10 +742,9 @@ lower_images(nir_builder *b, nir_instr *instr, UNUSED void *data)
 
    case nir_intrinsic_image_size:
    case nir_intrinsic_bindless_image_size:
-      nir_def_rewrite_uses(
-         &intr->dest.ssa,
-         txs_for_image(b, intr, nir_dest_num_components(intr->dest),
-                       intr->dest.ssa.bit_size));
+      nir_def_rewrite_uses(&intr->dest.ssa,
+                           txs_for_image(b, intr, intr->dest.ssa.num_components,
+                                         intr->dest.ssa.bit_size));
       return true;
 
    case nir_intrinsic_image_texel_address:
@@ -2834,7 +2834,7 @@ nir_alu_instr_channel_used(const nir_alu_instr *instr, unsigned src,
    if (nir_op_infos[instr->op].input_sizes[src] > 0)
       return channel < nir_op_infos[instr->op].input_sizes[src];
 
-   return channel < nir_dest_num_components(instr->dest.dest);
+   return channel < instr->dest.dest.ssa.num_components;
 }
 
 nir_component_mask_t

@@ -2856,7 +2856,7 @@ nir_ssa_alu_instr_src_components(const nir_alu_instr *instr, unsigned src)
    if (nir_op_infos[instr->op].input_sizes[src] > 0)
       return nir_op_infos[instr->op].input_sizes[src];
 
-   return nir_dest_num_components(instr->dest.dest);
+   return instr->dest.dest.ssa.num_components;
 }
 
 #define CASE_ALL_SIZES(op) \
@@ -1082,12 +1082,6 @@ nir_src_is_divergent(nir_src src)
    return src.ssa->divergent;
 }
 
-static inline unsigned
-nir_dest_num_components(nir_dest dest)
-{
-   return dest.ssa.num_components;
-}
-
 static inline bool
 nir_dest_is_divergent(nir_dest dest)
 {
@@ -180,7 +180,7 @@ nir_legacy_fsat_folds(nir_alu_instr *fsat)
       return false;
 
    /* We can't do expansions without a move in the middle */
-   unsigned nr_components = nir_dest_num_components(generate_alu->dest.dest);
+   unsigned nr_components = generate_alu->dest.dest.ssa.num_components;
    if (fsat->dest.dest.ssa.num_components != nr_components)
       return false;
 
@@ -230,7 +230,7 @@ all_same_constant(const nir_alu_instr *instr, unsigned src, double *result)
       return false;
 
    const uint8_t *const swizzle = instr->src[src].swizzle;
-   const unsigned num_components = nir_dest_num_components(instr->dest.dest);
+   const unsigned num_components = instr->dest.dest.ssa.num_components;
 
    if (instr->dest.dest.ssa.bit_size == 32) {
       const float first = val[swizzle[0]].f32;

@@ -266,7 +266,7 @@ sources_are_constants_with_similar_magnitudes(const nir_alu_instr *instr)
 
    const uint8_t *const swizzle0 = instr->src[0].swizzle;
    const uint8_t *const swizzle1 = instr->src[1].swizzle;
-   const unsigned num_components = nir_dest_num_components(instr->dest.dest);
+   const unsigned num_components = instr->dest.dest.ssa.num_components;
 
    if (instr->dest.dest.ssa.bit_size == 32) {
       for (unsigned i = 0; i < num_components; i++) {
@@ -76,7 +76,7 @@ lower(nir_builder *b, nir_instr *instr, void *data)
     */
    if (has_dest) {
       nir_push_else(b, NULL);
-      undef = nir_undef(b, nir_dest_num_components(intr->dest),
+      undef = nir_undef(b, intr->dest.ssa.num_components,
                         intr->dest.ssa.bit_size);
    }
 
@@ -2845,7 +2845,7 @@ is_dual_slot(nir_intrinsic_instr *intrin)
    }
 
    return intrin->dest.ssa.bit_size == 64 &&
-          nir_dest_num_components(intrin->dest) >= 3;
+          intrin->dest.ssa.num_components >= 3;
 }
 
 /**
@@ -1562,7 +1562,7 @@ nir_opt_trim_stack_values(nir_shader *shader)
          nir_alu_instr *alu = nir_instr_as_alu(use_src->parent_instr);
          nir_alu_src *alu_src = exec_node_data(nir_alu_src, use_src, src);
 
-         unsigned count = nir_dest_num_components(alu->dest.dest);
+         unsigned count = alu->dest.dest.ssa.num_components;
          for (unsigned idx = 0; idx < count; ++idx)
            alu_src->swizzle[idx] = swiz_map[alu_src->swizzle[idx]];
      } else if (use_src->parent_instr->type == nir_instr_type_intrinsic) {
@@ -887,7 +887,7 @@ lower_tex_to_txd(nir_builder *b, nir_tex_instr *tex)
    txd->src[tex->num_srcs + 1] = nir_tex_src_for_ssa(nir_tex_src_ddy, dfdy);
 
    nir_def_init(&txd->instr, &txd->dest.ssa,
-                nir_dest_num_components(tex->dest),
+                tex->dest.ssa.num_components,
                 tex->dest.ssa.bit_size);
    nir_builder_instr_insert(b, &txd->instr);
    nir_def_rewrite_uses(&tex->dest.ssa, &txd->dest.ssa);

@@ -927,7 +927,7 @@ lower_txb_to_txl(nir_builder *b, nir_tex_instr *tex)
    txl->src[tex->num_srcs - 1] = nir_tex_src_for_ssa(nir_tex_src_lod, lod);
 
    nir_def_init(&txl->instr, &txl->dest.ssa,
-                nir_dest_num_components(tex->dest),
+                tex->dest.ssa.num_components,
                 tex->dest.ssa.bit_size);
    nir_builder_instr_insert(b, &txl->instr);
    nir_def_rewrite_uses(&tex->dest.ssa, &txl->dest.ssa);
@@ -32,7 +32,7 @@ insert_store(nir_builder *b, nir_def *reg, nir_alu_instr *vec,
    assert(start_idx < nir_op_infos[vec->op].num_inputs);
    nir_def *src = vec->src[start_idx].src.ssa;
 
-   unsigned num_components = nir_dest_num_components(vec->dest.dest);
+   unsigned num_components = vec->dest.dest.ssa.num_components;
    assert(num_components == nir_op_infos[vec->op].num_inputs);
    unsigned write_mask = 0;
    unsigned swiz[NIR_MAX_VEC_COMPONENTS] = { 0 };

@@ -125,7 +125,7 @@ try_coalesce(nir_builder *b, nir_def *reg, nir_alu_instr *vec,
       for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
         swizzles[j][i] = src_alu->src[j].swizzle[i];
 
-   unsigned dest_components = nir_dest_num_components(vec->dest.dest);
+   unsigned dest_components = vec->dest.dest.ssa.num_components;
    assert(dest_components == nir_op_infos[vec->op].num_inputs);
 
    /* Generate the final write mask */

@@ -198,7 +198,7 @@ lower(nir_builder *b, nir_instr *instr, void *data_)
    if (vec->op == nir_op_mov || !nir_op_is_vec(vec->op))
       return false;
 
-   unsigned num_components = nir_dest_num_components(vec->dest.dest);
+   unsigned num_components = vec->dest.dest.ssa.num_components;
 
    /* Special case: if all sources are the same, just swizzle instead to avoid
     * the extra copies from a register.
@@ -664,7 +664,7 @@ opt_simplify_bcsel_of_phi(nir_builder *b, nir_loop *loop)
                           ->src);
 
    nir_def_init(&phi->instr, &phi->dest.ssa,
-                nir_dest_num_components(bcsel->dest.dest),
+                bcsel->dest.dest.ssa.num_components,
                 bcsel->dest.dest.ssa.bit_size);
 
    b->cursor = nir_after_phis(header_block);
@@ -403,7 +403,7 @@ try_move_widening_src(nir_builder *b, nir_phi_instr *phi)
       /* The conversion we are stripping off could have had a swizzle,
        * so replace it with a mov if necessary:
        */
-      unsigned num_comp = nir_dest_num_components(alu->dest.dest);
+      unsigned num_comp = alu->dest.dest.ssa.num_components;
       new_src = nir_mov_alu(b, alu->src[0], num_comp);
    }
 
@@ -169,7 +169,7 @@ opt_undef_pack(nir_builder *b, nir_alu_instr *alu)
    default:
       return false;
    }
-   unsigned num_components = nir_dest_num_components(alu->dest.dest);
+   unsigned num_components = alu->dest.dest.ssa.num_components;
    b->cursor = nir_before_instr(&alu->instr);
    nir_def *def = nir_undef(b, num_components, 32);
    nir_def_rewrite_uses_after(&alu->dest.dest.ssa, def, &alu->instr);
@@ -1122,7 +1122,7 @@ read_intrinsic(read_ctx *ctx, union packed_instr header)
     */
    if (nir_intrinsic_infos[op].has_dest &&
        nir_intrinsic_infos[op].dest_components == 0) {
-      intrin->num_components = nir_dest_num_components(intrin->dest);
+      intrin->num_components = intrin->dest.ssa.num_components;
    } else {
       for (unsigned i = 0; i < num_srcs; i++) {
          if (nir_intrinsic_infos[op].src_components[i] == 0) {
@@ -53,7 +53,7 @@ nir_split_64bit_vec3_and_vec4_filter(const nir_instr *instr,
       nir_variable *var = nir_intrinsic_get_var(intr, 0);
       if (var->data.mode != nir_var_function_temp)
          return false;
-      return nir_dest_num_components(intr->dest) >= 3;
+      return intr->dest.ssa.num_components >= 3;
    }
    case nir_intrinsic_store_deref: {
       if (nir_src_bit_size(intr->src[1]) != 64)

@@ -71,7 +71,7 @@ nir_split_64bit_vec3_and_vec4_filter(const nir_instr *instr,
       nir_phi_instr *phi = nir_instr_as_phi(instr);
       if (phi->dest.ssa.bit_size != 64)
          return false;
-      return nir_dest_num_components(phi->dest) >= 3;
+      return phi->dest.ssa.num_components >= 3;
    }
 
    default:
@@ -317,7 +317,7 @@ validate_deref_instr(nir_deref_instr *instr, validate_state *state)
        * as the destination.
        */
       validate_src(&instr->parent, state, instr->dest.ssa.bit_size,
-                   nir_dest_num_components(instr->dest));
+                   instr->dest.ssa.num_components);
 
       nir_instr *parent_instr = instr->parent.ssa->parent_instr;
 

@@ -477,7 +477,7 @@ validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
    case nir_intrinsic_load_reg:
    case nir_intrinsic_load_reg_indirect:
       validate_register_handle(instr->src[0],
-                               nir_dest_num_components(instr->dest),
+                               instr->dest.ssa.num_components,
                                instr->dest.ssa.bit_size, state);
       break;
 
@@ -426,7 +426,7 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
    }
 
    /* General case: We can just grab the one used channel per src. */
-   assert(nir_dest_num_components(alu->dest.dest) == 1);
+   assert(alu->dest.dest.ssa.num_components == 1);
 
    for (int i = 0; i < info->num_inputs; i++) {
       nir_alu_src *asrc = &alu->src[i];

@@ -2901,7 +2901,7 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
    type_t type;
    opc_t opc = 0;
 
-   ncomp = nir_dest_num_components(tex->dest);
+   ncomp = tex->dest.ssa.num_components;
 
    coord = off = ddx = ddy = NULL;
    lod = proj = compare = sample_index = NULL;
@@ -1116,7 +1116,7 @@ visit_alu(struct lp_build_nir_context *bld_base,
    struct gallivm_state *gallivm = bld_base->base.gallivm;
    LLVMValueRef src[NIR_MAX_VEC_COMPONENTS];
    unsigned src_bit_size[NIR_MAX_VEC_COMPONENTS];
-   const unsigned num_components = nir_dest_num_components(instr->dest.dest);
+   const unsigned num_components = instr->dest.dest.ssa.num_components;
    unsigned src_components;
 
    switch (instr->op) {

@@ -1296,7 +1296,7 @@ visit_load_input(struct lp_build_nir_context *bld_base,
    var.data.driver_location = nir_intrinsic_base(instr);
    var.data.location_frac = nir_intrinsic_component(instr);
 
-   unsigned nc = nir_dest_num_components(instr->dest);
+   unsigned nc = instr->dest.ssa.num_components;
    unsigned bit_size = instr->dest.ssa.bit_size;
 
    nir_src offset = *nir_get_io_offset_src(instr);

@@ -1364,8 +1364,8 @@ visit_load_reg(struct lp_build_nir_context *bld_base,
 
    LLVMValueRef val = bld_base->load_reg(bld_base, reg_bld, decl, base, indir_src, reg_storage);
 
-   if (!is_aos(bld_base) && nir_dest_num_components(instr->dest) > 1) {
-      for (unsigned i = 0; i < nir_dest_num_components(instr->dest); i++)
+   if (!is_aos(bld_base) && instr->dest.ssa.num_components > 1) {
+      for (unsigned i = 0; i < instr->dest.ssa.num_components; i++)
         result[i] = LLVMBuildExtractValue(builder, val, i, "");
    } else {
       result[0] = val;

@@ -1435,7 +1435,7 @@ visit_load_var(struct lp_build_nir_context *bld_base,
    LLVMValueRef indir_index;
    LLVMValueRef indir_vertex_index = NULL;
    unsigned vertex_index = 0;
-   unsigned nc = nir_dest_num_components(instr->dest);
+   unsigned nc = instr->dest.ssa.num_components;
    unsigned bit_size = instr->dest.ssa.bit_size;
    if (var) {
       bool vs_in = bld_base->shader->info.stage == MESA_SHADER_VERTEX &&

@@ -1521,7 +1521,7 @@ visit_load_ubo(struct lp_build_nir_context *bld_base,
    if (nir_src_num_components(instr->src[0]) == 1)
       idx = LLVMBuildExtractElement(builder, idx, lp_build_const_int32(gallivm, 0), "");
 
-   bld_base->load_ubo(bld_base, nir_dest_num_components(instr->dest),
+   bld_base->load_ubo(bld_base, instr->dest.ssa.num_components,
                       instr->dest.ssa.bit_size,
                       offset_is_uniform, idx, offset, result);
 }

@@ -1537,7 +1537,7 @@ visit_load_push_constant(struct lp_build_nir_context *bld_base,
    LLVMValueRef idx = lp_build_const_int32(gallivm, 0);
    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
 
-   bld_base->load_ubo(bld_base, nir_dest_num_components(instr->dest),
+   bld_base->load_ubo(bld_base, instr->dest.ssa.num_components,
                       instr->dest.ssa.bit_size,
                       offset_is_uniform, idx, offset, result);
 }

@@ -1556,7 +1556,7 @@ visit_load_ssbo(struct lp_build_nir_context *bld_base,
    bool index_and_offset_are_uniform =
       nir_src_is_always_uniform(instr->src[0]) &&
       nir_src_is_always_uniform(instr->src[1]);
-   bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest),
+   bld_base->load_mem(bld_base, instr->dest.ssa.num_components,
                       instr->dest.ssa.bit_size,
                       index_and_offset_are_uniform, false, idx, offset, result);
 }

@@ -1881,7 +1881,7 @@ visit_shared_load(struct lp_build_nir_context *bld_base,
 {
    LLVMValueRef offset = get_src(bld_base, instr->src[0]);
    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
-   bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest),
+   bld_base->load_mem(bld_base, instr->dest.ssa.num_components,
                       instr->dest.ssa.bit_size,
                       offset_is_uniform, false, NULL, offset, result);
 }

@@ -1957,7 +1957,7 @@ visit_load_kernel_input(struct lp_build_nir_context *bld_base,
    LLVMValueRef offset = get_src(bld_base, instr->src[0]);
 
    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
-   bld_base->load_kernel_arg(bld_base, nir_dest_num_components(instr->dest),
+   bld_base->load_kernel_arg(bld_base, instr->dest.ssa.num_components,
                              instr->dest.ssa.bit_size,
                              nir_src_bit_size(instr->src[0]),
                              offset_is_uniform, offset, result);

@@ -1971,7 +1971,7 @@ visit_load_global(struct lp_build_nir_context *bld_base,
 {
    LLVMValueRef addr = get_src(bld_base, instr->src[0]);
    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
-   bld_base->load_global(bld_base, nir_dest_num_components(instr->dest),
+   bld_base->load_global(bld_base, instr->dest.ssa.num_components,
                          instr->dest.ssa.bit_size,
                          nir_src_bit_size(instr->src[0]),
                          offset_is_uniform, addr, result);

@@ -2036,7 +2036,7 @@ visit_interp(struct lp_build_nir_context *bld_base,
    struct gallivm_state *gallivm = bld_base->base.gallivm;
    LLVMBuilderRef builder = gallivm->builder;
    nir_deref_instr *deref = nir_instr_as_deref(instr->src[0].ssa->parent_instr);
-   unsigned num_components = nir_dest_num_components(instr->dest);
+   unsigned num_components = instr->dest.ssa.num_components;
    nir_variable *var = nir_deref_instr_get_variable(deref);
    unsigned const_index;
    LLVMValueRef indir_index;

@@ -2067,7 +2067,7 @@ visit_load_scratch(struct lp_build_nir_context *bld_base,
 {
    LLVMValueRef offset = get_src(bld_base, instr->src[0]);
 
-   bld_base->load_scratch(bld_base, nir_dest_num_components(instr->dest),
+   bld_base->load_scratch(bld_base, instr->dest.ssa.num_components,
                           instr->dest.ssa.bit_size, offset, result);
 }
 

@@ -2091,7 +2091,7 @@ visit_payload_load(struct lp_build_nir_context *bld_base,
 {
    LLVMValueRef offset = get_src(bld_base, instr->src[0]);
    bool offset_is_uniform = nir_src_is_always_uniform(instr->src[0]);
-   bld_base->load_mem(bld_base, nir_dest_num_components(instr->dest),
+   bld_base->load_mem(bld_base, instr->dest.ssa.num_components,
                       instr->dest.ssa.bit_size,
                       offset_is_uniform, true, NULL, offset, result);
 }

@@ -2685,7 +2685,7 @@ visit_tex(struct lp_build_nir_context *bld_base, nir_tex_instr *instr)
    default:
       unreachable("unexpected alu type");
    }
-   for (int i = 0; i < nir_dest_num_components(instr->dest); ++i) {
+   for (int i = 0; i < instr->dest.ssa.num_components; ++i) {
      if (is_float) {
        texel[i] = lp_build_float_to_half(gallivm, texel[i]);
      } else {
@@ -1271,7 +1271,7 @@ ntt_get_alu_src(struct ntt_compile *c, nir_alu_instr *instr, int i)
        !(src.src.is_ssa && src.src.ssa->parent_instr->type == nir_instr_type_ssa_undef)) {
       int chan1 = 1;
       if (nir_op_infos[instr->op].input_sizes[i] == 0) {
-         chan1 = nir_dest_num_components(instr->dest.dest) > 1 ? 1 : 0;
+         chan1 = instr->dest.dest.ssa.num_components > 1 ? 1 : 0;
       }
       usrc = ureg_swizzle(usrc,
                           src.swizzle[0] * 2,

@@ -2414,7 +2414,7 @@ ntt_emit_load_sysval(struct ntt_compile *c, nir_intrinsic_instr *instr)
     * aren't defined, even if they aren't really read. (GLSL compile fails on
     * gl_NumWorkGroups.w, for example).
     */
-   uint32_t write_mask = BITSET_MASK(nir_dest_num_components(instr->dest));
+   uint32_t write_mask = BITSET_MASK(instr->dest.ssa.num_components);
    sv = ntt_swizzle_for_write_mask(sv, write_mask);
 
    /* TGSI and NIR define these intrinsics as always loading ints, but they can
@@ -184,7 +184,7 @@ record_loads(nir_builder *b, nir_instr *instr, void *data)
       return false;
 
    assert(intr->dest.ssa.bit_size >= 16 && "no 8-bit sysvals");
-   unsigned dim = nir_dest_num_components(intr->dest);
+   unsigned dim = intr->dest.ssa.num_components;
    unsigned element_size = intr->dest.ssa.bit_size / 16;
    unsigned length = dim * element_size;
 
@@ -415,7 +415,7 @@ get_src(struct etna_compile *c, nir_src *src)
 static bool
 vec_dest_has_swizzle(nir_alu_instr *vec, nir_def *ssa)
 {
-   for (unsigned i = 0; i < nir_dest_num_components(vec->dest.dest); i++) {
+   for (unsigned i = 0; i < vec->dest.dest.ssa.num_components; i++) {
       if (vec->src[i].src.ssa != ssa)
          continue;
 

@@ -718,7 +718,7 @@ insert_vec_mov(nir_alu_instr *vec, unsigned start_idx, nir_shader *shader)
 
    unsigned num_components = 1;
 
-   for (unsigned i = start_idx + 1; i < nir_dest_num_components(vec->dest.dest); i++) {
+   for (unsigned i = start_idx + 1; i < vec->dest.dest.ssa.num_components; i++) {
       if (nir_srcs_equal(vec->src[i].src, vec->src[start_idx].src) &&
           is_src_mod_neg(&vec->instr, i) == is_src_mod_neg(&vec->instr, start_idx) &&
          is_src_mod_abs(&vec->instr, i) == is_src_mod_neg(&vec->instr, start_idx)) {

@@ -894,7 +894,7 @@ lower_alu(struct etna_compile *c, nir_alu_instr *alu)
    }
 
    unsigned finished_write_mask = 0;
-   for (unsigned i = 0; i < nir_dest_num_components(alu->dest.dest); i++) {
+   for (unsigned i = 0; i < alu->dest.dest.ssa.num_components; i++) {
       nir_def *ssa = alu->src[i].src.ssa;
 
       /* check that vecN instruction is only user of this */
@@ -54,7 +54,7 @@ get_proj_index(nir_instr *coord_instr, nir_instr *proj_instr,
    if (intrin->intrinsic != nir_intrinsic_load_input)
       return NULL;
 
-   if (nir_dest_num_components(intrin->dest) != 4)
+   if (intrin->dest.ssa.num_components != 4)
       return NULL;
 
    /* Coords must be in .xyz */
@@ -49,19 +49,19 @@ lima_nir_split_load_input_instr(nir_builder *b,
    uint8_t swizzle = alu->src[0].swizzle[0];
    int i;
 
-   for (i = 1; i < nir_dest_num_components(alu->dest.dest); i++)
+   for (i = 1; i < alu->dest.dest.ssa.num_components; i++)
       if (alu->src[0].swizzle[i] != (swizzle + i))
         break;
 
-   if (i != nir_dest_num_components(alu->dest.dest))
+   if (i != alu->dest.dest.ssa.num_components)
      return false;
 
    /* mali4xx can't access unaligned vec3, don't split load input */
-   if (nir_dest_num_components(alu->dest.dest) == 3 && swizzle > 0)
+   if (alu->dest.dest.ssa.num_components == 3 && swizzle > 0)
      return false;
 
    /* mali4xx can't access unaligned vec2, don't split load input */
-   if (nir_dest_num_components(alu->dest.dest) == 2 &&
+   if (alu->dest.dest.ssa.num_components == 2 &&
        swizzle != 0 && swizzle != 2)
      return false;
 

@@ -70,8 +70,8 @@ lima_nir_split_load_input_instr(nir_builder *b,
                                  b->shader,
                                  intrin->intrinsic);
    nir_def_init(&new_intrin->instr, &new_intrin->dest.ssa,
-                nir_dest_num_components(alu->dest.dest), ssa->bit_size);
-   new_intrin->num_components = nir_dest_num_components(alu->dest.dest);
+                alu->dest.dest.ssa.num_components, ssa->bit_size);
+   new_intrin->num_components = alu->dest.dest.ssa.num_components;
    nir_intrinsic_set_base(new_intrin, nir_intrinsic_base(intrin));
    nir_intrinsic_set_component(new_intrin, nir_intrinsic_component(intrin) + swizzle);
    nir_intrinsic_set_dest_type(new_intrin, nir_intrinsic_dest_type(intrin));

@@ -196,7 +196,7 @@ lima_alu_to_scalar_filter_cb(const nir_instr *instr, const void *data)
       return false;
    }
 
-   int num_components = nir_dest_num_components(alu->dest.dest);
+   int num_components = alu->dest.dest.ssa.num_components;
 
    uint8_t swizzle = alu->src[0].swizzle[0];
 
@@ -1903,7 +1903,7 @@ AluInstr::from_nir(nir_alu_instr *alu, Shader& shader)
 static Pin
 pin_for_components(const nir_alu_instr& alu)
 {
-   return nir_dest_num_components(alu.dest.dest) == 1 ? pin_free : pin_none;
+   return alu.dest.dest.ssa.num_components == 1 ? pin_free : pin_none;
 }
 
 static bool

@@ -1924,7 +1924,7 @@ emit_alu_op1_64bit(const nir_alu_instr& alu,
       swz[1] = 0;
    }
 
-   for (unsigned i = 0; i < nir_dest_num_components(alu.dest.dest); ++i) {
+   for (unsigned i = 0; i < alu.dest.dest.ssa.num_components; ++i) {
       ir = new AluInstr(opcode,
                         value_factory.dest(alu.dest.dest.ssa, 2 * i, pin_chan),
                         value_factory.src64(alu.src[0], i, swz[0]),

@@ -1950,7 +1950,7 @@ emit_alu_mov_64bit(const nir_alu_instr& alu, Shader& shader)
 
    AluInstr *ir = nullptr;
 
-   for (unsigned i = 0; i < nir_dest_num_components(alu.dest.dest); ++i) {
+   for (unsigned i = 0; i < alu.dest.dest.ssa.num_components; ++i) {
       for (unsigned c = 0; c < 2; ++c) {
         ir = new AluInstr(op1_mov,
                           value_factory.dest(alu.dest.dest.ssa, 2 * i + c, pin_free),

@@ -1971,7 +1971,7 @@ emit_alu_neg(const nir_alu_instr& alu, Shader& shader)
 
    AluInstr *ir = nullptr;
 
-   for (unsigned i = 0; i < nir_dest_num_components(alu.dest.dest); ++i) {
+   for (unsigned i = 0; i < alu.dest.dest.ssa.num_components; ++i) {
       for (unsigned c = 0; c < 2; ++c) {
         ir = new AluInstr(op1_mov,
                           value_factory.dest(alu.dest.dest.ssa, 2 * i + c, pin_chan),

@@ -1992,7 +1992,7 @@ emit_alu_abs64(const nir_alu_instr& alu, Shader& shader)
 {
    auto& value_factory = shader.value_factory();
 
-   assert(nir_dest_num_components(alu.dest.dest) == 1);
+   assert(alu.dest.dest.ssa.num_components == 1);
 
    shader.emit_instruction(new AluInstr(op1_mov,
                                         value_factory.dest(alu.dest.dest.ssa, 0, pin_chan),

@@ -2044,7 +2044,7 @@ emit_alu_fsat64(const nir_alu_instr& alu, Shader& shader)
 {
    auto& value_factory = shader.value_factory();
 
-   assert(nir_dest_num_components(alu.dest.dest) == 1);
+   assert(alu.dest.dest.ssa.num_components == 1);
 
    if (try_propagat_fsat64(alu, shader)) {
       auto ir = new AluInstr(op1_mov,
@@ -2100,9 +2100,9 @@ emit_alu_op2_64bit(const nir_alu_instr& alu,
 
    int num_emit0 = opcode == op2_mul_64 ? 3 : 1;
 
-   assert(num_emit0 == 1 || nir_dest_num_components(alu.dest.dest) == 1);
+   assert(num_emit0 == 1 || alu.dest.dest.ssa.num_components == 1);
 
-   for (unsigned k = 0; k < nir_dest_num_components(alu.dest.dest); ++k) {
+   for (unsigned k = 0; k < alu.dest.dest.ssa.num_components; ++k) {
       int i = 0;
       for (; i < num_emit0; ++i) {
         auto dest = i < 2 ? value_factory.dest(alu.dest.dest.ssa, i, pin_chan)

@@ -2149,7 +2149,7 @@ emit_alu_op2_64bit_one_dst(const nir_alu_instr& alu,
 
    AluInstr::SrcValues src(4);
 
-   for (unsigned k = 0; k < nir_dest_num_components(alu.dest.dest); ++k) {
+   for (unsigned k = 0; k < alu.dest.dest.ssa.num_components; ++k) {
       auto dest = value_factory.dest(alu.dest.dest.ssa, 2 * k, pin_chan);
       src[0] = value_factory.src64(alu.src[order[0]], k, 1);
       src[1] = value_factory.src64(alu.src[order[1]], k, 1);

@@ -2224,7 +2224,7 @@ emit_alu_b2f64(const nir_alu_instr& alu, Shader& shader)
    auto group = new AluGroup();
    AluInstr *ir = nullptr;
 
-   for (unsigned i = 0; i < nir_dest_num_components(alu.dest.dest); ++i) {
+   for (unsigned i = 0; i < alu.dest.dest.ssa.num_components; ++i) {
       ir = new AluInstr(op2_and_int,
                         value_factory.dest(alu.dest.dest.ssa, 2 * i, pin_group),
                         value_factory.src(alu.src[0], i),

@@ -2253,7 +2253,7 @@ emit_alu_i2f64(const nir_alu_instr& alu, EAluOp op, Shader& shader)
    auto group = new AluGroup();
    AluInstr *ir = nullptr;
 
-   assert(nir_dest_num_components(alu.dest.dest) == 1);
+   assert(alu.dest.dest.ssa.num_components == 1);
 
    auto tmpx = value_factory.temp_register();
    shader.emit_instruction(new AluInstr(op2_and_int,

@@ -2315,7 +2315,7 @@ emit_alu_f2f64(const nir_alu_instr& alu, Shader& shader)
    auto group = new AluGroup();
    AluInstr *ir = nullptr;
 
-   assert(nir_dest_num_components(alu.dest.dest) == 1);
+   assert(alu.dest.dest.ssa.num_components == 1);
 
    ir = new AluInstr(op1_flt32_to_flt64,
                      value_factory.dest(alu.dest.dest.ssa, 0, pin_chan),

@@ -2359,7 +2359,7 @@ emit_alu_b2x(const nir_alu_instr& alu, AluInlineConstants mask, Shader& shader)
    AluInstr *ir = nullptr;
    auto pin = pin_for_components(alu);
 
-   for (unsigned i = 0; i < nir_dest_num_components(alu.dest.dest); ++i) {
+   for (unsigned i = 0; i < alu.dest.dest.ssa.num_components; ++i) {
       auto src = value_factory.src(alu.src[0], i);
       ir = new AluInstr(op2_and_int,
                         value_factory.dest(alu.dest.dest.ssa, i, pin),

@@ -2384,7 +2384,7 @@ emit_alu_op1(const nir_alu_instr& alu,
    AluInstr *ir = nullptr;
    auto pin = pin_for_components(alu);
 
-   for (unsigned i = 0; i < nir_dest_num_components(alu.dest.dest); ++i) {
+   for (unsigned i = 0; i < alu.dest.dest.ssa.num_components; ++i) {
      ir = new AluInstr(opcode,
                        value_factory.dest(alu.dest.dest.ssa, i, pin),
                        value_factory.src(alu.src[0], i),

@@ -2426,7 +2426,7 @@ emit_alu_op2(const nir_alu_instr& alu,
 
    auto pin = pin_for_components(alu);
    AluInstr *ir = nullptr;
-   for (unsigned i = 0; i < nir_dest_num_components(alu.dest.dest); ++i) {
+   for (unsigned i = 0; i < alu.dest.dest.ssa.num_components; ++i) {
      ir = new AluInstr(opcode,
                        value_factory.dest(alu.dest.dest.ssa, i, pin),
                        value_factory.src(*src0, i),

@@ -2464,7 +2464,7 @@ emit_alu_op3(const nir_alu_instr& alu,
 
    auto pin = pin_for_components(alu);
    AluInstr *ir = nullptr;
-   for (unsigned i = 0; i < nir_dest_num_components(alu.dest.dest); ++i) {
+   for (unsigned i = 0; i < alu.dest.dest.ssa.num_components; ++i) {
      ir = new AluInstr(opcode,
                        value_factory.dest(alu.dest.dest.ssa, i, pin),
                        value_factory.src(*src[0], i),
@@ -2719,7 +2719,7 @@ emit_alu_comb_with_zero(const nir_alu_instr& alu, EAluOp opcode, Shader& shader)
    auto& value_factory = shader.value_factory();
    AluInstr *ir = nullptr;
    auto pin = pin_for_components(alu);
-   for (unsigned i = 0; i < nir_dest_num_components(alu.dest.dest); ++i) {
+   for (unsigned i = 0; i < alu.dest.dest.ssa.num_components; ++i) {
      ir = new AluInstr(opcode,
                        value_factory.dest(alu.dest.dest.ssa, i, pin),
                        value_factory.zero(),

@@ -2878,7 +2878,7 @@ emit_alu_trans_op1_eg(const nir_alu_instr& alu, EAluOp opcode, Shader& shader)
    AluInstr *ir = nullptr;
    auto pin = pin_for_components(alu);
 
-   for (unsigned i = 0; i < nir_dest_num_components(alu.dest.dest); ++i) {
+   for (unsigned i = 0; i < alu.dest.dest.ssa.num_components; ++i) {
      ir = new AluInstr(opcode,
                        value_factory.dest(alu.dest.dest.ssa, i, pin),
                        value_factory.src(src0, i),

@@ -2898,7 +2898,7 @@ emit_alu_f2i32_or_u32_eg(const nir_alu_instr& alu, EAluOp opcode, Shader& shader
 
    PRegister reg[4];
 
-   int num_comp = nir_dest_num_components(alu.dest.dest);
+   int num_comp = alu.dest.dest.ssa.num_components;
 
    for (int i = 0; i < num_comp; ++i) {
       reg[i] = value_factory.temp_register();

@@ -2935,7 +2935,7 @@ emit_alu_trans_op1_cayman(const nir_alu_instr& alu, EAluOp opcode, Shader& shade
 
    const std::set<AluModifiers> flags({alu_write, alu_last_instr, alu_is_cayman_trans});
 
-   for (unsigned j = 0; j < nir_dest_num_components(alu.dest.dest); ++j) {
+   for (unsigned j = 0; j < alu.dest.dest.ssa.num_components; ++j) {
       unsigned ncomp = j == 3 ? 4 : 3;
 
       AluInstr::SrcValues srcs(ncomp);

@@ -2961,7 +2961,7 @@ emit_alu_trans_op2_eg(const nir_alu_instr& alu, EAluOp opcode, Shader& shader)
    AluInstr *ir = nullptr;
 
    auto pin = pin_for_components(alu);
-   for (unsigned i = 0; i < nir_dest_num_components(alu.dest.dest); ++i) {
+   for (unsigned i = 0; i < alu.dest.dest.ssa.num_components; ++i) {
      ir = new AluInstr(opcode,
                        value_factory.dest(alu.dest.dest.ssa, i, pin),
                        value_factory.src(src0, i),

@@ -2985,7 +2985,7 @@ emit_alu_trans_op2_cayman(const nir_alu_instr& alu, EAluOp opcode, Shader& shade
 
    const std::set<AluModifiers> flags({alu_write, alu_last_instr, alu_is_cayman_trans});
 
-   for (unsigned k = 0; k < nir_dest_num_components(alu.dest.dest); ++k) {
+   for (unsigned k = 0; k < alu.dest.dest.ssa.num_components; ++k) {
       AluInstr::SrcValues srcs(2 * last_slot);
       PRegister dest = value_factory.dest(alu.dest.dest.ssa, k, pin_free);
 

@@ -3006,7 +3006,7 @@ emit_tex_fdd(const nir_alu_instr& alu, TexInstr::Opcode opcode, bool fine, Shade
 {
    auto& value_factory = shader.value_factory();
 
-   int ncomp = nir_dest_num_components(alu.dest.dest);
+   int ncomp = alu.dest.dest.ssa.num_components;
    RegisterVec4::Swizzle src_swz = {7, 7, 7, 7};
    RegisterVec4::Swizzle tmp_swz = {7, 7, 7, 7};
    for (auto i = 0; i < ncomp; ++i) {
@@ -553,7 +553,7 @@ RatInstr::emit_ssbo_load(nir_intrinsic_instr *intr, Shader& shader)
       {0, 1, 2, 3}
    };
 
-   int comp_idx = nir_dest_num_components(intr->dest) - 1;
+   int comp_idx = intr->dest.ssa.num_components - 1;
 
    auto [offset, res_offset] = shader.evaluate_resource_offset(intr, 0);
    {

@@ -910,7 +910,7 @@ RatInstr::emit_image_size(nir_intrinsic_instr *intrin, Shader& shader)
 
    if (nir_intrinsic_image_dim(intrin) == GLSL_SAMPLER_DIM_CUBE &&
        nir_intrinsic_image_array(intrin) &&
-       nir_dest_num_components(intrin->dest) > 2) {
+       intrin->dest.ssa.num_components > 2) {
       /* Need to load the layers from a const buffer */
 
       auto dest = vf.dest_vec4(intrin->dest.ssa, pin_group);
@@ -521,7 +521,7 @@ r600_lower_shared_io_impl(nir_function_impl *impl)
       if (op->intrinsic == nir_intrinsic_load_shared) {
          nir_def *addr = op->src[0].ssa;
 
-         switch (nir_dest_num_components(op->dest)) {
+         switch (op->dest.ssa.num_components) {
          case 2: {
             auto addr2 = nir_iadd_imm(&b, addr, 4);
             addr = nir_vec2(&b, addr, addr2);

@@ -541,7 +541,7 @@ r600_lower_shared_io_impl(nir_function_impl *impl)
 
          auto load =
             nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_local_shared_r600);
-         load->num_components = nir_dest_num_components(op->dest);
+         load->num_components = op->dest.ssa.num_components;
          load->src[0] = nir_src_for_ssa(addr);
          nir_def_init(&load->instr, &load->dest.ssa, load->num_components,
                       32);
@@ -46,7 +46,7 @@ r600_legalize_image_load_store_impl(nir_builder *b,
 
    if (load_value)
       default_value =
-         nir_imm_zero(b, nir_dest_num_components(ir->dest), ir->dest.ssa.bit_size);
+         nir_imm_zero(b, ir->dest.ssa.num_components, ir->dest.ssa.bit_size);
 
    auto image_exists =
       nir_ult_imm(b, ir->src[0].ssa, b->shader->info.num_images);
@@ -119,7 +119,7 @@ nir_def *
 LowerLoad64Uniform::lower(nir_instr *instr)
 {
    auto intr = nir_instr_as_intrinsic(instr);
-   int old_components = nir_dest_num_components(intr->dest);
+   int old_components = intr->dest.ssa.num_components;
    assert(old_components <= 2);
    intr->dest.ssa.num_components *= 2;
    intr->dest.ssa.bit_size = 32;

@@ -170,7 +170,7 @@ class LowerSplit64op : public NirLowerInstruction {
          }
          case nir_instr_type_phi: {
            auto phi = nir_instr_as_phi(instr);
-           return nir_dest_num_components(phi->dest) == 64;
+           return phi->dest.ssa.num_components == 64;
          }
          default:
            return false;

@@ -287,7 +287,7 @@ LowerSplit64BitVar::filter(const nir_instr *instr) const
       case nir_intrinsic_load_ssbo:
          if (intr->dest.ssa.bit_size != 64)
            return false;
-         return nir_dest_num_components(intr->dest) >= 3;
+         return intr->dest.ssa.num_components >= 3;
       case nir_intrinsic_store_output:
          if (nir_src_bit_size(intr->src[0]) != 64)
            return false;

@@ -304,7 +304,7 @@ LowerSplit64BitVar::filter(const nir_instr *instr) const
       auto alu = nir_instr_as_alu(instr);
       switch (alu->op) {
       case nir_op_bcsel:
-         if (nir_dest_num_components(alu->dest.dest) < 3)
+         if (alu->dest.dest.ssa.num_components < 3)
            return false;
         return alu->dest.dest.ssa.bit_size == 64;
      case nir_op_bany_fnequal3:
@@ -532,7 +532,7 @@ LowerSplit64BitVar::get_var_pair(nir_variable *old_var)
 nir_def *
 LowerSplit64BitVar::split_double_load(nir_intrinsic_instr *load1)
 {
-   unsigned old_components = nir_dest_num_components(load1->dest);
+   unsigned old_components = load1->dest.ssa.num_components;
    auto load2 = nir_instr_as_intrinsic(nir_instr_clone(b->shader, &load1->instr));
    nir_io_semantics sem = nir_intrinsic_io_semantics(load1);
 

@@ -580,7 +580,7 @@ LowerSplit64BitVar::split_store_output(nir_intrinsic_instr *store1)
 nir_def *
 LowerSplit64BitVar::split_double_load_uniform(nir_intrinsic_instr *intr)
 {
-   unsigned second_components = nir_dest_num_components(intr->dest) - 2;
+   unsigned second_components = intr->dest.ssa.num_components - 2;
    nir_intrinsic_instr *load2 =
       nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);
    load2->src[0] = nir_src_for_ssa(nir_iadd_imm(b, intr->src[0].ssa, 1));

@@ -610,7 +610,7 @@ LowerSplit64BitVar::split_double_load_uniform(nir_intrinsic_instr *intr)
 nir_def *
 LowerSplit64BitVar::split_double_load_ssbo(nir_intrinsic_instr *intr)
 {
-   unsigned second_components = nir_dest_num_components(intr->dest) - 2;
+   unsigned second_components = intr->dest.ssa.num_components - 2;
    nir_intrinsic_instr *load2 =
       nir_instr_as_intrinsic(nir_instr_clone(b->shader, &intr->instr));
 

@@ -630,7 +630,7 @@ LowerSplit64BitVar::split_double_load_ssbo(nir_intrinsic_instr *intr)
 nir_def *
 LowerSplit64BitVar::split_double_load_ubo(nir_intrinsic_instr *intr)
 {
-   unsigned second_components = nir_dest_num_components(intr->dest) - 2;
+   unsigned second_components = intr->dest.ssa.num_components - 2;
    nir_intrinsic_instr *load2 =
       nir_instr_as_intrinsic(nir_instr_clone(b->shader, &intr->instr));
    load2->src[0] = intr->src[0];

@@ -700,13 +700,13 @@ nir_def *
 LowerSplit64BitVar::split_bcsel(nir_alu_instr *alu)
 {
    static nir_def *dest[4];
-   for (unsigned i = 0; i < nir_dest_num_components(alu->dest.dest); ++i) {
+   for (unsigned i = 0; i < alu->dest.dest.ssa.num_components; ++i) {
      dest[i] = nir_bcsel(b,
                          nir_channel(b, alu->src[0].src.ssa, i),
                          nir_channel(b, alu->src[1].src.ssa, i),
                          nir_channel(b, alu->src[2].src.ssa, i));
    }
-   return nir_vec(b, dest, nir_dest_num_components(alu->dest.dest));
+   return nir_vec(b, dest, alu->dest.dest.ssa.num_components);
 }
 
 nir_def *
@@ -213,7 +213,7 @@ static uint32_t
 get_dest_usee_mask(nir_intrinsic_instr *op)
 {
    MaskQuery mq = {0};
-   mq.full_mask = (1 << nir_dest_num_components(op->dest)) - 1;
+   mq.full_mask = (1 << op->dest.ssa.num_components) - 1;
 
    nir_foreach_use(use_src, &op->dest.ssa)
    {

@@ -264,7 +264,7 @@ replace_load_instr(nir_builder *b, nir_intrinsic_instr *op, nir_def *addr)
       auto new_load = nir_load_local_shared_r600(b, 32, addr_outer);
 
       auto undef = nir_undef(b, 1, 32);
-      int comps = nir_dest_num_components(op->dest);
+      int comps = op->dest.ssa.num_components;
       nir_def *remix[4] = {undef, undef, undef, undef};
 
       int chan = 0;
@@ -1538,7 +1538,7 @@ Shader::load_ubo(nir_intrinsic_instr *instr)
       RegisterVec4::Swizzle dest_swz{7, 7, 7, 7};
       auto dest = value_factory().dest_vec4(instr->dest.ssa, pin_group);
 
-      for (unsigned i = 0; i < nir_dest_num_components(instr->dest); ++i) {
+      for (unsigned i = 0; i < instr->dest.ssa.num_components; ++i) {
         dest_swz[i] = i + nir_intrinsic_component(instr);
       }
 

@@ -1560,10 +1560,10 @@ Shader::load_ubo(nir_intrinsic_instr *instr)
       int buf_cmp = nir_intrinsic_component(instr);
 
       AluInstr *ir = nullptr;
-      auto pin = nir_dest_num_components(instr->dest) == 1
+      auto pin = instr->dest.ssa.num_components == 1
                     ? pin_free
                     : pin_none;
-      for (unsigned i = 0; i < nir_dest_num_components(instr->dest); ++i) {
+      for (unsigned i = 0; i < instr->dest.ssa.num_components; ++i) {
 
         sfn_log << SfnLog::io << "UBO[" << bufid << "] " << instr->dest.ssa.index
                 << " const[" << i << "]: " << instr->const_index[i] << "\n";

@@ -1584,7 +1584,7 @@ Shader::load_ubo(nir_intrinsic_instr *instr)
    AluInstr *ir = nullptr;
    auto kc_id = value_factory().src(instr->src[0], 0);
 
-   for (unsigned i = 0; i < nir_dest_num_components(instr->dest); ++i) {
+   for (unsigned i = 0; i < instr->dest.ssa.num_components; ++i) {
       int cmp = buf_cmp + i;
       auto u =
          new UniformValue(512 + buf_offset->u32, cmp, kc_id, nir_intrinsic_base(instr));
@@ -78,7 +78,7 @@ FragmentShader::load_input(nir_intrinsic_instr *intr)
    auto location = nir_intrinsic_io_semantics(intr).location;
    if (location == VARYING_SLOT_POS) {
       AluInstr *ir = nullptr;
-      for (unsigned i = 0; i < nir_dest_num_components(intr->dest); ++i) {
+      for (unsigned i = 0; i < intr->dest.ssa.num_components; ++i) {
         ir = new AluInstr(op1_mov,
                           vf.dest(intr->dest.ssa, i, pin_none),
                           m_pos_input[i],

@@ -200,7 +200,7 @@ FragmentShader::load_interpolated_input(nir_intrinsic_instr *intr)
    unsigned loc = nir_intrinsic_io_semantics(intr).location;
    switch (loc) {
    case VARYING_SLOT_POS:
-      for (unsigned i = 0; i < nir_dest_num_components(intr->dest); ++i)
+      for (unsigned i = 0; i < intr->dest.ssa.num_components; ++i)
         vf.inject_value(intr->dest.ssa, i, m_pos_input[i]);
      return true;
   case VARYING_SLOT_FACE:

@@ -684,7 +684,7 @@ FragmentShaderR600::load_input_hw(nir_intrinsic_instr *intr)
 {
    auto& vf = value_factory();
    AluInstr *ir = nullptr;
-   for (unsigned i = 0; i < nir_dest_num_components(intr->dest); ++i) {
+   for (unsigned i = 0; i < intr->dest.ssa.num_components; ++i) {
      sfn_log << SfnLog::io << "Inject register "
              << *m_interpolated_inputs[nir_intrinsic_base(intr)][i] << "\n";
      unsigned index = nir_intrinsic_component(intr) + i;

@@ -726,7 +726,7 @@ FragmentShaderEG::load_input_hw(nir_intrinsic_instr *intr)
 
    bool need_temp = comp > 0;
    AluInstr *ir = nullptr;
-   for (unsigned i = 0; i < nir_dest_num_components(intr->dest); ++i) {
+   for (unsigned i = 0; i < intr->dest.ssa.num_components; ++i) {
       if (need_temp) {
         auto tmp = vf.temp_register(comp + i);
         ir =

@@ -806,7 +806,7 @@ FragmentShaderEG::load_interpolated_input_hw(nir_intrinsic_instr *intr)
    ASSERTED auto param = nir_src_as_const_value(intr->src[1]);
    assert(param && "Indirect PS inputs not (yet) supported");
 
-   int dest_num_comp = nir_dest_num_components(intr->dest);
+   int dest_num_comp = intr->dest.ssa.num_components;
    int start_comp = nir_intrinsic_component(intr);
    bool need_temp = start_comp > 0;
 

@@ -823,7 +823,7 @@ FragmentShaderEG::load_interpolated_input_hw(nir_intrinsic_instr *intr)
 
    if (need_temp) {
       AluInstr *ir = nullptr;
-      for (unsigned i = 0; i < nir_dest_num_components(intr->dest); ++i) {
+      for (unsigned i = 0; i < intr->dest.ssa.num_components; ++i) {
         auto real_dst = vf.dest(intr->dest.ssa, i, pin_chan);
         ir = new AluInstr(op1_mov, real_dst, dst[i + start_comp], AluInstr::write);
         emit_instruction(ir);
@@ -323,7 +323,7 @@ GeometryShader::emit_load_per_vertex_input(nir_intrinsic_instr *instr)
    auto dest = value_factory().dest_vec4(instr->dest.ssa, pin_group);
 
    RegisterVec4::Swizzle dest_swz{7, 7, 7, 7};
-   for (unsigned i = 0; i < nir_dest_num_components(instr->dest); ++i) {
+   for (unsigned i = 0; i < instr->dest.ssa.num_components; ++i) {
       dest_swz[i] = i + nir_intrinsic_component(instr);
    }
 
@@ -502,7 +502,7 @@ VertexShader::load_input(nir_intrinsic_instr *intr)
 
    AluInstr *ir = nullptr;
    if (location < VERT_ATTRIB_MAX) {
-      for (unsigned i = 0; i < nir_dest_num_components(intr->dest); ++i) {
+      for (unsigned i = 0; i < intr->dest.ssa.num_components; ++i) {
         auto src = vf.allocate_pinned_register(driver_location + 1, i);
         src->set_flag(Register::ssa);
         vf.inject_value(intr->dest.ssa, i, src);
@@ -1085,7 +1085,7 @@ ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
         if (instr->op == nir_op_unpack_unorm_4x8) {
                 struct qreg src = ntq_get_src(c, instr->src[0].src,
                                               instr->src[0].swizzle[0]);
-                unsigned count = nir_dest_num_components(instr->dest.dest);
+                unsigned count = instr->dest.dest.ssa.num_components;
                 for (int i = 0; i < count; i++) {
                         ntq_store_def(c, &instr->dest.dest.ssa, i,
                                       qir_UNPACK_8_F(c, src, i));
@@ -2159,7 +2159,7 @@ emit_alu(struct ntv_context *ctx, nir_alu_instr *alu)
    }
 
    unsigned bit_size = alu->dest.dest.ssa.bit_size;
-   unsigned num_components = nir_dest_num_components(alu->dest.dest);
+   unsigned num_components = alu->dest.dest.ssa.num_components;
    nir_alu_type atype = bit_size == 1 ?
                         nir_type_bool :
                         (alu_op_is_typeless(alu->op) ? typeless_type : nir_op_infos[alu->op].output_type);

@@ -2712,7 +2712,7 @@ static void
 emit_load_shared(struct ntv_context *ctx, nir_intrinsic_instr *intr)
 {
    SpvId dest_type = get_def_type(ctx, &intr->dest.ssa, nir_type_uint);
-   unsigned num_components = nir_dest_num_components(intr->dest);
+   unsigned num_components = intr->dest.ssa.num_components;
    unsigned bit_size = intr->dest.ssa.bit_size;
    SpvId uint_type = get_uvec_type(ctx, bit_size, 1);
    SpvId ptr_type = spirv_builder_type_pointer(&ctx->builder,

@@ -2774,7 +2774,7 @@ static void
 emit_load_scratch(struct ntv_context *ctx, nir_intrinsic_instr *intr)
 {
    SpvId dest_type = get_def_type(ctx, &intr->dest.ssa, nir_type_uint);
-   unsigned num_components = nir_dest_num_components(intr->dest);
+   unsigned num_components = intr->dest.ssa.num_components;
    unsigned bit_size = intr->dest.ssa.bit_size;
    SpvId uint_type = get_uvec_type(ctx, bit_size, 1);
    SpvId ptr_type = spirv_builder_type_pointer(&ctx->builder,

@@ -2839,7 +2839,7 @@ emit_load_push_const(struct ntv_context *ctx, nir_intrinsic_instr *intr)
    SpvId load_type = get_uvec_type(ctx, 32, 1);
 
    /* number of components being loaded */
-   unsigned num_components = nir_dest_num_components(intr->dest);
+   unsigned num_components = intr->dest.ssa.num_components;
    SpvId constituents[NIR_MAX_VEC_COMPONENTS * 2];
    SpvId result;
 

@@ -3002,7 +3002,7 @@ emit_load_front_face(struct ntv_context *ctx, nir_intrinsic_instr *intr)
 
    SpvId result = spirv_builder_emit_load(&ctx->builder, var_type,
                                           ctx->front_face_var);
-   assert(1 == nir_dest_num_components(intr->dest));
+   assert(1 == intr->dest.ssa.num_components);
    store_def(ctx, &intr->dest.ssa, result, nir_type_bool);
 }
 
@@ -3033,7 +3033,7 @@ emit_load_uint_input(struct ntv_context *ctx, nir_intrinsic_instr *intr, SpvId *
    }
 
    SpvId result = spirv_builder_emit_load(&ctx->builder, var_type, load_var);
-   assert(1 == nir_dest_num_components(intr->dest));
+   assert(1 == intr->dest.ssa.num_components);
    store_def(ctx, &intr->dest.ssa, result, nir_type_uint);
 }
 

@@ -3044,19 +3044,19 @@ emit_load_vec_input(struct ntv_context *ctx, nir_intrinsic_instr *intr, SpvId *v
 
    switch (type) {
    case nir_type_bool:
-      var_type = get_bvec_type(ctx, nir_dest_num_components(intr->dest));
+      var_type = get_bvec_type(ctx, intr->dest.ssa.num_components);
       break;
    case nir_type_int:
       var_type = get_ivec_type(ctx, intr->dest.ssa.bit_size,
-                               nir_dest_num_components(intr->dest));
+                               intr->dest.ssa.num_components);
       break;
    case nir_type_uint:
       var_type = get_uvec_type(ctx, intr->dest.ssa.bit_size,
-                               nir_dest_num_components(intr->dest));
+                               intr->dest.ssa.num_components);
       break;
    case nir_type_float:
       var_type = get_fvec_type(ctx, intr->dest.ssa.bit_size,
-                               nir_dest_num_components(intr->dest));
+                               intr->dest.ssa.num_components);
       break;
    default:
       unreachable("unknown type passed");

@@ -3314,7 +3314,8 @@ emit_image_deref_load(struct ntv_context *ctx, nir_intrinsic_instr *intr)
    bool use_sample = glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_MS ||
                      glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_SUBPASS_MS;
    SpvId sample = use_sample ? get_src(ctx, &intr->src[2], &atype) : 0;
-   SpvId dest_type = spirv_builder_type_vector(&ctx->builder, base_type, nir_dest_num_components(intr->dest));
+   SpvId dest_type = spirv_builder_type_vector(&ctx->builder, base_type,
+                                               intr->dest.ssa.num_components);
    SpvId result = spirv_builder_emit_image_read(&ctx->builder,
                                                 dest_type,
                                                 img, coord, 0, sample, 0, sparse);

@@ -4116,7 +4117,7 @@ emit_tex(struct ntv_context *ctx, nir_tex_instr *tex)
       return;
    }
    SpvId actual_dest_type;
-   unsigned num_components = nir_dest_num_components(tex->dest);
+   unsigned num_components = tex->dest.ssa.num_components;
    switch (nir_alu_type_get_base_type(tex->dest_type)) {
    case nir_type_int:
       actual_dest_type = get_ivec_type(ctx, 32, num_components);
@@ -2322,7 +2322,7 @@ rewrite_atomic_ssbo_instr(nir_builder *b, nir_instr *instr, struct bo_vars *bo)
 
    /* generate new atomic deref ops for every component */
    nir_def *result[4];
-   unsigned num_components = nir_dest_num_components(intr->dest);
+   unsigned num_components = intr->dest.ssa.num_components;
    for (unsigned i = 0; i < num_components; i++) {
       nir_deref_instr *deref_arr = nir_build_deref_array(b, deref_struct, offset);
       nir_intrinsic_instr *new_instr = nir_intrinsic_instr_create(b->shader, op);

@@ -2642,7 +2642,7 @@ rewrite_read_as_0(nir_builder *b, nir_instr *instr, void *data)
    if (deref_var != var)
       return false;
    b->cursor = nir_before_instr(instr);
-   nir_def *zero = nir_imm_zero(b, nir_dest_num_components(intr->dest),
+   nir_def *zero = nir_imm_zero(b, intr->dest.ssa.num_components,
                                 intr->dest.ssa.bit_size);
    if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
       switch (var->data.location) {

@@ -2651,7 +2651,7 @@ rewrite_read_as_0(nir_builder *b, nir_instr *instr, void *data)
       case VARYING_SLOT_BFC0:
       case VARYING_SLOT_BFC1:
          /* default color is 0,0,0,1 */
-         if (nir_dest_num_components(intr->dest) == 4)
+         if (intr->dest.ssa.num_components == 4)
            zero = nir_vector_insert_imm(b, zero, nir_imm_float(b, 1.0), 3);
         break;
      default:

@@ -3314,7 +3314,7 @@ rewrite_tex_dest(nir_builder *b, nir_tex_instr *tex, nir_variable *var, struct z
    unsigned bit_size = glsl_base_type_get_bit_size(ret_type);
    unsigned dest_size = tex->dest.ssa.bit_size;
    b->cursor = nir_after_instr(&tex->instr);
-   unsigned num_components = nir_dest_num_components(tex->dest);
+   unsigned num_components = tex->dest.ssa.num_components;
    bool rewrite_depth = tex->is_shadow && num_components > 1 && tex->op != nir_texop_tg4 && !tex->is_sparse;
    if (bit_size == dest_size && !rewrite_depth)
       return NULL;

@@ -3395,7 +3395,7 @@ lower_zs_swizzle_tex_instr(nir_builder *b, nir_instr *instr, void *data)
    const struct glsl_type *type = glsl_without_array(var->type);
    enum glsl_base_type ret_type = glsl_get_sampler_result_type(type);
    bool is_int = glsl_base_type_is_integer(ret_type);
-   unsigned num_components = nir_dest_num_components(tex->dest);
+   unsigned num_components = tex->dest.ssa.num_components;
    if (tex->is_shadow)
       tex->is_new_style_shadow = true;
    nir_def *dest = rewrite_tex_dest(b, tex, var, NULL);

@@ -4509,7 +4509,7 @@ split_bitfields_instr(nir_builder *b, nir_instr *in, void *data)
    default:
       return false;
    }
-   unsigned num_components = nir_dest_num_components(alu->dest.dest);
+   unsigned num_components = alu->dest.dest.ssa.num_components;
    if (num_components == 1)
       return false;
    b->cursor = nir_before_instr(in);
@@ -447,7 +447,7 @@ lower_tex_to_txl(nir_builder *b, nir_tex_instr *tex)

b->cursor = nir_before_instr(&tex->instr);
nir_def_init(&txl->instr, &txl->dest.ssa,
nir_dest_num_components(tex->dest),
tex->dest.ssa.num_components,
tex->dest.ssa.bit_size);
nir_builder_instr_insert(b, &txl->instr);
nir_def_rewrite_uses(&tex->dest.ssa, &txl->dest.ssa);

@@ -144,7 +144,7 @@ static void trans_nir_intrinsic_load_input_fs(rogue_builder *b,
{
struct rogue_fs_build_data *fs_data = &b->shader->ctx->stage_data.fs;

unsigned load_size = nir_dest_num_components(intr->dest);
unsigned load_size = intr->dest.ssa.num_components;
assert(load_size == 1); /* TODO: We can support larger load sizes. */

rogue_reg *dst = rogue_ssa_reg(b->shader, intr->dest.ssa.index);
@@ -178,7 +178,7 @@ static void trans_nir_intrinsic_load_input_vs(rogue_builder *b,
struct pvr_pipeline_layout *pipeline_layout =
b->shader->ctx->pipeline_layout;

ASSERTED unsigned load_size = nir_dest_num_components(intr->dest);
ASSERTED unsigned load_size = intr->dest.ssa.num_components;
assert(load_size == 1); /* TODO: We can support larger load sizes. */

rogue_reg *dst = rogue_ssa_reg(b->shader, intr->dest.ssa.index);

@@ -3783,14 +3783,14 @@ fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
assert(nir_intrinsic_align(instr) > 0);
if (bit_size == 32 &&
nir_intrinsic_align(instr) >= 4) {
assert(nir_dest_num_components(instr->dest) <= 4);
assert(instr->dest.ssa.num_components <= 4);
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
fs_inst *inst =
bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
inst->size_written = instr->num_components * dispatch_width * 4;
} else {
assert(nir_dest_num_components(instr->dest) == 1);
assert(instr->dest.ssa.num_components == 1);
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size);

fs_reg read_result = bld.vgrf(BRW_REGISTER_TYPE_UD);
@@ -5031,7 +5031,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr

if (instr->dest.ssa.bit_size == 32 &&
nir_intrinsic_align(instr) >= 4) {
assert(nir_dest_num_components(instr->dest) <= 4);
assert(instr->dest.ssa.num_components <= 4);

srcs[A64_LOGICAL_ARG] = brw_imm_ud(instr->num_components);

@@ -5042,7 +5042,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
inst->dst.component_size(inst->exec_size);
} else {
const unsigned bit_size = instr->dest.ssa.bit_size;
assert(nir_dest_num_components(instr->dest) == 1);
assert(instr->dest.ssa.num_components == 1);
fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD);

srcs[A64_LOGICAL_ARG] = brw_imm_ud(bit_size);
@@ -5222,14 +5222,14 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
assert(nir_intrinsic_align(instr) > 0);
if (bit_size == 32 &&
nir_intrinsic_align(instr) >= 4) {
assert(nir_dest_num_components(instr->dest) <= 4);
assert(instr->dest.ssa.num_components <= 4);
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
fs_inst *inst =
bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
inst->size_written = instr->num_components * dispatch_width * 4;
} else {
assert(nir_dest_num_components(instr->dest) == 1);
assert(instr->dest.ssa.num_components == 1);
srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size);

fs_reg read_result = bld.vgrf(BRW_REGISTER_TYPE_UD);
@@ -5431,7 +5431,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
case nir_intrinsic_load_scratch: {
assert(devinfo->ver >= 7);

assert(nir_dest_num_components(instr->dest) == 1);
assert(instr->dest.ssa.num_components == 1);
const unsigned bit_size = instr->dest.ssa.bit_size;
fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];

@@ -5458,7 +5458,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
dest.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);

/* Read the vector */
assert(nir_dest_num_components(instr->dest) == 1);
assert(instr->dest.ssa.num_components == 1);
assert(bit_size <= 32);
assert(nir_intrinsic_align(instr) > 0);
if (bit_size == 32 &&

@@ -55,7 +55,7 @@ brw_nir_lower_load_uniforms_impl(nir_builder *b, nir_instr *instr,
/* Read the first few 32-bit scalars from InlineData. */
if (nir_src_is_const(intrin->src[0]) &&
intrin->dest.ssa.bit_size == 32 &&
nir_dest_num_components(intrin->dest) == 1) {
intrin->dest.ssa.num_components == 1) {
unsigned off = nir_intrinsic_base(intrin) + nir_src_as_uint(intrin->src[0]);
unsigned off_dw = off / 4;
if (off % 4 == 0 && off_dw < BRW_TASK_MESH_PUSH_CONSTANTS_SIZE_DW) {
@@ -1779,7 +1779,7 @@ emit_urb_direct_reads(const fs_builder &bld, nir_intrinsic_instr *instr,
{
assert(instr->dest.ssa.bit_size == 32);

unsigned comps = nir_dest_num_components(instr->dest);
unsigned comps = instr->dest.ssa.num_components;
if (comps == 0)
return;

@@ -1821,7 +1821,7 @@ emit_urb_indirect_reads(const fs_builder &bld, nir_intrinsic_instr *instr,
{
assert(instr->dest.ssa.bit_size == 32);

unsigned comps = nir_dest_num_components(instr->dest);
unsigned comps = instr->dest.ssa.num_components;
if (comps == 0)
return;

@@ -58,7 +58,7 @@ brw_nir_blockify_uniform_loads_instr(nir_builder *b,
/* Without the LSC, we can only do block loads of at least 4dwords (1
* oword).
*/
if (!devinfo->has_lsc && nir_dest_num_components(intrin->dest) < 4)
if (!devinfo->has_lsc && intrin->dest.ssa.num_components < 4)
return false;

intrin->intrinsic =
@@ -91,7 +91,7 @@ brw_nir_blockify_uniform_loads_instr(nir_builder *b,
/* Without the LSC, we can only do block loads of at least 4dwords (1
* oword).
*/
if (!devinfo->has_lsc && nir_dest_num_components(intrin->dest) < 4)
if (!devinfo->has_lsc && intrin->dest.ssa.num_components < 4)
return false;

intrin->intrinsic = nir_intrinsic_load_global_constant_uniform_block_intel;

@@ -1084,7 +1084,7 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
nir_alu_type dst_type = (nir_alu_type) (nir_op_infos[instr->op].output_type |
instr->dest.dest.ssa.bit_size);
dst_reg dst = get_nir_def(instr->dest.dest.ssa, dst_type);
dst.writemask &= nir_component_mask(nir_dest_num_components(instr->dest.dest));
dst.writemask &= nir_component_mask(instr->dest.dest.ssa.num_components);

src_reg op[4];
for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {

@@ -35,14 +35,14 @@ static nir_def *
load_ubo(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var, unsigned offset)
{
return nir_load_ubo(b,
nir_dest_num_components(intr->dest),
intr->dest.ssa.num_components,
intr->dest.ssa.bit_size,
nir_imm_int(b, var->data.binding),
nir_imm_int(b, offset),
.align_mul = 256,
.align_offset = offset,
.range_base = offset,
.range = intr->dest.ssa.bit_size * nir_dest_num_components(intr->dest) / 8);
.range = intr->dest.ssa.bit_size * intr->dest.ssa.num_components / 8);
}

static bool
@@ -167,7 +167,7 @@ lower_load_kernel_input(nir_builder *b, nir_intrinsic_instr *intr,
}

const struct glsl_type *type =
glsl_vector_type(base_type, nir_dest_num_components(intr->dest));
glsl_vector_type(base_type, intr->dest.ssa.num_components);
nir_def *ptr = nir_vec2(b, nir_imm_int(b, var->data.binding),
nir_u2uN(b, intr->src[0].ssa, 32));
nir_deref_instr *deref = nir_build_deref_cast(b, ptr, nir_var_mem_ubo, type,

@@ -71,7 +71,7 @@ static bool
lower_32b_offset_load(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var)
{
unsigned bit_size = intr->dest.ssa.bit_size;
unsigned num_components = nir_dest_num_components(intr->dest);
unsigned num_components = intr->dest.ssa.num_components;
unsigned num_bits = num_components * bit_size;

b->cursor = nir_before_instr(&intr->instr);

@@ -224,7 +224,7 @@ static nir_def *
load_bordercolor(nir_builder *b, nir_tex_instr *tex, const dxil_wrap_sampler_state *active_state,
const dxil_texture_swizzle_state *tex_swizzle)
{
int ndest_comp = nir_dest_num_components(tex->dest);
int ndest_comp = tex->dest.ssa.num_components;

unsigned swizzle[4] = {
tex_swizzle->swizzle_r,

@@ -312,7 +312,7 @@ remove_tess_level_accesses(nir_builder *b, nir_instr *instr, void *_data)
nir_instr_remove(instr);
} else {
b->cursor = nir_after_instr(instr);
assert(nir_dest_num_components(intr->dest) == 1);
assert(intr->dest.ssa.num_components == 1);
nir_def_rewrite_uses(&intr->dest.ssa, nir_undef(b, 1, intr->dest.ssa.bit_size));
}
return true;

@@ -2846,7 +2846,7 @@ emit_alu(struct ntd_context *ctx, nir_alu_instr *alu)
case nir_op_vec16:
return emit_vec(ctx, alu, nir_op_infos[alu->op].num_inputs);
case nir_op_mov: {
assert(nir_dest_num_components(alu->dest.dest) == 1);
assert(alu->dest.dest.ssa.num_components == 1);
store_ssa_def(ctx, &alu->dest.dest.ssa, 0, get_src_ssa(ctx,
alu->src->src.ssa, alu->src->swizzle[0]));
return true;
@@ -3527,7 +3527,7 @@ emit_load_ubo_vec4(struct ntd_context *ctx, nir_intrinsic_instr *intr)

unsigned first_component = nir_intrinsic_has_component(intr) ?
nir_intrinsic_component(intr) : 0;
for (unsigned i = 0; i < nir_dest_num_components(intr->dest); i++)
for (unsigned i = 0; i < intr->dest.ssa.num_components; i++)
store_def(ctx, &intr->dest.ssa, i,
dxil_emit_extractval(&ctx->mod, agg, i + first_component));

@@ -4150,7 +4150,7 @@ emit_image_load(struct ntd_context *ctx, nir_intrinsic_instr *intr)
return false;

assert(intr->dest.ssa.bit_size == 32);
unsigned num_components = nir_dest_num_components(intr->dest);
unsigned num_components = intr->dest.ssa.num_components;
assert(num_components <= 4);
for (unsigned i = 0; i < num_components; ++i) {
const struct dxil_value *component = dxil_emit_extractval(&ctx->mod, load_result, i);
@@ -4316,7 +4316,7 @@ emit_image_size(struct ntd_context *ctx, nir_intrinsic_instr *intr)
if (!dimensions)
return false;

for (unsigned i = 0; i < nir_dest_num_components(intr->dest); ++i) {
for (unsigned i = 0; i < intr->dest.ssa.num_components; ++i) {
const struct dxil_value *retval = dxil_emit_extractval(&ctx->mod, dimensions, i);
store_def(ctx, &intr->dest.ssa, i, retval);
}
@@ -5097,7 +5097,7 @@ emit_phi(struct ntd_context *ctx, nir_phi_instr *instr)
}

struct phi_block *vphi = ralloc(ctx->phis, struct phi_block);
vphi->num_components = nir_dest_num_components(instr->dest);
vphi->num_components = instr->dest.ssa.num_components;

for (unsigned i = 0; i < vphi->num_components; ++i) {
struct dxil_instr *phi = vphi->comp[i] = dxil_emit_phi(&ctx->mod, type);
@@ -5618,7 +5618,7 @@ emit_tex(struct ntd_context *ctx, nir_tex_instr *instr)
if (!sample)
return false;

for (unsigned i = 0; i < nir_dest_num_components(instr->dest); ++i) {
for (unsigned i = 0; i < instr->dest.ssa.num_components; ++i) {
const struct dxil_value *retval = dxil_emit_extractval(&ctx->mod, sample, i);
store_def(ctx, &instr->dest.ssa, i, retval);
}

@@ -249,14 +249,14 @@ lower_shader_system_values(struct nir_builder *builder, nir_instr *instr,

nir_def *load_data = nir_load_ubo(
builder,
nir_dest_num_components(intrin->dest),
intrin->dest.ssa.num_components,
intrin->dest.ssa.bit_size,
nir_channel(builder, load_desc, 0),
nir_imm_int(builder, offset),
.align_mul = 256,
.align_offset = offset,
.range_base = offset,
.range = intrin->dest.ssa.bit_size * nir_dest_num_components(intrin->dest) / 8);
.range = intrin->dest.ssa.bit_size * intrin->dest.ssa.num_components / 8);

nir_def_rewrite_uses(&intrin->dest.ssa, load_data);
nir_instr_remove(instr);
@@ -338,7 +338,7 @@ lower_load_push_constant(struct nir_builder *builder, nir_instr *instr,
nir_def *offset = nir_ssa_for_src(builder, intrin->src[0], 1);
nir_def *load_data = nir_load_ubo(
builder,
nir_dest_num_components(intrin->dest),
intrin->dest.ssa.num_components,
intrin->dest.ssa.bit_size,
nir_channel(builder, load_desc, 0),
nir_iadd_imm(builder, offset, base),
@@ -595,7 +595,7 @@ kill_undefined_varyings(struct nir_builder *b,
* since that would remove the store instruction, and would make it tricky to satisfy
* the DXIL requirements of writing all position components.
*/
nir_def *zero = nir_imm_zero(b, nir_dest_num_components(intr->dest),
nir_def *zero = nir_imm_zero(b, intr->dest.ssa.num_components,
intr->dest.ssa.bit_size);
nir_def_rewrite_uses(&intr->dest.ssa, zero);
nir_instr_remove(instr);

@@ -2433,7 +2433,7 @@ Converter::visit(nir_load_const_instr *insn)
}

#define DEFAULT_CHECKS \
if (nir_dest_num_components(insn->dest.dest) > 1) { \
if (insn->dest.dest.ssa.num_components > 1) { \
ERROR("nir_alu_instr only supported with 1 component!\n"); \
return false; \
}

@@ -88,7 +88,7 @@ bi_lower_divergent_indirects_impl(nir_builder *b, nir_instr *instr, void *data)
nir_def *zero = has_dest ? nir_imm_zero(b, 1, size) : NULL;
nir_def *zeroes[4] = {zero, zero, zero, zero};
nir_def *res =
has_dest ? nir_vec(b, zeroes, nir_dest_num_components(intr->dest)) : NULL;
has_dest ? nir_vec(b, zeroes, intr->dest.ssa.num_components) : NULL;

for (unsigned i = 0; i < (*lanes); ++i) {
nir_push_if(b, nir_ieq_imm(b, lane, i));

@@ -1066,8 +1066,7 @@ bi_emit_load_push_constant(bi_builder *b, nir_intrinsic_instr *instr)
uint32_t base = nir_intrinsic_base(instr) + nir_src_as_uint(*offset);
assert((base & 3) == 0 && "unaligned push constants");

unsigned bits =
instr->dest.ssa.bit_size * nir_dest_num_components(instr->dest);
unsigned bits = instr->dest.ssa.bit_size * instr->dest.ssa.num_components;

unsigned n = DIV_ROUND_UP(bits, 32);
assert(n <= 4);
@@ -2097,7 +2096,7 @@ bi_emit_alu(bi_builder *b, nir_alu_instr *instr)
bi_index dst = bi_def_index(&instr->dest.dest.ssa);
unsigned srcs = nir_op_infos[instr->op].num_inputs;
unsigned sz = instr->dest.dest.ssa.bit_size;
unsigned comps = nir_dest_num_components(instr->dest.dest);
unsigned comps = instr->dest.dest.ssa.num_components;
unsigned src_sz = srcs > 0 ? nir_src_bit_size(instr->src[0].src) : 0;

/* Indicate scalarness */
@@ -3428,7 +3427,7 @@ bi_emit_texc(bi_builder *b, nir_tex_instr *instr)
bi_emit_split_i32(b, w, dst, res_size);
bi_emit_collect_to(
b, bi_def_index(&instr->dest.ssa), w,
DIV_ROUND_UP(nir_dest_num_components(instr->dest) * res_size, 4));
DIV_ROUND_UP(instr->dest.ssa.num_components * res_size, 4));
}

/* Staging registers required by texturing in the order they appear (Valhall) */
@@ -3605,7 +3604,7 @@ bi_emit_tex_valhall(bi_builder *b, nir_tex_instr *instr)
/* Index into the packed component array */
unsigned j = 0;
unsigned comps[4] = {0};
unsigned nr_components = nir_dest_num_components(instr->dest);
unsigned nr_components = instr->dest.ssa.num_components;

for (unsigned i = 0; i < nr_components; ++i) {
if (mask & BITFIELD_BIT(i)) {
@@ -3617,8 +3616,7 @@ bi_emit_tex_valhall(bi_builder *b, nir_tex_instr *instr)
}

bi_make_vec_to(b, bi_def_index(&instr->dest.ssa), unpacked, comps,
nir_dest_num_components(instr->dest),
instr->dest.ssa.bit_size);
instr->dest.ssa.num_components, instr->dest.ssa.bit_size);
}

/* Simple textures ops correspond to NIR tex or txl with LOD = 0 on 2D/cube
@@ -4625,8 +4623,8 @@ bi_lower_load_output(nir_builder *b, nir_instr *instr, UNUSED void *data)
b, .base = rt, .src_type = nir_intrinsic_dest_type(intr));

nir_def *lowered = nir_load_converted_output_pan(
b, nir_dest_num_components(intr->dest), intr->dest.ssa.bit_size,
conversion, .dest_type = nir_intrinsic_dest_type(intr),
b, intr->dest.ssa.num_components, intr->dest.ssa.bit_size, conversion,
.dest_type = nir_intrinsic_dest_type(intr),
.io_semantics = nir_intrinsic_io_semantics(intr));

nir_def_rewrite_uses(&intr->dest.ssa, lowered);

@@ -224,7 +224,7 @@ midgard_nir_lower_global_load_instr(nir_builder *b, nir_instr *instr,
return false;

unsigned compsz = intr->dest.ssa.bit_size;
unsigned totalsz = compsz * nir_dest_num_components(intr->dest);
unsigned totalsz = compsz * intr->dest.ssa.num_components;
/* 8, 16, 32, 64 and 128 bit loads don't need to be lowered */
if (util_bitcount(totalsz) < 2 && totalsz <= 128)
return false;
@@ -264,7 +264,7 @@ midgard_nir_lower_global_load_instr(nir_builder *b, nir_instr *instr,
addr = nir_iadd_imm(b, addr, loadsz / 8);
}

assert(ncomps == nir_dest_num_components(intr->dest));
assert(ncomps == intr->dest.ssa.num_components);
nir_def_rewrite_uses(&intr->dest.ssa, nir_vec(b, comps, ncomps));

return true;
@@ -632,7 +632,7 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr)
return;
}

unsigned nr_components = nir_dest_num_components(instr->dest.dest);
unsigned nr_components = instr->dest.dest.ssa.num_components;
unsigned nr_inputs = nir_op_infos[instr->op].num_inputs;
unsigned op = 0;

@@ -1071,7 +1071,7 @@ emit_global(compiler_context *ctx, nir_instr *instr, bool is_read,
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (is_read) {
unsigned bitsize =
intr->dest.ssa.bit_size * nir_dest_num_components(intr->dest);
intr->dest.ssa.bit_size * intr->dest.ssa.num_components;

switch (bitsize) {
case 8:
@@ -1556,7 +1556,7 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
ins.dest_type = ins.src_types[1] =
nir_type_uint | instr->dest.ssa.bit_size;

ins.mask = BITFIELD_MASK(nir_dest_num_components(instr->dest));
ins.mask = BITFIELD_MASK(instr->dest.ssa.num_components);
emit_mir_instruction(ctx, ins);
break;
}

@@ -92,7 +92,7 @@ walk_varyings(UNUSED nir_builder *b, nir_instr *instr, void *data)
if (b->shader->info.stage != MESA_SHADER_FRAGMENT)
return false;

count = nir_dest_num_components(intr->dest);
count = intr->dest.ssa.num_components;
break;

default:

@@ -49,7 +49,7 @@ static nir_def *
load_sysval_from_ubo(nir_builder *b, nir_intrinsic_instr *intr, unsigned offset)
{
return nir_load_ubo(
b, nir_dest_num_components(intr->dest), intr->dest.ssa.bit_size,
b, intr->dest.ssa.num_components, intr->dest.ssa.bit_size,
nir_imm_int(b, PANVK_SYSVAL_UBO_INDEX), nir_imm_int(b, offset),
.align_mul = intr->dest.ssa.bit_size / 8, .align_offset = 0,
.range_base = offset, .range = intr->dest.ssa.bit_size / 8);
@@ -189,12 +189,12 @@ panvk_lower_load_push_constant(nir_builder *b, nir_instr *instr, void *data)
return false;

b->cursor = nir_before_instr(instr);
nir_def *ubo_load = nir_load_ubo(
b, nir_dest_num_components(intr->dest), intr->dest.ssa.bit_size,
nir_imm_int(b, PANVK_PUSH_CONST_UBO_INDEX), intr->src[0].ssa,
.align_mul = intr->dest.ssa.bit_size / 8, .align_offset = 0,
.range_base = nir_intrinsic_base(intr),
.range = nir_intrinsic_range(intr));
nir_def *ubo_load =
nir_load_ubo(b, intr->dest.ssa.num_components, intr->dest.ssa.bit_size,
nir_imm_int(b, PANVK_PUSH_CONST_UBO_INDEX), intr->src[0].ssa,
.align_mul = intr->dest.ssa.bit_size / 8, .align_offset = 0,
.range_base = nir_intrinsic_base(intr),
.range = nir_intrinsic_range(intr));
nir_def_rewrite_uses(&intr->dest.ssa, ubo_load);
nir_instr_remove(instr);
return true;