nir s/nir_get_ssa_scalar/nir_get_scalar/

Generated with sed:

    sed -i -e 's/nir_get_ssa_scalar/nir_get_scalar/g' src/**/*.h src/**/*.c src/**/*.cpp

Reviewed-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24703>
Author:    Faith Ekstrand
Date:      2023-08-15 10:07:24 -05:00
Committed: Marge Bot
Parent:    65b6ac8aa4
Commit:    b781dd6200

26 changed files with 70 additions and 70 deletions
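Every hunk below is the same mechanical substitution; as a sketch (assuming some
nir_def *def is in scope), a call site changes only in the helper's name:

    /* before */ nir_scalar s = nir_get_ssa_scalar(def, 0);
    /* after  */ nir_scalar s = nir_get_scalar(def, 0);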


@@ -876,8 +876,8 @@ ac_nir_gs_shader_query(nir_builder *b,
    u_foreach_bit (i, b->shader->info.gs.active_stream_mask) {
       assert(vertex_count[i] && primitive_count[i]);
-      nir_scalar vtx_cnt = nir_get_ssa_scalar(vertex_count[i], 0);
-      nir_scalar prm_cnt = nir_get_ssa_scalar(primitive_count[i], 0);
+      nir_scalar vtx_cnt = nir_get_scalar(vertex_count[i], 0);
+      nir_scalar prm_cnt = nir_get_scalar(primitive_count[i], 0);
       if (nir_scalar_is_const(vtx_cnt) && nir_scalar_is_const(prm_cnt)) {
          unsigned gs_vtx_cnt = nir_scalar_as_uint(vtx_cnt);


@@ -384,7 +384,7 @@ move_tex_coords(struct move_tex_coords_state *state, nir_function_impl *impl, ni
       return false;
    for (unsigned i = 0; i < tex->coord_components; i++)
-      components[i] = nir_get_ssa_scalar(build_coordinate(state, components[i], infos[i]), 0);
+      components[i] = nir_get_scalar(build_coordinate(state, components[i], infos[i]), 0);
    nir_def *linear_vgpr = nir_vec_scalars(&state->toplevel_b, components, tex->coord_components);
    lower_tex_coords(&state->toplevel_b, tex, &linear_vgpr, state->options);
@@ -425,7 +425,7 @@ move_fddxy(struct move_tex_coords_state *state, nir_function_impl *impl, nir_alu
    coord_info infos[NIR_MAX_VEC_COMPONENTS];
    bool can_move_all = true;
    for (unsigned i = 0; i < num_components; i++) {
-      components[i] = nir_scalar_chase_alu_src(nir_get_ssa_scalar(&instr->def, i), 0);
+      components[i] = nir_scalar_chase_alu_src(nir_get_scalar(&instr->def, i), 0);
       components[i] = nir_scalar_chase_movs(components[i]);
       can_move_all &= can_move_coord(components[i], &infos[i]);
    }
@@ -434,7 +434,7 @@ move_fddxy(struct move_tex_coords_state *state, nir_function_impl *impl, nir_alu
    for (unsigned i = 0; i < num_components; i++) {
       nir_def *def = build_coordinate(state, components[i], infos[i]);
-      components[i] = nir_get_ssa_scalar(def, 0);
+      components[i] = nir_get_scalar(def, 0);
    }
    nir_def *def = nir_vec_scalars(&state->toplevel_b, components, num_components);


@@ -128,7 +128,7 @@ match_soa(nir_builder *b, struct match *match, unsigned format_shift)
       nir_def *rewrite = nir_iadd_imm(
          b, nir_imul_imm(b, unmultiplied, multiplier_shifted), offset_shifted);
-      match->offset = nir_get_ssa_scalar(rewrite, 0);
+      match->offset = nir_get_scalar(rewrite, 0);
       match->shift = 0;
       return true;
    }
@@ -161,7 +161,7 @@ match_address(nir_builder *b, nir_scalar base, int8_t format_shift)
         return (struct match){
            .base = summands[1 - i],
-           .offset = nir_get_ssa_scalar(nir_imm_int(b, value), 0),
+           .offset = nir_get_scalar(nir_imm_int(b, value), 0),
            .shift = -format_shift,
            .sign_extend = false,
         };
@@ -215,7 +215,7 @@ match_address(nir_builder *b, nir_scalar base, int8_t format_shift)
       /* Only fold in if we wouldn't overflow the lsl field */
       if (new_shift <= 2) {
          match.offset =
-            nir_get_ssa_scalar(nir_imul_imm(b, multiplied_ssa, multiplier), 0);
+            nir_get_scalar(nir_imul_imm(b, multiplied_ssa, multiplier), 0);
          match.shift = new_shift;
       } else if (new_shift > 0) {
          /* For large shifts, we do need a multiply, but we can
@@ -226,7 +226,7 @@ match_address(nir_builder *b, nir_scalar base, int8_t format_shift)
         nir_def *rewrite =
            nir_imul_imm(b, multiplied_ssa, multiplier << new_shift);
-        match.offset = nir_get_ssa_scalar(rewrite, 0);
+        match.offset = nir_get_scalar(rewrite, 0);
         match.shift = 0;
      }
   } else {


@@ -2638,7 +2638,7 @@ nir_scalar_chase_alu_src(nir_scalar s, unsigned alu_src_idx)
 nir_scalar nir_scalar_chase_movs(nir_scalar s);
 
 static inline nir_scalar
-nir_get_ssa_scalar(nir_def *def, unsigned channel)
+nir_get_scalar(nir_def *def, unsigned channel)
 {
    nir_scalar s = { def, channel };
    return s;
@@ -2648,13 +2648,13 @@ nir_get_ssa_scalar(nir_def *def, unsigned channel)
 static inline nir_scalar
 nir_scalar_resolved(nir_def *def, unsigned channel)
 {
-   return nir_scalar_chase_movs(nir_get_ssa_scalar(def, channel));
+   return nir_scalar_chase_movs(nir_get_scalar(def, channel));
 }
 
 static inline uint64_t
 nir_alu_src_as_uint(nir_alu_src src)
 {
-   nir_scalar scalar = nir_get_ssa_scalar(src.src.ssa, src.swizzle[0]);
+   nir_scalar scalar = nir_get_scalar(src.src.ssa, src.swizzle[0]);
    return nir_scalar_as_uint(scalar);
 }
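As the header hunk above shows, the renamed helper only bundles a nir_def with a
channel index into a nir_scalar. A minimal usage sketch (def is a hypothetical
vector nir_def *; the query helpers are the same ones used throughout the hunks):

    nir_scalar s = nir_get_scalar(def, 1);    /* wrap channel 1 of def */
    if (nir_scalar_is_const(s)) {
       uint64_t v = nir_scalar_as_uint(s);    /* read that channel as an integer constant */
       /* ... use v ... */
    }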


@@ -1215,10 +1215,10 @@ nir_pad_vector(nir_builder *b, nir_def *src, unsigned num_components)
       return src;
    nir_scalar components[NIR_MAX_VEC_COMPONENTS];
-   nir_scalar undef = nir_get_ssa_scalar(nir_undef(b, 1, src->bit_size), 0);
+   nir_scalar undef = nir_get_scalar(nir_undef(b, 1, src->bit_size), 0);
    unsigned i = 0;
    for (; i < src->num_components; i++)
-      components[i] = nir_get_ssa_scalar(src, i);
+      components[i] = nir_get_scalar(src, i);
    for (; i < num_components; i++)
       components[i] = undef;
@@ -1239,10 +1239,10 @@ nir_pad_vector_imm_int(nir_builder *b, nir_def *src, uint64_t imm_val,
       return src;
    nir_scalar components[NIR_MAX_VEC_COMPONENTS];
-   nir_scalar imm = nir_get_ssa_scalar(nir_imm_intN_t(b, imm_val, src->bit_size), 0);
+   nir_scalar imm = nir_get_scalar(nir_imm_intN_t(b, imm_val, src->bit_size), 0);
    unsigned i = 0;
    for (; i < src->num_components; i++)
-      components[i] = nir_get_ssa_scalar(src, i);
+      components[i] = nir_get_scalar(src, i);
    for (; i < num_components; i++)
       components[i] = imm;


@@ -49,9 +49,9 @@ lower_cube_size(nir_builder *b, nir_intrinsic_instr *intrin)
    unsigned coord_comps = intrin->def.num_components;
    for (unsigned c = 0; c < coord_comps; c++) {
       if (c == 2) {
-         comps[2] = nir_get_ssa_scalar(nir_idiv(b, nir_channel(b, size, 2), nir_imm_int(b, 6)), 0);
+         comps[2] = nir_get_scalar(nir_idiv(b, nir_channel(b, size, 2), nir_imm_int(b, 6)), 0);
       } else {
-         comps[c] = nir_get_ssa_scalar(size, c);
+         comps[c] = nir_get_scalar(size, c);
       }
    }


@@ -564,10 +564,10 @@ nir_lower_io_to_vector_impl(nir_function_impl *impl, nir_variable_mode modes)
          for (unsigned c = 0; c < intrin->num_components; c++) {
             if (new_frac + c >= old_frac &&
                 (old_wrmask & 1 << (new_frac + c - old_frac))) {
-               comps[c] = nir_get_ssa_scalar(old_value,
+               comps[c] = nir_get_scalar(old_value,
                                             new_frac + c - old_frac);
             } else {
-               comps[c] = nir_get_ssa_scalar(nir_undef(&b, old_value->num_components,
+               comps[c] = nir_get_scalar(nir_undef(&b, old_value->num_components,
                                                        old_value->bit_size),
                                             0);
             }


@@ -178,7 +178,7 @@ get_deref_reg_location(nir_deref_instr *deref,
       /* Avoid emitting iadd with 0, which is otherwise common, since this
        * pass runs late enough that nothing will clean it up.
        */
-      nir_scalar scal = nir_get_ssa_scalar(indirect, 0);
+      nir_scalar scal = nir_get_scalar(indirect, 0);
       if (nir_scalar_is_const(scal))
          indirect = nir_iadd_imm(b, offset, nir_scalar_as_uint(scal));
       else


@@ -803,14 +803,14 @@ fold_16bit_src(nir_builder *b, nir_instr *instr, nir_src *src, nir_alu_type src_
       nir_scalar comp = nir_scalar_resolved(src->ssa, i);
       if (nir_scalar_is_undef(comp))
-         new_comps[i] = nir_get_ssa_scalar(nir_undef(b, 1, 16), 0);
+         new_comps[i] = nir_get_scalar(nir_undef(b, 1, 16), 0);
       else if (nir_scalar_is_const(comp)) {
          nir_def *constant;
          if (src_type == nir_type_float32)
            constant = nir_imm_float16(b, nir_scalar_as_float(comp));
         else
            constant = nir_imm_intN_t(b, nir_scalar_as_uint(comp), 16);
-         new_comps[i] = nir_get_ssa_scalar(constant, 0);
+         new_comps[i] = nir_get_scalar(constant, 0);
      } else {
         /* conversion instruction */
         new_comps[i] = nir_scalar_chase_alu_src(comp, 0);


@@ -604,7 +604,7 @@ lower_compute_system_value_instr(nir_builder *b,
       if (!b->shader->info.workgroup_size_variable && is_zero) {
          nir_scalar defs[3];
          for (unsigned i = 0; i < 3; i++) {
-            defs[i] = is_zero & (1 << i) ? nir_get_ssa_scalar(nir_imm_zero(b, 1, 32), 0) : nir_get_ssa_scalar(&intrin->def, i);
+            defs[i] = is_zero & (1 << i) ? nir_get_scalar(nir_imm_zero(b, 1, 32), 0) : nir_get_scalar(&intrin->def, i);
          }
          return nir_vec_scalars(b, defs, 3);
       }


@@ -1046,9 +1046,9 @@ swizzle_result(nir_builder *b, nir_tex_instr *tex, const uint8_t swizzle[4])
       nir_scalar srcs[4];
       for (unsigned i = 0; i < 4; i++) {
          if (swizzle[i] < 4) {
-            srcs[i] = nir_get_ssa_scalar(&tex->def, swizzle[i]);
+            srcs[i] = nir_get_scalar(&tex->def, swizzle[i]);
          } else {
-            srcs[i] = nir_get_ssa_scalar(get_zero_or_one(b, tex->dest_type, swizzle[i]), 0);
+            srcs[i] = nir_get_scalar(get_zero_or_one(b, tex->dest_type, swizzle[i]), 0);
          }
       }
       swizzled = nir_vec_scalars(b, srcs, 4);
@@ -1224,7 +1224,7 @@ lower_tg4_offsets(nir_builder *b, nir_tex_instr *tex)
      nir_builder_instr_insert(b, &tex_copy->instr);
-     dest[i] = nir_get_ssa_scalar(&tex_copy->def, 3);
+     dest[i] = nir_get_scalar(&tex_copy->def, 3);
      if (tex->is_sparse) {
         nir_def *code = nir_channel(b, &tex_copy->def, 4);
         if (residency)
@@ -1233,7 +1233,7 @@ lower_tg4_offsets(nir_builder *b, nir_tex_instr *tex)
         residency = code;
      }
   }
-  dest[4] = nir_get_ssa_scalar(residency, 0);
+  dest[4] = nir_get_scalar(residency, 0);
   nir_def *res = nir_vec_scalars(b, dest, tex->def.num_components);
   nir_def_rewrite_uses(&tex->def, res);


@@ -658,9 +658,9 @@ rename_variables(struct lower_variables_state *state)
            nir_scalar srcs[NIR_MAX_VEC_COMPONENTS];
            for (unsigned i = 0; i < intrin->num_components; i++) {
               if (wrmask & (1 << i)) {
-                 srcs[i] = nir_get_ssa_scalar(value, i);
+                 srcs[i] = nir_get_scalar(value, i);
               } else {
-                 srcs[i] = nir_get_ssa_scalar(old_def, i);
+                 srcs[i] = nir_get_scalar(old_def, i);
               }
            }
            new_def = nir_vec_scalars(&b, srcs, intrin->num_components);


@@ -34,7 +34,7 @@ static nir_scalar
 nir_alu_arg(const nir_alu_instr *alu, unsigned arg, unsigned comp)
 {
    const nir_alu_src *src = &alu->src[arg];
-   return nir_get_ssa_scalar(src->src.ssa, src->swizzle[comp]);
+   return nir_get_scalar(src->src.ssa, src->swizzle[comp]);
 }
 
 /* Tries to determine the value of expression "val % div", assuming that val


@@ -135,13 +135,13 @@ combine_stores(struct combine_stores_state *state,
          * and store->src[1] is a scalar. Otherwise, we're a regular vector
          * load and we have to pick off a component.
          */
-         comps[i] = nir_get_ssa_scalar(store->src[1].ssa, store->num_components == 1 ? 0 : i);
+         comps[i] = nir_get_scalar(store->src[1].ssa, store->num_components == 1 ? 0 : i);
          assert(store->instr.pass_flags > 0);
          if (--store->instr.pass_flags == 0 && store != combo->latest)
            nir_instr_remove(&store->instr);
      } else {
-        comps[i] = nir_get_ssa_scalar(nir_undef(&state->b, 1, bit_size), 0);
+        comps[i] = nir_get_scalar(nir_undef(&state->b, 1, bit_size), 0);
      }
   }
   assert(combo->latest->instr.pass_flags == 0);


@@ -744,7 +744,7 @@ load_from_ssa_entry_value(struct copy_prop_var_state *state,
    nir_scalar comps[NIR_MAX_VEC_COMPONENTS];
    for (unsigned i = 0; i < num_components; i++) {
       if (value->ssa.def[i]) {
-         comps[i] = nir_get_ssa_scalar(value->ssa.def[i], value->ssa.component[i]);
+         comps[i] = nir_get_scalar(value->ssa.def[i], value->ssa.component[i]);
       } else {
          /* We don't have anything for this component in our
           * list. Just re-use a channel from the load.
@@ -755,7 +755,7 @@ load_from_ssa_entry_value(struct copy_prop_var_state *state,
         if (load_def->parent_instr == &intrin->instr)
            keep_intrin = true;
-        comps[i] = nir_get_ssa_scalar(load_def, i);
+        comps[i] = nir_get_scalar(load_def, i);
      }
   }


@@ -94,7 +94,7 @@ try_extract_const_addition(nir_builder *b, nir_scalar val, opt_offsets_state *st
       nir_def *r =
          nir_iadd(b, nir_channel(b, src[0].def, src[0].comp),
                   nir_channel(b, src[1].def, src[1].comp));
-      return nir_get_ssa_scalar(r, 0);
+      return nir_get_scalar(r, 0);
    }
 
 static bool


@@ -167,7 +167,7 @@ opt_shrink_vector(nir_builder *b, nir_alu_instr *instr)
       if (!((mask >> i) & 0x1))
          continue;
-      nir_scalar scalar = nir_get_ssa_scalar(instr->src[i].src.ssa, instr->src[i].swizzle[0]);
+      nir_scalar scalar = nir_get_scalar(instr->src[i].src.ssa, instr->src[i].swizzle[0]);
       /* Try reuse a component with the same value */
       unsigned j;


@@ -1406,7 +1406,7 @@ search_phi_bcsel(nir_scalar scalar, nir_scalar *buf, unsigned buf_size, struct s
         unsigned total_added = 0;
         nir_foreach_phi_src(src, phi) {
            num_sources_left--;
-           unsigned added = search_phi_bcsel(nir_get_ssa_scalar(src->src.ssa, scalar.comp),
+           unsigned added = search_phi_bcsel(nir_get_scalar(src->src.ssa, scalar.comp),
                                              buf + total_added, buf_size - num_sources_left, visited);
            assert(added <= buf_size);
            buf_size -= added;
@@ -1580,7 +1580,7 @@ get_intrinsic_uub(struct analysis_state *state, struct uub_query q, uint32_t *re
      break;
   case nir_intrinsic_mbcnt_amd: {
      if (!q.head.pushed_queries) {
-        push_uub_query(state, nir_get_ssa_scalar(intrin->src[1].ssa, 0));
+        push_uub_query(state, nir_get_scalar(intrin->src[1].ssa, 0));
         return;
      } else {
         uint32_t src0 = config->max_subgroup_size - 1;
@@ -1624,7 +1624,7 @@ get_intrinsic_uub(struct analysis_state *state, struct uub_query q, uint32_t *re
      nir_op op = nir_intrinsic_reduction_op(intrin);
      if (op == nir_op_umin || op == nir_op_umax || op == nir_op_imin || op == nir_op_imax) {
         if (!q.head.pushed_queries) {
-           push_uub_query(state, nir_get_ssa_scalar(intrin->src[0].ssa, q.scalar.comp));
+           push_uub_query(state, nir_get_scalar(intrin->src[0].ssa, q.scalar.comp));
            return;
         } else {
            *result = src[0];
@@ -1645,7 +1645,7 @@ get_intrinsic_uub(struct analysis_state *state, struct uub_query q, uint32_t *re
   case nir_intrinsic_quad_swizzle_amd:
   case nir_intrinsic_masked_swizzle_amd:
      if (!q.head.pushed_queries) {
-        push_uub_query(state, nir_get_ssa_scalar(intrin->src[0].ssa, q.scalar.comp));
+        push_uub_query(state, nir_get_scalar(intrin->src[0].ssa, q.scalar.comp));
         return;
      } else {
         *result = src[0];
@@ -1653,8 +1653,8 @@ get_intrinsic_uub(struct analysis_state *state, struct uub_query q, uint32_t *re
      break;
   case nir_intrinsic_write_invocation_amd:
      if (!q.head.pushed_queries) {
-        push_uub_query(state, nir_get_ssa_scalar(intrin->src[0].ssa, q.scalar.comp));
-        push_uub_query(state, nir_get_ssa_scalar(intrin->src[1].ssa, q.scalar.comp));
+        push_uub_query(state, nir_get_scalar(intrin->src[0].ssa, q.scalar.comp));
+        push_uub_query(state, nir_get_scalar(intrin->src[1].ssa, q.scalar.comp));
         return;
      } else {
         *result = MAX2(src[0], src[1]);
@@ -1914,7 +1914,7 @@ get_phi_uub(struct analysis_state *state, struct uub_query q, uint32_t *result,
         push_uub_query(state, defs[i]);
   } else {
      nir_foreach_phi_src(src, phi)
-        push_uub_query(state, nir_get_ssa_scalar(src->src.ssa, q.scalar.comp));
+        push_uub_query(state, nir_get_scalar(src->src.ssa, q.scalar.comp));
   }
}


@@ -27,7 +27,7 @@
 static inline bool
 nir_mod_analysis_comp0(nir_def *val, nir_alu_type val_type, unsigned div, unsigned *mod)
 {
-   return nir_mod_analysis(nir_get_ssa_scalar(val, 0), val_type, div, mod);
+   return nir_mod_analysis(nir_get_scalar(val, 0), val_type, div, mod);
 }
 
 class nir_mod_analysis_test : public nir_test {


@@ -304,9 +304,9 @@ TEST_F(nir_opt_shrink_vectors_test, opt_shrink_phis_loop_simple)
    nir_scalar srcs[4] = {{0}};
    for (unsigned i = 0; i < 4; i++) {
-      srcs[i] = nir_get_ssa_scalar(phi_def, i);
+      srcs[i] = nir_get_scalar(phi_def, i);
    }
-   srcs[1] = nir_get_ssa_scalar(fadd, 0);
+   srcs[1] = nir_get_scalar(fadd, 0);
    nir_def *vec = nir_vec_scalars(b, srcs, 4);
    nir_phi_instr_add_src(phi, vec->parent_instr->block,
@@ -409,10 +409,10 @@ TEST_F(nir_opt_shrink_vectors_test, opt_shrink_phis_loop_swizzle)
    fadd_alu_instr->src[0].swizzle[0] = 2;
    nir_scalar srcs[4] = {{0}};
-   srcs[0] = nir_get_ssa_scalar(phi_def, 0);
-   srcs[1] = nir_get_ssa_scalar(fadd, 0);
-   srcs[2] = nir_get_ssa_scalar(phi_def, 1);
-   srcs[3] = nir_get_ssa_scalar(phi_def, 3);
+   srcs[0] = nir_get_scalar(phi_def, 0);
+   srcs[1] = nir_get_scalar(fadd, 0);
+   srcs[2] = nir_get_scalar(phi_def, 1);
+   srcs[3] = nir_get_scalar(phi_def, 3);
    nir_def *vec = nir_vec_scalars(b, srcs, 4);
    nir_phi_instr_add_src(phi, vec->parent_instr->block,
@@ -517,9 +517,9 @@ TEST_F(nir_opt_shrink_vectors_test, opt_shrink_phis_loop_phi_out)
    nir_scalar srcs[4] = {{0}};
    for (unsigned i = 0; i < 4; i++) {
-      srcs[i] = nir_get_ssa_scalar(phi_def, i);
+      srcs[i] = nir_get_scalar(phi_def, i);
    }
-   srcs[1] = nir_get_ssa_scalar(fadd, 0);
+   srcs[1] = nir_get_scalar(fadd, 0);
    nir_def *vec = nir_vec_scalars(b, srcs, 4);
    nir_phi_instr_add_src(phi, vec->parent_instr->block,


@@ -289,7 +289,7 @@ TEST_F(unsigned_upper_bound_test, loop_phi_bcsel)
    nir_validate_shader(b->shader, NULL);
    struct hash_table *range_ht = _mesa_pointer_hash_table_create(NULL);
-   nir_scalar scalar = nir_get_ssa_scalar(&phi->def, 0);
+   nir_scalar scalar = nir_get_scalar(&phi->def, 0);
    EXPECT_EQ(nir_unsigned_upper_bound(b->shader, range_ht, scalar, NULL), 2);
    _mesa_hash_table_destroy(range_ht, NULL);
 }


@@ -4087,7 +4087,7 @@ vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
       unsigned cols = glsl_get_matrix_columns(src->type);
       nir_scalar srcs[NIR_MAX_MATRIX_COLUMNS];
       for (unsigned j = 0; j < cols; j++) {
-         srcs[j] = nir_get_ssa_scalar(src->elems[j]->def, i);
+         srcs[j] = nir_get_scalar(src->elems[j]->def, i);
       }
       dest->elems[i]->def = nir_vec_scalars(&b->nb, srcs, cols);
    }


@@ -550,7 +550,7 @@ ntt_allocate_regs_unoptimized(struct ntt_compile *c, nir_function_impl *impl)
 static const uint32_t
 ntt_extract_const_src_offset(nir_src *src)
 {
-   nir_scalar s = nir_get_ssa_scalar(src->ssa, 0);
+   nir_scalar s = nir_get_scalar(src->ssa, 0);
 
    while (nir_scalar_is_alu(s)) {
       nir_alu_instr *alu = nir_instr_as_alu(s.def->parent_instr);
@@ -3438,10 +3438,10 @@ nir_to_tgsi_lower_64bit_intrinsic(nir_builder *b, nir_intrinsic_instr *instr)
    if (has_dest) {
       /* Merge the two loads' results back into a vector. */
       nir_scalar channels[4] = {
-         nir_get_ssa_scalar(&first->def, 0),
-         nir_get_ssa_scalar(&first->def, 1),
-         nir_get_ssa_scalar(&second->def, 0),
-         nir_get_ssa_scalar(&second->def, second->num_components > 1 ? 1 : 0),
+         nir_get_scalar(&first->def, 0),
+         nir_get_scalar(&first->def, 1),
+         nir_get_scalar(&second->def, 0),
+         nir_get_scalar(&second->def, second->num_components > 1 ? 1 : 0),
      };
      nir_def *new = nir_vec_scalars(b, channels, instr->num_components);
      nir_def_rewrite_uses(&instr->def, new);
@@ -3452,7 +3452,7 @@ nir_to_tgsi_lower_64bit_intrinsic(nir_builder *b, nir_intrinsic_instr *instr)
      nir_def *src0 = instr->src[0].ssa;
      nir_scalar channels[4] = { 0 };
      for (int i = 0; i < instr->num_components; i++)
-        channels[i] = nir_get_ssa_scalar(src0, i);
+        channels[i] = nir_get_scalar(src0, i);
      nir_intrinsic_set_write_mask(first, nir_intrinsic_write_mask(instr) & 3);
      nir_intrinsic_set_write_mask(second, nir_intrinsic_write_mask(instr) >> 2);
@@ -3584,7 +3584,7 @@ nir_to_tgsi_lower_tex_instr_arg(nir_builder *b,
    nir_def *def = instr->src[tex_src].src.ssa;
    for (int i = 0; i < def->num_components; i++) {
-      s->channels[s->i++] = nir_get_ssa_scalar(def, i);
+      s->channels[s->i++] = nir_get_scalar(def, i);
    }
    nir_tex_instr_remove_src(instr, tex_src);


@@ -1330,10 +1330,10 @@ r600_lower_64bit_intrinsic(nir_builder *b, nir_intrinsic_instr *instr)
    if (has_dest) {
       /* Merge the two loads' results back into a vector. */
       nir_scalar channels[4] = {
-         nir_get_ssa_scalar(&first->def, 0),
-         nir_get_ssa_scalar(&first->def, 1),
-         nir_get_ssa_scalar(&second->def, 0),
-         nir_get_ssa_scalar(&second->def, second->num_components > 1 ? 1 : 0),
+         nir_get_scalar(&first->def, 0),
+         nir_get_scalar(&first->def, 1),
+         nir_get_scalar(&second->def, 0),
+         nir_get_scalar(&second->def, second->num_components > 1 ? 1 : 0),
      };
      nir_def *new_ir = nir_vec_scalars(b, channels, instr->num_components);
      nir_def_rewrite_uses(&instr->def, new_ir);
@@ -1344,7 +1344,7 @@ r600_lower_64bit_intrinsic(nir_builder *b, nir_intrinsic_instr *instr)
      nir_def *src0 = instr->src[0].ssa;
      nir_scalar channels[4] = {{0}};
      for (int i = 0; i < instr->num_components; i++)
-        channels[i] = nir_get_ssa_scalar(src0, i);
+        channels[i] = nir_get_scalar(src0, i);
      nir_intrinsic_set_write_mask(first, nir_intrinsic_write_mask(instr) & 3);
      nir_intrinsic_set_write_mask(second, nir_intrinsic_write_mask(instr) >> 2);


@@ -1896,7 +1896,7 @@ fs_visitor::emit_task_mesh_store(const fs_builder &bld, nir_intrinsic_instr *ins
       /* Try to calculate the value of (offset + base) % 4. If we can do
        * this, then we can do indirect writes using only 1 URB write.
        */
-      use_mod = nir_mod_analysis(nir_get_ssa_scalar(offset_nir_src->ssa, 0), nir_type_uint, 4, &mod);
+      use_mod = nir_mod_analysis(nir_get_scalar(offset_nir_src->ssa, 0), nir_type_uint, 4, &mod);
       if (use_mod) {
          mod += nir_intrinsic_base(instr) + component_from_intrinsic(instr);
          mod %= 4;


@@ -2291,7 +2291,7 @@ emit_shift(struct ntd_context *ctx, nir_alu_instr *alu,
                               0);
    } else {
       uint64_t val = nir_scalar_as_uint(
-         nir_scalar_chase_alu_src(nir_get_ssa_scalar(&alu->def, 0), 1));
+         nir_scalar_chase_alu_src(nir_get_scalar(&alu->def, 0), 1));
       op1 = dxil_module_get_int_const(&ctx->mod, val & shift_mask, op0_bit_size);
    }
@@ -2898,7 +2898,7 @@ emit_alu(struct ntd_context *ctx, nir_alu_instr *alu)
    case nir_op_udiv:
       if (nir_src_is_const(alu->src[1].src)) {
          /* It's illegal to emit a literal divide by 0 in DXIL */
-         nir_scalar divisor = nir_scalar_chase_alu_src(nir_get_ssa_scalar(&alu->def, 0), 1);
+         nir_scalar divisor = nir_scalar_chase_alu_src(nir_get_scalar(&alu->def, 0), 1);
          if (nir_scalar_as_int(divisor) == 0) {
             store_alu_dest(ctx, alu, 0,
                            dxil_module_get_int_const(&ctx->mod, 0, alu->def.bit_size));