treewide: Remove more is_ssa asserts
Stuff Coccinelle missed.

   sed -i -e '/assert(.*\.is_ssa)/d' $(git grep -l is_ssa)
   sed -i -e '/ASSERT.*\.is_ssa)/d' $(git grep -l is_ssa)

+ a manual fixup to restore the assert for parallel copy lowering.

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Reviewed-by: Faith Ekstrand <faith.ekstrand@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24432>
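Context, inferred from the hunks below rather than stated in the message: by this point in the series, every nir_src/nir_dest is SSA and register access goes through explicit load_reg/store_reg intrinsics, so `is_ssa` is an invariant and asserting it checks nothing. A minimal sketch of the replacement idiom, using the helpers visible in this diff (it compiles against the in-tree NIR headers, not standalone):

/* Illustrative sketch, not part of the patch.  The deleted guard was:
 *
 *    assert(src.is_ssa);   // now always true, hence dropped by the sed
 *
 * When a pass genuinely needs to know whether a value originates from a
 * register, it asks explicitly, as ntq_get_src() does in a hunk below:
 */
static bool
src_comes_from_reg(nir_src src)
{
   /* nir_load_reg_for_def() returns the load_reg intrinsic feeding this
    * def, or NULL for an ordinary SSA value. */
   nir_intrinsic_instr *load = nir_load_reg_for_def(src.ssa);
   return load != NULL;
}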
commit ab0d878932
parent 042a3eca00
committed by Marge Bot
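The manual fixup called out in the message is visible in the resolve_parallel_copy hunks below: the compound asserts end in `.is_ssa)`, so the blanket sed would have deleted them wholesale, taking the register-ness check with them. The restored form, excerpted from this diff:

/* Before -- matches /assert(.*\.is_ssa)/, so sed deletes the whole line: */
assert(entry->dest_is_reg && entry->dest.dest.is_ssa);

/* After -- the half that still checks something is restored by hand: */
assert(entry->dest_is_reg);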
@@ -1281,7 +1281,6 @@ static bool visit_alu(struct ac_nir_context *ctx, const nir_alu_instr *instr)
    }
 
    if (result) {
-      assert(instr->dest.dest.is_ssa);
       result = ac_to_integer_or_pointer(&ctx->ac, result);
       ctx->ssa_defs[instr->dest.dest.ssa.index] = result;
    }
@@ -2338,7 +2337,6 @@ static LLVMValueRef visit_image_load(struct ac_nir_context *ctx, const nir_intri
       vindex =
          LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]), ctx->ac.i32_0, "");
 
-      assert(instr->dest.is_ssa);
       bool can_speculate = access & ACCESS_CAN_REORDER;
       res = ac_build_buffer_load_format(&ctx->ac, rsrc, vindex, ctx->ac.i32_0, num_channels,
                                         args.access, can_speculate,
@@ -2372,7 +2370,6 @@ static LLVMValueRef visit_image_load(struct ac_nir_context *ctx, const nir_intri
       args.dmask = 15;
       args.attributes = access & ACCESS_CAN_REORDER ? AC_ATTR_INVARIANT_LOAD : 0;
 
-      assert(instr->dest.is_ssa);
       args.d16 = instr->dest.ssa.bit_size == 16;
 
       res = ac_build_image_opcode(&ctx->ac, &args);
@@ -4072,7 +4069,6 @@ static void visit_tex(struct ac_nir_context *ctx, nir_tex_instr *instr)
       args.sampler = LLVMBuildInsertElement(ctx->ac.builder, args.sampler, dword0, ctx->ac.i32_0, "");
    }
 
-   assert(instr->dest.is_ssa);
    args.d16 = instr->dest.ssa.bit_size == 16;
    args.tfe = instr->is_sparse;
 
@@ -4102,7 +4098,6 @@ static void visit_tex(struct ac_nir_context *ctx, nir_tex_instr *instr)
       result = ac_build_concat(&ctx->ac, result, code);
 
    if (result) {
-      assert(instr->dest.is_ssa);
       result = ac_to_integer(&ctx->ac, result);
 
       for (int i = ARRAY_SIZE(wctx); --i >= 0;) {

@@ -2088,7 +2088,6 @@ agx_lower_front_face(struct nir_builder *b, nir_instr *instr, UNUSED void *data)
    if (intr->intrinsic != nir_intrinsic_load_front_face)
       return false;
 
-   assert(intr->dest.is_ssa);
    nir_ssa_def *def = &intr->dest.ssa;
    assert(def->bit_size == 1);
 

@@ -850,7 +850,6 @@ ntq_get_src(struct v3d_compile *c, nir_src src, int i)
 {
    struct hash_entry *entry;
 
-   assert(src.is_ssa);
    nir_intrinsic_instr *load = nir_load_reg_for_def(src.ssa);
    if (load == NULL) {
       assert(i < src.ssa->num_components);
@@ -1228,7 +1227,6 @@ ntq_emit_comparison(struct v3d_compile *c,
 static struct nir_alu_instr *
 ntq_get_alu_parent(nir_src src)
 {
-   assert(src.is_ssa);
    if (src.ssa->parent_instr->type != nir_instr_type_alu)
       return NULL;
    nir_alu_instr *instr = nir_instr_as_alu(src.ssa->parent_instr);
@@ -1240,7 +1238,6 @@ ntq_get_alu_parent(nir_src src)
     * src.
     */
    for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
-      assert(instr->src[i].src.is_ssa);
       if (nir_load_reg_for_def(instr->src[i].src.ssa))
          return NULL;
    }

@@ -244,7 +244,6 @@ v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
    /* Limit the number of channels returned to both how many the NIR
     * instruction writes and how many the instruction could produce.
     */
-   assert(instr->dest.is_ssa);
    nir_intrinsic_instr *store = nir_store_reg_for_def(&instr->dest.ssa);
    if (store == NULL) {
       p0_unpacked.return_words_of_texture_data =

@@ -605,7 +605,6 @@ lower_tex_src(nir_builder *b,
    /* We compute first the offsets */
    nir_deref_instr *deref = nir_instr_as_deref(src->src.ssa->parent_instr);
    while (deref->deref_type != nir_deref_type_var) {
-      assert(deref->parent.is_ssa);
       nir_deref_instr *parent =
          nir_instr_as_deref(deref->parent.ssa->parent_instr);
 
@@ -743,7 +742,6 @@ lower_image_deref(nir_builder *b,
    unsigned base_index = 0;
 
    while (deref->deref_type != nir_deref_type_var) {
-      assert(deref->parent.is_ssa);
       nir_deref_instr *parent =
          nir_instr_as_deref(deref->parent.ssa->parent_instr);
 

@@ -1400,21 +1400,17 @@ nir_instr_ssa_def(nir_instr *instr)
 {
    switch (instr->type) {
    case nir_instr_type_alu:
-      assert(nir_instr_as_alu(instr)->dest.dest.is_ssa);
       return &nir_instr_as_alu(instr)->dest.dest.ssa;
 
    case nir_instr_type_deref:
-      assert(nir_instr_as_deref(instr)->dest.is_ssa);
       return &nir_instr_as_deref(instr)->dest.ssa;
 
    case nir_instr_type_tex:
-      assert(nir_instr_as_tex(instr)->dest.is_ssa);
       return &nir_instr_as_tex(instr)->dest.ssa;
 
    case nir_instr_type_intrinsic: {
       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
       if (nir_intrinsic_infos[intrin->intrinsic].has_dest) {
-         assert(intrin->dest.is_ssa);
          return &intrin->dest.ssa;
       } else {
          return NULL;
@@ -1422,7 +1418,6 @@ nir_instr_ssa_def(nir_instr *instr)
       }
 
    case nir_instr_type_phi:
-      assert(nir_instr_as_phi(instr)->dest.is_ssa);
       return &nir_instr_as_phi(instr)->dest.ssa;
 
    case nir_instr_type_parallel_copy:
@@ -2914,7 +2909,6 @@ nir_variable *nir_get_binding_variable(nir_shader *shader, nir_binding binding)
 bool
 nir_alu_instr_is_copy(nir_alu_instr *instr)
 {
-   assert(instr->src[0].src.is_ssa);
 
    if (instr->op == nir_op_mov) {
       return !instr->src[0].abs &&

@@ -2729,7 +2729,6 @@ nir_ssa_scalar_chase_alu_src(nir_ssa_scalar s, unsigned alu_src_idx)
    assert(s.comp < s.def->num_components);
    assert(alu->dest.write_mask & (1u << s.comp));
 
-   assert(alu->src[alu_src_idx].src.is_ssa);
    out.def = alu->src[alu_src_idx].src.ssa;
 
    if (nir_op_infos[alu->op].input_sizes[alu_src_idx] == 0) {
@@ -2769,7 +2768,6 @@ nir_ssa_scalar_resolved(nir_ssa_def *def, unsigned channel)
 static inline uint64_t
 nir_alu_src_as_uint(nir_alu_src src)
 {
-   assert(src.src.is_ssa && "precondition");
    nir_ssa_scalar scalar = nir_get_ssa_scalar(src.src.ssa, src.swizzle[0]);
    return nir_ssa_scalar_as_uint(scalar);
 }

@@ -1313,7 +1313,6 @@ static inline nir_deref_instr *
 nir_build_deref_array_imm(nir_builder *build, nir_deref_instr *parent,
                           int64_t index)
 {
-   assert(parent->dest.is_ssa);
    nir_ssa_def *idx_ssa = nir_imm_intN_t(build, index,
                                          parent->dest.ssa.bit_size);
 
@@ -1447,7 +1446,6 @@ nir_build_deref_follower(nir_builder *b, nir_deref_instr *parent,
                          nir_deref_instr *leader)
 {
    /* If the derefs would have the same parent, don't make a new one */
-   assert(leader->parent.is_ssa);
    if (leader->parent.ssa == &parent->dest.ssa)
       return leader;
 
@@ -1468,7 +1466,6 @@ nir_build_deref_follower(nir_builder *b, nir_deref_instr *parent,
                 glsl_get_length(leader_parent->type));
 
    if (leader->deref_type == nir_deref_type_array) {
-      assert(leader->arr.index.is_ssa);
       nir_ssa_def *index = nir_i2iN(b, leader->arr.index.ssa,
                                     parent->dest.ssa.bit_size);
       return nir_build_deref_array(b, parent, index);
@@ -1736,7 +1733,6 @@ nir_steal_tex_src(nir_tex_instr *tex, nir_tex_src_type type_)
    if (idx < 0)
       return NULL;
 
-   assert(tex->src[idx].src.is_ssa);
    nir_ssa_def *ssa = tex->src[idx].src.ssa;
    nir_tex_instr_remove_src(tex, idx);
    return ssa;

@@ -109,7 +109,6 @@ nir_deref_instr_remove_if_unused(nir_deref_instr *instr)
 
    for (nir_deref_instr *d = instr; d; d = nir_deref_instr_parent(d)) {
       /* If anyone is using this deref, leave it alone */
-      assert(d->dest.is_ssa);
       if (!nir_ssa_def_is_unused(&d->dest.ssa))
          break;
 
@@ -539,7 +538,6 @@ compare_deref_paths(nir_deref_path *a_path, nir_deref_path *b_path,
       } else {
          assert(a[*i]->deref_type == nir_deref_type_array &&
                 b[*i]->deref_type == nir_deref_type_array);
-         assert(a[*i]->arr.index.is_ssa && b[*i]->arr.index.is_ssa);
 
          if (nir_src_is_const(a[*i]->arr.index) &&
             nir_src_is_const(b[*i]->arr.index)) {
@@ -923,7 +921,6 @@ opt_alu_of_cast(nir_alu_instr *alu)
    bool progress = false;
 
    for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
-      assert(alu->src[i].src.is_ssa);
       nir_instr *src_instr = alu->src[i].src.ssa->parent_instr;
       if (src_instr->type != nir_instr_type_deref)
          continue;
@@ -932,7 +929,6 @@ opt_alu_of_cast(nir_alu_instr *alu)
       if (src_deref->deref_type != nir_deref_type_cast)
         continue;
 
-      assert(src_deref->parent.is_ssa);
       nir_instr_rewrite_src_ssa(&alu->instr, &alu->src[i].src,
                                 src_deref->parent.ssa);
       progress = true;
@@ -1200,8 +1196,6 @@ opt_deref_cast(nir_builder *b, nir_deref_instr *cast)
 
    bool trivial_array_cast = is_trivial_array_deref_cast(cast);
 
-   assert(cast->dest.is_ssa);
-   assert(cast->parent.is_ssa);
 
    nir_foreach_use_including_if_safe(use_src, &cast->dest.ssa) {
       assert(!use_src->is_if && "there cannot be if-uses");
@@ -1256,9 +1250,6 @@ opt_deref_ptr_as_array(nir_builder *b, nir_deref_instr *deref)
        parent->deref_type != nir_deref_type_ptr_as_array)
       return false;
 
-   assert(parent->parent.is_ssa);
-   assert(parent->arr.index.is_ssa);
-   assert(deref->arr.index.is_ssa);
 
    deref->arr.in_bounds &= parent->arr.in_bounds;
 
@@ -1354,7 +1345,6 @@ opt_load_vec_deref(nir_builder *b, nir_intrinsic_instr *load)
       /* Stomp it to reference the parent */
       nir_instr_rewrite_src(&load->instr, &load->src[0],
                             nir_src_for_ssa(&parent->dest.ssa));
-      assert(load->dest.is_ssa);
       load->dest.ssa.bit_size = new_bit_size;
       load->dest.ssa.num_components = new_num_comps;
       load->num_components = new_num_comps;
@@ -1384,7 +1374,6 @@ opt_store_vec_deref(nir_builder *b, nir_intrinsic_instr *store)
     * results in a LOT of vec4->vec3 casts on loads and stores.
     */
    if (is_vector_bitcast_deref(deref, write_mask, true)) {
-      assert(store->src[1].is_ssa);
       nir_ssa_def *data = store->src[1].ssa;
 
       const unsigned old_bit_size = data->bit_size;

@@ -392,7 +392,6 @@ isolate_phi_nodes_block(nir_shader *shader, nir_block *block, void *dead_ctx)
    nir_instr_insert_after(&last_phi->instr, &block_pcopy->instr);
 
    nir_foreach_phi(phi, block) {
-      assert(phi->dest.is_ssa);
       nir_foreach_phi_src(src, phi) {
          if (nir_src_is_undef(src->src))
             continue;
@@ -411,7 +410,6 @@ isolate_phi_nodes_block(nir_shader *shader, nir_block *block, void *dead_ctx)
          entry->dest.dest.ssa.divergent = nir_src_is_divergent(src->src);
          exec_list_push_tail(&pcopy->entries, &entry->node);
 
-         assert(src->src.is_ssa);
          nir_instr_rewrite_src(&pcopy->instr, &entry->src, src->src);
 
          nir_instr_rewrite_src(&phi->instr, &src->src,
@@ -630,7 +628,6 @@ remove_no_op_phi(nir_instr *instr, struct from_ssa_state *state)
       if (nir_src_is_undef(src->src))
          continue;
 
-      assert(src->src.is_ssa);
       entry = _mesa_hash_table_search(state->merge_node_table, src->src.ssa);
       assert(entry != NULL);
       merge_node *src_node = (merge_node *)entry->data;
@@ -719,7 +716,6 @@ resolve_registers_impl(nir_function_impl *impl, struct from_ssa_state *state)
 
       nir_foreach_parallel_copy_entry(entry, pcopy) {
          assert(!entry->dest_is_reg);
-         assert(entry->dest.dest.is_ssa);
          assert(nir_ssa_def_is_unused(&entry->dest.dest.ssa));
 
         /* Parallel copy destinations will always be registers */
@@ -734,7 +730,6 @@ resolve_registers_impl(nir_function_impl *impl, struct from_ssa_state *state)
 
      nir_foreach_parallel_copy_entry(entry, pcopy) {
         assert(!entry->src_is_reg);
-        assert(entry->src.is_ssa);
        nir_ssa_def *reg = reg_for_ssa_def(entry->src.ssa, state);
        if (reg == NULL)
           continue;
@@ -817,8 +812,7 @@ resolve_parallel_copy(nir_parallel_copy_instr *pcopy,
    unsigned num_copies = 0;
    nir_foreach_parallel_copy_entry(entry, pcopy) {
       /* Sources may be SSA but destinations are always registers */
-      assert(entry->src.is_ssa);
-      assert(entry->dest_is_reg && entry->dest.dest.is_ssa);
+      assert(entry->dest_is_reg);
       if (entry->src_is_reg && entry->src.ssa == entry->dest.reg.ssa)
          continue;
 
@@ -858,7 +852,6 @@ resolve_parallel_copy(nir_parallel_copy_instr *pcopy,
       if (entry->src_is_reg && entry->src.ssa == entry->dest.reg.ssa)
          continue;
 
-      assert(entry->src.is_ssa);
       struct copy_value src_value = {
          .is_reg = entry->src_is_reg,
          .ssa = entry->src.ssa,
@@ -874,7 +867,7 @@ resolve_parallel_copy(nir_parallel_copy_instr *pcopy,
         values[src_idx] = src_value;
      }
 
-     assert(entry->dest_is_reg && entry->dest.dest.is_ssa);
+     assert(entry->dest_is_reg);
      struct copy_value dest_value = {
         .is_reg = true,
         .ssa = entry->dest.reg.ssa,
@@ -1166,14 +1159,12 @@ nir_lower_phis_to_regs_block(nir_block *block)
 
    bool progress = false;
    nir_foreach_phi_safe(phi, block) {
-      assert(phi->dest.is_ssa);
       nir_ssa_def *reg = decl_reg_for_ssa_def(&b, &phi->dest.ssa);
 
       b.cursor = nir_after_instr(&phi->instr);
       nir_ssa_def_rewrite_uses(&phi->dest.ssa, nir_load_reg(&b, reg));
 
       nir_foreach_phi_src(src, phi) {
-         assert(src->src.is_ssa);
 
          _mesa_set_add(visited_blocks, src->src.ssa->parent_instr->block);
          place_phi_read(&b, reg, src->src.ssa, src->pred, visited_blocks);
@@ -1228,7 +1219,6 @@ instr_is_load_new_reg(nir_instr *instr, unsigned old_num_ssa)
    if (load->intrinsic != nir_intrinsic_load_reg)
       return false;
 
-   assert(load->src[0].is_ssa);
    nir_ssa_def *reg = load->src[0].ssa;
 
    return reg->index >= old_num_ssa;

@@ -82,7 +82,6 @@ void nir_inline_function_impl(struct nir_builder *b,
 
          unsigned param_idx = nir_intrinsic_param_idx(load);
          assert(param_idx < impl->function->num_params);
-         assert(load->dest.is_ssa);
          nir_ssa_def_rewrite_uses(&load->dest.ssa,
                                   params[param_idx]);
 

@@ -278,7 +278,6 @@ fuse_mods_with_registers(nir_builder *b, nir_instr *instr, void *fuse_fabs_)
     * this for loads in the same block as the use because uses of loads
     * which cross block boundaries aren't trivial anyway.
     */
-   assert(alu->src[0].src.is_ssa);
    nir_intrinsic_instr *load = nir_load_reg_for_def(alu->src[0].src.ssa);
    if (load != NULL) {
       /* Duplicate the load before changing it in case there are other

@@ -146,7 +146,6 @@ lower_intrinsic_instr(nir_builder *b, nir_intrinsic_instr *intrin,
    case nir_intrinsic_reduce:
    case nir_intrinsic_inclusive_scan:
    case nir_intrinsic_exclusive_scan: {
-      assert(intrin->src[0].is_ssa && intrin->dest.is_ssa);
       const unsigned old_bit_size = intrin->dest.ssa.bit_size;
       assert(old_bit_size < bit_size);
 

@@ -54,7 +54,6 @@ static void
 lower_convert_alu_types_instr(nir_builder *b, nir_intrinsic_instr *conv)
 {
    assert(conv->intrinsic == nir_intrinsic_convert_alu_types);
-   assert(conv->src[0].is_ssa && conv->dest.is_ssa);
 
    b->cursor = nir_instr_remove(&conv->instr);
    nir_ssa_def *val =

@@ -234,7 +234,6 @@ lower_locals_to_regs_block(nir_block *block,
                            loc.reg, .base = loc.base_offset);
       }
 
-      assert(intrin->dest.is_ssa);
       nir_ssa_def_rewrite_uses(&intrin->dest.ssa, value);
       nir_instr_remove(&intrin->instr);
       state->progress = true;
@@ -251,7 +250,6 @@ lower_locals_to_regs_block(nir_block *block,
       struct reg_location loc = get_deref_reg_location(deref, state);
       nir_intrinsic_instr *decl = nir_reg_get_decl(loc.reg);
 
-      assert(intrin->src[1].is_ssa);
       nir_ssa_def *val = intrin->src[1].ssa;
       unsigned num_array_elems = nir_intrinsic_num_array_elems(decl);
       unsigned write_mask = nir_intrinsic_write_mask(intrin);
 

@@ -229,7 +229,6 @@ lower_mem_store(nir_builder *b, nir_intrinsic_instr *intrin,
                 nir_lower_mem_access_bit_sizes_cb mem_access_size_align_cb,
                 const void *cb_data, bool allow_unaligned_stores_as_atomics)
 {
-   assert(intrin->src[0].is_ssa);
    nir_ssa_def *value = intrin->src[0].ssa;
 
    assert(intrin->num_components == value->num_components);
 

@@ -58,7 +58,6 @@ lower_load_store(nir_builder *b,
    } else {
       assert(intrin->intrinsic == nir_intrinsic_store_deref);
 
-      assert(intrin->src[1].is_ssa);
       nir_ssa_def *value = intrin->src[1].ssa;
       if (value->bit_size == 1)
          value = nir_b2b32(b, value);
 

@@ -74,7 +74,6 @@ lower_system_value_instr(nir_builder *b, nir_instr *instr, void *_state)
    if (!nir_intrinsic_infos[intrin->intrinsic].has_dest)
       return NULL;
 
-   assert(intrin->dest.is_ssa);
    const unsigned bit_size = intrin->dest.ssa.bit_size;
 
    switch (intrin->intrinsic) {
@@ -135,11 +134,9 @@ lower_system_value_instr(nir_builder *b, nir_instr *instr, void *_state)
    case nir_intrinsic_interp_deref_at_centroid:
       return nir_load_barycentric_coord_centroid(b, 32, .interp_mode = interp_mode);
    case nir_intrinsic_interp_deref_at_sample:
-      assert(intrin->src[1].is_ssa);
       return nir_load_barycentric_coord_at_sample(b, 32, intrin->src[1].ssa,
                                                   .interp_mode = interp_mode);
    case nir_intrinsic_interp_deref_at_offset:
-      assert(intrin->src[1].is_ssa);
       return nir_load_barycentric_coord_at_offset(b, 32, intrin->src[1].ssa,
                                                   .interp_mode = interp_mode);
    default:
@@ -159,7 +156,6 @@ lower_system_value_instr(nir_builder *b, nir_instr *instr, void *_state)
     * couple of ray-tracing intrinsics which are matrices.
     */
    assert(deref->deref_type == nir_deref_type_array);
-   assert(deref->arr.index.is_ssa);
    column = deref->arr.index.ssa;
    nir_deref_instr *arr_deref = deref;
    deref = nir_deref_instr_parent(deref);
 

@@ -96,7 +96,6 @@ nir_lower_deref_copy_instr(nir_builder *b, nir_intrinsic_instr *copy)
    /* Unfortunately, there's just no good way to handle wildcards except to
     * flip the chain around and walk the list from variable to final pointer.
     */
-   assert(copy->src[0].is_ssa && copy->src[1].is_ssa);
    nir_deref_instr *dst = nir_instr_as_deref(copy->src[0].ssa->parent_instr);
    nir_deref_instr *src = nir_instr_as_deref(copy->src[1].ssa->parent_instr);
 

@@ -337,7 +337,6 @@ try_match_deref(nir_deref_path *base_path, int *path_array_idx,
          continue;
 
       case nir_deref_type_array: {
-         assert(b->arr.index.is_ssa && d->arr.index.is_ssa);
          const bool const_b_idx = nir_src_is_const(b->arr.index);
          const bool const_d_idx = nir_src_is_const(d->arr.index);
          const unsigned b_idx = const_b_idx ? nir_src_as_uint(b->arr.index) : 0;

@@ -161,7 +161,6 @@ nir_opt_idiv_const_instr(nir_builder *b, nir_instr *instr, void *user_data)
        alu->op != nir_op_irem)
       return false;
 
-   assert(alu->src[0].src.is_ssa && alu->src[1].src.is_ssa);
 
    if (alu->dest.dest.ssa.bit_size < *min_bit_size)
       return false;
 

@@ -573,7 +573,6 @@ nir_schedule_regs_freed_store_reg(nir_intrinsic_instr *store,
                                   nir_schedule_regs_freed_state *state)
 {
    assert(nir_is_store_reg(store));
-   assert(store->src[0].is_ssa && store->src[1].is_ssa);
 
    nir_schedule_regs_freed_src_cb(&store->src[0], state);
    if (store->intrinsic == nir_intrinsic_store_reg_indirect)
@@ -962,7 +961,6 @@ nir_schedule_mark_store_reg_scheduled(nir_intrinsic_instr *store,
                                       nir_schedule_scoreboard *scoreboard)
 {
    assert(nir_is_store_reg(store));
-   assert(store->src[0].is_ssa && store->src[1].is_ssa);
    nir_ssa_def *reg = store->src[1].ssa;
 
    nir_schedule_mark_src_scheduled(&store->src[0], scoreboard);
 

@@ -339,9 +339,6 @@ validate_deref_instr(nir_deref_instr *instr, validate_state *state)
          validate_assert(state, instr->cast.align_offset == 0);
       }
    } else {
-      /* We require the parent to be SSA. This may be lifted in the future */
-      validate_assert(state, instr->parent.is_ssa);
-
       /* The parent pointer value must have the same number of components
        * as the destination.
        */
@@ -477,9 +474,6 @@ validate_register_handle(nir_src handle_src,
                          unsigned bit_size,
                          validate_state *state)
 {
-   if (!validate_assert(state, handle_src.is_ssa))
-      return;
-
    nir_ssa_def *handle = handle_src.ssa;
    nir_instr *parent = handle->parent_instr;
 
@@ -1081,12 +1075,10 @@ validate_phi_src(nir_phi_instr *instr, nir_block *pred, validate_state *state)
 {
    state->instr = &instr->instr;
 
-   validate_assert(state, instr->dest.is_ssa);
 
    exec_list_validate(&instr->srcs);
    nir_foreach_phi_src(src, instr) {
       if (src->pred == pred) {
-         validate_assert(state, src->src.is_ssa);
          validate_src(&src->src, state, instr->dest.ssa.bit_size,
                       instr->dest.ssa.num_components);
          state->instr = NULL;

@@ -251,10 +251,8 @@ TEST_F(nir_redundant_load_vars_test, duplicated_load_volatile)
    ASSERT_EQ(count_intrinsics(nir_intrinsic_load_deref), 2);
 
    nir_intrinsic_instr *first_store = get_intrinsic(nir_intrinsic_store_deref, 0);
-   ASSERT_TRUE(first_store->src[1].is_ssa);
 
    nir_intrinsic_instr *third_store = get_intrinsic(nir_intrinsic_store_deref, 2);
-   ASSERT_TRUE(third_store->src[1].is_ssa);
 
    EXPECT_EQ(first_store->src[1].ssa, third_store->src[1].ssa);
 }
@@ -374,10 +372,8 @@ TEST_F(nir_copy_prop_vars_test, simple_copies)
    ASSERT_EQ(count_intrinsics(nir_intrinsic_copy_deref), 2);
 
    nir_intrinsic_instr *first_copy = get_intrinsic(nir_intrinsic_copy_deref, 0);
-   ASSERT_TRUE(first_copy->src[1].is_ssa);
 
    nir_intrinsic_instr *second_copy = get_intrinsic(nir_intrinsic_copy_deref, 1);
-   ASSERT_TRUE(second_copy->src[1].is_ssa);
 
    EXPECT_EQ(first_copy->src[1].ssa, second_copy->src[1].ssa);
 }
@@ -420,7 +416,6 @@ TEST_F(nir_copy_prop_vars_test, simple_store_load)
 
    for (int i = 0; i < 2; i++) {
       nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, i);
-      ASSERT_TRUE(store->src[1].is_ssa);
       EXPECT_EQ(store->src[1].ssa, stored_value);
    }
 }
@@ -451,7 +446,6 @@ TEST_F(nir_copy_prop_vars_test, store_store_load)
    /* Store to v[1] should use second_value directly. */
    nir_intrinsic_instr *store_to_v1 = get_intrinsic(nir_intrinsic_store_deref, 2);
    ASSERT_EQ(nir_intrinsic_get_var(store_to_v1, 0), v[1]);
-   ASSERT_TRUE(store_to_v1->src[1].is_ssa);
    EXPECT_EQ(store_to_v1->src[1].ssa, second_value);
 }
 
@@ -563,7 +557,6 @@ TEST_F(nir_copy_prop_vars_test, store_volatile)
     */
    nir_intrinsic_instr *store_to_v1 = get_intrinsic(nir_intrinsic_store_deref, 3);
    ASSERT_EQ(nir_intrinsic_get_var(store_to_v1, 0), v[1]);
-   ASSERT_TRUE(store_to_v1->src[1].is_ssa);
    EXPECT_EQ(store_to_v1->src[1].ssa, third_value);
 }
 
@@ -1056,7 +1049,6 @@ TEST_F(nir_copy_prop_vars_test, simple_store_load_in_two_blocks)
 
    for (int i = 0; i < 2; i++) {
       nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, i);
-      ASSERT_TRUE(store->src[1].is_ssa);
       EXPECT_EQ(store->src[1].ssa, stored_value);
    }
 }
@@ -1091,7 +1083,6 @@ TEST_F(nir_copy_prop_vars_test, load_direct_array_deref_on_vector_reuses_previou
    ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 3);
 
    nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 2);
-   ASSERT_TRUE(store->src[1].is_ssa);
 
    /* NOTE: The ALU instruction is how we get the vec.y. */
    ASSERT_TRUE(nir_src_as_alu_instr(store->src[1]));
@@ -1151,7 +1142,6 @@ TEST_F(nir_copy_prop_vars_test, load_direct_array_deref_on_vector_gets_reused)
    ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);
 
    nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 1);
-   ASSERT_TRUE(store->src[1].is_ssa);
    ASSERT_TRUE(nir_src_as_alu_instr(store->src[1]));
 }
 
@@ -1191,12 +1181,10 @@ TEST_F(nir_copy_prop_vars_test, store_load_direct_array_deref_on_vector)
    /* Third store will just use the value from first store. */
    nir_intrinsic_instr *first_store = get_intrinsic(nir_intrinsic_store_deref, 0);
    nir_intrinsic_instr *third_store = get_intrinsic(nir_intrinsic_store_deref, 2);
-   ASSERT_TRUE(third_store->src[1].is_ssa);
    EXPECT_EQ(third_store->src[1].ssa, first_store->src[1].ssa);
 
    /* Fourth store will compose first and second store values. */
    nir_intrinsic_instr *fourth_store = get_intrinsic(nir_intrinsic_store_deref, 3);
-   ASSERT_TRUE(fourth_store->src[1].is_ssa);
    EXPECT_TRUE(nir_src_as_alu_instr(fourth_store->src[1]));
 }
 
@@ -1232,8 +1220,6 @@ TEST_F(nir_copy_prop_vars_test, store_load_indirect_array_deref_on_vector)
    /* Store to vec[idx] propagated to out. */
    nir_intrinsic_instr *first = get_intrinsic(nir_intrinsic_store_deref, 0);
    nir_intrinsic_instr *second = get_intrinsic(nir_intrinsic_store_deref, 1);
-   ASSERT_TRUE(first->src[1].is_ssa);
-   ASSERT_TRUE(second->src[1].is_ssa);
    EXPECT_EQ(first->src[1].ssa, second->src[1].ssa);
 }
 
@@ -1270,8 +1256,6 @@ TEST_F(nir_copy_prop_vars_test, store_load_direct_and_indirect_array_deref_on_ve
    /* Store to vec[idx] propagated to out. */
    nir_intrinsic_instr *second = get_intrinsic(nir_intrinsic_store_deref, 1);
    nir_intrinsic_instr *third = get_intrinsic(nir_intrinsic_store_deref, 2);
-   ASSERT_TRUE(second->src[1].is_ssa);
-   ASSERT_TRUE(third->src[1].is_ssa);
    EXPECT_EQ(second->src[1].ssa, third->src[1].ssa);
 }
 
@@ -1309,8 +1293,6 @@ TEST_F(nir_copy_prop_vars_test, store_load_indirect_array_deref)
    /* Store to arr[idx] propagated to out. */
    nir_intrinsic_instr *first = get_intrinsic(nir_intrinsic_store_deref, 0);
    nir_intrinsic_instr *second = get_intrinsic(nir_intrinsic_store_deref, 1);
-   ASSERT_TRUE(first->src[1].is_ssa);
-   ASSERT_TRUE(second->src[1].is_ssa);
    EXPECT_EQ(first->src[1].ssa, second->src[1].ssa);
 }
 
@@ -1356,8 +1338,6 @@ TEST_F(nir_copy_prop_vars_test, restrict_ssbo_bindings)
    /* Store to b0.x propagated to out. */
    nir_intrinsic_instr *first = get_intrinsic(nir_intrinsic_store_deref, 0);
    nir_intrinsic_instr *third = get_intrinsic(nir_intrinsic_store_deref, 2);
-   ASSERT_TRUE(first->src[1].is_ssa);
-   ASSERT_TRUE(third->src[1].is_ssa);
    EXPECT_EQ(first->src[1].ssa, third->src[1].ssa);
 }
 
@@ -1489,8 +1469,6 @@ TEST_F(nir_copy_prop_vars_test, restrict_ssbo_array_binding)
    /* Store to b0.x propagated to out. */
    nir_intrinsic_instr *first = get_intrinsic(nir_intrinsic_store_deref, 0);
    nir_intrinsic_instr *third = get_intrinsic(nir_intrinsic_store_deref, 2);
-   ASSERT_TRUE(first->src[1].is_ssa);
-   ASSERT_TRUE(third->src[1].is_ssa);
    EXPECT_EQ(first->src[1].ssa, third->src[1].ssa);
 }
 
@@ -1655,7 +1633,6 @@ TEST_F(nir_dead_write_vars_test, dead_write_in_block)
    EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));
 
    nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
-   ASSERT_TRUE(store->src[1].is_ssa);
    EXPECT_EQ(store->src[1].ssa, load_v2);
 }
 
@@ -1673,7 +1650,6 @@ TEST_F(nir_dead_write_vars_test, dead_write_components_in_block)
    EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));
 
    nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
-   ASSERT_TRUE(store->src[1].is_ssa);
    EXPECT_EQ(store->src[1].ssa, load_v2);
 }
 
@@ -1701,7 +1677,6 @@ TEST_F(nir_dead_write_vars_test, DISABLED_dead_write_in_two_blocks)
    EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));
 
    nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
-   ASSERT_TRUE(store->src[1].is_ssa);
    EXPECT_EQ(store->src[1].ssa, load_v2);
 }
 
@@ -1723,7 +1698,6 @@ TEST_F(nir_dead_write_vars_test, DISABLED_dead_write_components_in_two_blocks)
    EXPECT_EQ(1, count_intrinsics(nir_intrinsic_store_deref));
 
    nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 0);
-   ASSERT_TRUE(store->src[1].is_ssa);
    EXPECT_EQ(store->src[1].ssa, load_v2);
 }
 
@@ -1749,11 +1723,9 @@ TEST_F(nir_dead_write_vars_test, DISABLED_dead_writes_in_if_statement)
    EXPECT_EQ(2, count_intrinsics(nir_intrinsic_store_deref));
 
    nir_intrinsic_instr *first_store = get_intrinsic(nir_intrinsic_store_deref, 0);
-   ASSERT_TRUE(first_store->src[1].is_ssa);
    EXPECT_EQ(first_store->src[1].ssa, load_v2);
 
    nir_intrinsic_instr *second_store = get_intrinsic(nir_intrinsic_store_deref, 1);
-   ASSERT_TRUE(second_store->src[1].is_ssa);
    EXPECT_EQ(second_store->src[1].ssa, load_v3);
 }
 

@@ -3213,7 +3213,6 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
    if (opc == OPC_META_TEX_PREFETCH) {
       int idx = nir_tex_instr_src_index(tex, nir_tex_src_coord);
 
-      compile_assert(ctx, tex->src[idx].src.is_ssa);
 
      sam = ir3_SAM(ctx->in_block, opc, type, MASK(ncomp), 0, NULL,
                    get_barycentric(ctx, IJ_PERSP_PIXEL), 0);
 

@@ -496,7 +496,6 @@ lower_instr(nir_intrinsic_instr *instr, nir_builder *b,
    nir_instr_remove(&instr->instr);
    for (nir_deref_instr *d = deref; d; d = nir_deref_instr_parent(d)) {
       /* If anyone is using this deref, leave it alone */
-      assert(d->dest.is_ssa);
       if (!list_is_empty(&d->dest.ssa.uses))
          break;
 

@@ -243,7 +243,6 @@ private:
 
    auto buf_id = nir_imm_int(b, R600_BUFFER_INFO_CONST_BUFFER);
 
-   assert(intr->src[0].is_ssa);
    auto clip_vtx = intr->src[0].ssa;
 
    for (int i = 0; i < 8; ++i) {
@@ -312,7 +311,6 @@ private:
 {
    auto intr = nir_instr_as_intrinsic(instr);
    assert(intr->intrinsic == nir_intrinsic_load_ubo_vec4);
-   assert(intr->src[0].is_ssa);
 
    auto parent = intr->src[0].ssa->parent_instr;
 

@@ -394,7 +394,6 @@ NirLowerIOToVector::vec_instr_stack_pop(nir_builder *b,
    assert(glsl_get_vector_elements(glsl_without_array(var2->type)) < 4);
 
    if (srcs[var2->data.location_frac] == &instr_undef->def) {
-      assert(intr2->src[1].is_ssa);
       assert(intr2->src[1].ssa);
       srcs[var2->data.location_frac] = intr2->src[1].ssa;
    }
 

@@ -7388,7 +7388,6 @@ brw_compute_barycentric_interp_modes(const struct intel_device_info *devinfo,
    }
 
    /* Ignore WPOS; it doesn't require interpolation. */
-   assert(intrin->dest.is_ssa);
    if (!is_used_in_not_interp_frag_coord(&intrin->dest.ssa))
       continue;
 

@@ -4368,7 +4368,6 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
    nir_ssa_bind_infos[instr->dest.ssa.index].binding =
       nir_intrinsic_binding(instr);
 
-   assert(instr->src[1].is_ssa);
    if (nir_intrinsic_resource_access_intel(instr) &
        nir_resource_intel_non_uniform) {
       nir_resource_values[instr->dest.ssa.index] = fs_reg();

@@ -149,12 +147,10 @@ brw_nir_lower_alpha_to_coverage(nir_shader *shader,
     * assuming an alpha of 1.0 and letting the sample mask pass through
     * unaltered seems like the kindest thing to do to apps.
     */
-   assert(color0_write->src[0].is_ssa);
    nir_ssa_def *color0 = color0_write->src[0].ssa;
    if (color0->num_components < 4)
       goto skip;
 
-   assert(sample_mask_write->src[0].is_ssa);
    nir_ssa_def *sample_mask = sample_mask_write->src[0].ssa;
 
    if (sample_mask_write_first) {
 

@@ -169,7 +169,6 @@ brw_nir_opt_peephole_ffma_instr(nir_builder *b,
    if (add->exact)
       return false;
 
-   assert(add->src[0].src.is_ssa && add->src[1].src.is_ssa);
 
    /* This, is the case a + a. We would rather handle this with an
    * algebraic reduction than fuse it. Also, we want to only fuse
 

@@ -1356,7 +1356,6 @@ lower_res_reindex_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
 {
    b->cursor = nir_before_instr(&intrin->instr);
 
-   assert(intrin->src[0].is_ssa && intrin->src[1].is_ssa);
    nir_ssa_def *index =
       build_res_reindex(b, intrin->src[0].ssa,
                         intrin->src[1].ssa);
 

@@ -779,7 +779,6 @@ lower_res_reindex_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
    nir_address_format addr_format =
       addr_format_for_desc_type(nir_intrinsic_desc_type(intrin), state);
 
-   assert(intrin->src[0].is_ssa && intrin->src[1].is_ssa);
    nir_ssa_def *index =
       build_res_reindex(b, intrin->src[0].ssa,
                         intrin->src[1].ssa,
 

@@ -195,7 +195,6 @@ lower_shader_system_values(struct nir_builder *builder, nir_instr *instr,
    if (!nir_intrinsic_infos[intrin->intrinsic].has_dest)
       return false;
 
-   assert(intrin->dest.is_ssa);
 
    const struct dxil_spirv_runtime_conf *conf =
       (const struct dxil_spirv_runtime_conf *)cb_data;
 

@@ -262,7 +262,6 @@ lower_res_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
       break;
 
    case nir_intrinsic_vulkan_resource_reindex:
-      assert(intrin->src[0].is_ssa && intrin->src[1].is_ssa);
       res = build_res_reindex(b, intrin->src[0].ssa, intrin->src[1].ssa,
                               addr_format);
       break;
 