nir: use nir_i{ne,eq}_imm helpers

We already have these, so let's use them more.
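
For reference, these helpers just fold the immediate construction into the
comparison and take the immediate's bit size from the non-constant operand.
A rough sketch of their shape (as in nir_builder.h; the exact definitions in
the tree may differ slightly):

    static inline nir_ssa_def *
    nir_ieq_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
    {
       /* Build the constant at x's bit size, then compare. */
       return nir_ieq(build, x, nir_imm_intN_t(build, y, x->bit_size));
    }

    static inline nir_ssa_def *
    nir_ine_imm(nir_builder *build, nir_ssa_def *x, uint64_t y)
    {
       return nir_ine(build, x, nir_imm_intN_t(build, y, x->bit_size));
    }

Deriving the bit size from the first operand is what lets the call sites
below drop explicit nir_imm_int(b, 0), nir_imm_int64(b, 0) and
nir_imm_zero(b, 1, 32) arguments and pass a plain 0 instead.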

Reviewed-by: Rhys Perry <pendingchaos02@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/23393>
Author:       Erik Faye-Lund
Date:         2023-06-02 12:25:58 +02:00
Committed-by: Marge Bot
Parent:       57260a836a
Commit:       28b1c5bca1
18 changed files with 36 additions and 36 deletions


@@ -1585,7 +1585,7 @@ add_deferred_attribute_culling(nir_builder *b, nir_cf_list *original_extracted_c
    if_es_thread->control = nir_selection_control_divergent_always_taken;
    {
       nir_ssa_def *accepted = nir_load_shared(b, 1, 8u, es_vertex_lds_addr, .base = lds_es_vertex_accepted, .align_mul = 4u);
-      nir_ssa_def *accepted_bool = nir_ine(b, nir_u2u32(b, accepted), nir_imm_int(b, 0));
+      nir_ssa_def *accepted_bool = nir_ine_imm(b, nir_u2u32(b, accepted), 0);
       nir_store_var(b, s->es_accepted_var, accepted_bool, 0x1u);
    }
    nir_pop_if(b, if_es_thread);
@@ -1605,7 +1605,7 @@ add_deferred_attribute_culling(nir_builder *b, nir_cf_list *original_extracted_c
    num_exported_prims = nir_bcsel(b, fully_culled, nir_imm_int(b, 0u), num_exported_prims);
    nir_store_var(b, s->gs_exported_var, nir_iand(b, nir_inot(b, fully_culled), has_input_primitive(b)), 0x1u);
 
-   nir_if *if_wave_0 = nir_push_if(b, nir_ieq(b, nir_load_subgroup_id(b), nir_imm_int(b, 0)));
+   nir_if *if_wave_0 = nir_push_if(b, nir_ieq_imm(b, nir_load_subgroup_id(b), 0));
    {
       /* Tell the final vertex and primitive count to the HW. */
       if (s->options->gfx_level == GFX10) {
@@ -1628,7 +1628,7 @@ add_deferred_attribute_culling(nir_builder *b, nir_cf_list *original_extracted_c
    nir_push_else(b, if_cull_en);
    {
       /* When culling is disabled, we do the same as we would without culling. */
-      nir_if *if_wave_0 = nir_push_if(b, nir_ieq(b, nir_load_subgroup_id(b), nir_imm_int(b, 0)));
+      nir_if *if_wave_0 = nir_push_if(b, nir_ieq_imm(b, nir_load_subgroup_id(b), 0));
       {
          nir_ssa_def *vtx_cnt = nir_load_workgroup_num_input_vertices_amd(b);
          nir_ssa_def *prim_cnt = nir_load_workgroup_num_input_primitives_amd(b);
@@ -2370,7 +2370,7 @@ ac_nir_lower_ngg_nogs(nir_shader *shader, const ac_nir_lower_ngg_options *option
    /* Newer chips can use PRIMGEN_PASSTHRU_NO_MSG to skip gs_alloc_req for NGG passthrough. */
    if (!(options->passthrough && options->family >= CHIP_NAVI23)) {
       /* Allocate export space on wave 0 - confirm to the HW that we want to use all possible space */
-      nir_if *if_wave_0 = nir_push_if(b, nir_ieq(b, nir_load_subgroup_id(b), nir_imm_int(b, 0)));
+      nir_if *if_wave_0 = nir_push_if(b, nir_ieq_imm(b, nir_load_subgroup_id(b), 0));
       {
          nir_ssa_def *vtx_cnt = nir_load_workgroup_num_input_vertices_amd(b);
          nir_ssa_def *prim_cnt = nir_load_workgroup_num_input_primitives_amd(b);
@@ -3311,7 +3311,7 @@ ngg_gs_finale(nir_builder *b, lower_ngg_gs_state *s)
       /* When the output is compile-time known, the GS writes all possible vertices and primitives it can.
        * The gs_alloc_req needs to happen on one wave only, otherwise the HW hangs.
        */
-      nir_if *if_wave_0 = nir_push_if(b, nir_ieq(b, nir_load_subgroup_id(b), nir_imm_zero(b, 1, 32)));
+      nir_if *if_wave_0 = nir_push_if(b, nir_ieq_imm(b, nir_load_subgroup_id(b), 0));
       alloc_vertices_and_primitives(b, max_vtxcnt, max_prmcnt);
       nir_pop_if(b, if_wave_0);
    }
@@ -3345,7 +3345,7 @@ ngg_gs_finale(nir_builder *b, lower_ngg_gs_state *s)
    * requires that the invocations that export vertices are packed (ie. compact).
    * To ensure this, we need to repack invocations that have a live vertex.
    */
-   nir_ssa_def *vertex_live = nir_ine(b, out_vtx_primflag_0, nir_imm_zero(b, 1, out_vtx_primflag_0->bit_size));
+   nir_ssa_def *vertex_live = nir_ine_imm(b, out_vtx_primflag_0, 0);
    wg_repack_result rep = repack_invocations_in_workgroup(b, vertex_live, s->lds_addr_gs_scratch,
                                                           s->max_num_waves, s->options->wave_size);
 
@@ -3353,11 +3353,11 @@ ngg_gs_finale(nir_builder *b, lower_ngg_gs_state *s)
    nir_ssa_def *exporter_tid_in_tg = rep.repacked_invocation_index;
 
    /* When the workgroup emits 0 total vertices, we also must export 0 primitives (otherwise the HW can hang). */
-   nir_ssa_def *any_output = nir_ine(b, workgroup_num_vertices, nir_imm_int(b, 0));
+   nir_ssa_def *any_output = nir_ine_imm(b, workgroup_num_vertices, 0);
    max_prmcnt = nir_bcsel(b, any_output, max_prmcnt, nir_imm_int(b, 0));
 
    /* Allocate export space. We currently don't compact primitives, just use the maximum number. */
-   nir_if *if_wave_0 = nir_push_if(b, nir_ieq(b, nir_load_subgroup_id(b), nir_imm_zero(b, 1, 32)));
+   nir_if *if_wave_0 = nir_push_if(b, nir_ieq_imm(b, nir_load_subgroup_id(b), 0));
    {
       if (s->options->gfx_level == GFX10)
          alloc_vertices_and_primitives_gfx10_workaround(b, workgroup_num_vertices, max_prmcnt);


@@ -293,7 +293,7 @@ build_pipeline_statistics_query_shader(struct radv_device *device)
    nir_ssa_def *global_id = get_global_ids(&b, 1);
 
    nir_variable *input_stride = nir_local_variable_create(b.impl, glsl_int_type(), "input_stride");
-   nir_push_if(&b, nir_ine(&b, uses_gds, nir_imm_int(&b, 0)));
+   nir_push_if(&b, nir_ine_imm(&b, uses_gds, 0));
    {
       nir_store_var(&b, input_stride, nir_imm_int(&b, pipelinestat_block_size * 2 + 8 * 2), 0x1);
    }
@@ -335,8 +335,8 @@ build_pipeline_statistics_query_shader(struct radv_device *device)
       nir_store_var(&b, result, nir_isub(&b, end, start), 0x1);
 
       nir_push_if(&b, nir_iand(&b, nir_i2b(&b, uses_gds),
-                                nir_ieq(&b, nir_imm_int(&b, 1u << i),
-                                        nir_imm_int(&b, VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT))));
+                                nir_ieq_imm(&b, nir_imm_int(&b, 1u << i),
+                                            VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT)));
       {
          /* Compute the GDS result if needed. */
          nir_ssa_def *gds_start_offset =


@@ -600,7 +600,7 @@ radv_build_ray_traversal(struct radv_device *device, nir_builder *b,
       nir_ssa_def *bvh_addr =
          build_node_to_addr(device, b, nir_load_deref(b, args->vars.bvh_base), true);
       nir_ssa_def *parent = fetch_parent_node(b, bvh_addr, prev);
-      nir_push_if(b, nir_ieq(b, parent, nir_imm_int(b, RADV_BVH_INVALID_NODE)));
+      nir_push_if(b, nir_ieq_imm(b, parent, RADV_BVH_INVALID_NODE));
       {
          nir_store_var(b, incomplete, nir_imm_bool(b, false), 0x1);
          nir_jump(b, nir_jump_break);


@@ -255,7 +255,7 @@ nir_clz_u(nir_builder *b, nir_ssa_def *a)
 static inline nir_ssa_def *
 nir_ctz_u(nir_builder *b, nir_ssa_def *a)
 {
-   nir_ssa_def *cond = nir_ieq(b, a, nir_imm_intN_t(b, 0, a->bit_size));
+   nir_ssa_def *cond = nir_ieq_imm(b, a, 0);
 
    return nir_bcsel(b, cond,
                     nir_imm_intN_t(b, a->bit_size, a->bit_size),


@@ -682,7 +682,7 @@ lower_ufind_msb64(nir_builder *b, nir_ssa_def *x)
    nir_ssa_def *hi_count = nir_ufind_msb(b, x_hi);
 
    if (b->shader->options->lower_uadd_sat) {
-      nir_ssa_def *valid_hi_bits = nir_ine(b, x_hi, nir_imm_int(b, 0));
+      nir_ssa_def *valid_hi_bits = nir_ine_imm(b, x_hi, 0);
       nir_ssa_def *hi_res = nir_iadd(b, nir_imm_intN_t(b, 32, 32), hi_count);
       return nir_bcsel(b, valid_hi_bits, hi_res, lo_count);
    } else {
@@ -765,7 +765,7 @@ lower_2f(nir_builder *b, nir_ssa_def *x, unsigned dest_bit_size,
       nir_ssa_def *half = COND_LOWER_OP(b, ishr, lsb_mask, nir_imm_int(b, 1));
       nir_ssa_def *rem = COND_LOWER_OP(b, iand, x, rem_mask);
       nir_ssa_def *halfway = nir_iand(b, COND_LOWER_CMP(b, ieq, rem, half),
-                                      nir_ine(b, discard, nir_imm_int(b, 0)));
+                                      nir_ine_imm(b, discard, 0));
       nir_ssa_def *is_odd = COND_LOWER_CMP(b, ine, nir_imm_int64(b, 0),
                                            COND_LOWER_OP(b, iand, x, lsb_mask));
       nir_ssa_def *round_up = nir_ior(b, COND_LOWER_CMP(b, ilt, half, rem),


@@ -272,7 +272,7 @@ emit_shared_to_payload_copy(nir_builder *b,
    /* Copy the last few dwords not forming full vec4. */
    if (remaining_dwords > 0) {
       assert(remaining_dwords < 4);
-      nir_ssa_def *cmp = nir_ieq(b, invocation_index, nir_imm_int(b, 0));
+      nir_ssa_def *cmp = nir_ieq_imm(b, invocation_index, 0);
       nir_if *if_stmt = nir_push_if(b, cmp);
       {
          copy_shared_to_payload(b, remaining_dwords, addr, base_shared_addr, off);


@@ -1120,7 +1120,7 @@ static void
 ttn_ucmp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
 {
    ttn_move_dest(b, dest, nir_bcsel(b,
-                                    nir_ine(b, src[0], nir_imm_int(b, 0)),
+                                    nir_ine_imm(b, src[0], 0),
                                     src[1], src[2]));
 }
 


@@ -105,7 +105,7 @@ create_tess_ctrl_shader_variant(struct d3d12_context *ctx, struct d3d12_tcs_vari
    out->data.driver_location = in->data.driver_location = var->driver_location;
 
    for (unsigned i = 0; i < key->vertices_out; i++) {
-      nir_if *start_block = nir_push_if(&b, nir_ieq(&b, invocation_id, nir_imm_int(&b, i)));
+      nir_if *start_block = nir_push_if(&b, nir_ieq_imm(&b, invocation_id, i));
       nir_deref_instr *in_array_var = nir_build_deref_array(&b, nir_build_deref_var(&b, in), invocation_id);
       nir_deref_instr *out_array_var = nir_build_deref_array_imm(&b, nir_build_deref_var(&b, out), i);
       copy_vars(&b, out_array_var, in_array_var);


@@ -50,7 +50,7 @@ etna_lower_io(nir_shader *shader, struct etna_shader_variant *v)
    b.cursor = nir_after_instr(instr);
-   nir_ssa_def *ssa = nir_ine(&b, &intr->dest.ssa, nir_imm_int(&b, 0));
+   nir_ssa_def *ssa = nir_ine_imm(&b, &intr->dest.ssa, 0);
    if (v->key.front_ccw)
       nir_instr_as_alu(ssa->parent_instr)->op = nir_op_ieq;


@@ -289,7 +289,7 @@ handle_cube_edge(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *fa
    /* For each possible original face */
    for (unsigned j = 0; j < 6; j++) {
-      nir_ssa_def *predicate = nir_iand(b, remap_predicates[i], nir_ieq(b, face, nir_imm_int(b, j)));
+      nir_ssa_def *predicate = nir_iand(b, remap_predicates[i], nir_ieq_imm(b, face, j));
       x_result = nir_bcsel(b, predicate, remap_array[remap_table[j].remap_x], x_result);
       y_result = nir_bcsel(b, predicate, remap_array[remap_table[j].remap_y], y_result);


@@ -110,7 +110,7 @@ brw_nir_lower_launch_mesh_workgroups_instr(nir_builder *b, nir_instr *instr, voi
    /* Make sure that the mesh workgroup size is taken from the first invocation
     * (nir_intrinsic_launch_mesh_workgroups requirement)
     */
-   nir_ssa_def *cmp = nir_ieq(b, local_invocation_index, nir_imm_int(b, 0));
+   nir_ssa_def *cmp = nir_ieq_imm(b, local_invocation_index, 0);
    nir_if *if_stmt = nir_push_if(b, cmp);
    {
       /* TUE header contains 4 words:


@@ -370,7 +370,7 @@ lower_ray_query_intrinsic(nir_builder *b,
        * RayQueryCommittedIntersectionGeneratedEXT = 2U <= hit_in.leaf_type == BRW_RT_BVH_NODE_TYPE_PROCEDURAL (3)
        */
       sysval =
-         nir_bcsel(b, nir_ieq(b, hit_in.leaf_type, nir_imm_int(b, 4)),
+         nir_bcsel(b, nir_ieq_imm(b, hit_in.leaf_type, 4),
                    nir_imm_int(b, 1), nir_imm_int(b, 2));
       sysval =
          nir_bcsel(b, hit_in.valid,
@@ -379,8 +379,8 @@ lower_ray_query_intrinsic(nir_builder *b,
 
       /* 0 -> triangle, 1 -> AABB */
       sysval =
          nir_b2i32(b,
-                   nir_ieq(b, hit_in.leaf_type,
-                           nir_imm_int(b, BRW_RT_BVH_NODE_TYPE_PROCEDURAL)));
+                   nir_ieq_imm(b, hit_in.leaf_type,
+                               BRW_RT_BVH_NODE_TYPE_PROCEDURAL));
       }
       break;


@@ -926,8 +926,8 @@ brw_nir_rt_load_primitive_id_from_hit(nir_builder *b,
 {
    if (!is_procedural) {
       is_procedural =
-         nir_ieq(b, defs->leaf_type,
-                 nir_imm_int(b, BRW_RT_BVH_NODE_TYPE_PROCEDURAL));
+         nir_ieq_imm(b, defs->leaf_type,
+                     BRW_RT_BVH_NODE_TYPE_PROCEDURAL);
    }
 
    nir_ssa_def *prim_id_proc, *prim_id_quad;
@@ -973,7 +973,7 @@ brw_nir_rt_acceleration_structure_to_root_node(nir_builder *b,
     * BVH, we can find the root node at a given offset.
     */
    nir_ssa_def *root_node_ptr, *null_node_ptr;
-   nir_push_if(b, nir_ieq(b, as_addr, nir_imm_int64(b, 0)));
+   nir_push_if(b, nir_ieq_imm(b, as_addr, 0));
    {
       null_node_ptr = nir_imm_int64(b, 0);
    }


@@ -293,10 +293,10 @@ init_pbo_shader_data(nir_builder *b, struct pbo_shader_data *sd, unsigned coord_
                nir_bcsel(b,
                          nir_uge(b, sd->channels, nir_imm_int(b, 3)),
                          nir_bcsel(b,
-                                   nir_ieq(b, sd->channels, nir_imm_int(b, 4)),
-                                   nir_ball(b, nir_ieq(b, sd->bits, nir_imm_ivec4(b, 8, 8, 8, 8))),
-                                   nir_ball(b, nir_ieq(b, nir_channels(b, sd->bits, 7), nir_imm_ivec3(b, 8, 8, 8)))),
-                          nir_ball(b, nir_ieq(b, nir_channels(b, sd->bits, 3), nir_imm_ivec2(b, 8, 8)))),
+                                   nir_ieq_imm(b, sd->channels, 4),
+                                   nir_ball(b, nir_ieq_imm(b, sd->bits, 8)),
+                                   nir_ball(b, nir_ieq_imm(b, nir_channels(b, sd->bits, 7), 8))),
+                          nir_ball(b, nir_ieq_imm(b, nir_channels(b, sd->bits, 3), 8))),
                          nir_imm_bool(b, 0)),
                nir_imm_bool(b, 0))),
                nir_imm_bool(b, 0),
@@ -490,7 +490,7 @@ check_for_weird_packing(nir_builder *b, struct pbo_shader_data *sd, unsigned com
    return nir_bcsel(b,
                     nir_ige(b, sd->channels, nir_imm_int(b, component)),
                     nir_ior(b,
                             nir_ine(b, c, sd->bits1),
-                            nir_ine(b, nir_imod(b, c, nir_imm_int(b, 8)), nir_imm_int(b, 0))),
+                            nir_ine_imm(b, nir_imod(b, c, nir_imm_int(b, 8)), 0)),
                     nir_imm_bool(b, 0));
 }


@@ -301,7 +301,7 @@ handle_cube_edge(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y, nir_ssa_def *fa
    /* For each possible original face */
    for (unsigned j = 0; j < 6; j++) {
-      nir_ssa_def *predicate = nir_iand(b, remap_predicates[i], nir_ieq(b, face, nir_imm_int(b, j)));
+      nir_ssa_def *predicate = nir_iand(b, remap_predicates[i], nir_ieq_imm(b, face, j));
       x_result = nir_bcsel(b, predicate, remap_array[remap_table[j].remap_x], x_result);
       y_result = nir_bcsel(b, predicate, remap_array[remap_table[j].remap_y], y_result);


@@ -160,7 +160,7 @@ dzn_nir_indirect_draw_shader(enum dzn_indirect_draw_type type)
    nir_ssa_def *draw_count =
       nir_load_ssbo(&b, 1, 32, count_buf_desc, nir_imm_int(&b, 0), .align_mul = 4);
 
-   nir_push_if(&b, nir_ieq(&b, index, nir_imm_int(&b, 0)));
+   nir_push_if(&b, nir_ieq_imm(&b, index, 0));
    nir_store_ssbo(&b, draw_count, exec_buf_desc, nir_imm_int(&b, 0),
                   .write_mask = 0x1, .access = ACCESS_NON_READABLE,
                   .align_mul = 16);


@@ -108,7 +108,7 @@ pan_indirect_dispatch_init(struct panfrost_device *dev)
    nir_ssa_def *num_wg_x_ptr = get_input_field(&b, num_wg_sysval[0]);
 
-   nir_push_if(&b, nir_ine(&b, num_wg_x_ptr, nir_imm_int64(&b, 0)));
+   nir_push_if(&b, nir_ine_imm(&b, num_wg_x_ptr, 0));
    {
       nir_store_global(&b, num_wg_x_ptr, 8, num_wg_x, 1);
       nir_store_global(&b, get_input_field(&b, num_wg_sysval[1]), 8,


@@ -42,7 +42,7 @@ pan_lower_helper_invocation_instr(nir_builder *b, nir_instr *instr, void *data)
    b->cursor = nir_before_instr(instr);
 
    nir_ssa_def *mask = nir_load_sample_mask_in(b);
-   nir_ssa_def *eq = nir_ieq(b, mask, nir_imm_int(b, 0));
+   nir_ssa_def *eq = nir_ieq_imm(b, mask, 0);
 
    nir_ssa_def_rewrite_uses(&intr->dest.ssa, eq);
    return true;