From 6d142078bc31e12e79c9a72034c1e1418c940efc Mon Sep 17 00:00:00 2001 From: Erik Faye-Lund Date: Mon, 8 May 2023 14:00:41 +0200 Subject: [PATCH] nir: use generated immediate comparison helpers This makes the code a bit less verbose, so let's use the helpers. Reviewed-by: Rhys Perry Part-of: --- src/amd/common/ac_nir.c | 2 +- src/amd/common/ac_nir_cull.c | 6 ++--- src/amd/common/ac_nir_lower_ngg.c | 8 +++--- src/amd/vulkan/meta/radv_meta_etc_decode.c | 12 ++++----- src/amd/vulkan/nir/radv_nir_lower_abi.c | 2 +- .../vulkan/radv_device_generated_commands.c | 6 ++--- src/amd/vulkan/radv_query.c | 6 ++--- src/amd/vulkan/radv_rt_common.c | 24 ++++++++--------- src/broadcom/vulkan/v3dv_meta_copy.c | 2 +- src/compiler/nir/nir_builder.c | 2 +- src/compiler/nir/nir_builder.h | 4 +-- src/compiler/nir/nir_builtin_builder.c | 3 +-- src/compiler/nir/nir_conversion_builder.h | 2 +- src/compiler/nir/nir_format_convert.h | 4 +-- .../nir/nir_lower_array_deref_of_vec.c | 2 +- src/compiler/nir/nir_lower_bitmap.c | 6 ++--- src/compiler/nir/nir_lower_clip.c | 2 +- src/compiler/nir/nir_lower_clip_disable.c | 2 +- src/compiler/nir/nir_lower_double_ops.c | 24 ++++++++--------- src/compiler/nir/nir_lower_fp16_conv.c | 4 +-- src/compiler/nir/nir_lower_gs_intrinsics.c | 7 ++--- src/compiler/nir/nir_lower_idiv.c | 4 +-- src/compiler/nir/nir_lower_indirect_derefs.c | 2 +- src/compiler/nir/nir_lower_int64.c | 26 +++++++++---------- src/compiler/nir/nir_lower_multiview.c | 2 +- src/compiler/nir/nir_lower_point_smooth.c | 2 +- src/compiler/nir/nir_lower_printf.c | 2 +- src/compiler/nir/nir_lower_subgroups.c | 4 +-- src/compiler/nir/nir_lower_task_shader.c | 2 +- src/compiler/nir/nir_lower_tex.c | 2 +- .../nir/nir_lower_variable_initializers.c | 2 +- src/compiler/nir/nir_lower_wpos_ytransform.c | 2 +- src/compiler/nir/nir_opt_idiv_const.c | 4 +-- src/compiler/nir/nir_passthrough_gs.c | 2 +- src/compiler/spirv/vtn_opencl.c | 2 +- src/freedreno/ir3/ir3_nir_lower_tess.c | 4 +-- 
src/gallium/auxiliary/nir/nir_draw_helpers.c | 3 +-- src/gallium/auxiliary/nir/tgsi_to_nir.c | 14 +++++----- .../drivers/d3d12/d3d12_gs_variant.cpp | 5 ++-- src/gallium/drivers/d3d12/d3d12_nir_passes.c | 2 +- .../sfn/sfn_nir_legalize_image_load_store.cpp | 2 +- .../drivers/radeonsi/si_nir_lower_abi.c | 2 +- .../drivers/radeonsi/si_nir_lower_vs_inputs.c | 4 +-- .../zink/zink_lower_cubemap_to_array.c | 6 ++--- src/intel/blorp/blorp_blit.c | 2 +- src/intel/compiler/brw_mesh.cpp | 3 +-- .../vulkan/anv_nir_apply_pipeline_layout.c | 2 +- src/intel/vulkan/anv_nir_lower_multiview.c | 2 +- .../anv_nir_apply_pipeline_layout.c | 2 +- .../vulkan_hasvk/anv_nir_lower_multiview.c | 2 +- src/mesa/program/prog_to_nir.c | 4 +-- src/mesa/state_tracker/st_atifs_to_nir.c | 2 +- src/mesa/state_tracker/st_draw_hw_select.c | 21 +++++++-------- src/mesa/state_tracker/st_pbo_compute.c | 10 +++---- src/microsoft/compiler/dxil_nir.c | 4 +-- .../compiler/dxil_nir_lower_int_cubemaps.c | 6 ++--- .../compiler/dxil_nir_lower_int_samplers.c | 4 +-- src/microsoft/compiler/dxil_nir_tess.c | 2 +- 58 files changed, 143 insertions(+), 153 deletions(-) diff --git a/src/amd/common/ac_nir.c b/src/amd/common/ac_nir.c index 8768a9b5b0a..2503f5acd0c 100644 --- a/src/amd/common/ac_nir.c +++ b/src/amd/common/ac_nir.c @@ -174,7 +174,7 @@ ac_nir_export_position(nir_builder *b, } else if (force_vrs) { /* If Pos.W != 1 (typical for non-GUI elements), use coarse shading. 
*/ nir_ssa_def *pos_w = nir_channel(b, pos, 3); - nir_ssa_def *cond = nir_fneu(b, pos_w, nir_imm_float(b, 1)); + nir_ssa_def *cond = nir_fneu_imm(b, pos_w, 1); rates = nir_bcsel(b, cond, nir_load_force_vrs_rates_amd(b), nir_imm_int(b, 0)); } diff --git a/src/amd/common/ac_nir_cull.c b/src/amd/common/ac_nir_cull.c index 6dae73ad1b7..06fa2a13e27 100644 --- a/src/amd/common/ac_nir_cull.c +++ b/src/amd/common/ac_nir_cull.c @@ -26,7 +26,7 @@ analyze_position_w(nir_builder *b, nir_ssa_def *pos[][4], unsigned num_vertices, w_info->any_w_negative = nir_imm_bool(b, false); for (unsigned i = 0; i < num_vertices; ++i) { - nir_ssa_def *neg_w = nir_flt(b, pos[i][3], nir_imm_float(b, 0.0f)); + nir_ssa_def *neg_w = nir_flt_imm(b, pos[i][3], 0.0f); w_info->w_reflection = nir_ixor(b, neg_w, w_info->w_reflection); w_info->any_w_negative = nir_ior(b, neg_w, w_info->any_w_negative); w_info->all_w_negative = nir_iand(b, neg_w, w_info->all_w_negative); @@ -47,7 +47,7 @@ cull_face_triangle(nir_builder *b, nir_ssa_def *pos[3][4], const position_w_info det = nir_bcsel(b, w_info->w_reflection, nir_fneg(b, det), det); nir_ssa_def *front_facing_ccw = nir_flt(b, nir_imm_float(b, 0.0f), det); - nir_ssa_def *zero_area = nir_feq(b, nir_imm_float(b, 0.0f), det); + nir_ssa_def *zero_area = nir_feq_imm(b, det, 0.0f); nir_ssa_def *ccw = nir_load_cull_ccw_amd(b); nir_ssa_def *front_facing = nir_ieq(b, front_facing_ccw, ccw); nir_ssa_def *cull_front = nir_load_cull_front_face_enabled_amd(b); @@ -77,7 +77,7 @@ cull_frustrum(nir_builder *b, nir_ssa_def *bbox_min[2], nir_ssa_def *bbox_max[2] nir_ssa_def *prim_outside_view = nir_imm_false(b); for (unsigned chan = 0; chan < 2; ++chan) { - prim_outside_view = nir_ior(b, prim_outside_view, nir_flt(b, bbox_max[chan], nir_imm_float(b, -1.0f))); + prim_outside_view = nir_ior(b, prim_outside_view, nir_flt_imm(b, bbox_max[chan], -1.0f)); prim_outside_view = nir_ior(b, prim_outside_view, nir_flt(b, nir_imm_float(b, 1.0f), bbox_min[chan])); } diff --git 
a/src/amd/common/ac_nir_lower_ngg.c b/src/amd/common/ac_nir_lower_ngg.c index c8dc80789d8..7d120f528af 100644 --- a/src/amd/common/ac_nir_lower_ngg.c +++ b/src/amd/common/ac_nir_lower_ngg.c @@ -655,7 +655,7 @@ emit_store_ngg_nogs_es_primitive_id(nir_builder *b, lower_ngg_nogs_state *s) static void add_clipdist_bit(nir_builder *b, nir_ssa_def *dist, unsigned index, nir_variable *mask) { - nir_ssa_def *is_neg = nir_flt(b, dist, nir_imm_float(b, 0)); + nir_ssa_def *is_neg = nir_flt_imm(b, dist, 0); nir_ssa_def *neg_mask = nir_ishl_imm(b, nir_b2i32(b, is_neg), index); neg_mask = nir_ior(b, neg_mask, nir_load_var(b, mask)); nir_store_var(b, mask, neg_mask, 1); @@ -2616,7 +2616,7 @@ ngg_gs_clear_primflags(nir_builder *b, nir_ssa_def *num_vertices, unsigned strea nir_loop *loop = nir_push_loop(b); { nir_ssa_def *current_clear_primflag_idx = nir_load_var(b, s->current_clear_primflag_idx_var); - nir_if *if_break = nir_push_if(b, nir_uge(b, current_clear_primflag_idx, nir_imm_int(b, b->shader->info.gs.vertices_out))); + nir_if *if_break = nir_push_if(b, nir_uge_imm(b, current_clear_primflag_idx, b->shader->info.gs.vertices_out)); { nir_jump(b, nir_jump_break); } @@ -2819,7 +2819,7 @@ lower_ngg_gs_emit_vertex_with_counter(nir_builder *b, nir_intrinsic_instr *intri ? 
nir_ishl_imm(b, nir_b2i32(b, nir_inot(b, nir_load_cull_any_enabled_amd(b))), 2) : nir_imm_int(b, 0b100); - nir_ssa_def *completes_prim = nir_ige(b, current_vtx_per_prim, nir_imm_int(b, s->num_vertices_per_primitive - 1)); + nir_ssa_def *completes_prim = nir_ige_imm(b, current_vtx_per_prim, s->num_vertices_per_primitive - 1); nir_ssa_def *complete_flag = nir_b2i32(b, completes_prim); nir_ssa_def *prim_flag = nir_ior(b, vertex_live_flag, complete_flag); @@ -4489,7 +4489,7 @@ handle_smaller_ms_api_workgroup(nir_builder *b, .memory_modes = nir_var_shader_out | nir_var_mem_shared); } - nir_ssa_def *has_api_ms_invocation = nir_ult(b, invocation_index, nir_imm_int(b, s->api_workgroup_size)); + nir_ssa_def *has_api_ms_invocation = nir_ult_imm(b, invocation_index, s->api_workgroup_size); nir_if *if_has_api_ms_invocation = nir_push_if(b, has_api_ms_invocation); { nir_cf_reinsert(&extracted, b->cursor); diff --git a/src/amd/vulkan/meta/radv_meta_etc_decode.c b/src/amd/vulkan/meta/radv_meta_etc_decode.c index 5444090822d..a9295af8fea 100644 --- a/src/amd/vulkan/meta/radv_meta_etc_decode.c +++ b/src/amd/vulkan/meta/radv_meta_etc_decode.c @@ -134,11 +134,11 @@ decode_etc2_alpha(struct nir_builder *b, nir_ssa_def *alpha_payload, nir_ssa_def } nir_ssa_def *lsb_index = - nir_ubfe(b, nir_bcsel(b, nir_uge(b, bit_offset, nir_imm_int(b, 32)), alpha_y, alpha_x), + nir_ubfe(b, nir_bcsel(b, nir_uge_imm(b, bit_offset, 32), alpha_y, alpha_x), nir_iand_imm(b, bit_offset, 31), nir_imm_int(b, 2)); bit_offset = nir_iadd_imm(b, bit_offset, 2); nir_ssa_def *msb = - nir_ubfe(b, nir_bcsel(b, nir_uge(b, bit_offset, nir_imm_int(b, 32)), alpha_y, alpha_x), + nir_ubfe(b, nir_bcsel(b, nir_uge_imm(b, bit_offset, 32), alpha_y, alpha_x), nir_iand_imm(b, bit_offset, 31), nir_imm_int(b, 1)); nir_ssa_def *mod = nir_ixor(b, etc1_alpha_modifier_lookup(b, table, lsb_index), nir_iadd_imm(b, msb, -1)); @@ -230,13 +230,13 @@ build_shader(struct radv_device *dev) nir_variable *color = nir_variable_create(b.shader, 
nir_var_shader_temp, glsl_vec4_type(), "color"); nir_store_var(&b, color, nir_imm_vec4(&b, 1.0, 0.0, 0.0, 1.0), 0xf); - nir_push_if(&b, nir_ilt(&b, format, nir_imm_int(&b, VK_FORMAT_EAC_R11_UNORM_BLOCK))); + nir_push_if(&b, nir_ilt_imm(&b, format, VK_FORMAT_EAC_R11_UNORM_BLOCK)); { nir_ssa_def *alpha_bits_8 = - nir_ige(&b, format, nir_imm_int(&b, VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK)); + nir_ige_imm(&b, format, VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK); nir_ssa_def *alpha_bits_1 = - nir_iand(&b, nir_ige(&b, format, nir_imm_int(&b, VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK)), - nir_ilt(&b, format, nir_imm_int(&b, VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK))); + nir_iand(&b, nir_ige_imm(&b, format, VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK), + nir_ilt_imm(&b, format, VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK)); nir_ssa_def *color_payload = nir_bcsel(&b, alpha_bits_8, nir_channels(&b, payload, 0xC), nir_channels(&b, payload, 3)); diff --git a/src/amd/vulkan/nir/radv_nir_lower_abi.c b/src/amd/vulkan/nir/radv_nir_lower_abi.c index d669ae29ed6..bdf20620390 100644 --- a/src/amd/vulkan/nir/radv_nir_lower_abi.c +++ b/src/amd/vulkan/nir/radv_nir_lower_abi.c @@ -459,7 +459,7 @@ lower_abi_instr(nir_builder *b, nir_instr *instr, void *state) case nir_intrinsic_load_barycentric_optimize_amd: { nir_ssa_def *prim_mask = ac_nir_load_arg(b, &s->args->ac, s->args->ac.prim_mask); /* enabled when bit 31 is set */ - replacement = nir_ilt(b, prim_mask, nir_imm_int(b, 0)); + replacement = nir_ilt_imm(b, prim_mask, 0); break; } case nir_intrinsic_load_poly_line_smooth_enabled: diff --git a/src/amd/vulkan/radv_device_generated_commands.c b/src/amd/vulkan/radv_device_generated_commands.c index 1e20c125280..937437dbfc5 100644 --- a/src/amd/vulkan/radv_device_generated_commands.c +++ b/src/amd/vulkan/radv_device_generated_commands.c @@ -587,7 +587,7 @@ build_dgc_prepare_shader(struct radv_device *dev) nir_ssa_def *update = nir_iand(&b, push_const_mask, nir_ishl(&b, nir_imm_int64(&b, 1), cur_idx)); update = nir_bcsel( - &b, 
nir_ult(&b, cur_idx, nir_imm_int(&b, 64 /* bits in push_const_mask */)), update, + &b, nir_ult_imm(&b, cur_idx, 64 /* bits in push_const_mask */), update, nir_imm_int64(&b, 0)); nir_push_if(&b, nir_ine_imm(&b, update, 0)); @@ -664,7 +664,7 @@ build_dgc_prepare_shader(struct radv_device *dev) { nir_ssa_def *cur_idx = nir_load_var(&b, idx); nir_push_if(&b, - nir_uge(&b, cur_idx, nir_imm_int(&b, 64 /* bits in inline_mask */))); + nir_uge_imm(&b, cur_idx, 64 /* bits in inline_mask */)); { nir_jump(&b, nir_jump_break); } @@ -682,7 +682,7 @@ build_dgc_prepare_shader(struct radv_device *dev) nir_ssa_def *update = nir_iand(&b, push_const_mask, nir_ishl(&b, nir_imm_int64(&b, 1), cur_idx)); update = nir_bcsel( - &b, nir_ult(&b, cur_idx, nir_imm_int(&b, 64 /* bits in push_const_mask */)), + &b, nir_ult_imm(&b, cur_idx, 64 /* bits in push_const_mask */), update, nir_imm_int64(&b, 0)); nir_push_if(&b, nir_ine_imm(&b, update, 0)); diff --git a/src/amd/vulkan/radv_query.c b/src/amd/vulkan/radv_query.c index 5f82b4a02ea..a0c8dff05b6 100644 --- a/src/amd/vulkan/radv_query.c +++ b/src/amd/vulkan/radv_query.c @@ -154,7 +154,7 @@ build_occlusion_query_shader(struct radv_device *device) nir_ssa_def *load = nir_load_ssbo(&b, 1, 32, src_buf, load_offset, .align_mul = 4, .access = ACCESS_COHERENT); - nir_push_if(&b, nir_ige(&b, load, nir_imm_int(&b, 0x80000000))); + nir_push_if(&b, nir_ige_imm(&b, load, 0x80000000)); { nir_jump(&b, nir_jump_break); } @@ -183,8 +183,8 @@ build_occlusion_query_shader(struct radv_device *device) nir_store_var(&b, start, nir_channel(&b, load, 0), 0x1); nir_store_var(&b, end, nir_channel(&b, load, 1), 0x1); - nir_ssa_def *start_done = nir_ilt(&b, nir_load_var(&b, start), nir_imm_int64(&b, 0)); - nir_ssa_def *end_done = nir_ilt(&b, nir_load_var(&b, end), nir_imm_int64(&b, 0)); + nir_ssa_def *start_done = nir_ilt_imm(&b, nir_load_var(&b, start), 0); + nir_ssa_def *end_done = nir_ilt_imm(&b, nir_load_var(&b, end), 0); nir_push_if(&b, nir_iand(&b, start_done, 
end_done)); diff --git a/src/amd/vulkan/radv_rt_common.c b/src/amd/vulkan/radv_rt_common.c index e4b1c020e04..478a0773627 100644 --- a/src/amd/vulkan/radv_rt_common.c +++ b/src/amd/vulkan/radv_rt_common.c @@ -240,7 +240,7 @@ intersect_ray_amd_software_tri(struct radv_device *device, nir_builder *b, nir_s /* Swap kx and ky dimensions to preserve winding order */ unsigned swap_xy_swizzle[4] = {1, 0, 2, 3}; - k = nir_bcsel(b, nir_flt(b, nir_vector_extract(b, dir, kz), nir_imm_float(b, 0.0f)), + k = nir_bcsel(b, nir_flt_imm(b, nir_vector_extract(b, dir, kz), 0.0f), nir_swizzle(b, k, swap_xy_swizzle, 3), k); kx = nir_channel(b, k, 0); @@ -291,8 +291,8 @@ intersect_ray_amd_software_tri(struct radv_device *device, nir_builder *b, nir_s * but we fail dEQP-VK.ray_tracing_pipeline.watertightness.closedFan2.1024 with * failures = 1 without doing this. :( */ nir_ssa_def *cond_retest = nir_ior( - b, nir_ior(b, nir_feq(b, u, nir_imm_float(b, 0.0f)), nir_feq(b, v, nir_imm_float(b, 0.0f))), - nir_feq(b, w, nir_imm_float(b, 0.0f))); + b, nir_ior(b, nir_feq_imm(b, u, 0.0f), nir_feq_imm(b, v, 0.0f)), + nir_feq_imm(b, w, 0.0f)); nir_push_if(b, cond_retest); { @@ -318,8 +318,8 @@ intersect_ray_amd_software_tri(struct radv_device *device, nir_builder *b, nir_s /* Perform edge tests. 
*/ nir_ssa_def *cond_back = nir_ior( - b, nir_ior(b, nir_flt(b, u, nir_imm_float(b, 0.0f)), nir_flt(b, v, nir_imm_float(b, 0.0f))), - nir_flt(b, w, nir_imm_float(b, 0.0f))); + b, nir_ior(b, nir_flt_imm(b, u, 0.0f), nir_flt_imm(b, v, 0.0f)), + nir_flt_imm(b, w, 0.0f)); nir_ssa_def *cond_front = nir_ior( b, nir_ior(b, nir_flt(b, nir_imm_float(b, 0.0f), u), nir_flt(b, nir_imm_float(b, 0.0f), v)), @@ -340,7 +340,7 @@ intersect_ray_amd_software_tri(struct radv_device *device, nir_builder *b, nir_s nir_ssa_def *t_signed = nir_fmul(b, nir_fsign(b, det), t); - nir_ssa_def *det_cond_front = nir_inot(b, nir_flt(b, t_signed, nir_imm_float(b, 0.0f))); + nir_ssa_def *det_cond_front = nir_inot(b, nir_flt_imm(b, t_signed, 0.0f)); nir_push_if(b, det_cond_front); { @@ -410,8 +410,8 @@ hit_is_opaque(nir_builder *b, nir_ssa_def *sbt_offset_and_flags, const struct radv_ray_flags *ray_flags, nir_ssa_def *geometry_id_and_flags) { nir_ssa_def *opaque = - nir_uge(b, nir_ior(b, geometry_id_and_flags, sbt_offset_and_flags), - nir_imm_int(b, RADV_INSTANCE_FORCE_OPAQUE | RADV_INSTANCE_NO_FORCE_NOT_OPAQUE)); + nir_uge_imm(b, nir_ior(b, geometry_id_and_flags, sbt_offset_and_flags), + RADV_INSTANCE_FORCE_OPAQUE | RADV_INSTANCE_NO_FORCE_NOT_OPAQUE); opaque = nir_bcsel(b, ray_flags->force_opaque, nir_imm_bool(b, true), opaque); opaque = nir_bcsel(b, ray_flags->force_not_opaque, nir_imm_bool(b, false), opaque); return opaque; @@ -562,8 +562,8 @@ radv_build_ray_traversal(struct radv_device *device, nir_builder *b, { /* Early exit if we never overflowed the stack, to avoid having to backtrack to * the root for no reason. 
*/ - nir_push_if(b, nir_ilt(b, nir_load_deref(b, args->vars.stack), - nir_imm_int(b, args->stack_base + args->stack_stride))); + nir_push_if(b, nir_ilt_imm(b, nir_load_deref(b, args->vars.stack), + args->stack_base + args->stack_stride)); { nir_store_var(b, incomplete, nir_imm_bool(b, false), 0x1); nir_jump(b, nir_jump_break); @@ -648,9 +648,9 @@ radv_build_ray_traversal(struct radv_device *device, nir_builder *b, } nir_ssa_def *node_type = nir_iand_imm(b, bvh_node, 7); - nir_push_if(b, nir_uge(b, node_type, nir_imm_int(b, radv_bvh_node_box16))); + nir_push_if(b, nir_uge_imm(b, node_type, radv_bvh_node_box16)); { - nir_push_if(b, nir_uge(b, node_type, nir_imm_int(b, radv_bvh_node_instance))); + nir_push_if(b, nir_uge_imm(b, node_type, radv_bvh_node_instance)); { nir_push_if(b, nir_ieq_imm(b, node_type, radv_bvh_node_aabb)); { diff --git a/src/broadcom/vulkan/v3dv_meta_copy.c b/src/broadcom/vulkan/v3dv_meta_copy.c index 247ae94a1fc..fc4a438c404 100644 --- a/src/broadcom/vulkan/v3dv_meta_copy.c +++ b/src/broadcom/vulkan/v3dv_meta_copy.c @@ -3330,7 +3330,7 @@ gen_tex_coords(nir_builder *b) */ nir_ssa_def *one = nir_imm_int(b, 1); - nir_ssa_def *c0cmp = nir_ilt(b, vertex_id, nir_imm_int(b, 2)); + nir_ssa_def *c0cmp = nir_ilt_imm(b, vertex_id, 2); nir_ssa_def *c1cmp = nir_ieq(b, nir_iand(b, vertex_id, one), one); nir_ssa_def *comp[4]; diff --git a/src/compiler/nir/nir_builder.c b/src/compiler/nir/nir_builder.c index f2dd1ee84a5..055b7c6e19f 100644 --- a/src/compiler/nir/nir_builder.c +++ b/src/compiler/nir/nir_builder.c @@ -633,7 +633,7 @@ nir_gen_rect_vertices(nir_builder *b, nir_ssa_def *z, nir_ssa_def *w) * channel 1 is vertex_id & 1 ? 
1.0 : -1.0 */ - nir_ssa_def *c0cmp = nir_ilt(b, vertex_id, nir_imm_int(b, 2)); + nir_ssa_def *c0cmp = nir_ilt_imm(b, vertex_id, 2); nir_ssa_def *c1cmp = nir_test_mask(b, vertex_id, 1); nir_ssa_def *comp[4]; diff --git a/src/compiler/nir/nir_builder.h b/src/compiler/nir/nir_builder.h index b3ef11c2186..036eed4f2d8 100644 --- a/src/compiler/nir/nir_builder.h +++ b/src/compiler/nir/nir_builder.h @@ -419,7 +419,7 @@ nir_f2fN(nir_builder *b, nir_ssa_def *src, unsigned bit_size) static inline nir_ssa_def * nir_i2b(nir_builder *b, nir_ssa_def *src) { - return nir_ine(b, src, nir_imm_intN_t(b, 0, src->bit_size)); + return nir_ine_imm(b, src, 0); } static inline nir_ssa_def * @@ -621,7 +621,7 @@ _nir_select_from_array_helper(nir_builder *b, nir_ssa_def **arr, return arr[start]; } else { unsigned mid = start + (end - start) / 2; - return nir_bcsel(b, nir_ilt(b, idx, nir_imm_intN_t(b, mid, idx->bit_size)), + return nir_bcsel(b, nir_ilt_imm(b, idx, mid), _nir_select_from_array_helper(b, arr, idx, start, mid), _nir_select_from_array_helper(b, arr, idx, mid, end)); } diff --git a/src/compiler/nir/nir_builtin_builder.c b/src/compiler/nir/nir_builtin_builder.c index d3ad4de027a..3c488965875 100644 --- a/src/compiler/nir/nir_builtin_builder.c +++ b/src/compiler/nir/nir_builtin_builder.c @@ -285,8 +285,7 @@ nir_atan2(nir_builder *b, nir_ssa_def *y, nir_ssa_def *x) * 24-bit representation. */ const double huge_val = bit_size >= 32 ? 
1e18 : 16384; - nir_ssa_def *huge = nir_imm_floatN_t(b, huge_val, bit_size); - nir_ssa_def *scale = nir_bcsel(b, nir_fge(b, nir_fabs(b, t), huge), + nir_ssa_def *scale = nir_bcsel(b, nir_fge_imm(b, nir_fabs(b, t), huge_val), nir_imm_floatN_t(b, 0.25, bit_size), one); nir_ssa_def *rcp_scaled_t = nir_frcp(b, nir_fmul(b, t, scale)); nir_ssa_def *s_over_t = nir_fmul(b, nir_fmul(b, s, scale), rcp_scaled_t); diff --git a/src/compiler/nir/nir_conversion_builder.h b/src/compiler/nir/nir_conversion_builder.h index bb623f1db4c..e926e8e1048 100644 --- a/src/compiler/nir/nir_conversion_builder.h +++ b/src/compiler/nir/nir_conversion_builder.h @@ -93,7 +93,7 @@ nir_round_float_to_float(nir_builder *b, nir_ssa_def *src, return nir_bcsel(b, cmp, nir_nextafter(b, lower_prec, neg_inf), lower_prec); } case nir_rounding_mode_rtz: - return nir_bcsel(b, nir_flt(b, src, nir_imm_zero(b, 1, src->bit_size)), + return nir_bcsel(b, nir_flt_imm(b, src, 0), nir_round_float_to_float(b, src, dest_bit_size, nir_rounding_mode_ru), nir_round_float_to_float(b, src, dest_bit_size, diff --git a/src/compiler/nir/nir_format_convert.h b/src/compiler/nir/nir_format_convert.h index 72a86c5d9ef..a825fc77ac6 100644 --- a/src/compiler/nir/nir_format_convert.h +++ b/src/compiler/nir/nir_format_convert.h @@ -40,7 +40,7 @@ static inline nir_ssa_def * nir_shift(nir_builder *b, nir_ssa_def *value, nir_ssa_def *left_shift) { return nir_bcsel(b, - nir_ige(b, left_shift, nir_imm_int(b, 0)), + nir_ige_imm(b, left_shift, 0), nir_ishl(b, value, left_shift), nir_ushr(b, value, nir_ineg(b, left_shift))); } @@ -307,7 +307,7 @@ nir_format_linear_to_srgb(nir_builder *b, nir_ssa_def *c) 1.055f), -0.055f); - return nir_fsat(b, nir_bcsel(b, nir_flt(b, c, nir_imm_float(b, 0.0031308f)), + return nir_fsat(b, nir_bcsel(b, nir_flt_imm(b, c, 0.0031308f), linear, curved)); } diff --git a/src/compiler/nir/nir_lower_array_deref_of_vec.c b/src/compiler/nir/nir_lower_array_deref_of_vec.c index daec785232f..62c4d2bc40f 100644 ---
a/src/compiler/nir/nir_lower_array_deref_of_vec.c +++ b/src/compiler/nir/nir_lower_array_deref_of_vec.c @@ -50,7 +50,7 @@ build_write_masked_stores(nir_builder *b, nir_deref_instr *vec_deref, build_write_masked_store(b, vec_deref, value, start); } else { unsigned mid = start + (end - start) / 2; - nir_push_if(b, nir_ilt(b, index, nir_imm_int(b, mid))); + nir_push_if(b, nir_ilt_imm(b, index, mid)); build_write_masked_stores(b, vec_deref, value, index, start, mid); nir_push_else(b, NULL); build_write_masked_stores(b, vec_deref, value, index, mid, end); diff --git a/src/compiler/nir/nir_lower_bitmap.c b/src/compiler/nir/nir_lower_bitmap.c index c93d2f43a44..436b4c17525 100644 --- a/src/compiler/nir/nir_lower_bitmap.c +++ b/src/compiler/nir/nir_lower_bitmap.c @@ -92,9 +92,9 @@ lower_bitmap(nir_shader *shader, nir_builder *b, nir_builder_instr_insert(b, &tex->instr); /* kill if tex != 0.0.. take .x or .w channel according to format: */ - cond = nir_fneu(b, nir_channel(b, &tex->dest.ssa, - options->swizzle_xxxx ? 0 : 3), - nir_imm_floatN_t(b, 0.0, tex->dest.ssa.bit_size)); + cond = nir_fneu_imm(b, nir_channel(b, &tex->dest.ssa, + options->swizzle_xxxx ? 0 : 3), + 0.0); nir_discard_if(b, cond); diff --git a/src/compiler/nir/nir_lower_clip.c b/src/compiler/nir/nir_lower_clip.c index 755647bd410..ce50e9721a8 100644 --- a/src/compiler/nir/nir_lower_clip.c +++ b/src/compiler/nir/nir_lower_clip.c @@ -452,7 +452,7 @@ lower_clip_fs(nir_function_impl *impl, unsigned ucp_enables, for (int plane = 0; plane < MAX_CLIP_PLANES; plane++) { if (ucp_enables & (1 << plane)) { nir_ssa_def *this_cond = - nir_flt(&b, clipdist[plane], nir_imm_float(&b, 0.0)); + nir_flt_imm(&b, clipdist[plane], 0.0); cond = cond ? 
nir_ior(&b, cond, this_cond) : this_cond; } diff --git a/src/compiler/nir/nir_lower_clip_disable.c b/src/compiler/nir/nir_lower_clip_disable.c index e646f2b93e0..2be9c44516d 100644 --- a/src/compiler/nir/nir_lower_clip_disable.c +++ b/src/compiler/nir/nir_lower_clip_disable.c @@ -50,7 +50,7 @@ recursive_if_chain(nir_builder *b, nir_deref_instr *deref, nir_ssa_def *value, u } unsigned mid = start + (end - start) / 2; - nir_push_if(b, nir_ilt(b, index, nir_imm_int(b, mid))); + nir_push_if(b, nir_ilt_imm(b, index, mid)); recursive_if_chain(b, deref, value, clip_plane_enable, index, start, mid); nir_push_else(b, NULL); recursive_if_chain(b, deref, value, clip_plane_enable, index, mid, end); diff --git a/src/compiler/nir/nir_lower_double_ops.c b/src/compiler/nir/nir_lower_double_ops.c index e80900b71fc..cd4dcfa7522 100644 --- a/src/compiler/nir/nir_lower_double_ops.c +++ b/src/compiler/nir/nir_lower_double_ops.c @@ -99,12 +99,11 @@ fix_inv_result(nir_builder *b, nir_ssa_def *res, nir_ssa_def *src, * zeros, but GLSL doesn't require it. 
*/ res = nir_bcsel(b, nir_ior(b, nir_ige(b, nir_imm_int(b, 0), exp), - nir_feq(b, nir_fabs(b, src), - nir_imm_double(b, INFINITY))), + nir_feq_imm(b, nir_fabs(b, src), INFINITY)), nir_imm_double(b, 0.0f), res); /* If the original input was 0, generate the correctly-signed infinity */ - res = nir_bcsel(b, nir_fneu(b, src, nir_imm_double(b, 0.0f)), + res = nir_bcsel(b, nir_fneu_imm(b, src, 0.0f), res, get_signed_inf(b, src)); return res; @@ -299,13 +298,12 @@ lower_sqrt_rsq(nir_builder *b, nir_ssa_def *src, bool sqrt) nir_ssa_def *src_flushed = src; if (!preserve_denorms) { src_flushed = nir_bcsel(b, - nir_flt(b, nir_fabs(b, src), - nir_imm_double(b, DBL_MIN)), + nir_flt_imm(b, nir_fabs(b, src), DBL_MIN), nir_imm_double(b, 0.0), src); } - res = nir_bcsel(b, nir_ior(b, nir_feq(b, src_flushed, nir_imm_double(b, 0.0)), - nir_feq(b, src, nir_imm_double(b, INFINITY))), + res = nir_bcsel(b, nir_ior(b, nir_feq_imm(b, src_flushed, 0.0), + nir_feq_imm(b, src, INFINITY)), src_flushed, res); } else { res = fix_inv_result(b, res, src, new_exp); @@ -340,13 +338,13 @@ lower_trunc(nir_builder *b, nir_ssa_def *src) /* Compute "~0 << frac_bits" in terms of hi/lo 32-bit integer math */ nir_ssa_def *mask_lo = nir_bcsel(b, - nir_ige(b, frac_bits, nir_imm_int(b, 32)), + nir_ige_imm(b, frac_bits, 32), nir_imm_int(b, 0), nir_ishl(b, nir_imm_int(b, ~0), frac_bits)); nir_ssa_def *mask_hi = nir_bcsel(b, - nir_ilt(b, frac_bits, nir_imm_int(b, 33)), + nir_ilt_imm(b, frac_bits, 33), nir_imm_int(b, ~0), nir_ishl(b, nir_imm_int(b, ~0), @@ -357,9 +355,9 @@ lower_trunc(nir_builder *b, nir_ssa_def *src) return nir_bcsel(b, - nir_ilt(b, unbiased_exp, nir_imm_int(b, 0)), + nir_ilt_imm(b, unbiased_exp, 0), nir_imm_double(b, 0.0), - nir_bcsel(b, nir_ige(b, unbiased_exp, nir_imm_int(b, 53)), + nir_bcsel(b, nir_ige_imm(b, unbiased_exp, 53), src, nir_pack_64_2x32_split(b, nir_iand(b, mask_lo, src_lo), @@ -376,7 +374,7 @@ lower_floor(nir_builder *b, nir_ssa_def *src) * - otherwise, floor(x) = trunc(x) - 1 */ 
nir_ssa_def *tr = nir_ftrunc(b, src); - nir_ssa_def *positive = nir_fge(b, src, nir_imm_double(b, 0.0)); + nir_ssa_def *positive = nir_fge_imm(b, src, 0.0); return nir_bcsel(b, nir_ior(b, positive, nir_feq(b, src, tr)), tr, @@ -391,7 +389,7 @@ lower_ceil(nir_builder *b, nir_ssa_def *src) * else, ceil(x) = trunc(x) + 1 */ nir_ssa_def *tr = nir_ftrunc(b, src); - nir_ssa_def *negative = nir_flt(b, src, nir_imm_double(b, 0.0)); + nir_ssa_def *negative = nir_flt_imm(b, src, 0.0); return nir_bcsel(b, nir_ior(b, negative, nir_feq(b, src, tr)), tr, diff --git a/src/compiler/nir/nir_lower_fp16_conv.c b/src/compiler/nir/nir_lower_fp16_conv.c index 951a36d6f21..539264d3a24 100644 --- a/src/compiler/nir/nir_lower_fp16_conv.c +++ b/src/compiler/nir/nir_lower_fp16_conv.c @@ -108,7 +108,7 @@ float_to_half_impl(nir_builder *b, nir_ssa_def *src, nir_rounding_mode mode) nir_ssa_def *zero = nir_imm_int(b, 0); - nir_push_if(b, nir_ige(b, abs, nir_imm_int(b, 113 << 23))); + nir_push_if(b, nir_ige_imm(b, abs, 113 << 23)); /* FP16 will be normal */ nir_ssa_def *value = nir_ior(b, @@ -123,7 +123,7 @@ float_to_half_impl(nir_builder *b, nir_ssa_def *src, nir_rounding_mode mode) nir_ssa_def *normal_fp16 = half_rounded(b, value, guard, sticky, sign, mode); nir_push_else(b, NULL); - nir_push_if(b, nir_ige(b, abs, nir_imm_int(b, 102 << 23))); + nir_push_if(b, nir_ige_imm(b, abs, 102 << 23)); /* FP16 will be denormal */ nir_ssa_def *i = nir_isub(b, nir_imm_int(b, 125), nir_ushr(b, abs, nir_imm_int(b, 23))); diff --git a/src/compiler/nir/nir_lower_gs_intrinsics.c b/src/compiler/nir/nir_lower_gs_intrinsics.c index 1308447e89b..4a2ab0e965e 100644 --- a/src/compiler/nir/nir_lower_gs_intrinsics.c +++ b/src/compiler/nir/nir_lower_gs_intrinsics.c @@ -95,15 +95,12 @@ rewrite_emit_vertex(nir_intrinsic_instr *intrin, struct state *state) else count_per_primitive = nir_ssa_undef(b, 1, 32); - nir_ssa_def *max_vertices = - nir_imm_int(b, b->shader->info.gs.vertices_out); - /* Create: if (vertex_count < 
max_vertices) and insert it. * * The new if statement needs to be hooked up to the control flow graph * before we start inserting instructions into it. */ - nir_push_if(b, nir_ilt(b, count, max_vertices)); + nir_push_if(b, nir_ilt_imm(b, count, b->shader->info.gs.vertices_out)); nir_emit_vertex_with_counter(b, count, count_per_primitive, stream); @@ -172,7 +169,7 @@ overwrite_incomplete_primitives(struct state *state, unsigned stream) /* See if the current primitive is a incomplete */ nir_ssa_def *is_inc_prim = - nir_ilt(b, vtxcnt_per_primitive, nir_imm_int(b, outprim_min_vertices)); + nir_ilt_imm(b, vtxcnt_per_primitive, outprim_min_vertices); /* Number of vertices in the incomplete primitive */ nir_ssa_def *num_inc_vtx = diff --git a/src/compiler/nir/nir_lower_idiv.c b/src/compiler/nir/nir_lower_idiv.c index 92a24e85908..ccda7d48933 100644 --- a/src/compiler/nir/nir_lower_idiv.c +++ b/src/compiler/nir/nir_lower_idiv.c @@ -68,8 +68,8 @@ emit_udiv(nir_builder *bld, nir_ssa_def *numer, nir_ssa_def *denom, bool modulo) static nir_ssa_def * emit_idiv(nir_builder *bld, nir_ssa_def *numer, nir_ssa_def *denom, nir_op op) { - nir_ssa_def *lh_sign = nir_ilt(bld, numer, nir_imm_int(bld, 0)); - nir_ssa_def *rh_sign = nir_ilt(bld, denom, nir_imm_int(bld, 0)); + nir_ssa_def *lh_sign = nir_ilt_imm(bld, numer, 0); + nir_ssa_def *rh_sign = nir_ilt_imm(bld, denom, 0); nir_ssa_def *lhs = nir_iabs(bld, numer); nir_ssa_def *rhs = nir_iabs(bld, denom); diff --git a/src/compiler/nir/nir_lower_indirect_derefs.c b/src/compiler/nir/nir_lower_indirect_derefs.c index b6d4106d511..a0b496514f0 100644 --- a/src/compiler/nir/nir_lower_indirect_derefs.c +++ b/src/compiler/nir/nir_lower_indirect_derefs.c @@ -51,7 +51,7 @@ emit_indirect_load_store_deref(nir_builder *b, nir_intrinsic_instr *orig_instr, nir_deref_instr *deref = *deref_arr; assert(deref->deref_type == nir_deref_type_array); - nir_push_if(b, nir_ilt(b, deref->arr.index.ssa, nir_imm_intN_t(b, mid, parent->dest.ssa.bit_size))); + 
nir_push_if(b, nir_ilt_imm(b, deref->arr.index.ssa, mid)); emit_indirect_load_store_deref(b, orig_instr, parent, deref_arr, start, mid, &then_dest, src); nir_push_else(b, NULL); diff --git a/src/compiler/nir/nir_lower_int64.c b/src/compiler/nir/nir_lower_int64.c index 311adcfe4d8..4e4a64b7d74 100644 --- a/src/compiler/nir/nir_lower_int64.c +++ b/src/compiler/nir/nir_lower_int64.c @@ -196,7 +196,7 @@ lower_ishl64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y) nir_ishl(b, x_lo, reverse_count)); return nir_bcsel(b, nir_ieq_imm(b, y, 0), x, - nir_bcsel(b, nir_uge(b, y, nir_imm_int(b, 32)), + nir_bcsel(b, nir_uge_imm(b, y, 32), res_if_ge_32, res_if_lt_32)); } @@ -243,7 +243,7 @@ lower_ishr64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y) nir_ishr(b, x_hi, nir_imm_int(b, 31))); return nir_bcsel(b, nir_ieq_imm(b, y, 0), x, - nir_bcsel(b, nir_uge(b, y, nir_imm_int(b, 32)), + nir_bcsel(b, nir_uge_imm(b, y, 32), res_if_ge_32, res_if_lt_32)); } @@ -289,7 +289,7 @@ lower_ushr64(nir_builder *b, nir_ssa_def *x, nir_ssa_def *y) nir_imm_int(b, 0)); return nir_bcsel(b, nir_ieq_imm(b, y, 0), x, - nir_bcsel(b, nir_uge(b, y, nir_imm_int(b, 32)), + nir_bcsel(b, nir_uge_imm(b, y, 32), res_if_ge_32, res_if_lt_32)); } @@ -337,7 +337,7 @@ static nir_ssa_def * lower_iabs64(nir_builder *b, nir_ssa_def *x) { nir_ssa_def *x_hi = nir_unpack_64_2x32_split_y(b, x); - nir_ssa_def *x_is_neg = nir_ilt(b, x_hi, nir_imm_int(b, 0)); + nir_ssa_def *x_is_neg = nir_ilt_imm(b, x_hi, 0); return nir_bcsel(b, x_is_neg, nir_ineg(b, x), x); } @@ -600,8 +600,8 @@ lower_idiv64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d) nir_ssa_def *n_hi = nir_unpack_64_2x32_split_y(b, n); nir_ssa_def *d_hi = nir_unpack_64_2x32_split_y(b, d); - nir_ssa_def *negate = nir_ine(b, nir_ilt(b, n_hi, nir_imm_int(b, 0)), - nir_ilt(b, d_hi, nir_imm_int(b, 0))); + nir_ssa_def *negate = nir_ine(b, nir_ilt_imm(b, n_hi, 0), + nir_ilt_imm(b, d_hi, 0)); nir_ssa_def *q, *r; lower_udiv64_mod64(b, nir_iabs(b, n), nir_iabs(b, d), &q, &r); 
return nir_bcsel(b, negate, nir_ineg(b, q), q); @@ -620,8 +620,8 @@ lower_imod64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d) { nir_ssa_def *n_hi = nir_unpack_64_2x32_split_y(b, n); nir_ssa_def *d_hi = nir_unpack_64_2x32_split_y(b, d); - nir_ssa_def *n_is_neg = nir_ilt(b, n_hi, nir_imm_int(b, 0)); - nir_ssa_def *d_is_neg = nir_ilt(b, d_hi, nir_imm_int(b, 0)); + nir_ssa_def *n_is_neg = nir_ilt_imm(b, n_hi, 0); + nir_ssa_def *d_is_neg = nir_ilt_imm(b, d_hi, 0); nir_ssa_def *q, *r; lower_udiv64_mod64(b, nir_iabs(b, n), nir_iabs(b, d), &q, &r); @@ -637,7 +637,7 @@ static nir_ssa_def * lower_irem64(nir_builder *b, nir_ssa_def *n, nir_ssa_def *d) { nir_ssa_def *n_hi = nir_unpack_64_2x32_split_y(b, n); - nir_ssa_def *n_is_neg = nir_ilt(b, n_hi, nir_imm_int(b, 0)); + nir_ssa_def *n_is_neg = nir_ilt_imm(b, n_hi, 0); nir_ssa_def *q, *r; lower_udiv64_mod64(b, nir_iabs(b, n), nir_iabs(b, d), &q, &r); @@ -796,15 +796,15 @@ lower_2f(nir_builder *b, nir_ssa_def *x, unsigned dest_bit_size, * overflow. */ nir_ssa_def *carry = nir_b2i32( - b, nir_uge(b, nir_unpack_64_2x32_split_y(b, significand), - nir_imm_int(b, 1 << (significand_bits - 31)))); + b, nir_uge_imm(b, nir_unpack_64_2x32_split_y(b, significand), + (uint64_t)(1 << (significand_bits - 31)))); significand = COND_LOWER_OP(b, ishr, significand, carry); exp = nir_iadd(b, exp, carry); /* Compute the biased exponent, taking care to handle a zero * input correctly, which would have caused exp to be negative. 
*/ - nir_ssa_def *biased_exp = nir_bcsel(b, nir_ilt(b, exp, nir_imm_int(b, 0)), + nir_ssa_def *biased_exp = nir_bcsel(b, nir_ilt_imm(b, exp, 0), nir_imm_int(b, 0), nir_iadd(b, exp, nir_imm_int(b, 1023))); @@ -855,7 +855,7 @@ lower_f2(nir_builder *b, nir_ssa_def *x, bool dst_is_signed) } if (dst_is_signed) - res = nir_bcsel(b, nir_flt(b, x_sign, nir_imm_floatN_t(b, 0, x->bit_size)), + res = nir_bcsel(b, nir_flt_imm(b, x_sign, 0), nir_ineg(b, res), res); return res; diff --git a/src/compiler/nir/nir_lower_multiview.c b/src/compiler/nir/nir_lower_multiview.c index 7a7519cb063..4a6227c38f1 100644 --- a/src/compiler/nir/nir_lower_multiview.c +++ b/src/compiler/nir/nir_lower_multiview.c @@ -251,7 +251,7 @@ nir_lower_multiview(nir_shader *shader, uint32_t view_mask) nir_loop* loop = nir_push_loop(&b); nir_ssa_def *loop_index = nir_load_deref(&b, loop_index_deref); - nir_ssa_def *cmp = nir_ige(&b, loop_index, nir_imm_int(&b, view_count)); + nir_ssa_def *cmp = nir_ige_imm(&b, loop_index, view_count); nir_if *loop_check = nir_push_if(&b, cmp); nir_jump(&b, nir_jump_break); nir_pop_if(&b, loop_check); diff --git a/src/compiler/nir/nir_lower_point_smooth.c b/src/compiler/nir/nir_lower_point_smooth.c index 9a48f082ece..bc7e13a1ec8 100644 --- a/src/compiler/nir/nir_lower_point_smooth.c +++ b/src/compiler/nir/nir_lower_point_smooth.c @@ -84,7 +84,7 @@ lower_point_smooth(nir_builder *b, nir_instr *instr, UNUSED void *_state) nir_ssa_def *coverage = nir_fsat(b, nir_fsub(b, radius, distance)); /* Discard fragments that are not covered by the point */ - nir_discard_if(b, nir_feq(b, nir_imm_float(b, 0.0f), coverage)); + nir_discard_if(b, nir_feq_imm(b, coverage, 0.0f)); /* Write out the fragment color*vec4(1, 1, 1, coverage)*/ nir_ssa_def *one = nir_imm_float(b, 1.0f); diff --git a/src/compiler/nir/nir_lower_printf.c b/src/compiler/nir/nir_lower_printf.c index 915b7e852cd..821da988f06 100644 --- a/src/compiler/nir/nir_lower_printf.c +++ b/src/compiler/nir/nir_lower_printf.c @@ -77,7 
+77,7 @@ lower_printf_instr(nir_builder *b, nir_instr *instr, void *_options) options->max_buffer_size : default_buffer_size; int max_valid_offset = buffer_size - args_size - fmt_str_id_size - counter_size; - nir_push_if(b, nir_ilt(b, offset, nir_imm_int(b, max_valid_offset))); + nir_push_if(b, nir_ilt_imm(b, offset, max_valid_offset)); nir_ssa_def *printf_succ_val = nir_imm_int(b, 0); diff --git a/src/compiler/nir/nir_lower_subgroups.c b/src/compiler/nir/nir_lower_subgroups.c index 93bbef4a614..4af7a2dd6bb 100644 --- a/src/compiler/nir/nir_lower_subgroups.c +++ b/src/compiler/nir/nir_lower_subgroups.c @@ -548,7 +548,7 @@ vec_find_lsb(nir_builder *b, nir_ssa_def *value) for (int i = value->num_components - 1; i >= 0; i--) { nir_ssa_def *channel = nir_channel(b, vec_result, i); /* result = channel >= 0 ? (i * bitsize + channel) : result */ - result = nir_bcsel(b, nir_ige(b, channel, nir_imm_int(b, 0)), + result = nir_bcsel(b, nir_ige_imm(b, channel, 0), nir_iadd_imm(b, channel, i * value->bit_size), result); } @@ -563,7 +563,7 @@ vec_find_msb(nir_builder *b, nir_ssa_def *value) for (unsigned i = 0; i < value->num_components; i++) { nir_ssa_def *channel = nir_channel(b, vec_result, i); /* result = channel >= 0 ? 
(i * bitsize + channel) : result */ - result = nir_bcsel(b, nir_ige(b, channel, nir_imm_int(b, 0)), + result = nir_bcsel(b, nir_ige_imm(b, channel, 0), nir_iadd_imm(b, channel, i * value->bit_size), result); } diff --git a/src/compiler/nir/nir_lower_task_shader.c b/src/compiler/nir/nir_lower_task_shader.c index 6c42498cc77..b7d6d597f13 100644 --- a/src/compiler/nir/nir_lower_task_shader.c +++ b/src/compiler/nir/nir_lower_task_shader.c @@ -260,7 +260,7 @@ emit_shared_to_payload_copy(nir_builder *b, if (remaining_vec4_copies > 0) { assert(remaining_vec4_copies < invocations); - nir_ssa_def *cmp = nir_ilt(b, invocation_index, nir_imm_int(b, remaining_vec4_copies)); + nir_ssa_def *cmp = nir_ilt_imm(b, invocation_index, remaining_vec4_copies); nir_if *if_stmt = nir_push_if(b, cmp); { copy_shared_to_payload(b, vec4size / 4, addr, base_shared_addr, off); diff --git a/src/compiler/nir/nir_lower_tex.c b/src/compiler/nir/nir_lower_tex.c index 18cb5bc5fda..402d57f0367 100644 --- a/src/compiler/nir/nir_lower_tex.c +++ b/src/compiler/nir/nir_lower_tex.c @@ -1337,7 +1337,7 @@ nir_lower_lod_zero_width(nir_builder *b, nir_tex_instr *tex) nir_ssa_def *fwidth = nir_fadd(b, nir_fabs(b, dfdx), nir_fabs(b, dfdy)); /* Check if the sum is 0. */ - is_zero = nir_iand(b, is_zero, nir_feq(b, fwidth, nir_imm_float(b, 0.0))); + is_zero = nir_iand(b, is_zero, nir_feq_imm(b, fwidth, 0.0)); } /* Replace the raw LOD by -FLT_MAX if the sum is 0 for all coordinates. 
*/ diff --git a/src/compiler/nir/nir_lower_variable_initializers.c b/src/compiler/nir/nir_lower_variable_initializers.c index 384bdef505e..fd212538c29 100644 --- a/src/compiler/nir/nir_lower_variable_initializers.c +++ b/src/compiler/nir/nir_lower_variable_initializers.c @@ -176,7 +176,7 @@ nir_zero_initialize_shared_memory(nir_shader *shader, { nir_ssa_def *offset = nir_load_var(&b, it); - nir_push_if(&b, nir_uge(&b, offset, nir_imm_int(&b, shared_size))); + nir_push_if(&b, nir_uge_imm(&b, offset, shared_size)); { nir_jump(&b, nir_jump_break); } diff --git a/src/compiler/nir/nir_lower_wpos_ytransform.c b/src/compiler/nir/nir_lower_wpos_ytransform.c index 27a7fbb1eb4..c56d6af5063 100644 --- a/src/compiler/nir/nir_lower_wpos_ytransform.c +++ b/src/compiler/nir/nir_lower_wpos_ytransform.c @@ -63,7 +63,7 @@ get_transform(lower_wpos_ytransform_state *state) static nir_ssa_def * nir_cmp(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1, nir_ssa_def *src2) { - return nir_bcsel(b, nir_flt(b, src0, nir_imm_float(b, 0.0)), src1, src2); + return nir_bcsel(b, nir_flt_imm(b, src0, 0.0), src1, src2); } /* see emit_wpos_adjustment() in st_mesa_to_tgsi.c */ diff --git a/src/compiler/nir/nir_opt_idiv_const.c b/src/compiler/nir/nir_opt_idiv_const.c index 629c71e645e..bd676820b25 100644 --- a/src/compiler/nir/nir_opt_idiv_const.c +++ b/src/compiler/nir/nir_opt_idiv_const.c @@ -79,7 +79,7 @@ build_idiv(nir_builder *b, nir_ssa_def *n, int64_t d) return nir_ineg(b, n); } else if (util_is_power_of_two_or_zero64(abs_d)) { nir_ssa_def *uq = nir_ushr_imm(b, nir_iabs(b, n), util_logbase2_64(abs_d)); - nir_ssa_def *n_neg = nir_ilt(b, n, nir_imm_intN_t(b, 0, n->bit_size)); + nir_ssa_def *n_neg = nir_ilt_imm(b, n, 0); nir_ssa_def *neg = d < 0 ? nir_inot(b, n_neg) : n_neg; return nir_bcsel(b, neg, nir_ineg(b, uq), uq); } else { @@ -111,7 +111,7 @@ build_irem(nir_builder *b, nir_ssa_def *n, int64_t d) } else { d = d < 0 ? 
-d : d; if (util_is_power_of_two_or_zero64(d)) { - nir_ssa_def *tmp = nir_bcsel(b, nir_ilt(b, n, nir_imm_intN_t(b, 0, n->bit_size)), + nir_ssa_def *tmp = nir_bcsel(b, nir_ilt_imm(b, n, 0), nir_iadd_imm(b, n, d - 1), n); return nir_isub(b, n, nir_iand_imm(b, tmp, -d)); } else { diff --git a/src/compiler/nir/nir_passthrough_gs.c b/src/compiler/nir/nir_passthrough_gs.c index 06a14f0f2cd..4cb93e5c95f 100644 --- a/src/compiler/nir/nir_passthrough_gs.c +++ b/src/compiler/nir/nir_passthrough_gs.c @@ -274,7 +274,7 @@ nir_create_passthrough_gs(const nir_shader_compiler_options *options, nir_emit_vertex(&b, 0); if (emulate_edgeflags) { nir_ssa_def *edge_value = nir_channel(&b, nir_load_array_var_imm(&b, edge_var, idx), 0); - nir_if *edge_if = nir_push_if(&b, nir_fneu(&b, edge_value, nir_imm_float(&b, 1.0))); + nir_if *edge_if = nir_push_if(&b, nir_fneu_imm(&b, edge_value, 1.0)); nir_end_primitive(&b, 0); nir_pop_if(&b, edge_if); } diff --git a/src/compiler/spirv/vtn_opencl.c b/src/compiler/spirv/vtn_opencl.c index af76bef9a6e..4dda8c93629 100644 --- a/src/compiler/spirv/vtn_opencl.c +++ b/src/compiler/spirv/vtn_opencl.c @@ -891,7 +891,7 @@ handle_shuffle2(struct vtn_builder *b, uint32_t opcode, nir_ssa_def *vmask = nir_iand(&b->nb, this_mask, nir_imm_intN_t(&b->nb, half_mask, mask->bit_size)); nir_ssa_def *val0 = nir_vector_extract(&b->nb, input0, vmask); nir_ssa_def *val1 = nir_vector_extract(&b->nb, input1, vmask); - nir_ssa_def *sel = nir_ilt(&b->nb, this_mask, nir_imm_intN_t(&b->nb, in_elems, mask->bit_size)); + nir_ssa_def *sel = nir_ilt_imm(&b->nb, this_mask, in_elems); outres[i] = nir_bcsel(&b->nb, sel, val0, val1); } return nir_vec(&b->nb, outres, out_elems); diff --git a/src/freedreno/ir3/ir3_nir_lower_tess.c b/src/freedreno/ir3/ir3_nir_lower_tess.c index 5bf4bfa9548..94730d8e2ca 100644 --- a/src/freedreno/ir3/ir3_nir_lower_tess.c +++ b/src/freedreno/ir3/ir3_nir_lower_tess.c @@ -621,7 +621,7 @@ lower_tess_ctrl_block(nir_block *block, nir_builder *b, struct state 
*state) nir_ssa_def *offset = nir_iadd_imm( b, intr->src[1].ssa, nir_intrinsic_component(intr)); - nif = nir_push_if(b, nir_ult(b, offset, nir_imm_int(b, levels))); + nif = nir_push_if(b, nir_ult_imm(b, offset, levels)); } nir_ssa_def *offset = build_tessfactor_base( @@ -723,7 +723,7 @@ ir3_nir_lower_tess_ctrl(nir_shader *shader, struct ir3_shader_variant *v, nir_ssa_def *iid = build_invocation_id(&b, &state); const uint32_t nvertices = shader->info.tess.tcs_vertices_out; - nir_ssa_def *cond = nir_ult(&b, iid, nir_imm_int(&b, nvertices)); + nir_ssa_def *cond = nir_ult_imm(&b, iid, nvertices); nir_if *nif = nir_push_if(&b, cond); diff --git a/src/gallium/auxiliary/nir/nir_draw_helpers.c b/src/gallium/auxiliary/nir/nir_draw_helpers.c index 606aa6a3bef..9390e6065b9 100644 --- a/src/gallium/auxiliary/nir/nir_draw_helpers.c +++ b/src/gallium/auxiliary/nir/nir_draw_helpers.c @@ -87,8 +87,7 @@ nir_lower_pstipple_block(nir_block *block, switch (state->bool_type) { case nir_type_bool1: - condition = nir_fneu(b, nir_channel(b, &tex->dest.ssa, 3), - nir_imm_floatN_t(b, 0.0, tex->dest.ssa.bit_size)); + condition = nir_fneu_imm(b, nir_channel(b, &tex->dest.ssa, 3), 0.0); break; case nir_type_bool32: condition = nir_fneu32(b, nir_channel(b, &tex->dest.ssa, 3), diff --git a/src/gallium/auxiliary/nir/tgsi_to_nir.c b/src/gallium/auxiliary/nir/tgsi_to_nir.c index 2fc65e174a0..807d7ff7725 100644 --- a/src/gallium/auxiliary/nir/tgsi_to_nir.c +++ b/src/gallium/auxiliary/nir/tgsi_to_nir.c @@ -1057,9 +1057,9 @@ ttn_lit(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src) ttn_move_dest_masked(b, dest, nir_bcsel(b, - nir_flt(b, - ttn_channel(b, src[0], X), - nir_imm_float(b, 0.0)), + nir_flt_imm(b, + ttn_channel(b, src[0], X), + 0.0), nir_imm_float(b, 0.0), pow), TGSI_WRITEMASK_Z); @@ -1112,7 +1112,7 @@ static void ttn_cmp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src) { ttn_move_dest(b, dest, nir_bcsel(b, - nir_flt(b, src[0], nir_imm_float(b, 0.0)), + 
nir_flt_imm(b, src[0], 0.0), src[1], src[2])); } @@ -1142,7 +1142,7 @@ ttn_kill_if(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src) { /* flt must be exact, because NaN shouldn't discard. (apps rely on this) */ b->exact = true; - nir_ssa_def *cmp = nir_bany(b, nir_flt(b, src[0], nir_imm_float(b, 0.0))); + nir_ssa_def *cmp = nir_bany(b, nir_flt_imm(b, src[0], 0.0)); b->exact = false; nir_discard_if(b, cmp); @@ -2142,11 +2142,11 @@ ttn_emit_instruction(struct ttn_compile *c) break; case TGSI_OPCODE_IF: - nir_push_if(b, nir_fneu(b, nir_channel(b, src[0], 0), nir_imm_float(b, 0.0))); + nir_push_if(b, nir_fneu_imm(b, nir_channel(b, src[0], 0), 0.0)); break; case TGSI_OPCODE_UIF: - nir_push_if(b, nir_ine(b, nir_channel(b, src[0], 0), nir_imm_int(b, 0))); + nir_push_if(b, nir_ine_imm(b, nir_channel(b, src[0], 0), 0)); break; case TGSI_OPCODE_ELSE: diff --git a/src/gallium/drivers/d3d12/d3d12_gs_variant.cpp b/src/gallium/drivers/d3d12/d3d12_gs_variant.cpp index 8a3f164bf45..e2a0e77b54e 100644 --- a/src/gallium/drivers/d3d12/d3d12_gs_variant.cpp +++ b/src/gallium/drivers/d3d12/d3d12_gs_variant.cpp @@ -280,8 +280,7 @@ d3d12_begin_emit_primitives_gs(struct emit_primitives_context *emit_ctx, emit_ctx->loop = nir_push_loop(b); emit_ctx->loop_index = nir_load_deref(b, emit_ctx->loop_index_deref); - nir_ssa_def *cmp = nir_ige(b, emit_ctx->loop_index, - nir_imm_int(b, 3)); + nir_ssa_def *cmp = nir_ige_imm(b, emit_ctx->loop_index, 3); nir_if *loop_check = nir_push_if(b, cmp); nir_jump(b, nir_jump_break); nir_pop_if(b, loop_check); @@ -289,7 +288,7 @@ d3d12_begin_emit_primitives_gs(struct emit_primitives_context *emit_ctx, if (edgeflag_var) { nir_ssa_def *edge_flag = nir_load_deref(b, nir_build_deref_array(b, nir_build_deref_var(b, edgeflag_var), emit_ctx->loop_index)); - nir_ssa_def *is_edge = nir_feq(b, nir_channel(b, edge_flag, 0), nir_imm_float(b, 1.0)); + nir_ssa_def *is_edge = nir_feq_imm(b, nir_channel(b, edge_flag, 0), 1.0); if (emit_ctx->edgeflag_cmp) 
emit_ctx->edgeflag_cmp = nir_iand(b, emit_ctx->edgeflag_cmp, is_edge); else diff --git a/src/gallium/drivers/d3d12/d3d12_nir_passes.c b/src/gallium/drivers/d3d12/d3d12_nir_passes.c index 3c5458a1848..2f844401d6a 100644 --- a/src/gallium/drivers/d3d12/d3d12_nir_passes.c +++ b/src/gallium/drivers/d3d12/d3d12_nir_passes.c @@ -235,7 +235,7 @@ lower_uint_color_write(nir_builder *b, struct nir_instr *instr, bool is_signed) nir_ssa_def *def = is_signed ? nir_format_float_to_snorm(b, col, bits) : nir_format_float_to_unorm(b, col, bits); if (is_signed) - def = nir_bcsel(b, nir_ilt(b, def, nir_imm_int(b, 0)), + def = nir_bcsel(b, nir_ilt_imm(b, def, 0), nir_iadd(b, def, nir_imm_int(b, 1 << NUM_BITS)), def); nir_instr_rewrite_src(&intr->instr, intr->src + 1, nir_src_for_ssa(def)); diff --git a/src/gallium/drivers/r600/sfn/sfn_nir_legalize_image_load_store.cpp b/src/gallium/drivers/r600/sfn/sfn_nir_legalize_image_load_store.cpp index 1e6f0fe6a52..3a28f25366c 100644 --- a/src/gallium/drivers/r600/sfn/sfn_nir_legalize_image_load_store.cpp +++ b/src/gallium/drivers/r600/sfn/sfn_nir_legalize_image_load_store.cpp @@ -49,7 +49,7 @@ r600_legalize_image_load_store_impl(nir_builder *b, nir_imm_zero(b, nir_dest_num_components(ir->dest), nir_dest_bit_size(ir->dest)); auto image_exists = - nir_ult(b, ir->src[0].ssa, nir_imm_int(b, b->shader->info.num_images)); + nir_ult_imm(b, ir->src[0].ssa, b->shader->info.num_images); nir_if *if_exists = nir_push_if(b, image_exists); diff --git a/src/gallium/drivers/radeonsi/si_nir_lower_abi.c b/src/gallium/drivers/radeonsi/si_nir_lower_abi.c index 3c6660cbbdf..fd04faf15c4 100644 --- a/src/gallium/drivers/radeonsi/si_nir_lower_abi.c +++ b/src/gallium/drivers/radeonsi/si_nir_lower_abi.c @@ -508,7 +508,7 @@ static bool lower_intrinsic(nir_builder *b, nir_instr *instr, struct lower_abi_s case nir_intrinsic_load_barycentric_optimize_amd: { nir_ssa_def *prim_mask = ac_nir_load_arg(b, &args->ac, args->ac.prim_mask); /* enabled when bit 31 is set */ - 
replacement = nir_ilt(b, prim_mask, nir_imm_int(b, 0)); + replacement = nir_ilt_imm(b, prim_mask, 0); break; } case nir_intrinsic_load_color0: diff --git a/src/gallium/drivers/radeonsi/si_nir_lower_vs_inputs.c b/src/gallium/drivers/radeonsi/si_nir_lower_vs_inputs.c index a69f56fe627..1a9db7e4371 100644 --- a/src/gallium/drivers/radeonsi/si_nir_lower_vs_inputs.c +++ b/src/gallium/drivers/radeonsi/si_nir_lower_vs_inputs.c @@ -198,10 +198,10 @@ ufN_to_float(nir_builder *b, nir_ssa_def *src, unsigned exp_bits, unsigned mant_ denormal = nir_iadd(b, denormal, nir_ishl_imm(b, tmp, 23)); /* Select the final result. */ - nir_ssa_def *cond = nir_uge(b, src, nir_imm_int(b, ((1ULL << exp_bits) - 1) << mant_bits)); + nir_ssa_def *cond = nir_uge_imm(b, src, ((1ULL << exp_bits) - 1) << mant_bits); nir_ssa_def *result = nir_bcsel(b, cond, naninf, normal); - cond = nir_uge(b, src, nir_imm_int(b, 1ULL << mant_bits)); + cond = nir_uge_imm(b, src, 1ULL << mant_bits); result = nir_bcsel(b, cond, result, denormal); cond = nir_ine_imm(b, src, 0); diff --git a/src/gallium/drivers/zink/zink_lower_cubemap_to_array.c b/src/gallium/drivers/zink/zink_lower_cubemap_to_array.c index d0a97567cd4..bd80c3d55c8 100644 --- a/src/gallium/drivers/zink/zink_lower_cubemap_to_array.c +++ b/src/gallium/drivers/zink/zink_lower_cubemap_to_array.c @@ -91,7 +91,7 @@ static nir_ssa_def * evaluate_face_x(nir_builder *b, coord_t *coord) { nir_ssa_def *sign = nir_fsign(b, coord->rx); - nir_ssa_def *positive = nir_fge(b, coord->rx, nir_imm_float(b, 0.0)); + nir_ssa_def *positive = nir_fge_imm(b, coord->rx, 0.0); nir_ssa_def *ima = nir_fdiv(b, nir_imm_float(b, -0.5), coord->arx); nir_ssa_def *x = nir_fadd_imm(b, nir_fmul(b, nir_fmul(b, sign, ima), coord->rz), 0.5); @@ -108,7 +108,7 @@ static nir_ssa_def * evaluate_face_y(nir_builder *b, coord_t *coord) { nir_ssa_def *sign = nir_fsign(b, coord->ry); - nir_ssa_def *positive = nir_fge(b, coord->ry, nir_imm_float(b, 0.0)); + nir_ssa_def *positive = nir_fge_imm(b, 
coord->ry, 0.0); nir_ssa_def *ima = nir_fdiv(b, nir_imm_float(b, 0.5), coord->ary); nir_ssa_def *x = nir_fadd_imm(b, nir_fmul(b, ima, coord->rx), 0.5); @@ -125,7 +125,7 @@ static nir_ssa_def * evaluate_face_z(nir_builder *b, coord_t *coord) { nir_ssa_def *sign = nir_fsign(b, coord->rz); - nir_ssa_def *positive = nir_fge(b, coord->rz, nir_imm_float(b, 0.0)); + nir_ssa_def *positive = nir_fge_imm(b, coord->rz, 0.0); nir_ssa_def *ima = nir_fdiv(b, nir_imm_float(b, -0.5), coord->arz); nir_ssa_def *x = nir_fadd_imm(b, nir_fmul(b, nir_fmul(b, sign, ima), nir_fneg(b, coord->rx)), 0.5); diff --git a/src/intel/blorp/blorp_blit.c b/src/intel/blorp/blorp_blit.c index 33a7ac79d81..70684b94c0f 100644 --- a/src/intel/blorp/blorp_blit.c +++ b/src/intel/blorp/blorp_blit.c @@ -839,7 +839,7 @@ blorp_nir_manual_blend_bilinear(nir_builder *b, nir_ssa_def *pos, nir_imm_int(b, 2))), nir_imm_int(b, 0xf)); - sample = nir_bcsel(b, nir_ilt(b, sample, nir_imm_int(b, 8)), + sample = nir_bcsel(b, nir_ilt_imm(b, sample, 8), sample_low, sample_high); } nir_ssa_def *pos_ms = nir_vec3(b, nir_channel(b, sample_coords_int, 0), diff --git a/src/intel/compiler/brw_mesh.cpp b/src/intel/compiler/brw_mesh.cpp index 147c1d0a89f..debfee92a83 100644 --- a/src/intel/compiler/brw_mesh.cpp +++ b/src/intel/compiler/brw_mesh.cpp @@ -684,8 +684,7 @@ brw_nir_initialize_mue(nir_shader *nir, /* Zero "remaining" primitive headers starting from the last one covered * by the loop above + workgroup_size. 
*/ - nir_ssa_def *cmp = nir_ilt(&b, local_invocation_index, - nir_imm_int(&b, remaining)); + nir_ssa_def *cmp = nir_ilt_imm(&b, local_invocation_index, remaining); nir_if *if_stmt = nir_push_if(&b, cmp); { nir_ssa_def *prim = nir_iadd_imm(&b, local_invocation_index, diff --git a/src/intel/vulkan/anv_nir_apply_pipeline_layout.c b/src/intel/vulkan/anv_nir_apply_pipeline_layout.c index 8cf76334ba0..40318dfa7d4 100644 --- a/src/intel/vulkan/anv_nir_apply_pipeline_layout.c +++ b/src/intel/vulkan/anv_nir_apply_pipeline_layout.c @@ -1612,7 +1612,7 @@ build_def_array_select(nir_builder *b, nir_ssa_def **srcs, nir_ssa_def *idx, return srcs[start]; } else { unsigned mid = start + (end - start) / 2; - return nir_bcsel(b, nir_ilt(b, idx, nir_imm_int(b, mid)), + return nir_bcsel(b, nir_ilt_imm(b, idx, mid), build_def_array_select(b, srcs, idx, start, mid), build_def_array_select(b, srcs, idx, mid, end)); } diff --git a/src/intel/vulkan/anv_nir_lower_multiview.c b/src/intel/vulkan/anv_nir_lower_multiview.c index 074622b550d..cb5ce761d5d 100644 --- a/src/intel/vulkan/anv_nir_lower_multiview.c +++ b/src/intel/vulkan/anv_nir_lower_multiview.c @@ -123,7 +123,7 @@ build_view_index(struct lower_multiview_state *state) nir_ssa_def *shifted_high = nir_ushr(b, nir_imm_int(b, remap >> 32), nir_isub(b, shift, nir_imm_int(b, 32))); - shifted = nir_bcsel(b, nir_ilt(b, shift, nir_imm_int(b, 32)), + shifted = nir_bcsel(b, nir_ilt_imm(b, shift, 32), shifted_low, shifted_high); } state->view_index = nir_iand(b, shifted, nir_imm_int(b, 0xf)); diff --git a/src/intel/vulkan_hasvk/anv_nir_apply_pipeline_layout.c b/src/intel/vulkan_hasvk/anv_nir_apply_pipeline_layout.c index 791083b8090..b5fd65cfd9e 100644 --- a/src/intel/vulkan_hasvk/anv_nir_apply_pipeline_layout.c +++ b/src/intel/vulkan_hasvk/anv_nir_apply_pipeline_layout.c @@ -1102,7 +1102,7 @@ build_def_array_select(nir_builder *b, nir_ssa_def **srcs, nir_ssa_def *idx, return srcs[start]; } else { unsigned mid = start + (end - start) / 2; - 
return nir_bcsel(b, nir_ilt(b, idx, nir_imm_int(b, mid)), + return nir_bcsel(b, nir_ilt_imm(b, idx, mid), build_def_array_select(b, srcs, idx, start, mid), build_def_array_select(b, srcs, idx, mid, end)); } diff --git a/src/intel/vulkan_hasvk/anv_nir_lower_multiview.c b/src/intel/vulkan_hasvk/anv_nir_lower_multiview.c index ef408bbe5bb..3375714b31d 100644 --- a/src/intel/vulkan_hasvk/anv_nir_lower_multiview.c +++ b/src/intel/vulkan_hasvk/anv_nir_lower_multiview.c @@ -123,7 +123,7 @@ build_view_index(struct lower_multiview_state *state) nir_ssa_def *shifted_high = nir_ushr(b, nir_imm_int(b, remap >> 32), nir_isub(b, shift, nir_imm_int(b, 32))); - shifted = nir_bcsel(b, nir_ilt(b, shift, nir_imm_int(b, 32)), + shifted = nir_bcsel(b, nir_ilt_imm(b, shift, 32), shifted_low, shifted_high); } state->view_index = nir_iand(b, shifted, nir_imm_int(b, 0xf)); diff --git a/src/mesa/program/prog_to_nir.c b/src/mesa/program/prog_to_nir.c index ce12c5f036b..6b684ed0e6d 100644 --- a/src/mesa/program/prog_to_nir.c +++ b/src/mesa/program/prog_to_nir.c @@ -446,7 +446,7 @@ static void ptn_cmp(nir_builder *b, nir_alu_dest dest, nir_ssa_def **src) { ptn_move_dest(b, dest, nir_bcsel(b, - nir_flt(b, src[0], nir_imm_float(b, 0.0)), + nir_flt_imm(b, src[0], 0.0), src[1], src[2])); } @@ -461,7 +461,7 @@ ptn_kil(nir_builder *b, nir_ssa_def **src) { /* flt must be exact, because NaN shouldn't discard. 
(apps rely on this) */ b->exact = true; - nir_ssa_def *cmp = nir_bany(b, nir_flt(b, src[0], nir_imm_float(b, 0.0))); + nir_ssa_def *cmp = nir_bany(b, nir_flt_imm(b, src[0], 0.0)); b->exact = false; nir_discard_if(b, cmp); diff --git a/src/mesa/state_tracker/st_atifs_to_nir.c b/src/mesa/state_tracker/st_atifs_to_nir.c index 5c13d07aaa7..42584f0159e 100644 --- a/src/mesa/state_tracker/st_atifs_to_nir.c +++ b/src/mesa/state_tracker/st_atifs_to_nir.c @@ -247,7 +247,7 @@ emit_arith_inst(struct st_translate *t, case GL_CND0_ATI: return nir_bcsel(t->b, - nir_fge(t->b, src[2], nir_imm_vec4_float(t->b, 0.0)), + nir_fge_imm(t->b, src[2], 0.0), src[0], src[1]); diff --git a/src/mesa/state_tracker/st_draw_hw_select.c b/src/mesa/state_tracker/st_draw_hw_select.c index fc89309c7d5..54d56fb30fc 100644 --- a/src/mesa/state_tracker/st_draw_hw_select.c +++ b/src/mesa/state_tracker/st_draw_hw_select.c @@ -71,8 +71,7 @@ has_nan_or_inf(nir_builder *b, nir_ssa_def *v) { nir_ssa_def *nan = nir_bany_fnequal4(b, v, v); - nir_ssa_def *imm = nir_imm_float(b, INFINITY); - nir_ssa_def *inf = nir_bany(b, nir_feq(b, nir_fabs(b, v), imm)); + nir_ssa_def *inf = nir_bany(b, nir_feq_imm(b, nir_fabs(b, v), INFINITY)); return nir_ior(b, nan, inf); } @@ -132,9 +131,9 @@ face_culling(nir_builder *b, nir_ssa_def **v, bool packed) nir_ssa_def *det = nir_fadd(b, nir_fadd(b, t0, t1), t2); /* invert det sign once any vertex w < 0 */ - nir_ssa_def *n0 = nir_flt(b, nir_channel(b, v[0], 3), nir_imm_float(b, 0)); - nir_ssa_def *n1 = nir_flt(b, nir_channel(b, v[1], 3), nir_imm_float(b, 0)); - nir_ssa_def *n2 = nir_flt(b, nir_channel(b, v[2], 3), nir_imm_float(b, 0)); + nir_ssa_def *n0 = nir_flt_imm(b, nir_channel(b, v[0], 3), 0); + nir_ssa_def *n1 = nir_flt_imm(b, nir_channel(b, v[1], 3), 0); + nir_ssa_def *n2 = nir_flt_imm(b, nir_channel(b, v[2], 3), 0); nir_ssa_def *cond = nir_ixor(b, nir_ixor(b, n0, n1), n2); det = nir_bcsel(b, cond, nir_fneg(b, det), det); @@ -229,7 +228,7 @@ clip_with_plane(nir_builder *b, 
nir_variable *vert, nir_variable *num_vert, nir_ssa_def *d = nir_fdot(b, v, plane); nir_store_array_var(b, dist, idx, d, 1); - nir_ssa_def *clipped = nir_flt(b, d, nir_imm_float(b, 0)); + nir_ssa_def *clipped = nir_flt_imm(b, d, 0); nir_store_var(b, all_clipped, nir_iand(b, nir_load_var(b, all_clipped), clipped), 1); } @@ -267,7 +266,7 @@ clip_with_plane(nir_builder *b, nir_variable *vert, nir_variable *num_vert, begin_for_loop(vert_loop, num) { nir_ssa_def *di = nir_load_array_var(b, dist, idx); - nir_if *if_clipped = nir_push_if(b, nir_flt(b, di, nir_imm_float(b, 0))); + nir_if *if_clipped = nir_push_if(b, nir_flt_imm(b, di, 0)); { /* - case, we need to take care of sign change and insert vertex */ @@ -382,7 +381,7 @@ get_window_space_depth(nir_builder *b, nir_ssa_def *v, nir_ssa_def **trans) /* do perspective division, if w==0, xyz must be 0 too (otherwise can't pass * the clip test), 0/0=NaN, but we want it to be the nearest point. */ - nir_ssa_def *c = nir_feq(b, w, nir_imm_float(b, 0)); + nir_ssa_def *c = nir_feq_imm(b, w, 0); nir_ssa_def *d = nir_bcsel(b, c, nir_imm_float(b, -1), nir_fdiv(b, z, w)); /* map [-1, 1] to [near, far] set by glDepthRange(near, far) */ @@ -435,7 +434,7 @@ build_point_nir_shader(nir_builder *b, union state_key state, bool packed) for (int i = 0; i < state.num_user_clip_planes; i++) { nir_ssa_def *p = get_user_clip_plane(b, i, packed); nir_ssa_def *d = nir_fdot(b, v, p); - nir_ssa_def *r = nir_flt(b, d, nir_imm_float(b, 0)); + nir_ssa_def *r = nir_flt_imm(b, d, 0); outside = i ? 
nir_ior(b, outside, r) : r; } if (outside) @@ -502,8 +501,8 @@ build_line_nir_shader(nir_builder *b, union state_key state, bool packed) nir_ssa_def *v1 = nir_load_var(b, vert1); nir_ssa_def *d0 = nir_fdot(b, v0, plane); nir_ssa_def *d1 = nir_fdot(b, v1, plane); - nir_ssa_def *n0 = nir_flt(b, d0, nir_imm_float(b, 0)); - nir_ssa_def *n1 = nir_flt(b, d1, nir_imm_float(b, 0)); + nir_ssa_def *n0 = nir_flt_imm(b, d0, 0); + nir_ssa_def *n1 = nir_flt_imm(b, d1, 0); return_if_true(b, nir_iand(b, n0, n1)); diff --git a/src/mesa/state_tracker/st_pbo_compute.c b/src/mesa/state_tracker/st_pbo_compute.c index abb58659c00..66979f23bd6 100644 --- a/src/mesa/state_tracker/st_pbo_compute.c +++ b/src/mesa/state_tracker/st_pbo_compute.c @@ -289,9 +289,9 @@ init_pbo_shader_data(nir_builder *b, struct pbo_shader_data *sd, unsigned coord_ nir_bcsel(b, nir_ieq_imm(b, sd->bits1, 8), nir_bcsel(b, - nir_uge(b, sd->channels, nir_imm_int(b, 2)), + nir_uge_imm(b, sd->channels, 2), nir_bcsel(b, - nir_uge(b, sd->channels, nir_imm_int(b, 3)), + nir_uge_imm(b, sd->channels, 3), nir_bcsel(b, nir_ieq_imm(b, sd->channels, 4), nir_ball(b, nir_ieq_imm(b, sd->bits, 8)), @@ -363,7 +363,7 @@ get_buffer_offset(nir_builder *b, nir_ssa_def *coord, struct pbo_shader_data *sd + (skipimages + img) * bytes_per_image; */ nir_ssa_def *bytes_per_row = nir_imul(b, nir_channel(b, sd->range, 0), sd->blocksize); - bytes_per_row = nir_bcsel(b, nir_ult(b, sd->alignment, nir_imm_int(b, 2)), + bytes_per_row = nir_bcsel(b, nir_ult_imm(b, sd->alignment, 2), bytes_per_row, nir_iand(b, nir_isub(b, nir_iadd(b, bytes_per_row, sd->alignment), nir_imm_int(b, 1)), @@ -390,7 +390,7 @@ write_ssbo(nir_builder *b, nir_ssa_def *pixel, nir_ssa_def *buffer_offset) static void write_conversion(nir_builder *b, nir_ssa_def *pixel, nir_ssa_def *buffer_offset, struct pbo_shader_data *sd) { - nir_push_if(b, nir_ilt(b, sd->dst_bit_size, nir_imm_int(b, 32))); + nir_push_if(b, nir_ilt_imm(b, sd->dst_bit_size, 32)); nir_push_if(b, nir_ieq_imm(b, 
sd->dst_bit_size, 16)); write_ssbo(b, nir_u2u16(b, pixel), buffer_offset); nir_push_else(b, NULL); @@ -487,7 +487,7 @@ check_for_weird_packing(nir_builder *b, struct pbo_shader_data *sd, unsigned com nir_ssa_def *c = nir_channel(b, sd->bits, component - 1); return nir_bcsel(b, - nir_ige(b, sd->channels, nir_imm_int(b, component)), + nir_ige_imm(b, sd->channels, component), nir_ior(b, nir_ine(b, c, sd->bits1), nir_ine_imm(b, nir_imod(b, c, nir_imm_int(b, 8)), 0)), diff --git a/src/microsoft/compiler/dxil_nir.c b/src/microsoft/compiler/dxil_nir.c index a5721b607d6..cedc6348cc9 100644 --- a/src/microsoft/compiler/dxil_nir.c +++ b/src/microsoft/compiler/dxil_nir.c @@ -1806,11 +1806,11 @@ lower_fquantize2f16(struct nir_builder *b, nir_instr *instr, void *data) nir_ssa_for_src(b, alu->src[0].src, nir_src_num_components(alu->src[0].src)); nir_ssa_def *neg_inf_cond = - nir_flt(b, src, nir_imm_float(b, -65504.0f)); + nir_flt_imm(b, src, -65504.0f); nir_ssa_def *pos_inf_cond = nir_flt(b, nir_imm_float(b, 65504.0f), src); nir_ssa_def *zero_cond = - nir_flt(b, nir_fabs(b, src), nir_imm_float(b, ldexpf(1.0, -14))); + nir_flt_imm(b, nir_fabs(b, src), ldexpf(1.0, -14)); nir_ssa_def *zero = nir_iand_imm(b, src, 1 << 31); nir_ssa_def *round = nir_iand_imm(b, src, ~BITFIELD_MASK(13)); diff --git a/src/microsoft/compiler/dxil_nir_lower_int_cubemaps.c b/src/microsoft/compiler/dxil_nir_lower_int_cubemaps.c index 89ea7e1ca2d..09840ecd1dd 100644 --- a/src/microsoft/compiler/dxil_nir_lower_int_cubemaps.c +++ b/src/microsoft/compiler/dxil_nir_lower_int_cubemaps.c @@ -110,7 +110,7 @@ static nir_ssa_def * evaluate_face_x(nir_builder *b, coord_t *coord) { nir_ssa_def *sign = nir_fsign(b, coord->rx); - nir_ssa_def *positive = nir_fge(b, coord->rx, nir_imm_float(b, 0.0)); + nir_ssa_def *positive = nir_fge_imm(b, coord->rx, 0.0); nir_ssa_def *ima = nir_fdiv(b, nir_imm_float(b, -0.5), coord->arx); nir_ssa_def *x = nir_fadd_imm(b, nir_fmul(b, nir_fmul(b, sign, ima), coord->rz), 0.5); @@ -129,7 
+129,7 @@ static nir_ssa_def * evaluate_face_y(nir_builder *b, coord_t *coord) { nir_ssa_def *sign = nir_fsign(b, coord->ry); - nir_ssa_def *positive = nir_fge(b, coord->ry, nir_imm_float(b, 0.0)); + nir_ssa_def *positive = nir_fge_imm(b, coord->ry, 0.0); nir_ssa_def *ima = nir_fdiv(b, nir_imm_float(b, 0.5), coord->ary); nir_ssa_def *x = nir_fadd_imm(b, nir_fmul(b, ima, coord->rx), 0.5); @@ -148,7 +148,7 @@ static nir_ssa_def * evaluate_face_z(nir_builder *b, coord_t *coord) { nir_ssa_def *sign = nir_fsign(b, coord->rz); - nir_ssa_def *positive = nir_fge(b, coord->rz, nir_imm_float(b, 0.0)); + nir_ssa_def *positive = nir_fge_imm(b, coord->rz, 0.0); nir_ssa_def *ima = nir_fdiv(b, nir_imm_float(b, -0.5), coord->arz); nir_ssa_def *x = nir_fadd_imm(b, nir_fmul(b, nir_fmul(b, sign, ima), nir_fneg(b, coord->rx)), 0.5); diff --git a/src/microsoft/compiler/dxil_nir_lower_int_samplers.c b/src/microsoft/compiler/dxil_nir_lower_int_samplers.c index 00ab29ed26a..35894ad9f03 100644 --- a/src/microsoft/compiler/dxil_nir_lower_int_samplers.c +++ b/src/microsoft/compiler/dxil_nir_lower_int_samplers.c @@ -146,7 +146,7 @@ static nir_ssa_def * mirror(nir_builder *b, nir_ssa_def *coord) { /* coord if >= 0, otherwise -(1 + coord) */ - return nir_bcsel(b, nir_fge(b, coord, nir_imm_float(b, 0.0f)), coord, + return nir_bcsel(b, nir_fge_imm(b, coord, 0.0f), coord, nir_fneg(b, nir_fadd(b, nir_imm_float(b, 1.0f), coord))); } @@ -171,7 +171,7 @@ wrap_mirror_clamp_to_edge(nir_builder *b, wrap_result_t *wrap_params, nir_ssa_de static void wrap_clamp(nir_builder *b, wrap_result_t *wrap_params, nir_ssa_def *size) { - nir_ssa_def *is_low = nir_flt(b, wrap_params->coords, nir_imm_float(b, 0.0)); + nir_ssa_def *is_low = nir_flt_imm(b, wrap_params->coords, 0.0); nir_ssa_def *is_high = nir_fge(b, wrap_params->coords, size); wrap_params->use_border_color = nir_ior(b, is_low, is_high); } diff --git a/src/microsoft/compiler/dxil_nir_tess.c b/src/microsoft/compiler/dxil_nir_tess.c index 
2977f92f9c0..7394a3d8e52 100644 --- a/src/microsoft/compiler/dxil_nir_tess.c +++ b/src/microsoft/compiler/dxil_nir_tess.c @@ -144,7 +144,7 @@ start_tcs_loop(nir_builder *b, struct tcs_patch_loop_state *state, nir_deref_ins nir_store_deref(b, loop_var_deref, nir_imm_int(b, 0), 1); state->loop = nir_push_loop(b); state->count = nir_load_deref(b, loop_var_deref); - nir_push_if(b, nir_ige(b, state->count, nir_imm_int(b, b->impl->function->shader->info.tess.tcs_vertices_out))); + nir_push_if(b, nir_ige_imm(b, state->count, b->impl->function->shader->info.tess.tcs_vertices_out)); nir_jump(b, nir_jump_break); nir_pop_if(b, NULL); state->insert_cursor = b->cursor;