diff --git a/src/amd/common/ac_nir_lower_ngg.c b/src/amd/common/ac_nir_lower_ngg.c
index 7a845f7cbc5..f6643b228d3 100644
--- a/src/amd/common/ac_nir_lower_ngg.c
+++ b/src/amd/common/ac_nir_lower_ngg.c
@@ -4175,7 +4175,7 @@ emit_ms_finale(nir_builder *b, lower_ngg_ms_state *s)
       else
          loaded_cull_flag = nir_u2u32(b, nir_load_shared(b, 1, 8, prim_idx_addr, .base = s->layout.lds.cull_flags_addr));
 
-      cull_flag = nir_i2b1(b, loaded_cull_flag);
+      cull_flag = nir_i2b(b, loaded_cull_flag);
    }
 
    nir_ssa_def *indices[3];
diff --git a/src/compiler/nir/nir_conversion_builder.h b/src/compiler/nir/nir_conversion_builder.h
index c124e2650f0..21d8e8b20d1 100644
--- a/src/compiler/nir/nir_conversion_builder.h
+++ b/src/compiler/nir/nir_conversion_builder.h
@@ -133,7 +133,7 @@ nir_round_int_to_float(nir_builder *b, nir_ssa_def *src,
 
    if (src_type == nir_type_int) {
       nir_ssa_def *sign =
-         nir_i2b1(b, nir_ishr(b, src, nir_imm_int(b, src->bit_size - 1)));
+         nir_i2b(b, nir_ishr(b, src, nir_imm_int(b, src->bit_size - 1)));
       nir_ssa_def *abs = nir_iabs(b, src);
       nir_ssa_def *positive_rounded =
          nir_round_int_to_float(b, abs, nir_type_uint, dest_bit_size, round);
diff --git a/src/compiler/nir/nir_lower_fp16_conv.c b/src/compiler/nir/nir_lower_fp16_conv.c
index c2808a9ddd9..2f6862731a3 100644
--- a/src/compiler/nir/nir_lower_fp16_conv.c
+++ b/src/compiler/nir/nir_lower_fp16_conv.c
@@ -115,11 +115,11 @@ float_to_half_impl(nir_builder *b, nir_ssa_def *src, nir_rounding_mode mode)
          break;
       case nir_rounding_mode_ru:
          /* Negative becomes max float, positive becomes inf */
-         overflowed_fp16 = nir_bcsel(b, nir_i2b1(b, sign), nir_imm_int(b, 0x7BFF), nir_imm_int(b, 0x7C00));
+         overflowed_fp16 = nir_bcsel(b, nir_i2b(b, sign), nir_imm_int(b, 0x7BFF), nir_imm_int(b, 0x7C00));
          break;
       case nir_rounding_mode_rd:
          /* Negative becomes inf, positive becomes max float */
-         overflowed_fp16 = nir_bcsel(b, nir_i2b1(b, sign), nir_imm_int(b, 0x7C00), nir_imm_int(b, 0x7BFF));
+         overflowed_fp16 = nir_bcsel(b, nir_i2b(b, sign), nir_imm_int(b, 0x7C00), nir_imm_int(b, 0x7BFF));
          break;
       default: unreachable("Should've been handled already");
       }
@@ -158,12 +158,12 @@ float_to_half_impl(nir_builder *b, nir_ssa_def *src, nir_rounding_mode mode)
       nir_ssa_def *underflowed_fp16 = zero;
       if (mode == nir_rounding_mode_ru ||
           mode == nir_rounding_mode_rd) {
-         nir_push_if(b, nir_i2b1(b, abs));
+         nir_push_if(b, nir_i2b(b, abs));
 
          if (mode == nir_rounding_mode_ru)
-            underflowed_fp16 = nir_bcsel(b, nir_i2b1(b, sign), zero, one);
+            underflowed_fp16 = nir_bcsel(b, nir_i2b(b, sign), zero, one);
          else
-            underflowed_fp16 = nir_bcsel(b, nir_i2b1(b, sign), one, zero);
+            underflowed_fp16 = nir_bcsel(b, nir_i2b(b, sign), one, zero);
 
          nir_push_else(b, NULL);
          nir_pop_if(b, NULL);
diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
index 5dbd90c5def..b315e32d8d5 100644
--- a/src/compiler/spirv/spirv_to_nir.c
+++ b/src/compiler/spirv/spirv_to_nir.c
@@ -3864,7 +3864,7 @@ vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
    nir_builder_instr_insert(&b->nb, &atomic->instr);
 
    if (opcode == SpvOpAtomicFlagTestAndSet) {
-      vtn_push_nir_ssa(b, w[2], nir_i2b1(&b->nb, &atomic->dest.ssa));
+      vtn_push_nir_ssa(b, w[2], nir_i2b(&b->nb, &atomic->dest.ssa));
    }
    if (after_semantics)
       vtn_emit_memory_barrier(b, scope, after_semantics);
@@ -5823,7 +5823,7 @@ vtn_handle_ray_query_intrinsic(struct vtn_builder *b, SpvOp opcode,
    case SpvOpRayQueryGetIntersectionWorldToObjectKHR:
       ray_query_load_intrinsic_create(b, opcode, w,
                                       vtn_ssa_value(b, w[3])->def,
-                                      nir_i2b1(&b->nb, vtn_ssa_value(b, w[4])->def));
+                                      nir_i2b(&b->nb, vtn_ssa_value(b, w[4])->def));
       break;
 
    case SpvOpRayQueryGetRayTMinKHR:
diff --git a/src/freedreno/ir3/ir3_nir_opt_preamble.c b/src/freedreno/ir3/ir3_nir_opt_preamble.c
index a36c14130bf..43f757f9722 100644
--- a/src/freedreno/ir3/ir3_nir_opt_preamble.c
+++ b/src/freedreno/ir3/ir3_nir_opt_preamble.c
@@ -335,7 +335,7 @@ ir3_nir_lower_preamble(nir_shader *nir, struct ir3_shader_variant *v)
                            .base = offset);
 
       if (dest->bit_size == 1) {
-         new_dest = nir_i2b1(b, new_dest);
+         new_dest = nir_i2b(b, new_dest);
       } else if (dest->bit_size != 32) {
          assert(dest->bit_size == 16);
          if (all_uses_float(dest, true)) {
diff --git a/src/freedreno/vulkan/tu_clear_blit.c b/src/freedreno/vulkan/tu_clear_blit.c
index d1fb8238b34..2577bd68e18 100644
--- a/src/freedreno/vulkan/tu_clear_blit.c
+++ b/src/freedreno/vulkan/tu_clear_blit.c
@@ -460,7 +460,7 @@ build_blit_vs_shader(void)
    nir_ssa_def *vert1_pos = load_const(b, 4, 2);
    nir_ssa_def *vertex = nir_load_vertex_id(b);
 
-   nir_ssa_def *pos = nir_bcsel(b, nir_i2b1(b, vertex), vert1_pos, vert0_pos);
+   nir_ssa_def *pos = nir_bcsel(b, nir_i2b(b, vertex), vert1_pos, vert0_pos);
    pos = nir_vec4(b, nir_channel(b, pos, 0),
                      nir_channel(b, pos, 1),
                      nir_imm_float(b, 0.0),
@@ -479,7 +479,7 @@ build_blit_vs_shader(void)
    /* Only used with "z scale" blit path which uses a 3d texture */
    nir_ssa_def *z_coord = load_const(b, 8, 1);
 
-   nir_ssa_def *coords = nir_bcsel(b, nir_i2b1(b, vertex), vert1_coords, vert0_coords);
+   nir_ssa_def *coords = nir_bcsel(b, nir_i2b(b, vertex), vert1_coords, vert0_coords);
    coords = nir_vec3(b, nir_channel(b, coords, 0),
                         nir_channel(b, coords, 1),
                         z_coord);
@@ -507,7 +507,7 @@ build_clear_vs_shader(void)
    nir_ssa_def *depth = load_const(b, 2, 1);
    nir_ssa_def *vertex = nir_load_vertex_id(b);
 
-   nir_ssa_def *pos = nir_bcsel(b, nir_i2b1(b, vertex), vert1_pos, vert0_pos);
+   nir_ssa_def *pos = nir_bcsel(b, nir_i2b(b, vertex), vert1_pos, vert0_pos);
    pos = nir_vec4(b, nir_channel(b, pos, 0),
                      nir_channel(b, pos, 1),
                      depth, nir_imm_float(b, 1.0));
diff --git a/src/microsoft/compiler/dxil_nir.c b/src/microsoft/compiler/dxil_nir.c
index 63d2a0c77f7..b3b1820060c 100644
--- a/src/microsoft/compiler/dxil_nir.c
+++ b/src/microsoft/compiler/dxil_nir.c
@@ -1601,7 +1601,7 @@ lower_bool_input_impl(nir_builder *b, nir_instr *instr,
    }
 
    intr->dest.ssa.bit_size = 32;
-   return nir_i2b1(b, &intr->dest.ssa);
+   return nir_i2b(b, &intr->dest.ssa);
 }
 
 bool