nir: Use nir_i2b wrapper everywhere instead of using nir_i2b1 directly
No shader-db or fossil-db changes on any Intel platform.

v2: Add missed i2b1 in ir3_nir_opt_preamble.c.
v3: Add missed i2b1 in ac_nir_lower_ngg.c.

Reviewed-by: Jason Ekstrand <jason.ekstrand@collabora.com>
Acked-by: Jesse Natalie <jenatali@microsoft.com>
Acked-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Tested-by: Daniel Schürmann <daniel@schuermann.dev>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/15121>
This commit is contained in:
@@ -4175,7 +4175,7 @@ emit_ms_finale(nir_builder *b, lower_ngg_ms_state *s)
|
||||
else
|
||||
loaded_cull_flag = nir_u2u32(b, nir_load_shared(b, 1, 8, prim_idx_addr, .base = s->layout.lds.cull_flags_addr));
|
||||
|
||||
cull_flag = nir_i2b1(b, loaded_cull_flag);
|
||||
cull_flag = nir_i2b(b, loaded_cull_flag);
|
||||
}
|
||||
|
||||
nir_ssa_def *indices[3];
|
||||
|
@@ -133,7 +133,7 @@ nir_round_int_to_float(nir_builder *b, nir_ssa_def *src,
|
||||
|
||||
if (src_type == nir_type_int) {
|
||||
nir_ssa_def *sign =
|
||||
nir_i2b1(b, nir_ishr(b, src, nir_imm_int(b, src->bit_size - 1)));
|
||||
nir_i2b(b, nir_ishr(b, src, nir_imm_int(b, src->bit_size - 1)));
|
||||
nir_ssa_def *abs = nir_iabs(b, src);
|
||||
nir_ssa_def *positive_rounded =
|
||||
nir_round_int_to_float(b, abs, nir_type_uint, dest_bit_size, round);
|
||||
|
@@ -115,11 +115,11 @@ float_to_half_impl(nir_builder *b, nir_ssa_def *src, nir_rounding_mode mode)
|
||||
break;
|
||||
case nir_rounding_mode_ru:
|
||||
/* Negative becomes max float, positive becomes inf */
|
||||
overflowed_fp16 = nir_bcsel(b, nir_i2b1(b, sign), nir_imm_int(b, 0x7BFF), nir_imm_int(b, 0x7C00));
|
||||
overflowed_fp16 = nir_bcsel(b, nir_i2b(b, sign), nir_imm_int(b, 0x7BFF), nir_imm_int(b, 0x7C00));
|
||||
break;
|
||||
case nir_rounding_mode_rd:
|
||||
/* Negative becomes inf, positive becomes max float */
|
||||
overflowed_fp16 = nir_bcsel(b, nir_i2b1(b, sign), nir_imm_int(b, 0x7C00), nir_imm_int(b, 0x7BFF));
|
||||
overflowed_fp16 = nir_bcsel(b, nir_i2b(b, sign), nir_imm_int(b, 0x7C00), nir_imm_int(b, 0x7BFF));
|
||||
break;
|
||||
default: unreachable("Should've been handled already");
|
||||
}
|
||||
@@ -158,12 +158,12 @@ float_to_half_impl(nir_builder *b, nir_ssa_def *src, nir_rounding_mode mode)
|
||||
nir_ssa_def *underflowed_fp16 = zero;
|
||||
if (mode == nir_rounding_mode_ru ||
|
||||
mode == nir_rounding_mode_rd) {
|
||||
nir_push_if(b, nir_i2b1(b, abs));
|
||||
nir_push_if(b, nir_i2b(b, abs));
|
||||
|
||||
if (mode == nir_rounding_mode_ru)
|
||||
underflowed_fp16 = nir_bcsel(b, nir_i2b1(b, sign), zero, one);
|
||||
underflowed_fp16 = nir_bcsel(b, nir_i2b(b, sign), zero, one);
|
||||
else
|
||||
underflowed_fp16 = nir_bcsel(b, nir_i2b1(b, sign), one, zero);
|
||||
underflowed_fp16 = nir_bcsel(b, nir_i2b(b, sign), one, zero);
|
||||
|
||||
nir_push_else(b, NULL);
|
||||
nir_pop_if(b, NULL);
|
||||
|
@@ -3864,7 +3864,7 @@ vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
|
||||
nir_builder_instr_insert(&b->nb, &atomic->instr);
|
||||
|
||||
if (opcode == SpvOpAtomicFlagTestAndSet) {
|
||||
vtn_push_nir_ssa(b, w[2], nir_i2b1(&b->nb, &atomic->dest.ssa));
|
||||
vtn_push_nir_ssa(b, w[2], nir_i2b(&b->nb, &atomic->dest.ssa));
|
||||
}
|
||||
if (after_semantics)
|
||||
vtn_emit_memory_barrier(b, scope, after_semantics);
|
||||
@@ -5823,7 +5823,7 @@ vtn_handle_ray_query_intrinsic(struct vtn_builder *b, SpvOp opcode,
|
||||
case SpvOpRayQueryGetIntersectionWorldToObjectKHR:
|
||||
ray_query_load_intrinsic_create(b, opcode, w,
|
||||
vtn_ssa_value(b, w[3])->def,
|
||||
nir_i2b1(&b->nb, vtn_ssa_value(b, w[4])->def));
|
||||
nir_i2b(&b->nb, vtn_ssa_value(b, w[4])->def));
|
||||
break;
|
||||
|
||||
case SpvOpRayQueryGetRayTMinKHR:
|
||||
|
@@ -335,7 +335,7 @@ ir3_nir_lower_preamble(nir_shader *nir, struct ir3_shader_variant *v)
|
||||
.base = offset);
|
||||
|
||||
if (dest->bit_size == 1) {
|
||||
new_dest = nir_i2b1(b, new_dest);
|
||||
new_dest = nir_i2b(b, new_dest);
|
||||
} else if (dest->bit_size != 32) {
|
||||
assert(dest->bit_size == 16);
|
||||
if (all_uses_float(dest, true)) {
|
||||
|
@@ -460,7 +460,7 @@ build_blit_vs_shader(void)
|
||||
nir_ssa_def *vert1_pos = load_const(b, 4, 2);
|
||||
nir_ssa_def *vertex = nir_load_vertex_id(b);
|
||||
|
||||
nir_ssa_def *pos = nir_bcsel(b, nir_i2b1(b, vertex), vert1_pos, vert0_pos);
|
||||
nir_ssa_def *pos = nir_bcsel(b, nir_i2b(b, vertex), vert1_pos, vert0_pos);
|
||||
pos = nir_vec4(b, nir_channel(b, pos, 0),
|
||||
nir_channel(b, pos, 1),
|
||||
nir_imm_float(b, 0.0),
|
||||
@@ -479,7 +479,7 @@ build_blit_vs_shader(void)
|
||||
/* Only used with "z scale" blit path which uses a 3d texture */
|
||||
nir_ssa_def *z_coord = load_const(b, 8, 1);
|
||||
|
||||
nir_ssa_def *coords = nir_bcsel(b, nir_i2b1(b, vertex), vert1_coords, vert0_coords);
|
||||
nir_ssa_def *coords = nir_bcsel(b, nir_i2b(b, vertex), vert1_coords, vert0_coords);
|
||||
coords = nir_vec3(b, nir_channel(b, coords, 0), nir_channel(b, coords, 1),
|
||||
z_coord);
|
||||
|
||||
@@ -507,7 +507,7 @@ build_clear_vs_shader(void)
|
||||
nir_ssa_def *depth = load_const(b, 2, 1);
|
||||
nir_ssa_def *vertex = nir_load_vertex_id(b);
|
||||
|
||||
nir_ssa_def *pos = nir_bcsel(b, nir_i2b1(b, vertex), vert1_pos, vert0_pos);
|
||||
nir_ssa_def *pos = nir_bcsel(b, nir_i2b(b, vertex), vert1_pos, vert0_pos);
|
||||
pos = nir_vec4(b, nir_channel(b, pos, 0),
|
||||
nir_channel(b, pos, 1),
|
||||
depth, nir_imm_float(b, 1.0));
|
||||
|
@@ -1601,7 +1601,7 @@ lower_bool_input_impl(nir_builder *b, nir_instr *instr,
|
||||
}
|
||||
|
||||
intr->dest.ssa.bit_size = 32;
|
||||
return nir_i2b1(b, &intr->dest.ssa);
|
||||
return nir_i2b(b, &intr->dest.ssa);
|
||||
}
|
||||
|
||||
bool
|
||||
|
Reference in New Issue
Block a user