spirv/alu: Use vtn_push_ssa_value
Reviewed-by: Caio Marcelo de Oliveira Filho <caio.oliveira@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5278>
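This commit changes vtn_handle_matrix_alu() to return the resulting struct vtn_ssa_value * instead of writing into a caller-provided struct vtn_value, and has vtn_handle_alu() build its result in a local `dest` that is pushed exactly once at the end via vtn_push_ssa_value(). A rough sketch of the new shape of vtn_handle_alu(), condensed from the hunks below (the elided parts are marked with comments and are not literal code):

    struct vtn_value *dest_val = vtn_untyped_value(b, w[2]);
    const struct glsl_type *dest_type = vtn_get_type(b, w[1])->type;

    if (/* any operand is a matrix */) {
       /* The matrix helper now returns its result; the caller pushes it. */
       vtn_push_ssa_value(b, w[2],
          vtn_handle_matrix_alu(b, opcode, vtn_src[0], vtn_src[1]));
       b->nb.exact = b->exact;
       return;
    }

    struct vtn_ssa_value *dest = vtn_create_ssa_value(b, dest_type);
    /* ... each opcode case writes dest->def or dest->elems[i]->def ... */
    vtn_push_ssa_value(b, w[2], dest);
    b->nb.exact = b->exact;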
@@ -153,48 +153,46 @@ mat_times_scalar(struct vtn_builder *b,
    return dest;
 }
 
-static void
+static struct vtn_ssa_value *
 vtn_handle_matrix_alu(struct vtn_builder *b, SpvOp opcode,
-                      struct vtn_value *dest,
                       struct vtn_ssa_value *src0, struct vtn_ssa_value *src1)
 {
    switch (opcode) {
    case SpvOpFNegate: {
-      dest->ssa = vtn_create_ssa_value(b, src0->type);
+      struct vtn_ssa_value *dest = vtn_create_ssa_value(b, src0->type);
       unsigned cols = glsl_get_matrix_columns(src0->type);
       for (unsigned i = 0; i < cols; i++)
-         dest->ssa->elems[i]->def = nir_fneg(&b->nb, src0->elems[i]->def);
-      break;
+         dest->elems[i]->def = nir_fneg(&b->nb, src0->elems[i]->def);
+      return dest;
    }
 
    case SpvOpFAdd: {
-      dest->ssa = vtn_create_ssa_value(b, src0->type);
+      struct vtn_ssa_value *dest = vtn_create_ssa_value(b, src0->type);
       unsigned cols = glsl_get_matrix_columns(src0->type);
       for (unsigned i = 0; i < cols; i++)
-         dest->ssa->elems[i]->def =
+         dest->elems[i]->def =
             nir_fadd(&b->nb, src0->elems[i]->def, src1->elems[i]->def);
-      break;
+      return dest;
    }
 
    case SpvOpFSub: {
-      dest->ssa = vtn_create_ssa_value(b, src0->type);
+      struct vtn_ssa_value *dest = vtn_create_ssa_value(b, src0->type);
       unsigned cols = glsl_get_matrix_columns(src0->type);
       for (unsigned i = 0; i < cols; i++)
-         dest->ssa->elems[i]->def =
+         dest->elems[i]->def =
             nir_fsub(&b->nb, src0->elems[i]->def, src1->elems[i]->def);
-      break;
+      return dest;
    }
 
    case SpvOpTranspose:
-      dest->ssa = vtn_ssa_transpose(b, src0);
-      break;
+      return vtn_ssa_transpose(b, src0);
 
    case SpvOpMatrixTimesScalar:
       if (src0->transposed) {
-         dest->ssa = vtn_ssa_transpose(b, mat_times_scalar(b, src0->transposed,
+         return vtn_ssa_transpose(b, mat_times_scalar(b, src0->transposed,
                                                       src1->def));
       } else {
-         dest->ssa = mat_times_scalar(b, src0, src1->def);
+         return mat_times_scalar(b, src0, src1->def);
       }
       break;
 
@@ -202,9 +200,9 @@ vtn_handle_matrix_alu(struct vtn_builder *b, SpvOp opcode,
    case SpvOpMatrixTimesVector:
    case SpvOpMatrixTimesMatrix:
       if (opcode == SpvOpVectorTimesMatrix) {
-         dest->ssa = matrix_multiply(b, vtn_ssa_transpose(b, src1), src0);
+         return matrix_multiply(b, vtn_ssa_transpose(b, src1), src0);
       } else {
-         dest->ssa = matrix_multiply(b, src0, src1);
+         return matrix_multiply(b, src0, src1);
       }
       break;
 
@@ -414,10 +412,10 @@ void
 vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
 {
-   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
-   const struct glsl_type *type = vtn_get_type(b, w[1])->type;
+   struct vtn_value *dest_val = vtn_untyped_value(b, w[2]);
+   const struct glsl_type *dest_type = vtn_get_type(b, w[1])->type;
 
-   vtn_foreach_decoration(b, val, handle_no_contraction, NULL);
+   vtn_foreach_decoration(b, dest_val, handle_no_contraction, NULL);
 
    /* Collect the various SSA sources */
    const unsigned num_inputs = count - 3;
@@ -427,12 +425,13 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
 
    if (glsl_type_is_matrix(vtn_src[0]->type) ||
        (num_inputs >= 2 && glsl_type_is_matrix(vtn_src[1]->type))) {
-      vtn_handle_matrix_alu(b, opcode, val, vtn_src[0], vtn_src[1]);
+      vtn_push_ssa_value(b, w[2],
+         vtn_handle_matrix_alu(b, opcode, vtn_src[0], vtn_src[1]));
       b->nb.exact = b->exact;
       return;
    }
 
-   val->ssa = vtn_create_ssa_value(b, type);
+   struct vtn_ssa_value *dest = vtn_create_ssa_value(b, dest_type);
    nir_ssa_def *src[4] = { NULL, };
    for (unsigned i = 0; i < num_inputs; i++) {
       vtn_assert(glsl_type_is_vector_or_scalar(vtn_src[i]->type));
@@ -441,81 +440,81 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
 
    switch (opcode) {
    case SpvOpAny:
-      val->ssa->def = nir_bany(&b->nb, src[0]);
+      dest->def = nir_bany(&b->nb, src[0]);
       break;
 
    case SpvOpAll:
-      val->ssa->def = nir_ball(&b->nb, src[0]);
+      dest->def = nir_ball(&b->nb, src[0]);
       break;
 
    case SpvOpOuterProduct: {
       for (unsigned i = 0; i < src[1]->num_components; i++) {
-         val->ssa->elems[i]->def =
+         dest->elems[i]->def =
             nir_fmul(&b->nb, src[0], nir_channel(&b->nb, src[1], i));
       }
       break;
    }
 
    case SpvOpDot:
-      val->ssa->def = nir_fdot(&b->nb, src[0], src[1]);
+      dest->def = nir_fdot(&b->nb, src[0], src[1]);
       break;
 
    case SpvOpIAddCarry:
-      vtn_assert(glsl_type_is_struct_or_ifc(val->ssa->type));
-      val->ssa->elems[0]->def = nir_iadd(&b->nb, src[0], src[1]);
-      val->ssa->elems[1]->def = nir_uadd_carry(&b->nb, src[0], src[1]);
+      vtn_assert(glsl_type_is_struct_or_ifc(dest_type));
+      dest->elems[0]->def = nir_iadd(&b->nb, src[0], src[1]);
+      dest->elems[1]->def = nir_uadd_carry(&b->nb, src[0], src[1]);
       break;
 
    case SpvOpISubBorrow:
-      vtn_assert(glsl_type_is_struct_or_ifc(val->ssa->type));
-      val->ssa->elems[0]->def = nir_isub(&b->nb, src[0], src[1]);
-      val->ssa->elems[1]->def = nir_usub_borrow(&b->nb, src[0], src[1]);
+      vtn_assert(glsl_type_is_struct_or_ifc(dest_type));
+      dest->elems[0]->def = nir_isub(&b->nb, src[0], src[1]);
+      dest->elems[1]->def = nir_usub_borrow(&b->nb, src[0], src[1]);
       break;
 
    case SpvOpUMulExtended: {
-      vtn_assert(glsl_type_is_struct_or_ifc(val->ssa->type));
+      vtn_assert(glsl_type_is_struct_or_ifc(dest_type));
       nir_ssa_def *umul = nir_umul_2x32_64(&b->nb, src[0], src[1]);
-      val->ssa->elems[0]->def = nir_unpack_64_2x32_split_x(&b->nb, umul);
-      val->ssa->elems[1]->def = nir_unpack_64_2x32_split_y(&b->nb, umul);
+      dest->elems[0]->def = nir_unpack_64_2x32_split_x(&b->nb, umul);
+      dest->elems[1]->def = nir_unpack_64_2x32_split_y(&b->nb, umul);
       break;
    }
 
    case SpvOpSMulExtended: {
-      vtn_assert(glsl_type_is_struct_or_ifc(val->ssa->type));
+      vtn_assert(glsl_type_is_struct_or_ifc(dest_type));
       nir_ssa_def *smul = nir_imul_2x32_64(&b->nb, src[0], src[1]);
-      val->ssa->elems[0]->def = nir_unpack_64_2x32_split_x(&b->nb, smul);
-      val->ssa->elems[1]->def = nir_unpack_64_2x32_split_y(&b->nb, smul);
+      dest->elems[0]->def = nir_unpack_64_2x32_split_x(&b->nb, smul);
+      dest->elems[1]->def = nir_unpack_64_2x32_split_y(&b->nb, smul);
       break;
    }
 
    case SpvOpFwidth:
-      val->ssa->def = nir_fadd(&b->nb,
+      dest->def = nir_fadd(&b->nb,
                            nir_fabs(&b->nb, nir_fddx(&b->nb, src[0])),
                            nir_fabs(&b->nb, nir_fddy(&b->nb, src[0])));
       break;
    case SpvOpFwidthFine:
-      val->ssa->def = nir_fadd(&b->nb,
+      dest->def = nir_fadd(&b->nb,
                            nir_fabs(&b->nb, nir_fddx_fine(&b->nb, src[0])),
                            nir_fabs(&b->nb, nir_fddy_fine(&b->nb, src[0])));
       break;
    case SpvOpFwidthCoarse:
-      val->ssa->def = nir_fadd(&b->nb,
+      dest->def = nir_fadd(&b->nb,
                            nir_fabs(&b->nb, nir_fddx_coarse(&b->nb, src[0])),
                            nir_fabs(&b->nb, nir_fddy_coarse(&b->nb, src[0])));
       break;
 
    case SpvOpVectorTimesScalar:
       /* The builder will take care of splatting for us. */
-      val->ssa->def = nir_fmul(&b->nb, src[0], src[1]);
+      dest->def = nir_fmul(&b->nb, src[0], src[1]);
       break;
 
    case SpvOpIsNan:
-      val->ssa->def = nir_fne(&b->nb, src[0], src[0]);
+      dest->def = nir_fne(&b->nb, src[0], src[0]);
       break;
 
    case SpvOpIsInf: {
       nir_ssa_def *inf = nir_imm_floatN_t(&b->nb, INFINITY, src[0]->bit_size);
-      val->ssa->def = nir_ieq(&b->nb, nir_fabs(&b->nb, src[0]), inf);
+      dest->def = nir_ieq(&b->nb, nir_fabs(&b->nb, src[0]), inf);
       break;
    }
 
@@ -527,7 +526,7 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
    case SpvOpFUnordGreaterThanEqual: {
       bool swap;
       unsigned src_bit_size = glsl_get_bit_size(vtn_src[0]->type);
-      unsigned dst_bit_size = glsl_get_bit_size(type);
+      unsigned dst_bit_size = glsl_get_bit_size(dest_type);
       nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
                                                   src_bit_size, dst_bit_size);
 
@@ -537,7 +536,7 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
          src[1] = tmp;
       }
 
-      val->ssa->def =
+      dest->def =
          nir_ior(&b->nb,
                  nir_build_alu(&b->nb, op, src[0], src[1], NULL, NULL),
                  nir_ior(&b->nb,
@@ -553,13 +552,13 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
        */
       bool swap;
       unsigned src_bit_size = glsl_get_bit_size(vtn_src[0]->type);
-      unsigned dst_bit_size = glsl_get_bit_size(type);
+      unsigned dst_bit_size = glsl_get_bit_size(dest_type);
       nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
                                                   src_bit_size, dst_bit_size);
 
       assert(!swap);
 
-      val->ssa->def =
+      dest->def =
         nir_iand(&b->nb,
                  nir_build_alu(&b->nb, op, src[0], src[1], NULL, NULL),
                  nir_iand(&b->nb,
@@ -570,13 +569,13 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
 
    case SpvOpFConvert: {
       nir_alu_type src_alu_type = nir_get_nir_type_for_glsl_type(vtn_src[0]->type);
-      nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(type);
+      nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(dest_type);
       nir_rounding_mode rounding_mode = nir_rounding_mode_undef;
 
-      vtn_foreach_decoration(b, val, handle_rounding_mode, &rounding_mode);
+      vtn_foreach_decoration(b, dest_val, handle_rounding_mode, &rounding_mode);
       nir_op op = nir_type_conversion_op(src_alu_type, dst_alu_type, rounding_mode);
 
-      val->ssa->def = nir_build_alu(&b->nb, op, src[0], src[1], NULL, NULL);
+      dest->def = nir_build_alu(&b->nb, op, src[0], src[1], NULL, NULL);
       break;
    }
 
@@ -588,7 +587,7 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
    case SpvOpShiftRightLogical: {
       bool swap;
       unsigned src0_bit_size = glsl_get_bit_size(vtn_src[0]->type);
-      unsigned dst_bit_size = glsl_get_bit_size(type);
+      unsigned dst_bit_size = glsl_get_bit_size(dest_type);
       nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
                                                   src0_bit_size, dst_bit_size);
 
@@ -611,17 +610,17 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
             src[i] = nir_u2u32(&b->nb, src[i]);
          }
       }
-      val->ssa->def = nir_build_alu(&b->nb, op, src[0], src[1], src[2], src[3]);
+      dest->def = nir_build_alu(&b->nb, op, src[0], src[1], src[2], src[3]);
       break;
    }
 
    case SpvOpSignBitSet:
-      val->ssa->def = nir_i2b(&b->nb,
+      dest->def = nir_i2b(&b->nb,
          nir_ushr(&b->nb, src[0], nir_imm_int(&b->nb, src[0]->bit_size - 1)));
       break;
 
    case SpvOpUCountTrailingZerosINTEL:
-      val->ssa->def = nir_umin(&b->nb,
+      dest->def = nir_umin(&b->nb,
                            nir_find_lsb(&b->nb, src[0]),
                            nir_imm_int(&b->nb, 32u));
       break;
@@ -629,7 +628,7 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
    default: {
       bool swap;
       unsigned src_bit_size = glsl_get_bit_size(vtn_src[0]->type);
-      unsigned dst_bit_size = glsl_get_bit_size(type);
+      unsigned dst_bit_size = glsl_get_bit_size(dest_type);
       nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
                                                   src_bit_size, dst_bit_size);
 
@@ -650,7 +649,7 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
          break;
       }
 
-      val->ssa->def = nir_build_alu(&b->nb, op, src[0], src[1], src[2], src[3]);
+      dest->def = nir_build_alu(&b->nb, op, src[0], src[1], src[2], src[3]);
       break;
    } /* default */
    }
@@ -661,8 +660,8 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
    case SpvOpISub:
    case SpvOpShiftLeftLogical:
    case SpvOpSNegate: {
-      nir_alu_instr *alu = nir_instr_as_alu(val->ssa->def->parent_instr);
-      vtn_foreach_decoration(b, val, handle_no_wrap, alu);
+      nir_alu_instr *alu = nir_instr_as_alu(dest->def->parent_instr);
+      vtn_foreach_decoration(b, dest_val, handle_no_wrap, alu);
       break;
    }
    default:
@@ -670,6 +669,8 @@ vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
       break;
    }
 
+   vtn_push_ssa_value(b, w[2], dest);
+
    b->nb.exact = b->exact;
 }
 