spirv: Replace vtn_constant_value with vtn_constant_uint

The uint version is less typing, supports different bit sizes, and is
probably a bit safer because we're actually verifying that the
SPIR-V value is an integer scalar constant.

Reviewed-by: Caio Marcelo de Oliveira Filho <caio.oliveira@intel.com>
This commit is contained in:
Jason Ekstrand
2019-01-22 11:57:48 -06:00
committed by Jason Ekstrand
parent 5e7f800f32
commit 9b37e93e42
3 changed files with 23 additions and 15 deletions

View File

@@ -3358,7 +3358,7 @@ vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
switch (opcode) {
case SpvOpEmitStreamVertex:
case SpvOpEndStreamPrimitive: {
unsigned stream = vtn_constant_value(b, w[1])->values[0].u32[0];
unsigned stream = vtn_constant_uint(b, w[1]);
nir_intrinsic_set_stream_id(intrin, stream);
break;
}
@@ -3372,23 +3372,19 @@ vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
}
case SpvOpMemoryBarrier: {
SpvScope scope = vtn_constant_value(b, w[1])->values[0].u32[0];
SpvMemorySemanticsMask semantics =
vtn_constant_value(b, w[2])->values[0].u32[0];
SpvScope scope = vtn_constant_uint(b, w[1]);
SpvMemorySemanticsMask semantics = vtn_constant_uint(b, w[2]);
vtn_emit_memory_barrier(b, scope, semantics);
return;
}
case SpvOpControlBarrier: {
SpvScope execution_scope =
vtn_constant_value(b, w[1])->values[0].u32[0];
SpvScope execution_scope = vtn_constant_uint(b, w[1]);
if (execution_scope == SpvScopeWorkgroup)
vtn_emit_barrier(b, nir_intrinsic_barrier);
SpvScope memory_scope =
vtn_constant_value(b, w[2])->values[0].u32[0];
SpvMemorySemanticsMask memory_semantics =
vtn_constant_value(b, w[3])->values[0].u32[0];
SpvScope memory_scope = vtn_constant_uint(b, w[2]);
SpvMemorySemanticsMask memory_semantics = vtn_constant_uint(b, w[3]);
vtn_emit_memory_barrier(b, memory_scope, memory_semantics);
break;
}

View File

@@ -673,10 +673,22 @@ bool
vtn_set_instruction_result_type(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count);
static inline nir_constant *
vtn_constant_value(struct vtn_builder *b, uint32_t value_id)
static inline uint64_t
vtn_constant_uint(struct vtn_builder *b, uint32_t value_id)
{
return vtn_value(b, value_id, vtn_value_type_constant)->constant;
struct vtn_value *val = vtn_value(b, value_id, vtn_value_type_constant);
vtn_fail_if(val->type->base_type != vtn_base_type_scalar ||
!glsl_type_is_integer(val->type->type),
"Expected id %u to be an integer constant", value_id);
switch (glsl_get_bit_size(val->type->type)) {
case 8: return val->constant->values[0].u8[0];
case 16: return val->constant->values[0].u16[0];
case 32: return val->constant->values[0].u32[0];
case 64: return val->constant->values[0].u64[0];
default: unreachable("Invalid bit size");
}
}
struct vtn_ssa_value *vtn_ssa_value(struct vtn_builder *b, uint32_t value_id);

View File

@@ -269,7 +269,7 @@ vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode,
break;
case SpvOpGroupNonUniformQuadSwap: {
unsigned direction = vtn_constant_value(b, w[5])->values[0].u32[0];
unsigned direction = vtn_constant_uint(b, w[5]);
nir_intrinsic_op op;
switch (direction) {
case 0:
@@ -368,7 +368,7 @@ vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode,
case SpvGroupOperationClusteredReduce:
op = nir_intrinsic_reduce;
assert(count == 7);
cluster_size = vtn_constant_value(b, w[6])->values[0].u32[0];
cluster_size = vtn_constant_uint(b, w[6]);
break;
default:
unreachable("Invalid group operation");