spirv: Drop lower_workgroup_access_to_offsets

Intel drivers are not using this anymore, and turnip still doesn't have
Compute Shaders, so dropping this option won't make a difference.

Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Acked-by: Rob Clark <robdclark@chromium.org>
Author: Caio Marcelo de Oliveira Filho
Date:   2019-08-08 10:00:45 -07:00
parent 925e9142bd
commit 5ed4e31c08

5 changed files with 14 additions and 138 deletions

src/compiler/spirv/nir_spirv.h

@@ -59,14 +59,6 @@ enum nir_spirv_execution_environment {
 struct spirv_to_nir_options {
    enum nir_spirv_execution_environment environment;
 
-   /* Whether or not to lower all workgroup variable access to offsets
-    * up-front.  This means you will get _shared intrinsics instead of _var
-    * for workgroup data access.
-    *
-    * This is currently required for full variable pointers support.
-    */
-   bool lower_workgroup_access_to_offsets;
-
    /* Whether or not to lower all UBO/SSBO access to offsets up-front. */
    bool lower_ubo_ssbo_access_to_offsets;
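For context, a consumer of spirv_to_nir selected the old behavior through this
struct. A minimal sketch, not taken from this commit (NIR_SPIRV_VULKAN and both
fields are the era's real names from nir_spirv.h):

   const struct spirv_to_nir_options spirv_options = {
      .environment = NIR_SPIRV_VULKAN,
      /* The field removed by this commit; when set, workgroup access came
       * out of spirv_to_nir as offset-based _shared intrinsics. */
      .lower_workgroup_access_to_offsets = true,
   };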

src/compiler/spirv/spirv_to_nir.c

@@ -1086,65 +1086,6 @@ translate_image_format(struct vtn_builder *b, SpvImageFormat format)
    }
 }
 
-static struct vtn_type *
-vtn_type_layout_std430(struct vtn_builder *b, struct vtn_type *type,
-                       uint32_t *size_out, uint32_t *align_out)
-{
-   switch (type->base_type) {
-   case vtn_base_type_scalar: {
-      uint32_t comp_size = glsl_type_is_boolean(type->type)
-         ? 4 : glsl_get_bit_size(type->type) / 8;
-      *size_out = comp_size;
-      *align_out = comp_size;
-      return type;
-   }
-
-   case vtn_base_type_vector: {
-      uint32_t comp_size = glsl_type_is_boolean(type->type)
-         ? 4 : glsl_get_bit_size(type->type) / 8;
-      unsigned align_comps = type->length == 3 ? 4 : type->length;
-      *size_out = comp_size * type->length;
-      *align_out = comp_size * align_comps;
-      return type;
-   }
-
-   case vtn_base_type_matrix:
-   case vtn_base_type_array: {
-      /* We're going to add an array stride */
-      type = vtn_type_copy(b, type);
-      uint32_t elem_size, elem_align;
-      type->array_element = vtn_type_layout_std430(b, type->array_element,
-                                                   &elem_size, &elem_align);
-      type->stride = vtn_align_u32(elem_size, elem_align);
-      *size_out = type->stride * type->length;
-      *align_out = elem_align;
-      return type;
-   }
-
-   case vtn_base_type_struct: {
-      /* We're going to add member offsets */
-      type = vtn_type_copy(b, type);
-      uint32_t offset = 0;
-      uint32_t align = 0;
-      for (unsigned i = 0; i < type->length; i++) {
-         uint32_t mem_size, mem_align;
-         type->members[i] = vtn_type_layout_std430(b, type->members[i],
-                                                   &mem_size, &mem_align);
-         offset = vtn_align_u32(offset, mem_align);
-         type->offsets[i] = offset;
-         offset += mem_size;
-         align = MAX2(align, mem_align);
-      }
-      *size_out = offset;
-      *align_out = align;
-      return type;
-   }
-
-   default:
-      unreachable("Invalid SPIR-V type for std430");
-   }
-}
-
 static void
 vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                 const uint32_t *w, unsigned count)
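The deleted helper implemented the std430 layout rules: scalars align to their
component size, 3-component vectors align as if they had 4 components, and
array strides round the element size up to the element alignment. A
self-contained sketch of just the vector rule, with a hypothetical helper name
and 32-bit components assumed:

   #include <stdint.h>
   #include <stdio.h>

   /* Mirrors the deleted vtn_base_type_vector case: size counts the real
    * components, alignment pads vec3 out to vec4. */
   static void
   std430_vec_layout(uint32_t comps, uint32_t *size_out, uint32_t *align_out)
   {
      const uint32_t comp_size = 4;  /* 32-bit components */
      const uint32_t align_comps = comps == 3 ? 4 : comps;
      *size_out = comp_size * comps;
      *align_out = comp_size * align_comps;
   }

   int main(void)
   {
      uint32_t size, align;
      std430_vec_layout(3, &size, &align);
      printf("vec3: size=%u align=%u\n", size, align);  /* size=12 align=16 */
      return 0;
   }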
@@ -1416,18 +1357,6 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
          default:
             break;
          }
-      } else if (storage_class == SpvStorageClassWorkgroup &&
-                 b->options->lower_workgroup_access_to_offsets) {
-         /* Lay out Workgroup types so they can be lowered to offsets during
-          * SPIR-V to NIR conversion.  When not lowering to offsets, the
-          * stride will be calculated by the driver.
-          */
-         uint32_t size, align;
-         val->type->deref = vtn_type_layout_std430(b, val->type->deref,
-                                                   &size, &align);
-         val->type->length = size;
-         val->type->align = align;
-         val->type->stride = vtn_align_u32(size, align);
       }
    }
    break;
@@ -2696,33 +2625,6 @@ get_uniform_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
    }
 }
 
-static nir_intrinsic_op
-get_shared_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
-{
-   switch (opcode) {
-   case SpvOpAtomicLoad:      return nir_intrinsic_load_shared;
-   case SpvOpAtomicStore:     return nir_intrinsic_store_shared;
-#define OP(S, N) case SpvOp##S: return nir_intrinsic_shared_##N;
-   OP(AtomicExchange,            atomic_exchange)
-   OP(AtomicCompareExchange,     atomic_comp_swap)
-   OP(AtomicCompareExchangeWeak, atomic_comp_swap)
-   OP(AtomicIIncrement,          atomic_add)
-   OP(AtomicIDecrement,          atomic_add)
-   OP(AtomicIAdd,                atomic_add)
-   OP(AtomicISub,                atomic_add)
-   OP(AtomicSMin,                atomic_imin)
-   OP(AtomicUMin,                atomic_umin)
-   OP(AtomicSMax,                atomic_imax)
-   OP(AtomicUMax,                atomic_umax)
-   OP(AtomicAnd,                 atomic_and)
-   OP(AtomicOr,                  atomic_or)
-   OP(AtomicXor,                 atomic_xor)
-#undef OP
-   default:
-      vtn_fail_with_opcode("Invalid shared atomic", opcode);
-   }
-}
-
 static nir_intrinsic_op
 get_deref_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
 {
@@ -2842,15 +2744,9 @@ vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
       nir_ssa_def *offset, *index;
       offset = vtn_pointer_to_offset(b, ptr, &index);
 
-      nir_intrinsic_op op;
-      if (ptr->mode == vtn_variable_mode_ssbo) {
-         op = get_ssbo_nir_atomic_op(b, opcode);
-      } else {
-         vtn_assert(ptr->mode == vtn_variable_mode_workgroup &&
-                    b->options->lower_workgroup_access_to_offsets);
-         op = get_shared_nir_atomic_op(b, opcode);
-      }
+      assert(ptr->mode == vtn_variable_mode_ssbo);
+      nir_intrinsic_op op = get_ssbo_nir_atomic_op(b, opcode);
 
       atomic = nir_intrinsic_instr_create(b->nb.shader, op);
       int src = 0;
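Note that the deleted table mapped SpvOpAtomicISub and SpvOpAtomicIDecrement
onto the same shared atomic-add intrinsic; the difference is made up when vtn
fills in the intrinsic's data source. Roughly, as a sketch of the idea rather
than the exact vtn code:

   /* Sketch only: 'value' stands for the SPIR-V operand already converted
    * to SSA.  nir_ineg() and nir_imm_int() are real NIR builder helpers. */
   nir_ssa_def *data;
   switch (opcode) {
   case SpvOpAtomicIAdd:
      data = value;                     /* add the operand as-is */
      break;
   case SpvOpAtomicISub:
      data = nir_ineg(&b->nb, value);   /* x - y == x + (-y) */
      break;
   case SpvOpAtomicIIncrement:
      data = nir_imm_int(&b->nb, 1);    /* ++ is an add of +1 */
      break;
   case SpvOpAtomicIDecrement:
      data = nir_imm_int(&b->nb, -1);   /* -- is an add of -1 */
      break;
   default:
      break;
   }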

src/compiler/spirv/vtn_variables.c

@@ -97,9 +97,7 @@ vtn_mode_uses_ssa_offset(struct vtn_builder *b,
    return ((mode == vtn_variable_mode_ubo ||
             mode == vtn_variable_mode_ssbo) &&
            b->options->lower_ubo_ssbo_access_to_offsets) ||
-          mode == vtn_variable_mode_push_constant ||
-          (mode == vtn_variable_mode_workgroup &&
-           b->options->lower_workgroup_access_to_offsets);
+          mode == vtn_variable_mode_push_constant;
 }
 
 static bool
@@ -109,9 +107,7 @@ vtn_pointer_is_external_block(struct vtn_builder *b,
    return ptr->mode == vtn_variable_mode_ssbo ||
           ptr->mode == vtn_variable_mode_ubo ||
           ptr->mode == vtn_variable_mode_phys_ssbo ||
-          ptr->mode == vtn_variable_mode_push_constant ||
-          (ptr->mode == vtn_variable_mode_workgroup &&
-           b->options->lower_workgroup_access_to_offsets);
+          ptr->mode == vtn_variable_mode_push_constant;
 }
 
 static nir_ssa_def *
@@ -1752,9 +1748,7 @@ var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
        */
       vtn_assert(vtn_var->mode == vtn_variable_mode_ubo ||
                  vtn_var->mode == vtn_variable_mode_ssbo ||
-                 vtn_var->mode == vtn_variable_mode_push_constant ||
-                 (vtn_var->mode == vtn_variable_mode_workgroup &&
-                  b->options->lower_workgroup_access_to_offsets));
+                 vtn_var->mode == vtn_variable_mode_push_constant);
       }
    }
 }
@@ -2211,19 +2205,15 @@ vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
       break;
 
    case vtn_variable_mode_workgroup:
-      if (b->options->lower_workgroup_access_to_offsets) {
-         var->shared_location = -1;
-      } else {
-         /* Create the variable normally */
-         var->var = rzalloc(b->shader, nir_variable);
-         var->var->name = ralloc_strdup(var->var, val->name);
-
-         /* Workgroup variables don't have any explicit layout but some
-          * layouts may have leaked through due to type deduplication in the
-          * SPIR-V.
-          */
-         var->var->type = var->type->type;
-         var->var->data.mode = nir_var_mem_shared;
-      }
+      /* Create the variable normally */
+      var->var = rzalloc(b->shader, nir_variable);
+      var->var->name = ralloc_strdup(var->var, val->name);
+
+      /* Workgroup variables don't have any explicit layout but some
+       * layouts may have leaked through due to type deduplication in the
+       * SPIR-V.
+       */
+      var->var->type = var->type->type;
+      var->var->data.mode = nir_var_mem_shared;
       break;
 
    case vtn_variable_mode_input:
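With the option gone, workgroup variables always reach NIR as
nir_var_mem_shared variables, and a driver that wants offset-based access runs
the lowering itself on the NIR side. A sketch of that replacement path; the
two passes are real NIR entry points, while shared_type_info stands for a
driver-provided glsl_type size/align callback:

   /* Give shared variables an explicit layout, then turn the derefs into
    * load_shared/store_shared intrinsics carrying byte offsets. */
   NIR_PASS_V(nir, nir_lower_vars_to_explicit_types, nir_var_mem_shared,
              shared_type_info);
   NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_shared,
              nir_address_format_32bit_offset);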