spirv: Rework our handling of images and samplers

Previously, objects of type OpTypeImage or OpTypeSampler were treated
as vtn_pointers, and objects of type OpTypeSampledImage were stored in
a special-purpose vtn_sampled_image struct.  This commit changes that
so all of those objects are stored in vtn_ssa_values.  Images,
samplers, and sampled images are each stored as a scalar or vector
nir_ssa_def whose components are NIR deref values.  We now use
vtn_type_get_nir_type to re-resolve those, as needed, into GLSL
sampler types for NIR.
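
In sketch form (condensed from the helpers this commit adds; "b" is
the vtn_builder and "si" a vtn_sampled_image), the packing and
unpacking of a sampled image looks like:

    /* Pack: a sampled image becomes a vec2 whose components are the
     * SSA results of the image and sampler deref instructions.
     */
    nir_ssa_def *packed =
       nir_vec2(&b->nb, &si.image->dest.ssa, &si.sampler->dest.ssa);

    /* Unpack: deref casts re-resolve each component back to a typed
     * deref.  type->image->glsl_image is the GLSL sampler type for
     * the image half.
     */
    si.image = nir_build_deref_cast(&b->nb,
                                    nir_channel(&b->nb, packed, 0),
                                    nir_var_uniform,
                                    type->image->glsl_image, 0);
    si.sampler = nir_build_deref_cast(&b->nb,
                                      nir_channel(&b->nb, packed, 1),
                                      nir_var_uniform,
                                      glsl_bare_sampler_type(), 0);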

This simplification has a number of benefits:

 1. We can get rid of the rest of our special cases for handling
    images and samplers in function arguments.  Now that they're
    treated as structs at the glsl_type level, the generic paths can
    handle images and samplers.

 2. We can now construct composite values containing images and
    samplers internally.  It's unclear from the SPIR-V spec whether
    this is allowed, and it's not a pattern GLSLang currently
    generates thanks to GLSL rules.  However, if we do start seeing
    SPIR-V that contains such composites, we should now be able to
    handle it.

 3. SPIR-V OpNull and OpUndef instructions can now create samplers,
    images, and sampled images (see the sketch after this list).  The
    NIR we generate likely won't be fully valid but, given a NIR pass
    that does something sensible with these values, it should be
    something we can compile.
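
As a hypothetical illustration of point 3 (this is not code from the
commit): because a sampled image is now just a two-component SSA
value, the generic undef path can materialize one directly, assuming
a 32-bit address format:

    /* Hypothetical sketch: an undefined sampled image is just an
     * undef vec2; a later NIR pass must do something sensible with
     * the components before they are consumed as derefs.
     */
    nir_ssa_def *undef_si = nir_ssa_undef(&b->nb, 2, 32);
    vtn_push_nir_ssa(b, value_id, undef_si);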

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5278>
commit 14a12b771d
parent 196db51fc2
Author: Jason Ekstrand
Date:   2020-05-29 16:45:21 -05:00

4 changed files with 201 additions and 307 deletions


@@ -310,6 +310,66 @@ vtn_push_nir_ssa(struct vtn_builder *b, uint32_t value_id, nir_ssa_def *def)
return vtn_push_ssa_value(b, value_id, ssa);
}
static nir_deref_instr *
vtn_get_image(struct vtn_builder *b, uint32_t value_id)
{
struct vtn_type *type = vtn_get_value_type(b, value_id);
vtn_assert(type->base_type == vtn_base_type_image);
return nir_build_deref_cast(&b->nb, vtn_get_nir_ssa(b, value_id),
nir_var_uniform, type->glsl_image, 0);
}
static void
vtn_push_image(struct vtn_builder *b, uint32_t value_id,
nir_deref_instr *deref)
{
struct vtn_type *type = vtn_get_value_type(b, value_id);
vtn_assert(type->base_type == vtn_base_type_image);
vtn_push_nir_ssa(b, value_id, &deref->dest.ssa);
}
static nir_deref_instr *
vtn_get_sampler(struct vtn_builder *b, uint32_t value_id)
{
struct vtn_type *type = vtn_get_value_type(b, value_id);
vtn_assert(type->base_type == vtn_base_type_sampler);
return nir_build_deref_cast(&b->nb, vtn_get_nir_ssa(b, value_id),
nir_var_uniform, glsl_bare_sampler_type(), 0);
}
nir_ssa_def *
vtn_sampled_image_to_nir_ssa(struct vtn_builder *b,
struct vtn_sampled_image si)
{
return nir_vec2(&b->nb, &si.image->dest.ssa, &si.sampler->dest.ssa);
}
static void
vtn_push_sampled_image(struct vtn_builder *b, uint32_t value_id,
struct vtn_sampled_image si)
{
struct vtn_type *type = vtn_get_value_type(b, value_id);
vtn_assert(type->base_type == vtn_base_type_sampled_image);
vtn_push_nir_ssa(b, value_id, vtn_sampled_image_to_nir_ssa(b, si));
}
static struct vtn_sampled_image
vtn_get_sampled_image(struct vtn_builder *b, uint32_t value_id)
{
struct vtn_type *type = vtn_get_value_type(b, value_id);
vtn_assert(type->base_type == vtn_base_type_sampled_image);
nir_ssa_def *si_vec2 = vtn_get_nir_ssa(b, value_id);
struct vtn_sampled_image si = { NULL, };
si.image = nir_build_deref_cast(&b->nb, nir_channel(&b->nb, si_vec2, 0),
nir_var_uniform,
type->image->glsl_image, 0);
si.sampler = nir_build_deref_cast(&b->nb, nir_channel(&b->nb, si_vec2, 1),
nir_var_uniform,
glsl_bare_sampler_type(), 0);
return si;
}
static char *
vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
unsigned word_count, unsigned *words_used)
@@ -726,6 +786,17 @@ vtn_type_get_nir_type(struct vtn_builder *b, struct vtn_type *type,
return wrap_type_in_array(glsl_atomic_uint_type(), type->type);
}
if (mode == vtn_variable_mode_uniform) {
struct vtn_type *tail = vtn_type_without_array(type);
if (tail->base_type == vtn_base_type_image) {
return wrap_type_in_array(tail->glsl_image, type->type);
} else if (tail->base_type == vtn_base_type_sampler) {
return wrap_type_in_array(glsl_bare_sampler_type(), type->type);
} else if (tail->base_type == vtn_base_type_sampled_image) {
return wrap_type_in_array(tail->image->glsl_image, type->type);
}
}
return type->type;
}
@@ -1413,6 +1484,14 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
case SpvOpTypeImage: {
val->type->base_type = vtn_base_type_image;
/* Images are represented in NIR as a scalar SSA value that is the
* result of a deref instruction. An OpLoad on an OpTypeImage pointer
* from UniformConstant memory just takes the NIR deref from the pointer
* and turns it into an SSA value.
*/
val->type->type = nir_address_format_to_glsl_type(
vtn_mode_to_address_format(b, vtn_variable_mode_function));
const struct vtn_type *sampled_type = vtn_get_type(b, w[2]);
vtn_fail_if(sampled_type->base_type != vtn_base_type_scalar ||
glsl_get_bit_size(sampled_type->type) != 32,
@@ -1459,30 +1538,50 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
enum glsl_base_type sampled_base_type =
glsl_get_base_type(sampled_type->type);
if (sampled == 1) {
val->type->type = glsl_sampler_type(dim, false, is_array,
sampled_base_type);
val->type->glsl_image = glsl_sampler_type(dim, false, is_array,
sampled_base_type);
} else if (sampled == 2) {
val->type->type = glsl_image_type(dim, is_array, sampled_base_type);
val->type->glsl_image = glsl_image_type(dim, is_array,
sampled_base_type);
} else {
vtn_fail("We need to know if the image will be sampled");
}
break;
}
case SpvOpTypeSampledImage:
case SpvOpTypeSampledImage: {
val->type->base_type = vtn_base_type_sampled_image;
val->type->image = vtn_get_type(b, w[2]);
val->type->type = val->type->image->type;
/* Sampled images are represented in NIR as a vec2 SSA value where each
* component is the result of a deref instruction. The first component
* is the image and the second is the sampler. An OpLoad on an
* OpTypeSampledImage pointer from UniformConstant memory just takes
* the NIR deref from the pointer and duplicates it to both vector
* components.
*/
nir_address_format addr_format =
vtn_mode_to_address_format(b, vtn_variable_mode_function);
assert(nir_address_format_num_components(addr_format) == 1);
unsigned bit_size = nir_address_format_bit_size(addr_format);
assert(bit_size == 32 || bit_size == 64);
enum glsl_base_type base_type =
bit_size == 32 ? GLSL_TYPE_UINT : GLSL_TYPE_UINT64;
val->type->type = glsl_vector_type(base_type, 2);
break;
}
case SpvOpTypeSampler:
/* The actual sampler type here doesn't really matter. It gets
* thrown away the moment you combine it with an image. What really
* matters is that it's a sampler type as opposed to an integer type
* so the backend knows what to do.
*/
val->type->base_type = vtn_base_type_sampler;
val->type->type = glsl_bare_sampler_type();
/* Samplers are represented in NIR as a scalar SSA value that is the
* result of a deref instruction. An OpLoad on an OpTypeSampler pointer
* from UniformConstant memory just takes the NIR deref from the pointer
* and turns it into an SSA value.
*/
val->type->type = nir_address_format_to_glsl_type(
vtn_mode_to_address_format(b, vtn_variable_mode_function));
break;
case SpvOpTypeOpaque:
@@ -2285,68 +2384,37 @@ non_uniform_decoration_cb(struct vtn_builder *b,
}
}
static void
vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count)
{
if (opcode == SpvOpSampledImage) {
struct vtn_value *val =
vtn_push_value(b, w[2], vtn_value_type_sampled_image);
val->sampled_image = ralloc(b, struct vtn_sampled_image);
/* It seems valid to use OpSampledImage with OpUndef instead of
* OpTypeImage or OpTypeSampler.
*/
if (vtn_untyped_value(b, w[3])->value_type == vtn_value_type_undef) {
val->sampled_image->image = NULL;
} else {
val->sampled_image->image =
vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
}
if (vtn_untyped_value(b, w[4])->value_type == vtn_value_type_undef) {
val->sampled_image->sampler = NULL;
} else {
val->sampled_image->sampler =
vtn_value(b, w[4], vtn_value_type_pointer)->pointer;
}
return;
} else if (opcode == SpvOpImage) {
struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
if (src_val->value_type == vtn_value_type_sampled_image) {
vtn_push_pointer(b, w[2], src_val->sampled_image->image);
} else {
vtn_assert(src_val->value_type == vtn_value_type_pointer);
vtn_push_pointer(b, w[2], src_val->pointer);
}
return;
}
struct vtn_type *ret_type = vtn_get_type(b, w[1]);
struct vtn_pointer *image = NULL, *sampler = NULL;
struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
if (sampled_val->value_type == vtn_value_type_sampled_image) {
image = sampled_val->sampled_image->image;
sampler = sampled_val->sampled_image->sampler;
} else {
vtn_assert(sampled_val->value_type == vtn_value_type_pointer);
image = sampled_val->pointer;
}
if (!image) {
vtn_push_value(b, w[2], vtn_value_type_undef);
if (opcode == SpvOpSampledImage) {
struct vtn_sampled_image si = {
.image = vtn_get_image(b, w[3]),
.sampler = vtn_get_sampler(b, w[4]),
};
vtn_push_sampled_image(b, w[2], si);
return;
} else if (opcode == SpvOpImage) {
struct vtn_sampled_image si = vtn_get_sampled_image(b, w[3]);
vtn_push_image(b, w[2], si.image);
return;
}
nir_deref_instr *image_deref = vtn_pointer_to_deref(b, image);
nir_deref_instr *sampler_deref =
sampler ? vtn_pointer_to_deref(b, sampler) : NULL;
nir_deref_instr *image = NULL, *sampler = NULL;
struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
if (sampled_val->type->base_type == vtn_base_type_sampled_image) {
struct vtn_sampled_image si = vtn_get_sampled_image(b, w[3]);
image = si.image;
sampler = si.sampler;
} else {
image = vtn_get_image(b, w[3]);
}
const struct glsl_type *image_type = sampled_val->type->type;
const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image_type);
const bool is_array = glsl_sampler_type_is_array(image_type);
const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image->type);
const bool is_array = glsl_sampler_type_is_array(image->type);
nir_alu_type dest_type = nir_type_invalid;
/* Figure out the base texture operation */
@@ -2415,7 +2483,7 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
nir_tex_src srcs[10]; /* 10 should be enough */
nir_tex_src *p = srcs;
p->src = nir_src_for_ssa(&image_deref->dest.ssa);
p->src = nir_src_for_ssa(&image->dest.ssa);
p->src_type = nir_tex_src_texture_deref;
p++;
@@ -2429,7 +2497,7 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
vtn_fail_if(sampler == NULL,
"%s requires an image of type OpTypeSampledImage",
spirv_op_to_string(opcode));
p->src = nir_src_for_ssa(&sampler_deref->dest.ssa);
p->src = nir_src_for_ssa(&sampler->dest.ssa);
p->src_type = nir_tex_src_sampler_deref;
p++;
break;
@@ -2648,7 +2716,7 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
/* for non-query ops, get dest_type from sampler type */
if (dest_type == nir_type_invalid) {
switch (glsl_get_sampler_result_type(image_type)) {
switch (glsl_get_sampler_result_type(image->type)) {
case GLSL_TYPE_FLOAT: dest_type = nir_type_float; break;
case GLSL_TYPE_INT: dest_type = nir_type_int; break;
case GLSL_TYPE_UINT: dest_type = nir_type_uint; break;
@@ -2778,7 +2846,7 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
vtn_push_value(b, w[2], vtn_value_type_image_pointer);
val->image = ralloc(b, struct vtn_image_pointer);
val->image->image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
val->image->image = vtn_nir_deref(b, w[3]);
val->image->coord = get_image_coord(b, w[4]);
val->image->sample = vtn_get_nir_ssa(b, w[5]);
val->image->lod = nir_imm_int(&b->nb, 0);
@@ -2821,16 +2889,16 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
break;
case SpvOpImageQuerySize:
res_val = vtn_value(b, w[3], vtn_value_type_pointer);
image.image = res_val->pointer;
res_val = vtn_untyped_value(b, w[3]);
image.image = vtn_get_image(b, w[3]);
image.coord = NULL;
image.sample = NULL;
image.lod = NULL;
break;
case SpvOpImageRead: {
res_val = vtn_value(b, w[3], vtn_value_type_pointer);
image.image = res_val->pointer;
res_val = vtn_untyped_value(b, w[3]);
image.image = vtn_get_image(b, w[3]);
image.coord = get_image_coord(b, w[4]);
const SpvImageOperandsMask operands =
@@ -2867,8 +2935,8 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
}
case SpvOpImageWrite: {
res_val = vtn_value(b, w[1], vtn_value_type_pointer);
image.image = res_val->pointer;
res_val = vtn_untyped_value(b, w[1]);
image.image = vtn_get_image(b, w[1]);
image.coord = get_image_coord(b, w[2]);
/* texel = w[3] */
@@ -2940,8 +3008,7 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
nir_deref_instr *image_deref = vtn_pointer_to_deref(b, image.image);
intrin->src[0] = nir_src_for_ssa(&image_deref->dest.ssa);
intrin->src[0] = nir_src_for_ssa(&image.image->dest.ssa);
/* ImageQuerySize doesn't take any extra parameters */
if (opcode != SpvOpImageQuerySize) {
@@ -4865,12 +4932,12 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
break;
case SpvOpImageQuerySize: {
struct vtn_pointer *image =
vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
if (glsl_type_is_image(image->type->type)) {
struct vtn_type *image_type = vtn_get_value_type(b, w[3]);
vtn_assert(image_type->base_type == vtn_base_type_image);
if (glsl_type_is_image(image_type->glsl_image)) {
vtn_handle_image(b, opcode, w, count);
} else {
vtn_assert(glsl_type_is_sampler(image->type->type));
vtn_assert(glsl_type_is_sampler(image_type->glsl_image));
vtn_handle_texture(b, opcode, w, count);
}
break;
@@ -5244,12 +5311,6 @@ vtn_create_builder(const uint32_t *words, size_t word_count,
uint16_t generator_id = words[2] >> 16;
uint16_t generator_version = words[2];
/* The first GLSLang version bump actually came 1.5 years after #179
* was fixed, but this should at least let us shut the workaround off
* for modern versions of GLSLang.
*/
b->wa_glslang_179 = (generator_id == 8 && generator_version == 1);
/* In GLSLang commit 8297936dd6eb3, their handling of barrier() was fixed
* to provide correct memory semantics on compute shader barrier()
* commands. Prior to that, we need to fix them up ourselves. This


@@ -30,22 +30,6 @@ vtn_block(struct vtn_builder *b, uint32_t value_id)
return vtn_value(b, value_id, vtn_value_type_block)->block;
}
static struct vtn_pointer *
vtn_load_param_pointer(struct vtn_builder *b,
struct vtn_type *param_type,
uint32_t param_idx)
{
struct vtn_type *ptr_type = param_type;
assert(param_type->base_type == vtn_base_type_image ||
param_type->base_type == vtn_base_type_sampler);
ptr_type = rzalloc(b, struct vtn_type);
ptr_type->base_type = vtn_base_type_pointer;
ptr_type->deref = param_type;
ptr_type->storage_class = SpvStorageClassUniformConstant;
return vtn_pointer_from_ssa(b, nir_load_param(&b->nb, param_idx), ptr_type);
}
static unsigned
glsl_type_count_function_params(const struct glsl_type *type)
{
@@ -66,26 +50,6 @@ glsl_type_count_function_params(const struct glsl_type *type)
}
}
static unsigned
vtn_type_count_function_params(struct vtn_type *type)
{
switch (type->base_type) {
case vtn_base_type_scalar:
case vtn_base_type_vector:
case vtn_base_type_array:
case vtn_base_type_matrix:
case vtn_base_type_struct:
case vtn_base_type_pointer:
return glsl_type_count_function_params(type->type);
case vtn_base_type_sampled_image:
return 2;
default:
return 1;
}
}
static void
glsl_type_add_to_function_params(const struct glsl_type *type,
nir_function *func,
@@ -111,41 +75,6 @@ glsl_type_add_to_function_params(const struct glsl_type *type,
}
}
static void
vtn_type_add_to_function_params(struct vtn_type *type,
nir_function *func,
unsigned *param_idx)
{
static const nir_parameter nir_deref_param = {
.num_components = 1,
.bit_size = 32,
};
switch (type->base_type) {
case vtn_base_type_scalar:
case vtn_base_type_vector:
case vtn_base_type_array:
case vtn_base_type_matrix:
case vtn_base_type_struct:
case vtn_base_type_pointer:
glsl_type_add_to_function_params(type->type, func, param_idx);
break;
case vtn_base_type_sampled_image:
func->params[(*param_idx)++] = nir_deref_param;
func->params[(*param_idx)++] = nir_deref_param;
break;
case vtn_base_type_image:
case vtn_base_type_sampler:
func->params[(*param_idx)++] = nir_deref_param;
break;
default:
unreachable("Unsupported type");
}
}
static void
vtn_ssa_value_add_to_call_params(struct vtn_builder *b,
struct vtn_ssa_value *value,
@@ -203,28 +132,8 @@ vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
}
for (unsigned i = 0; i < vtn_callee->type->length; i++) {
struct vtn_type *arg_type = vtn_callee->type->params[i];
unsigned arg_id = w[4 + i];
if (arg_type->base_type == vtn_base_type_sampled_image) {
struct vtn_sampled_image *sampled_image =
vtn_value(b, arg_id, vtn_value_type_sampled_image)->sampled_image;
call->params[param_idx++] =
nir_src_for_ssa(vtn_pointer_to_ssa(b, sampled_image->image));
call->params[param_idx++] =
nir_src_for_ssa(vtn_pointer_to_ssa(b, sampled_image->sampler));
} else if (arg_type->base_type == vtn_base_type_image ||
arg_type->base_type == vtn_base_type_sampler) {
struct vtn_pointer *pointer =
vtn_value(b, arg_id, vtn_value_type_pointer)->pointer;
call->params[param_idx++] =
nir_src_for_ssa(vtn_pointer_to_ssa(b, pointer));
} else {
struct vtn_ssa_value *arg = vtn_ssa_value(b, arg_id);
vtn_assert(arg->type == glsl_get_bare_type(arg_type->type));
vtn_ssa_value_add_to_call_params(b, arg, call, &param_idx);
}
vtn_ssa_value_add_to_call_params(b, vtn_ssa_value(b, w[4 + i]),
call, &param_idx);
}
assert(param_idx == call->num_params);
@@ -265,7 +174,7 @@ vtn_cfg_handle_prepass_instruction(struct vtn_builder *b, SpvOp opcode,
unsigned num_params = 0;
for (unsigned i = 0; i < func_type->length; i++)
num_params += vtn_type_count_function_params(func_type->params[i]);
num_params += glsl_type_count_function_params(func_type->params[i]->type);
/* Add one parameter for the function return value */
if (func_type->return_type->base_type != vtn_base_type_void)
@@ -286,7 +195,7 @@ vtn_cfg_handle_prepass_instruction(struct vtn_builder *b, SpvOp opcode,
}
for (unsigned i = 0; i < func_type->length; i++)
vtn_type_add_to_function_params(func_type->params[i], func, &idx);
glsl_type_add_to_function_params(func_type->params[i]->type, func, &idx);
assert(idx == num_params);
b->func->impl = nir_function_impl_create(func);
@@ -308,40 +217,11 @@ vtn_cfg_handle_prepass_instruction(struct vtn_builder *b, SpvOp opcode,
break;
case SpvOpFunctionParameter: {
struct vtn_type *type = vtn_get_type(b, w[1]);
vtn_assert(b->func_param_idx < b->func->impl->function->num_params);
if (type->base_type == vtn_base_type_sampled_image) {
/* Sampled images are actually two parameters. The first is the
* image and the second is the sampler.
*/
struct vtn_value *val =
vtn_push_value(b, w[2], vtn_value_type_sampled_image);
val->sampled_image = ralloc(b, struct vtn_sampled_image);
struct vtn_type *image_type = rzalloc(b, struct vtn_type);
image_type->base_type = vtn_base_type_image;
image_type->type = type->type;
struct vtn_type *sampler_type = rzalloc(b, struct vtn_type);
sampler_type->base_type = vtn_base_type_sampler;
sampler_type->type = glsl_bare_sampler_type();
val->sampled_image->image =
vtn_load_param_pointer(b, image_type, b->func_param_idx++);
val->sampled_image->sampler =
vtn_load_param_pointer(b, sampler_type, b->func_param_idx++);
} else if (type->base_type == vtn_base_type_image ||
type->base_type == vtn_base_type_sampler) {
vtn_push_pointer(b, w[2], vtn_load_param_pointer(b, type, b->func_param_idx++));
} else {
/* We're a regular SSA value. */
struct vtn_ssa_value *value = vtn_create_ssa_value(b, type->type);
vtn_ssa_value_load_function_param(b, value, &b->func_param_idx);
vtn_push_ssa_value(b, w[2], value);
}
struct vtn_type *type = vtn_get_type(b, w[1]);
struct vtn_ssa_value *value = vtn_create_ssa_value(b, type->type);
vtn_ssa_value_load_function_param(b, value, &b->func_param_idx);
vtn_push_ssa_value(b, w[2], value);
break;
}


@@ -118,7 +118,6 @@ enum vtn_value_type {
vtn_value_type_ssa,
vtn_value_type_extension,
vtn_value_type_image_pointer,
vtn_value_type_sampled_image,
};
enum vtn_branch_type {
@@ -401,6 +400,12 @@ struct vtn_type {
/* Members for image types */
struct {
/* GLSL image type for this type. This is not to be confused with
* vtn_type::type which is actually going to be the GLSL type for a
* pointer to an image, likely a uint32_t.
*/
const struct glsl_type *glsl_image;
/* Image format for image_load_store type images */
unsigned image_format;
@@ -567,17 +572,12 @@ vtn_type_get_nir_type(struct vtn_builder *b, struct vtn_type *type,
enum vtn_variable_mode mode);
struct vtn_image_pointer {
struct vtn_pointer *image;
nir_deref_instr *image;
nir_ssa_def *coord;
nir_ssa_def *sample;
nir_ssa_def *lod;
};
struct vtn_sampled_image {
struct vtn_pointer *image; /* Image or array of images */
struct vtn_pointer *sampler; /* Sampler */
};
struct vtn_value {
enum vtn_value_type value_type;
const char *name;
@@ -588,7 +588,6 @@ struct vtn_value {
nir_constant *constant;
struct vtn_pointer *pointer;
struct vtn_image_pointer *image;
struct vtn_sampled_image *sampled_image;
struct vtn_function *func;
struct vtn_block *block;
struct vtn_ssa_value *ssa;
@@ -658,9 +657,6 @@ struct vtn_builder {
unsigned value_id_bound;
struct vtn_value *values;
/* True if we should watch out for GLSLang issue #179 */
bool wa_glslang_179;
/* True if we need to fix up CS OpControlBarrier */
bool wa_glslang_cs_barrier;
@@ -801,6 +797,14 @@ struct vtn_value *vtn_push_pointer(struct vtn_builder *b,
uint32_t value_id,
struct vtn_pointer *ptr);
struct vtn_sampled_image {
nir_deref_instr *image;
nir_deref_instr *sampler;
};
nir_ssa_def *vtn_sampled_image_to_nir_ssa(struct vtn_builder *b,
struct vtn_sampled_image si);
void
vtn_copy_value(struct vtn_builder *b, uint32_t src_value_id,
uint32_t dst_value_id);


@@ -606,12 +606,6 @@ vtn_pointer_dereference(struct vtn_builder *b,
nir_deref_instr *
vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
{
if (b->wa_glslang_179) {
/* Do on-the-fly copy propagation for samplers. */
if (ptr->var && ptr->var->copy_prop_sampler)
return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);
}
vtn_assert(!vtn_pointer_uses_ssa_offset(b, ptr));
if (!ptr->deref) {
struct vtn_access_chain chain = {
@@ -1044,6 +1038,25 @@ _vtn_variable_load_store(struct vtn_builder *b, bool load,
enum gl_access_qualifier access,
struct vtn_ssa_value **inout)
{
if (ptr->mode == vtn_variable_mode_uniform) {
if (ptr->type->base_type == vtn_base_type_image ||
ptr->type->base_type == vtn_base_type_sampler) {
/* See also our handling of OpTypeSampler and OpTypeImage */
vtn_assert(load);
(*inout)->def = vtn_pointer_to_ssa(b, ptr);
return;
} else if (ptr->type->base_type == vtn_base_type_sampled_image) {
/* See also our handling of OpTypeSampledImage */
vtn_assert(load);
struct vtn_sampled_image si = {
.image = vtn_pointer_to_deref(b, ptr),
.sampler = vtn_pointer_to_deref(b, ptr),
};
(*inout)->def = vtn_sampled_image_to_nir_ssa(b, si);
return;
}
}
enum glsl_base_type base_type = glsl_get_base_type(ptr->type->type);
switch (base_type) {
case GLSL_TYPE_UINT:
@@ -1965,20 +1978,6 @@ vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
ptr->type = ptr_type->deref;
ptr->ptr_type = ptr_type;
if (b->wa_glslang_179) {
/* To work around https://github.com/KhronosGroup/glslang/issues/179 we
* need to whack the mode because it creates a function parameter with
* the Function storage class even though it's a pointer to a sampler.
* If we don't do this, then NIR won't get rid of the deref_cast for us.
*/
if (ptr->mode == vtn_variable_mode_function &&
(ptr->type->base_type == vtn_base_type_sampler ||
ptr->type->base_type == vtn_base_type_sampled_image)) {
ptr->mode = vtn_variable_mode_uniform;
nir_mode = nir_var_uniform;
}
}
if (vtn_pointer_uses_ssa_offset(b, ptr)) {
/* This pointer type needs to have actual storage */
vtn_assert(ptr_type->type);
@@ -2130,10 +2129,12 @@ vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
b->shader->info.num_ssbos++;
break;
case vtn_variable_mode_uniform:
if (glsl_type_is_image(without_array->type))
b->shader->info.num_images++;
else if (glsl_type_is_sampler(without_array->type))
b->shader->info.num_textures++;
if (without_array->base_type == vtn_base_type_image) {
if (glsl_type_is_image(without_array->glsl_image))
b->shader->info.num_images++;
else if (glsl_type_is_sampler(without_array->glsl_image))
b->shader->info.num_textures++;
}
break;
case vtn_variable_mode_push_constant:
b->shader->num_uniforms = vtn_type_block_size(b, type);
@@ -2350,7 +2351,7 @@ vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
var->var->data.index = var->input_attachment_index;
var->var->data.offset = var->offset;
if (glsl_type_is_image(without_array->type))
if (glsl_type_is_image(glsl_without_array(var->var->type)))
var->var->data.image.format = without_array->image_format;
}
@@ -2493,33 +2494,12 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
}
struct vtn_type *ptr_type = vtn_get_type(b, w[1]);
struct vtn_value *base_val = vtn_untyped_value(b, w[3]);
if (base_val->value_type == vtn_value_type_sampled_image) {
/* This is rather insane. SPIR-V allows you to use OpSampledImage
* to combine an array of images with a single sampler to get an
* array of sampled images that all share the same sampler.
* Fortunately, this means that we can more-or-less ignore the
* sampler when crawling the access chain, but it does leave us
* with this rather awkward little special-case.
*/
struct vtn_value *val =
vtn_push_value(b, w[2], vtn_value_type_sampled_image);
val->sampled_image = ralloc(b, struct vtn_sampled_image);
val->sampled_image->image =
vtn_pointer_dereference(b, base_val->sampled_image->image, chain);
val->sampled_image->sampler = base_val->sampled_image->sampler;
val->sampled_image->image =
vtn_decorate_pointer(b, val, val->sampled_image->image);
val->sampled_image->sampler =
vtn_decorate_pointer(b, val, val->sampled_image->sampler);
} else {
vtn_assert(base_val->value_type == vtn_value_type_pointer);
struct vtn_pointer *ptr =
vtn_pointer_dereference(b, base_val->pointer, chain);
ptr->ptr_type = ptr_type;
ptr->access |= access;
vtn_push_pointer(b, w[2], ptr);
}
struct vtn_pointer *base =
vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
struct vtn_pointer *ptr = vtn_pointer_dereference(b, base, chain);
ptr->ptr_type = ptr_type;
ptr->access |= access;
vtn_push_pointer(b, w[2], ptr);
break;
}
@@ -2540,19 +2520,6 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
vtn_assert_types_equal(b, opcode, res_type, src_val->type->deref);
if (res_type->base_type == vtn_base_type_image ||
res_type->base_type == vtn_base_type_sampler) {
vtn_push_pointer(b, w[2], src);
return;
} else if (res_type->base_type == vtn_base_type_sampled_image) {
struct vtn_value *val =
vtn_push_value(b, w[2], vtn_value_type_sampled_image);
val->sampled_image = ralloc(b, struct vtn_sampled_image);
val->sampled_image->image = val->sampled_image->sampler =
vtn_decorate_pointer(b, val, src);
return;
}
if (count > 4) {
unsigned idx = 5;
SpvMemoryAccessMask access = w[4];
@@ -2603,24 +2570,6 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
vtn_assert_types_equal(b, opcode, dest_val->type->deref, src_val->type);
if (glsl_type_is_sampler(dest->type->type)) {
if (b->wa_glslang_179) {
vtn_warn("OpStore of a sampler detected. Doing on-the-fly copy "
"propagation to workaround the problem.");
vtn_assert(dest->var->copy_prop_sampler == NULL);
struct vtn_value *v = vtn_untyped_value(b, w[2]);
if (v->value_type == vtn_value_type_sampled_image) {
dest->var->copy_prop_sampler = v->sampled_image->sampler;
} else {
vtn_assert(v->value_type == vtn_value_type_pointer);
dest->var->copy_prop_sampler = v->pointer;
}
} else {
vtn_fail("Vulkan does not allow OpStore of a sampler or image.");
}
break;
}
struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
vtn_variable_store(b, src, dest);