glsl: Add an option to clamp block indices when lowering UBO/SSBOs

This prevents array overflow when the block is actually an array of UBOs or
SSBOs.  On some hardware such as i965, such overflows can cause GPU hangs.

Reviewed-by: Ian Romanick <ian.d.romanick@intel.com>
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
This commit is contained in:
Jason Ekstrand
2016-05-18 20:28:07 -07:00
parent ac242aac3d
commit 27b9481d03
5 changed files with 39 additions and 6 deletions

View File

@@ -123,7 +123,7 @@ bool lower_clip_cull_distance(struct gl_shader_program *prog, gl_shader *shader)
void lower_output_reads(unsigned stage, exec_list *instructions); void lower_output_reads(unsigned stage, exec_list *instructions);
bool lower_packing_builtins(exec_list *instructions, int op_mask); bool lower_packing_builtins(exec_list *instructions, int op_mask);
void lower_shared_reference(struct gl_shader *shader, unsigned *shared_size); void lower_shared_reference(struct gl_shader *shader, unsigned *shared_size);
void lower_ubo_reference(struct gl_shader *shader); void lower_ubo_reference(struct gl_shader *shader, bool clamp_block_indices);
void lower_packed_varyings(void *mem_ctx, void lower_packed_varyings(void *mem_ctx,
unsigned locations_used, ir_variable_mode mode, unsigned locations_used, ir_variable_mode mode,
unsigned gs_input_vertices, gl_shader *shader, unsigned gs_input_vertices, gl_shader *shader,

View File

@@ -4906,7 +4906,8 @@ link_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
&ctx->Const.ShaderCompilerOptions[i]; &ctx->Const.ShaderCompilerOptions[i];
if (options->LowerBufferInterfaceBlocks) if (options->LowerBufferInterfaceBlocks)
lower_ubo_reference(prog->_LinkedShaders[i]); lower_ubo_reference(prog->_LinkedShaders[i],
options->ClampBlockIndicesToArrayBounds);
if (options->LowerShaderSharedVariables) if (options->LowerShaderSharedVariables)
lower_shared_reference(prog->_LinkedShaders[i], lower_shared_reference(prog->_LinkedShaders[i],

View File

@@ -44,8 +44,10 @@ namespace {
class lower_ubo_reference_visitor : class lower_ubo_reference_visitor :
public lower_buffer_access::lower_buffer_access { public lower_buffer_access::lower_buffer_access {
public: public:
lower_ubo_reference_visitor(struct gl_shader *shader) lower_ubo_reference_visitor(struct gl_shader *shader,
: shader(shader), struct_field(NULL), variable(NULL) bool clamp_block_indices)
: shader(shader), clamp_block_indices(clamp_block_indices),
struct_field(NULL), variable(NULL)
{ {
} }
@@ -104,6 +106,7 @@ public:
ir_visitor_status visit_enter(ir_call *ir); ir_visitor_status visit_enter(ir_call *ir);
struct gl_shader *shader; struct gl_shader *shader;
bool clamp_block_indices;
struct gl_uniform_buffer_variable *ubo_var; struct gl_uniform_buffer_variable *ubo_var;
const struct glsl_struct_field *struct_field; const struct glsl_struct_field *struct_field;
ir_variable *variable; ir_variable *variable;
@@ -242,6 +245,26 @@ interface_field_name(void *mem_ctx, char *base_name, ir_rvalue *d,
return NULL; return NULL;
} }
/* Clamp a (possibly non-constant) block-array index into the valid range
 * [0, array_size - 1], so that an out-of-bounds index can never select a
 * binding outside the UBO/SSBO array.  `type` is the declared array type
 * of the interface block; `index` is the rvalue computing the element.
 */
static ir_rvalue *
clamp_to_array_bounds(void *mem_ctx, ir_rvalue *index, const glsl_type *type)
{
   assert(type->is_array());

   const unsigned last_elem = type->arrays_of_arrays_size() - 1;

   ir_constant *const upper_bound = new(mem_ctx) ir_constant(last_elem);
   upper_bound->type = index->type;

   /* Unsigned index types cannot go negative, so the lower clamp is only
    * required when the index is a signed integer.
    */
   if (index->type->base_type == GLSL_TYPE_INT) {
      ir_constant *const lower_bound = new(mem_ctx) ir_constant(0);
      lower_bound->type = index->type;
      index = max2(index, lower_bound);
   }

   return min2(index, upper_bound);
}
void void
lower_ubo_reference_visitor::setup_for_load_or_store(void *mem_ctx, lower_ubo_reference_visitor::setup_for_load_or_store(void *mem_ctx,
ir_variable *var, ir_variable *var,
@@ -258,6 +281,11 @@ lower_ubo_reference_visitor::setup_for_load_or_store(void *mem_ctx,
interface_field_name(mem_ctx, (char *) var->get_interface_type()->name, interface_field_name(mem_ctx, (char *) var->get_interface_type()->name,
deref, &nonconst_block_index); deref, &nonconst_block_index);
if (nonconst_block_index && clamp_block_indices) {
nonconst_block_index =
clamp_to_array_bounds(mem_ctx, nonconst_block_index, var->type);
}
/* Locate the block by interface name */ /* Locate the block by interface name */
unsigned num_blocks; unsigned num_blocks;
struct gl_uniform_block **blocks; struct gl_uniform_block **blocks;
@@ -1062,9 +1090,9 @@ lower_ubo_reference_visitor::visit_enter(ir_call *ir)
} /* unnamed namespace */ } /* unnamed namespace */
void void
lower_ubo_reference(struct gl_shader *shader) lower_ubo_reference(struct gl_shader *shader, bool clamp_block_indices)
{ {
lower_ubo_reference_visitor v(shader); lower_ubo_reference_visitor v(shader, clamp_block_indices);
/* Loop over the instructions lowering references, because we take /* Loop over the instructions lowering references, because we take
* a deref of a UBO array using a UBO dereference as the index will * a deref of a UBO array using a UBO dereference as the index will

View File

@@ -188,6 +188,7 @@ brw_compiler_create(void *mem_ctx, const struct brw_device_info *devinfo)
} }
compiler->glsl_compiler_options[i].LowerBufferInterfaceBlocks = true; compiler->glsl_compiler_options[i].LowerBufferInterfaceBlocks = true;
compiler->glsl_compiler_options[i].ClampBlockIndicesToArrayBounds = true;
} }
compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].EmitNoIndirectInput = false; compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].EmitNoIndirectInput = false;

View File

@@ -2951,6 +2951,9 @@ struct gl_shader_compiler_options
GLboolean LowerBufferInterfaceBlocks; /**< Lower UBO and SSBO access to intrinsics. */ GLboolean LowerBufferInterfaceBlocks; /**< Lower UBO and SSBO access to intrinsics. */
/** Clamp UBO and SSBO block indices so they don't go out-of-bounds. */
GLboolean ClampBlockIndicesToArrayBounds;
GLboolean LowerShaderSharedVariables; /**< Lower compute shader shared GLboolean LowerShaderSharedVariables; /**< Lower compute shader shared
* variable access to intrinsics. */ * variable access to intrinsics. */