mesa: Add GL/GLSL plumbing for ARB_fragment_shader_interlock.
This extension provides new GLSL built-in functions
beginInvocationInterlockARB() and endInvocationInterlockARB() that delimit
a critical section of fragment shader code. For pairs of shader invocations
with "overlapping" coverage in a given pixel, the OpenGL implementation
will guarantee that the critical section of the fragment shader will be
executed for only one fragment at a time.

Signed-off-by: Plamena Manolova <plamena.manolova@intel.com>
Reviewed-by: Francisco Jerez <currojerez@riseup.net>
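For context, a minimal GLSL usage sketch of the new built-ins (illustrative only, not part of this patch; the image uniform, its binding and the blend arithmetic are invented for the example). beginInvocationInterlockARB()/endInvocationInterlockARB() bracket the critical section, and the pixel_interlock_ordered layout qualifier additionally makes overlapping critical sections execute in primitive order:

    #version 430
    #extension GL_ARB_fragment_shader_interlock : require

    /* Request ordered, per-pixel interlock for this shader. */
    layout(pixel_interlock_ordered) in;

    /* Hypothetical image used for a per-pixel read-modify-write. */
    layout(binding = 0, rgba8) coherent uniform image2D color_buf;

    void main()
    {
       ivec2 coord = ivec2(gl_FragCoord.xy);

       beginInvocationInterlockARB();
       /* Critical section: fragments with overlapping coverage of this
        * pixel execute this region one at a time.
        */
       vec4 dst = imageLoad(color_buf, coord);
       imageStore(color_buf, coord, dst * 0.5 + vec4(0.25));
       endInvocationInterlockARB();
    }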
@@ -626,6 +626,16 @@ struct ast_type_qualifier {
        * Flag set if GL_ARB_post_depth_coverage layout qualifier is used.
        */
       unsigned post_depth_coverage:1;
+
+      /**
+       * Flags for the layout qualifiers added by ARB_fragment_shader_interlock
+       */
+
+      unsigned pixel_interlock_ordered:1;
+      unsigned pixel_interlock_unordered:1;
+      unsigned sample_interlock_ordered:1;
+      unsigned sample_interlock_unordered:1;
+
       /**
        * Flag set if GL_INTEL_conservartive_rasterization layout qualifier
        * is used.
@@ -3897,6 +3897,16 @@ apply_layout_qualifier_to_variable(const struct ast_type_qualifier *qual,

    if (state->has_bindless())
       apply_bindless_qualifier_to_variable(qual, var, state, loc);
+
+   if (qual->flags.q.pixel_interlock_ordered ||
+       qual->flags.q.pixel_interlock_unordered ||
+       qual->flags.q.sample_interlock_ordered ||
+       qual->flags.q.sample_interlock_unordered) {
+      _mesa_glsl_error(loc, state, "interlock layout qualifiers: "
+                       "pixel_interlock_ordered, pixel_interlock_unordered, "
+                       "sample_interlock_ordered and sample_interlock_unordered, "
+                       "only valid in fragment shader input layout declaration.");
+   }
 }

 static void
@@ -637,6 +637,10 @@ ast_type_qualifier::validate_in_qualifier(YYLTYPE *loc,
       valid_in_mask.flags.q.early_fragment_tests = 1;
       valid_in_mask.flags.q.inner_coverage = 1;
       valid_in_mask.flags.q.post_depth_coverage = 1;
+      valid_in_mask.flags.q.pixel_interlock_ordered = 1;
+      valid_in_mask.flags.q.pixel_interlock_unordered = 1;
+      valid_in_mask.flags.q.sample_interlock_ordered = 1;
+      valid_in_mask.flags.q.sample_interlock_unordered = 1;
       break;
    case MESA_SHADER_COMPUTE:
       valid_in_mask.flags.q.local_size = 7;
@@ -708,6 +712,35 @@ ast_type_qualifier::merge_into_in_qualifier(YYLTYPE *loc,
       r = false;
    }

+   if (state->in_qualifier->flags.q.pixel_interlock_ordered) {
+      state->fs_pixel_interlock_ordered = true;
+      state->in_qualifier->flags.q.pixel_interlock_ordered = false;
+   }
+
+   if (state->in_qualifier->flags.q.pixel_interlock_unordered) {
+      state->fs_pixel_interlock_unordered = true;
+      state->in_qualifier->flags.q.pixel_interlock_unordered = false;
+   }
+
+   if (state->in_qualifier->flags.q.sample_interlock_ordered) {
+      state->fs_sample_interlock_ordered = true;
+      state->in_qualifier->flags.q.sample_interlock_ordered = false;
+   }
+
+   if (state->in_qualifier->flags.q.sample_interlock_unordered) {
+      state->fs_sample_interlock_unordered = true;
+      state->in_qualifier->flags.q.sample_interlock_unordered = false;
+   }
+
+   if (state->fs_pixel_interlock_ordered +
+       state->fs_pixel_interlock_unordered +
+       state->fs_sample_interlock_ordered +
+       state->fs_sample_interlock_unordered > 1) {
+      _mesa_glsl_error(loc, state,
+                       "only one interlock mode can be used at any time.");
+      r = false;
+   }
+
    /* We allow the creation of multiple cs_input_layout nodes. Coherence among
     * all existing nodes is checked later, when the AST node is transformed
     * into HIR.
@@ -776,7 +809,7 @@ ast_type_qualifier::validate_flags(YYLTYPE *loc,
                        "%s '%s':"
                        "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
                        "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
-                       "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+                       "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
                        message, name,
                        bad.flags.q.invariant ? " invariant" : "",
                        bad.flags.q.precise ? " precise" : "",
@@ -840,6 +873,10 @@ ast_type_qualifier::validate_flags(YYLTYPE *loc,
                        bad.flags.q.bound_sampler ? " bound_sampler" : "",
                        bad.flags.q.bound_image ? " bound_image" : "",
                        bad.flags.q.post_depth_coverage ? " post_depth_coverage" : "",
+                       bad.flags.q.pixel_interlock_ordered ? " pixel_interlock_ordered" : "",
+                       bad.flags.q.pixel_interlock_unordered ? " pixel_interlock_unordered": "",
+                       bad.flags.q.sample_interlock_ordered ? " sample_interlock_ordered": "",
+                       bad.flags.q.sample_interlock_unordered ? " sample_interlock_unordered": "",
                        bad.flags.q.non_coherent ? " noncoherent" : "");
       return false;
    }
@@ -512,6 +512,12 @@ shader_ballot(const _mesa_glsl_parse_state *state)
    return state->ARB_shader_ballot_enable;
 }

+static bool
+supports_arb_fragment_shader_interlock(const _mesa_glsl_parse_state *state)
+{
+   return state->ARB_fragment_shader_interlock_enable;
+}
+
 static bool
 shader_clock(const _mesa_glsl_parse_state *state)
 {
@@ -982,6 +988,14 @@ private:
    ir_function_signature *_read_invocation_intrinsic(const glsl_type *type);
    ir_function_signature *_read_invocation(const glsl_type *type);

+
+   ir_function_signature *_invocation_interlock_intrinsic(
+      builtin_available_predicate avail,
+      enum ir_intrinsic_id id);
+   ir_function_signature *_invocation_interlock(
+      const char *intrinsic_name,
+      builtin_available_predicate avail);
+
    ir_function_signature *_shader_clock_intrinsic(builtin_available_predicate avail,
                                                   const glsl_type *type);
    ir_function_signature *_shader_clock(builtin_available_predicate avail,
@@ -1219,6 +1233,16 @@ builtin_builder::create_intrinsics()
                                           ir_intrinsic_memory_barrier_shared),
                NULL);

+   add_function("__intrinsic_begin_invocation_interlock",
+                _invocation_interlock_intrinsic(
+                   supports_arb_fragment_shader_interlock,
+                   ir_intrinsic_begin_invocation_interlock), NULL);
+
+   add_function("__intrinsic_end_invocation_interlock",
+                _invocation_interlock_intrinsic(
+                   supports_arb_fragment_shader_interlock,
+                   ir_intrinsic_end_invocation_interlock), NULL);
+
    add_function("__intrinsic_shader_clock",
                 _shader_clock_intrinsic(shader_clock,
                                         glsl_type::uvec2_type),
@@ -3294,6 +3318,18 @@ builtin_builder::create_builtins()
                               glsl_type::uint64_t_type),
                 NULL);

+   add_function("beginInvocationInterlockARB",
+                _invocation_interlock(
+                   "__intrinsic_begin_invocation_interlock",
+                   supports_arb_fragment_shader_interlock),
+                NULL);
+
+   add_function("endInvocationInterlockARB",
+                _invocation_interlock(
+                   "__intrinsic_end_invocation_interlock",
+                   supports_arb_fragment_shader_interlock),
+                NULL);
+
    add_function("anyInvocationARB",
                 _vote("__intrinsic_vote_any", vote),
                 NULL);
@@ -6227,6 +6263,24 @@ builtin_builder::_read_invocation(const glsl_type *type)
    return sig;
 }

+ir_function_signature *
+builtin_builder::_invocation_interlock_intrinsic(builtin_available_predicate avail,
+                                                 enum ir_intrinsic_id id)
+{
+   MAKE_INTRINSIC(glsl_type::void_type, id, avail, 0);
+   return sig;
+}
+
+ir_function_signature *
+builtin_builder::_invocation_interlock(const char *intrinsic_name,
+                                       builtin_available_predicate avail)
+{
+   MAKE_SIG(glsl_type::void_type, avail, 0);
+   body.emit(call(shader->symbols->get_function(intrinsic_name),
+                  NULL, sig->parameters));
+   return sig;
+}
+
 ir_function_signature *
 builtin_builder::_shader_clock_intrinsic(builtin_available_predicate avail,
                                          const glsl_type *type)
@@ -1432,6 +1432,36 @@ layout_qualifier_id:
          }
       }

+      const bool pixel_interlock_ordered = match_layout_qualifier($1,
+         "pixel_interlock_ordered", state) == 0;
+      const bool pixel_interlock_unordered = match_layout_qualifier($1,
+         "pixel_interlock_unordered", state) == 0;
+      const bool sample_interlock_ordered = match_layout_qualifier($1,
+         "sample_interlock_ordered", state) == 0;
+      const bool sample_interlock_unordered = match_layout_qualifier($1,
+         "sample_interlock_unordered", state) == 0;
+
+      if (pixel_interlock_ordered + pixel_interlock_unordered +
+          sample_interlock_ordered + sample_interlock_unordered > 0 &&
+          state->stage != MESA_SHADER_FRAGMENT) {
+         _mesa_glsl_error(& @1, state, "interlock layout qualifiers: "
+                          "pixel_interlock_ordered, pixel_interlock_unordered, "
+                          "sample_interlock_ordered and sample_interlock_unordered, "
+                          "only valid in fragment shader input layout declaration.");
+      } else if (pixel_interlock_ordered + pixel_interlock_unordered +
+                 sample_interlock_ordered + sample_interlock_unordered > 0 &&
+                 !state->ARB_fragment_shader_interlock_enable) {
+         _mesa_glsl_error(& @1, state,
+                          "interlock layout qualifier present, but the "
+                          "GL_ARB_fragment_shader_interlock extension is not "
+                          "enabled.");
+      } else {
+         $$.flags.q.pixel_interlock_ordered = pixel_interlock_ordered;
+         $$.flags.q.pixel_interlock_unordered = pixel_interlock_unordered;
+         $$.flags.q.sample_interlock_ordered = sample_interlock_ordered;
+         $$.flags.q.sample_interlock_unordered = sample_interlock_unordered;
+      }
+
       /* Layout qualifiers for tessellation evaluation shaders. */
       if (!$$.flags.i) {
          static const struct {
@@ -299,6 +299,10 @@ _mesa_glsl_parse_state::_mesa_glsl_parse_state(struct gl_context *_ctx,
    this->fs_early_fragment_tests = false;
    this->fs_inner_coverage = false;
    this->fs_post_depth_coverage = false;
+   this->fs_pixel_interlock_ordered = false;
+   this->fs_pixel_interlock_unordered = false;
+   this->fs_sample_interlock_ordered = false;
+   this->fs_sample_interlock_unordered = false;
    this->fs_blend_support = 0;
    memset(this->atomic_counter_offsets, 0,
           sizeof(this->atomic_counter_offsets));
@@ -630,6 +634,7 @@ static const _mesa_glsl_extension _mesa_glsl_supported_extensions[] = {
    EXT(ARB_explicit_uniform_location),
    EXT(ARB_fragment_coord_conventions),
    EXT(ARB_fragment_layer_viewport),
+   EXT(ARB_fragment_shader_interlock),
    EXT(ARB_gpu_shader5),
    EXT(ARB_gpu_shader_fp64),
    EXT(ARB_gpu_shader_int64),
@@ -1721,6 +1726,10 @@ set_shader_inout_layout(struct gl_shader *shader,
       assert(!state->fs_early_fragment_tests);
       assert(!state->fs_inner_coverage);
       assert(!state->fs_post_depth_coverage);
+      assert(!state->fs_pixel_interlock_ordered);
+      assert(!state->fs_pixel_interlock_unordered);
+      assert(!state->fs_sample_interlock_ordered);
+      assert(!state->fs_sample_interlock_unordered);
    }

    for (unsigned i = 0; i < MAX_FEEDBACK_BUFFERS; i++) {
@@ -1842,6 +1851,10 @@ set_shader_inout_layout(struct gl_shader *shader,
       shader->EarlyFragmentTests = state->fs_early_fragment_tests;
       shader->InnerCoverage = state->fs_inner_coverage;
       shader->PostDepthCoverage = state->fs_post_depth_coverage;
+      shader->PixelInterlockOrdered = state->fs_pixel_interlock_ordered;
+      shader->PixelInterlockUnordered = state->fs_pixel_interlock_unordered;
+      shader->SampleInterlockOrdered = state->fs_sample_interlock_ordered;
+      shader->SampleInterlockUnordered = state->fs_sample_interlock_unordered;
      shader->BlendSupport = state->fs_blend_support;
      break;

@@ -639,6 +639,8 @@ struct _mesa_glsl_parse_state {
    bool ARB_fragment_coord_conventions_warn;
    bool ARB_fragment_layer_viewport_enable;
    bool ARB_fragment_layer_viewport_warn;
+   bool ARB_fragment_shader_interlock_enable;
+   bool ARB_fragment_shader_interlock_warn;
    bool ARB_gpu_shader5_enable;
    bool ARB_gpu_shader5_warn;
    bool ARB_gpu_shader_fp64_enable;
@@ -833,6 +835,11 @@ struct _mesa_glsl_parse_state {

    bool fs_post_depth_coverage;

+   bool fs_pixel_interlock_ordered;
+   bool fs_pixel_interlock_unordered;
+   bool fs_sample_interlock_ordered;
+   bool fs_sample_interlock_unordered;
+
    unsigned fs_blend_support;

    /**
@@ -752,6 +752,12 @@ nir_visitor::visit(ir_call *ir)
    case ir_intrinsic_shader_clock:
       op = nir_intrinsic_shader_clock;
       break;
+   case ir_intrinsic_begin_invocation_interlock:
+      op = nir_intrinsic_begin_invocation_interlock;
+      break;
+   case ir_intrinsic_end_invocation_interlock:
+      op = nir_intrinsic_end_invocation_interlock;
+      break;
    case ir_intrinsic_group_memory_barrier:
       op = nir_intrinsic_group_memory_barrier;
       break;
@@ -970,6 +976,12 @@ nir_visitor::visit(ir_call *ir)
      instr->num_components = 2;
      nir_builder_instr_insert(&b, &instr->instr);
      break;
+   case nir_intrinsic_begin_invocation_interlock:
+      nir_builder_instr_insert(&b, &instr->instr);
+      break;
+   case nir_intrinsic_end_invocation_interlock:
+      nir_builder_instr_insert(&b, &instr->instr);
+      break;
    case nir_intrinsic_store_ssbo: {
       exec_node *param = ir->actual_parameters.get_head();
       ir_rvalue *block = ((ir_instruction *)param)->as_rvalue();
@@ -1120,6 +1120,8 @@ enum ir_intrinsic_id {
    ir_intrinsic_memory_barrier_buffer,
    ir_intrinsic_memory_barrier_image,
    ir_intrinsic_memory_barrier_shared,
+   ir_intrinsic_begin_invocation_interlock,
+   ir_intrinsic_end_invocation_interlock,

    ir_intrinsic_vote_all,
    ir_intrinsic_vote_any,
@@ -1978,6 +1978,14 @@ link_fs_inout_layout_qualifiers(struct gl_shader_program *prog,
      linked_shader->Program->info.fs.inner_coverage |= shader->InnerCoverage;
      linked_shader->Program->info.fs.post_depth_coverage |=
         shader->PostDepthCoverage;
+      linked_shader->Program->info.fs.pixel_interlock_ordered |=
+         shader->PixelInterlockOrdered;
+      linked_shader->Program->info.fs.pixel_interlock_unordered |=
+         shader->PixelInterlockUnordered;
+      linked_shader->Program->info.fs.sample_interlock_ordered |=
+         shader->SampleInterlockOrdered;
+      linked_shader->Program->info.fs.sample_interlock_unordered |=
+         shader->SampleInterlockUnordered;

      linked_shader->Program->sh.fs.BlendSupport |= shader->BlendSupport;
   }
@@ -188,6 +188,8 @@ barrier("memory_barrier_atomic_counter")
 barrier("memory_barrier_buffer")
 barrier("memory_barrier_image")
 barrier("memory_barrier_shared")
+barrier("begin_invocation_interlock")
+barrier("end_invocation_interlock")

 # A conditional discard, with a single boolean source.
 intrinsic("discard_if", src_comp=[1])
@@ -179,6 +179,11 @@ typedef struct shader_info {

      bool pixel_center_integer;

+      bool pixel_interlock_ordered;
+      bool pixel_interlock_unordered;
+      bool sample_interlock_ordered;
+      bool sample_interlock_unordered;
+
      /** gl_FragDepth layout for ARB_conservative_depth. */
      enum gl_frag_depth_layout depth_layout;
   } fs;
@@ -68,6 +68,7 @@ EXT(ARB_fragment_layer_viewport        , ARB_fragment_layer_viewport
 EXT(ARB_fragment_program               , ARB_fragment_program               , GLL,  x ,  x ,  x , 2002)
 EXT(ARB_fragment_program_shadow        , ARB_fragment_program_shadow        , GLL,  x ,  x ,  x , 2003)
 EXT(ARB_fragment_shader                , ARB_fragment_shader                , GLL, GLC,  x ,  x , 2002)
+EXT(ARB_fragment_shader_interlock      , ARB_fragment_shader_interlock      , GLL, GLC,  x ,  x , 2015)
 EXT(ARB_framebuffer_no_attachments     , ARB_framebuffer_no_attachments     , GLL, GLC,  x ,  x , 2012)
 EXT(ARB_framebuffer_object             , ARB_framebuffer_object             , GLL, GLC,  x ,  x , 2005)
 EXT(ARB_framebuffer_sRGB               , EXT_framebuffer_sRGB               , GLL, GLC,  x ,  x , 1998)
@@ -2599,6 +2599,10 @@ struct gl_shader
   bool uses_gl_fragcoord;

   bool PostDepthCoverage;
+   bool PixelInterlockOrdered;
+   bool PixelInterlockUnordered;
+   bool SampleInterlockOrdered;
+   bool SampleInterlockUnordered;
   bool InnerCoverage;

   /**
@@ -4074,6 +4078,7 @@ struct gl_extensions
   GLboolean ARB_fragment_shader;
   GLboolean ARB_framebuffer_no_attachments;
   GLboolean ARB_framebuffer_object;
+   GLboolean ARB_fragment_shader_interlock;
   GLboolean ARB_enhanced_layouts;
   GLboolean ARB_explicit_attrib_location;
   GLboolean ARB_explicit_uniform_location;