nir: Add a flag to lower_io to force "sample" interpolation

Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Anuj Phogat <anuj.phogat@gmail.com>
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
commit ed65e6ef49
parent 114874b22b
Author: Jason Ekstrand <jason@jlekstrand.net>
Date:   2016-09-14 10:29:38 -07:00

7 changed files with 38 additions and 21 deletions

@@ -2397,9 +2397,17 @@ void nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
                               unsigned base_offset,
                               int (*type_size)(const struct glsl_type *));
 
+typedef enum {
+   /* If set, this forces all non-flat fragment shader inputs to be
+    * interpolated as if with the "sample" qualifier.  This requires
+    * nir_shader_compiler_options::use_interpolated_input_intrinsics.
+    */
+   nir_lower_io_force_sample_interpolation = (1 << 1),
+} nir_lower_io_options;
+
 void nir_lower_io(nir_shader *shader,
                   nir_variable_mode modes,
-                  int (*type_size)(const struct glsl_type *));
+                  int (*type_size)(const struct glsl_type *),
+                  nir_lower_io_options);
 
 nir_src *nir_get_io_offset_src(nir_intrinsic_instr *instr);
 nir_src *nir_get_io_vertex_index_src(nir_intrinsic_instr *instr);

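For context, a hypothetical call site (not part of this diff) that opts in might look like the following; callers that do not want the behavior pass 0 for the new parameter, as every updated call site below does:

/* Hypothetical driver code: force per-sample interpolation of all
 * non-flat fragment shader inputs.  Requires the compiler to set
 * nir_shader_compiler_options::use_interpolated_input_intrinsics. */
nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
             nir_lower_io_force_sample_interpolation);
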
@@ -39,6 +39,7 @@ struct lower_io_state {
    void *mem_ctx;
    int (*type_size)(const struct glsl_type *type);
    nir_variable_mode modes;
+   nir_lower_io_options options;
 };
 
 void
@@ -205,7 +206,8 @@ lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
       assert(vertex_index == NULL);
 
       nir_intrinsic_op bary_op;
-      if (var->data.sample)
+      if (var->data.sample ||
+          (state->options & nir_lower_io_force_sample_interpolation))
          bary_op = nir_intrinsic_load_barycentric_sample;
       else if (var->data.centroid)
         bary_op = nir_intrinsic_load_barycentric_centroid;
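The resulting selection logic boils down to the following condensed sketch (pick_barycentric_op is a made-up name, and the flat-input path, which uses no barycentrics at all, is omitted):

static nir_intrinsic_op
pick_barycentric_op(const nir_variable *var, nir_lower_io_options options)
{
   /* The new flag acts exactly as if the variable had the "sample"
    * qualifier, so it shares a branch with var->data.sample. */
   if (var->data.sample ||
       (options & nir_lower_io_force_sample_interpolation))
      return nir_intrinsic_load_barycentric_sample;
   else if (var->data.centroid)
      return nir_intrinsic_load_barycentric_centroid;
   else
      return nir_intrinsic_load_barycentric_pixel;
}
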
@@ -347,7 +349,9 @@ lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
    nir_intrinsic_op bary_op;
    switch (intrin->intrinsic) {
    case nir_intrinsic_interp_var_at_centroid:
-      bary_op = nir_intrinsic_load_barycentric_centroid;
+      bary_op = (state->options & nir_lower_io_force_sample_interpolation) ?
+                nir_intrinsic_load_barycentric_sample :
+                nir_intrinsic_load_barycentric_centroid;
       break;
    case nir_intrinsic_interp_var_at_sample:
       bary_op = nir_intrinsic_load_barycentric_at_sample;
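Only interp_var_at_centroid is remapped here: interpolateAtCentroid() is the one explicit-interpolation builtin whose result would otherwise disagree with forced per-sample interpolation, while interp_var_at_sample and interp_var_at_offset already name an explicit sample or offset and are therefore left alone.
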
@@ -505,7 +509,8 @@ nir_lower_io_block(nir_block *block,
 
 static void
 nir_lower_io_impl(nir_function_impl *impl,
                   nir_variable_mode modes,
-                  int (*type_size)(const struct glsl_type *))
+                  int (*type_size)(const struct glsl_type *),
+                  nir_lower_io_options options)
 {
    struct lower_io_state state;
@@ -513,6 +518,7 @@ nir_lower_io_impl(nir_function_impl *impl,
    state.mem_ctx = ralloc_parent(impl);
    state.modes = modes;
    state.type_size = type_size;
+   state.options = options;
 
    nir_foreach_block(block, impl) {
       nir_lower_io_block(block, &state);
@@ -524,11 +530,13 @@ nir_lower_io_impl(nir_function_impl *impl,
 
 void
 nir_lower_io(nir_shader *shader, nir_variable_mode modes,
-             int (*type_size)(const struct glsl_type *))
+             int (*type_size)(const struct glsl_type *),
+             nir_lower_io_options options)
 {
    nir_foreach_function(function, shader) {
-      if (function->impl)
-         nir_lower_io_impl(function->impl, modes, type_size);
+      if (function->impl) {
+         nir_lower_io_impl(function->impl, modes, type_size, options);
+      }
    }
 }
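That completes the core plumbing. The remaining hunks mechanically update every existing nir_lower_io() caller to pass 0 for the new options argument, which leaves their behavior unchanged.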

@@ -93,7 +93,7 @@ load_glsl(unsigned num_files, char* const* files, gl_shader_stage stage)
    // TODO nir_assign_var_locations??
 
    NIR_PASS_V(nir, nir_lower_system_values);
-   NIR_PASS_V(nir, nir_lower_io, nir_var_all, st_glsl_type_size);
+   NIR_PASS_V(nir, nir_lower_io, nir_var_all, st_glsl_type_size, 0);
    NIR_PASS_V(nir, nir_lower_samplers, prog);
 
    return nir;

@@ -204,7 +204,7 @@ brw_blorp_compile_nir_shader(struct blorp_context *blorp, struct nir_shader *nir
       unsigned end = var->data.location + nir_uniform_type_size(var->type);
       nir->num_uniforms = MAX2(nir->num_uniforms, end);
    }
-   nir_lower_io(nir, nir_var_uniform, nir_uniform_type_size);
+   nir_lower_io(nir, nir_var_uniform, nir_uniform_type_size, 0);
 
    const unsigned *program =
       brw_compile_fs(compiler, blorp->driver_ctx, mem_ctx,

@@ -205,7 +205,7 @@ brw_nir_lower_vs_inputs(nir_shader *nir,
     * loaded as one vec4 or dvec4 per element (or matrix column), depending on
     * whether it is a double-precision type or not.
     */
-   nir_lower_io(nir, nir_var_shader_in, type_size_vs_input);
+   nir_lower_io(nir, nir_var_shader_in, type_size_vs_input, 0);
 
    /* This pass needs actual constants */
    nir_opt_constant_folding(nir);
@@ -237,7 +237,7 @@ brw_nir_lower_vue_inputs(nir_shader *nir, bool is_scalar,
    }
 
    /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
-   nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
+   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);
 
    if (is_scalar || nir->stage != MESA_SHADER_GEOMETRY) {
       /* This pass needs actual constants */
@@ -262,7 +262,7 @@ brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
       var->data.driver_location = var->data.location;
    }
 
-   nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
+   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);
 
    /* This pass needs actual constants */
    nir_opt_constant_folding(nir);
@@ -287,7 +287,7 @@ brw_nir_lower_fs_inputs(nir_shader *nir)
       var->data.driver_location = var->data.location;
    }
 
-   nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
+   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);
 
    /* This pass needs actual constants */
    nir_opt_constant_folding(nir);
@@ -303,11 +303,11 @@ brw_nir_lower_vue_outputs(nir_shader *nir,
       nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
                                VARYING_SLOT_VAR0,
                                type_size_vec4_times_4);
-      nir_lower_io(nir, nir_var_shader_out, type_size_vec4_times_4);
+      nir_lower_io(nir, nir_var_shader_out, type_size_vec4_times_4, 0);
    } else {
       nir_foreach_variable(var, &nir->outputs)
         var->data.driver_location = var->data.location;
-      nir_lower_io(nir, nir_var_shader_out, type_size_vec4);
+      nir_lower_io(nir, nir_var_shader_out, type_size_vec4, 0);
    }
 }
@@ -318,7 +318,7 @@ brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map)
      var->data.driver_location = var->data.location;
    }
 
-   nir_lower_io(nir, nir_var_shader_out, type_size_vec4);
+   nir_lower_io(nir, nir_var_shader_out, type_size_vec4, 0);
 
    /* This pass needs actual constants */
    nir_opt_constant_folding(nir);
@@ -345,7 +345,7 @@ brw_nir_lower_fs_outputs(nir_shader *nir)
         SET_FIELD(var->data.location, BRW_NIR_FRAG_OUTPUT_LOCATION);
    }
 
-   nir_lower_io(nir, nir_var_shader_out, type_size_dvec4);
+   nir_lower_io(nir, nir_var_shader_out, type_size_dvec4, 0);
 }
 
 void
@@ -353,7 +353,7 @@ brw_nir_lower_cs_shared(nir_shader *nir)
 {
    nir_assign_var_locations(&nir->shared, &nir->num_shared, 0,
                             type_size_scalar_bytes);
-   nir_lower_io(nir, nir_var_shared, type_size_scalar_bytes);
+   nir_lower_io(nir, nir_var_shared, type_size_scalar_bytes, 0);
 }
 
 #define OPT(pass, ...) ({ \

@@ -53,11 +53,11 @@ brw_nir_lower_uniforms(nir_shader *nir, bool is_scalar)
    if (is_scalar) {
       nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms, 0,
                                type_size_scalar_bytes);
-      nir_lower_io(nir, nir_var_uniform, type_size_scalar_bytes);
+      nir_lower_io(nir, nir_var_uniform, type_size_scalar_bytes, 0);
    } else {
       nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms, 0,
                                type_size_vec4_bytes);
-      nir_lower_io(nir, nir_var_uniform, type_size_vec4_bytes);
+      nir_lower_io(nir, nir_var_uniform, type_size_vec4_bytes, 0);
    }
 }

@@ -359,7 +359,8 @@ st_finalize_nir(struct st_context *st, struct gl_program *prog, nir_shader *nir)
                          &nir->uniforms, &nir->num_uniforms);
 
    NIR_PASS_V(nir, nir_lower_system_values);
-   NIR_PASS_V(nir, nir_lower_io, nir_var_all, st_glsl_type_size);
+   NIR_PASS_V(nir, nir_lower_io, nir_var_all, st_glsl_type_size,
+              (nir_lower_io_options)0);
    NIR_PASS_V(nir, nir_lower_samplers, shader_program);
 }
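Note that this last call site spells the zero as (nir_lower_io_options)0 rather than a bare 0, presumably because the file is compiled as C++, where an integer does not implicitly convert to an enum parameter type.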