nir: use the same driver location for packed varyings
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
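
With ARB_enhanced_layouts, two variables can be packed into the same
location slot at different components. nir_assign_var_locations() used to
hand every variable a fresh driver location, so the members of a packed
slot ended up in different back-end slots. This patch threads a base_offset
through the helper and records the first driver location assigned to each
(location, index) pair, so later occupants of the slot reuse it. Below is a
minimal standalone sketch of that table logic, not Mesa code: the struct,
the assign() helper, and the location values 40/41 are simplified stand-ins
(the real callers pass VARYING_SLOT_VAR0 or FRAG_RESULT_DATA0 as the base).

#include <assert.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Simplified stand-ins for the nir_variable fields the patch reads. */
struct var {
   int location;        /* var->data.location */
   int index;           /* var->data.index */
   int size;            /* stand-in for type_size(var->type) */
   int driver_location;
};

static void
assign(struct var *vars, unsigned count, unsigned base_offset)
{
   unsigned location = 0;
   int locations[64][2];
   for (unsigned i = 0; i < 64; i++)
      for (unsigned j = 0; j < 2; j++)
         locations[i][j] = -1;

   for (unsigned v = 0; v < count; v++) {
      int idx = vars[v].location - (int)base_offset;
      if (base_offset && idx >= 0) {
         assert(idx < (int)ARRAY_SIZE(locations));
         if (locations[idx][vars[v].index] == -1) {
            /* First occupant of the slot reserves the space. */
            vars[v].driver_location = location;
            locations[idx][vars[v].index] = location;
            location += vars[v].size;
         } else {
            /* Packed with an earlier variable: reuse its location. */
            vars[v].driver_location = locations[idx][vars[v].index];
         }
      } else {
         vars[v].driver_location = location;
         location += vars[v].size;
      }
   }
}

int
main(void)
{
   /* Two varyings packed into slot 40 plus one in slot 41. */
   struct var vars[] = {
      { 40, 0, 1, -1 },
      { 40, 0, 1, -1 },
      { 41, 0, 1, -1 },
   };
   assign(vars, 3, 40);
   for (unsigned i = 0; i < 3; i++)
      printf("var %u -> driver_location %d\n", i, vars[i].driver_location);
   return 0;
}

This prints 0, 0, 1: the packed pair now shares driver location 0 instead
of occupying two locations.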
@@ -2312,8 +2312,8 @@ void nir_lower_io_to_temporaries(nir_shader *shader, nir_function *entrypoint,
 void nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint);
 
-void nir_assign_var_locations(struct exec_list *var_list,
-                              unsigned *size,
+void nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
+                              unsigned base_offset,
                               int (*type_size)(const struct glsl_type *));
 
 void nir_lower_io(nir_shader *shader,
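
The new base_offset parameter is the first shader location of the name
space being assigned: variables at or above it index the packing table from
zero, and passing 0 disables the table entirely (the if (base_offset &&
idx >= 0) test in the implementation below then always takes the sequential
path). The two patterns, excerpted from the call-site hunks further down:

   /* Varyings: track slots relative to the first generic slot. */
   nir_assign_var_locations(&nir->inputs, &nir->num_inputs,
                            VARYING_SLOT_VAR0, type_size_scalar);

   /* Uniforms and shared memory have no slot packing; 0 disables it. */
   nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms, 0,
                            type_size_scalar_bytes);
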
@@ -43,10 +43,18 @@ struct lower_io_state {
 void
 nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
+                         unsigned base_offset,
                          int (*type_size)(const struct glsl_type *))
 {
    unsigned location = 0;
 
+   /* There are 32 regular and 32 patch varyings allowed */
+   int locations[64][2];
+   for (unsigned i = 0; i < 64; i++) {
+      for (unsigned j = 0; j < 2; j++)
+         locations[i][j] = -1;
+   }
+
    nir_foreach_variable(var, var_list) {
       /*
        * UBO's have their own address spaces, so don't count them towards the
@@ -56,9 +64,25 @@ nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
           var->interface_type != NULL)
          continue;
 
-      var->data.driver_location = location;
-      location += type_size(var->type);
+      /* Make sure we give the same location to varyings packed with
+       * ARB_enhanced_layouts.
+       */
+      int idx = var->data.location - base_offset;
+      if (base_offset && idx >= 0) {
+         assert(idx < ARRAY_SIZE(locations));
+
+         if (locations[idx][var->data.index] == -1) {
+            var->data.driver_location = location;
+            locations[idx][var->data.index] = location;
+            location += type_size(var->type);
+         } else {
+            var->data.driver_location = locations[idx][var->data.index];
+         }
+      } else {
+         var->data.driver_location = location;
+         location += type_size(var->type);
+      }
    }
 
    *size = location;
 }
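
A short worked trace of the new branch, assuming two hypothetical varyings
A and B packed into the same slot, both with type_size() == 1:

/* A: location = base_offset + 5, index 0
 *    locations[5][0] == -1  -> A.driver_location = 0,
 *                              locations[5][0] = 0, location becomes 1
 * B: location = base_offset + 5, index 0
 *    locations[5][0] == 0   -> B.driver_location = 0 (reused),
 *                              location stays 1
 *
 * Before this patch B would have received driver_location 1, so the two
 * members of the packed slot addressed different back-end locations and
 * *size counted the slot twice.
 */
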
@@ -282,7 +282,8 @@ brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
 void
 brw_nir_lower_fs_inputs(nir_shader *nir)
 {
-   nir_assign_var_locations(&nir->inputs, &nir->num_inputs, type_size_scalar);
+   nir_assign_var_locations(&nir->inputs, &nir->num_inputs, VARYING_SLOT_VAR0,
+                            type_size_scalar);
    nir_lower_io(nir, nir_var_shader_in, type_size_scalar);
 }
@@ -292,6 +293,7 @@ brw_nir_lower_vue_outputs(nir_shader *nir,
 {
    if (is_scalar) {
       nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
+                               VARYING_SLOT_VAR0,
                                type_size_scalar);
       nir_lower_io(nir, nir_var_shader_out, type_size_scalar);
    } else {
@@ -330,14 +332,14 @@ void
 brw_nir_lower_fs_outputs(nir_shader *nir)
 {
    nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
-                            type_size_scalar);
+                            FRAG_RESULT_DATA0, type_size_scalar);
    nir_lower_io(nir, nir_var_shader_out, type_size_scalar);
 }
 
 void
 brw_nir_lower_cs_shared(nir_shader *nir)
 {
-   nir_assign_var_locations(&nir->shared, &nir->num_shared,
+   nir_assign_var_locations(&nir->shared, &nir->num_shared, 0,
                             type_size_scalar_bytes);
    nir_lower_io(nir, nir_var_shared, type_size_scalar_bytes);
 }
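
Fragment outputs pass FRAG_RESULT_DATA0 so the data outputs index the table
from zero, while the table's second dimension (var->data.index) keeps apart
outputs that share a location, as with dual-source blending; a hypothetical
GLSL pair, shown as a comment:

/* layout(location = 0, index = 0) out vec4 src0;  -> locations[idx][0]
 * layout(location = 0, index = 1) out vec4 src1;  -> locations[idx][1]
 */
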
@@ -51,11 +51,11 @@ static void
 brw_nir_lower_uniforms(nir_shader *nir, bool is_scalar)
 {
    if (is_scalar) {
-      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
+      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms, 0,
                                type_size_scalar_bytes);
       nir_lower_io(nir, nir_var_uniform, type_size_scalar_bytes);
    } else {
-      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
+      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms, 0,
                                type_size_vec4_bytes);
       nir_lower_io(nir, nir_var_uniform, type_size_vec4_bytes);
    }
@@ -308,16 +308,19 @@ st_finalize_nir(struct st_context *st, struct gl_program *prog, nir_shader *nir)
       sort_varyings(&nir->outputs);
       nir_assign_var_locations(&nir->outputs,
                                &nir->num_outputs,
+                               VARYING_SLOT_VAR0,
                                st_glsl_type_size);
       st_nir_fixup_varying_slots(st, &nir->outputs);
    } else if (nir->stage == MESA_SHADER_FRAGMENT) {
       sort_varyings(&nir->inputs);
       nir_assign_var_locations(&nir->inputs,
                                &nir->num_inputs,
+                               VARYING_SLOT_VAR0,
                                st_glsl_type_size);
       st_nir_fixup_varying_slots(st, &nir->inputs);
       nir_assign_var_locations(&nir->outputs,
                                &nir->num_outputs,
+                               FRAG_RESULT_DATA0,
                                st_glsl_type_size);
    } else {
       unreachable("invalid shader type for tgsi bypass\n");