nir: stop adjusting driver location for varying packing

As of 59864e8e02 we just use the location assigned by the front-end,
so i965 no longer needs this adjustment.

The logic for assigning arrays the same driver location also had
issues when the arrays didn't start at the same location, so just
remove it and let other drivers implement a solution, if needed, when
they add ARB_enhanced_layouts support.

Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Author: Timothy Arceri
Date:   2016-10-25 10:23:25 +11:00
Commit: 2e423ca147
Parent: 4ac6686165

5 changed files with 5 additions and 53 deletions
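
For context, the assignment that remains after this change is just a
running offset over the variable list. Below is a minimal sketch of
that loop using stand-in types; the struct and field names are
illustrative only (the real code walks nir_variable via
nir_foreach_variable and a per-type type_size callback):

#include <stddef.h>

/* Stand-in for a NIR variable; illustrative, not the real nir_variable. */
struct var {
   unsigned driver_location;
   unsigned size;      /* what type_size(var->type) would return */
   int is_interface;   /* UBOs/SSBOs live in their own address space */
   struct var *next;
};

static void
assign_var_locations(struct var *list, unsigned *size)
{
   unsigned location = 0;

   for (struct var *v = list; v != NULL; v = v->next) {
      /* Interface blocks don't count toward the in/out size. */
      if (v->is_interface)
         continue;

      v->driver_location = location;
      location += v->size;
   }

   *size = location;
}

With the base_offset parameter and the locations[64][2] table gone,
callers simply pass the list and get the packed size back.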

src/compiler/nir/nir.h

@@ -2321,7 +2321,6 @@ void nir_lower_io_to_temporaries(nir_shader *shader,
 void nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint);
 
 void nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
-                              unsigned base_offset,
                               int (*type_size)(const struct glsl_type *));
 
 typedef enum {

src/compiler/nir/nir_lower_io.c

@@ -44,18 +44,10 @@ struct lower_io_state {
 
 void
 nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
-                         unsigned base_offset,
                          int (*type_size)(const struct glsl_type *))
 {
    unsigned location = 0;
 
-   /* There are 32 regular and 32 patch varyings allowed */
-   int locations[64][2];
-   for (unsigned i = 0; i < 64; i++) {
-      for (unsigned j = 0; j < 2; j++)
-         locations[i][j] = -1;
-   }
-
    nir_foreach_variable(var, var_list) {
       /*
        * UBO's have their own address spaces, so don't count them towards the
@@ -65,44 +57,8 @@ nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
           var->interface_type != NULL)
          continue;
 
-      /* Make sure we give the same location to varyings packed with
-       * ARB_enhanced_layouts.
-       */
-      int idx = var->data.location - base_offset;
-      if (base_offset && idx >= 0) {
-         assert(idx < ARRAY_SIZE(locations));
-
-         if (locations[idx][var->data.index] == -1) {
-            var->data.driver_location = location;
-            locations[idx][var->data.index] = location;
-
-            /* A dvec3 can be packed with a double we need special handling
-             * for this as we are packing across two locations.
-             */
-            if (glsl_get_base_type(var->type) == GLSL_TYPE_DOUBLE &&
-                glsl_get_vector_elements(var->type) == 3) {
-               /* Hack around type_size functions that expect vectors to be
-                * padded out to vec4. If a float type is the same size as a
-                * double then the type size is padded to vec4, otherwise
-                * set the offset to two doubles which offsets the location
-                * past the first two components in dvec3 which were stored at
-                * the previous location.
-                */
-               unsigned dsize = type_size(glsl_double_type());
-               unsigned offset =
-                  dsize == type_size(glsl_float_type()) ? dsize : dsize * 2;
-
-               locations[idx + 1][var->data.index] = location + offset;
-            }
-
-            location += type_size(var->type);
-         } else {
-            var->data.driver_location = locations[idx][var->data.index];
-         }
-      } else {
-         var->data.driver_location = location;
-         location += type_size(var->type);
-      }
+      var->data.driver_location = location;
+      location += type_size(var->type);
    }
 
    *size = location;
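
For reference, the dvec3 hack deleted above is easier to follow with
concrete numbers. Here is a small runnable sketch of the removed
offset computation under the two type_size conventions the old comment
describes; the stand-in functions and their return values are
illustrative, not Mesa's real callbacks:

#include <stdio.h>

/* Illustrative stand-ins for the two type_size() conventions. */
static unsigned size_vec4_units(int is_double)
{
   (void)is_double;
   return 4;   /* everything pads out to one vec4 slot */
}

static unsigned size_scalar_bytes(int is_double)
{
   return is_double ? 8 : 4;
}

/* The deleted computation: if float and double report the same
 * (vec4-padded) size, one slot steps past the dvec3's tail;
 * otherwise step by two doubles. */
static unsigned packed_offset(unsigned (*type_size)(int))
{
   unsigned dsize = type_size(1);   /* type_size(glsl_double_type()) */
   unsigned fsize = type_size(0);   /* type_size(glsl_float_type())  */
   return dsize == fsize ? dsize : dsize * 2;
}

int main(void)
{
   printf("vec4 units:   offset %u\n", packed_offset(size_vec4_units));   /* 4 */
   printf("scalar bytes: offset %u\n", packed_offset(size_scalar_bytes)); /* 16 */
   return 0;
}

Under vec4 padding the float and double sizes match, so the offset is
one padded slot; under a scalar-bytes convention it is two doubles,
stepping past the dvec3 components already stored at the previous
location.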

src/mesa/drivers/dri/i965/brw_nir.c

@@ -380,7 +380,7 @@ brw_nir_lower_fs_outputs(nir_shader *nir)
 void
 brw_nir_lower_cs_shared(nir_shader *nir)
 {
-   nir_assign_var_locations(&nir->shared, &nir->num_shared, 0,
+   nir_assign_var_locations(&nir->shared, &nir->num_shared,
                             type_size_scalar_bytes);
    nir_lower_io(nir, nir_var_shared, type_size_scalar_bytes, 0);
 }

src/mesa/drivers/dri/i965/brw_program.c

@@ -51,11 +51,11 @@ static void
 brw_nir_lower_uniforms(nir_shader *nir, bool is_scalar)
 {
    if (is_scalar) {
-      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms, 0,
+      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                                type_size_scalar_bytes);
       nir_lower_io(nir, nir_var_uniform, type_size_scalar_bytes, 0);
    } else {
-      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms, 0,
+      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                                type_size_vec4_bytes);
       nir_lower_io(nir, nir_var_uniform, type_size_vec4_bytes, 0);
    }

src/mesa/state_tracker/st_glsl_to_nir.cpp

@@ -324,19 +324,16 @@ st_finalize_nir(struct st_context *st, struct gl_program *prog, nir_shader *nir)
       sort_varyings(&nir->outputs);
       nir_assign_var_locations(&nir->outputs,
                                &nir->num_outputs,
-                               VARYING_SLOT_VAR0,
                                st_glsl_type_size);
       st_nir_fixup_varying_slots(st, &nir->outputs);
    } else if (nir->stage == MESA_SHADER_FRAGMENT) {
       sort_varyings(&nir->inputs);
       nir_assign_var_locations(&nir->inputs,
                                &nir->num_inputs,
-                               VARYING_SLOT_VAR0,
                                st_glsl_type_size);
       st_nir_fixup_varying_slots(st, &nir->inputs);
       nir_assign_var_locations(&nir->outputs,
                                &nir->num_outputs,
-                               FRAG_RESULT_DATA0,
                                st_glsl_type_size);
    } else {
       unreachable("invalid shader type for tgsi bypass\n");