nir: Move st_nir_assign_var_locations() to common code

It isn't really doing anything Gallium-specific, and it's needed for
handling component packing, overlapping, etc.
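
For reference, a driver's NIR path calls the shared helper roughly like
this (an illustrative sketch based on the st/mesa call sites in this
change; the example_* function name is invented and not part of the diff):

   /* Assign driver_locations for a non-vertex stage, using the helper
    * that now lives in common NIR code (declared in nir.h).
    */
   static void
   example_assign_varying_locations(nir_shader *nir)
   {
      nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs,
                                  nir->info.stage);
      nir_assign_io_var_locations(&nir->outputs, &nir->num_outputs,
                                  nir->info.stage);
   }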

Reviewed-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Reviewed-by: Marek Olšák <marek.olsak@amd.com>
Connor Abbott
2019-05-10 10:18:12 +02:00
parent 27f0c3c15e
commit fd5ed6b9d6
3 changed files with 119 additions and 115 deletions

@@ -3143,6 +3143,11 @@ void nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
void nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer);
bool nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer);

void nir_assign_io_var_locations(struct exec_list *var_list,
                                 unsigned *size,
                                 gl_shader_stage stage);

typedef enum {
   /* If set, this forces all non-flat fragment shader inputs to be
    * interpolated as if with the "sample" qualifier. This requires

@@ -970,3 +970,112 @@ nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer)
   return progress;
}

/* TODO any better helper somewhere to sort a list? */
static void
insert_sorted(struct exec_list *var_list, nir_variable *new_var)
{
   nir_foreach_variable(var, var_list) {
      if (var->data.location > new_var->data.location) {
         exec_node_insert_node_before(&var->node, &new_var->node);
         return;
      }
   }
   exec_list_push_tail(var_list, &new_var->node);
}

static void
sort_varyings(struct exec_list *var_list)
{
   struct exec_list new_list;
   exec_list_make_empty(&new_list);
   nir_foreach_variable_safe(var, var_list) {
      exec_node_remove(&var->node);
      insert_sorted(&new_list, var);
   }
   exec_list_move_nodes_to(&new_list, var_list);
}

void
nir_assign_io_var_locations(struct exec_list *var_list, unsigned *size,
                            gl_shader_stage stage)
{
   unsigned location = 0;
   unsigned assigned_locations[VARYING_SLOT_TESS_MAX];
   uint64_t processed_locs[2] = {0};

   sort_varyings(var_list);

   const int base = stage == MESA_SHADER_FRAGMENT ?
      (int) FRAG_RESULT_DATA0 : (int) VARYING_SLOT_VAR0;

   int UNUSED last_loc = 0;
   nir_foreach_variable(var, var_list) {
      const struct glsl_type *type = var->type;
      if (nir_is_per_vertex_io(var, stage)) {
         assert(glsl_type_is_array(type));
         type = glsl_get_array_element(type);
      }

      unsigned var_size = glsl_count_attribute_slots(type, false);

      /* Builtins don't allow component packing so we only need to worry about
       * user defined varyings sharing the same location.
       */
      bool processed = false;
      if (var->data.location >= base) {
         unsigned glsl_location = var->data.location - base;

         for (unsigned i = 0; i < var_size; i++) {
            if (processed_locs[var->data.index] &
                ((uint64_t)1 << (glsl_location + i)))
               processed = true;
            else
               processed_locs[var->data.index] |=
                  ((uint64_t)1 << (glsl_location + i));
         }
      }

      /* Because component packing allows varyings to share the same location
       * we may already have processed this location.
       */
      if (processed) {
         unsigned driver_location = assigned_locations[var->data.location];
         var->data.driver_location = driver_location;
         *size += glsl_count_attribute_slots(type, false);

         /* An array may be packed such that it crosses multiple other arrays
          * or variables; we need to make sure we have allocated the elements
          * consecutively if the previously processed var was shorter than
          * the current array we are processing.
          *
          * NOTE: The code below assumes the var list is ordered in ascending
          * location order.
          */
         assert(last_loc <= var->data.location);
         last_loc = var->data.location;
         unsigned last_slot_location = driver_location + var_size;
         if (last_slot_location > location) {
            unsigned num_unallocated_slots = last_slot_location - location;
            unsigned first_unallocated_slot = var_size - num_unallocated_slots;
            for (unsigned i = first_unallocated_slot; i < var_size; i++) {
               assigned_locations[var->data.location + i] = location;
               location++;
            }
         }

         continue;
      }

      for (unsigned i = 0; i < var_size; i++) {
         assigned_locations[var->data.location + i] = location + i;
      }

      var->data.driver_location = location;
      location += var_size;
   }

   *size += location;
}
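
To make the packed-varying path above concrete, here is a small standalone
toy model (not Mesa code; the struct, the names, and the data are invented
for illustration). Variables that were component-packed into the same
location share one driver location, and an array that extends past an
already-processed variable is given the remaining consecutive slots:

   #include <stdbool.h>
   #include <stdint.h>
   #include <stdio.h>

   struct toy_var {
      const char *name;
      unsigned location;   /* varying slot, relative to some base */
      unsigned num_slots;  /* slots occupied; an array takes several */
   };

   int main(void)
   {
      /* Already sorted by location: "a" and "b" are packed into slot 0,
       * and "b" is a 3-slot array that also covers slots 1 and 2.
       */
      struct toy_var vars[] = { { "a", 0, 1 }, { "b", 0, 3 }, { "c", 3, 1 } };
      unsigned assigned[64];
      uint64_t processed_locs = 0;
      unsigned location = 0;

      for (unsigned v = 0; v < 3; v++) {
         struct toy_var *var = &vars[v];

         /* Mark this variable's slots; remember if any were seen before. */
         bool processed = false;
         for (unsigned i = 0; i < var->num_slots; i++) {
            if (processed_locs & ((uint64_t)1 << (var->location + i)))
               processed = true;
            else
               processed_locs |= (uint64_t)1 << (var->location + i);
         }

         if (processed) {
            /* Reuse the driver location already assigned to this slot,
             * then allocate any tail slots a longer array still needs.
             */
            unsigned driver_location = assigned[var->location];
            unsigned last_slot = driver_location + var->num_slots;
            if (last_slot > location) {
               for (unsigned i = var->num_slots - (last_slot - location);
                    i < var->num_slots; i++)
                  assigned[var->location + i] = location++;
            }
            printf("%s -> driver_location %u\n", var->name, driver_location);
         } else {
            for (unsigned i = 0; i < var->num_slots; i++)
               assigned[var->location + i] = location + i;
            printf("%s -> driver_location %u\n", var->name, location);
            location += var->num_slots;
         }
      }
      /* Prints: a -> 0, b -> 0, c -> 3 (driver locations). */
      return 0;
   }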

@@ -109,86 +109,6 @@ st_nir_assign_vs_in_locations(nir_shader *nir)
   }
}

static void
st_nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
                            gl_shader_stage stage)
{
   unsigned location = 0;
   unsigned assigned_locations[VARYING_SLOT_TESS_MAX];
   uint64_t processed_locs[2] = {0};

   const int base = stage == MESA_SHADER_FRAGMENT ?
      (int) FRAG_RESULT_DATA0 : (int) VARYING_SLOT_VAR0;

   int UNUSED last_loc = 0;
   nir_foreach_variable(var, var_list) {
      const struct glsl_type *type = var->type;
      if (nir_is_per_vertex_io(var, stage)) {
         assert(glsl_type_is_array(type));
         type = glsl_get_array_element(type);
      }

      unsigned var_size = type_size(type);

      /* Builtins don't allow component packing so we only need to worry about
       * user defined varyings sharing the same location.
       */
      bool processed = false;
      if (var->data.location >= base) {
         unsigned glsl_location = var->data.location - base;

         for (unsigned i = 0; i < var_size; i++) {
            if (processed_locs[var->data.index] &
                ((uint64_t)1 << (glsl_location + i)))
               processed = true;
            else
               processed_locs[var->data.index] |=
                  ((uint64_t)1 << (glsl_location + i));
         }
      }

      /* Because component packing allows varyings to share the same location
       * we may already have processed this location.
       */
      if (processed) {
         unsigned driver_location = assigned_locations[var->data.location];
         var->data.driver_location = driver_location;
         *size += type_size(type);

         /* An array may be packed such that it crosses multiple other arrays
          * or variables; we need to make sure we have allocated the elements
          * consecutively if the previously processed var was shorter than
          * the current array we are processing.
          *
          * NOTE: The code below assumes the var list is ordered in ascending
          * location order.
          */
         assert(last_loc <= var->data.location);
         last_loc = var->data.location;
         unsigned last_slot_location = driver_location + var_size;
         if (last_slot_location > location) {
            unsigned num_unallocated_slots = last_slot_location - location;
            unsigned first_unallocated_slot = var_size - num_unallocated_slots;
            for (unsigned i = first_unallocated_slot; i < var_size; i++) {
               assigned_locations[var->data.location + i] = location;
               location++;
            }
         }

         continue;
      }

      for (unsigned i = 0; i < var_size; i++) {
         assigned_locations[var->data.location + i] = location + i;
      }

      var->data.driver_location = location;
      location += var_size;
   }

   *size += location;
}

static int
st_nir_lookup_parameter_index(const struct gl_program_parameter_list *params,
                              const char *name)
@@ -550,32 +470,6 @@ st_glsl_to_nir_post_opts(struct st_context *st, struct gl_program *prog,
   }
}

/* TODO any better helper somewhere to sort a list? */
static void
insert_sorted(struct exec_list *var_list, nir_variable *new_var)
{
   nir_foreach_variable(var, var_list) {
      if (var->data.location > new_var->data.location) {
         exec_node_insert_node_before(&var->node, &new_var->node);
         return;
      }
   }
   exec_list_push_tail(var_list, &new_var->node);
}

static void
sort_varyings(struct exec_list *var_list)
{
   struct exec_list new_list;
   exec_list_make_empty(&new_list);
   nir_foreach_variable_safe(var, var_list) {
      exec_node_remove(&var->node);
      insert_sorted(&new_list, var);
   }
   exec_list_move_nodes_to(&new_list, var_list);
}

static void
set_st_program(struct gl_program *prog,
               struct gl_shader_program *shader_program,
@@ -914,32 +808,28 @@ st_nir_assign_varying_locations(struct st_context *st, nir_shader *nir)
      /* Re-lower global vars, to deal with any dead VS inputs. */
      NIR_PASS_V(nir, nir_lower_global_vars_to_local);

      sort_varyings(&nir->outputs);
      st_nir_assign_var_locations(&nir->outputs,
      nir_assign_io_var_locations(&nir->outputs,
                                  &nir->num_outputs,
                                  nir->info.stage);
      st_nir_fixup_varying_slots(st, &nir->outputs);
   } else if (nir->info.stage == MESA_SHADER_GEOMETRY ||
              nir->info.stage == MESA_SHADER_TESS_CTRL ||
              nir->info.stage == MESA_SHADER_TESS_EVAL) {
      sort_varyings(&nir->inputs);
      st_nir_assign_var_locations(&nir->inputs,
      nir_assign_io_var_locations(&nir->inputs,
                                  &nir->num_inputs,
                                  nir->info.stage);
      st_nir_fixup_varying_slots(st, &nir->inputs);

      sort_varyings(&nir->outputs);
      st_nir_assign_var_locations(&nir->outputs,
      nir_assign_io_var_locations(&nir->outputs,
                                  &nir->num_outputs,
                                  nir->info.stage);
      st_nir_fixup_varying_slots(st, &nir->outputs);
   } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      sort_varyings(&nir->inputs);
      st_nir_assign_var_locations(&nir->inputs,
      nir_assign_io_var_locations(&nir->inputs,
                                  &nir->num_inputs,
                                  nir->info.stage);
      st_nir_fixup_varying_slots(st, &nir->inputs);
      st_nir_assign_var_locations(&nir->outputs,
      nir_assign_io_var_locations(&nir->outputs,
                                  &nir->num_outputs,
                                  nir->info.stage);
   } else if (nir->info.stage == MESA_SHADER_COMPUTE) {