anv: use shader_info->var_copies_lowered

Instead of passing allow_copies as a parameter to brw_nir_optimize
(which meant doing that tracking manually at every call site).

Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/19338>
Author:    Alejandro Piñeiro
Date:      2022-06-26 01:31:13 +02:00
Committed: Marge Bot
Parent:    a12a71e6c0
Commit:    ba0bc7182d

3 changed files with 19 additions and 20 deletions
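
In essence, the series replaces a flag threaded through every caller with state recorded on the shader itself. Below is a minimal before/after sketch of that pattern, not the actual Mesa sources; the helper names are invented, and it assumes (as this series does) that nir_lower_var_copies marks shader_info::var_copies_lowered once copy_deref instructions are gone:

#include "nir.h"

/* Before: each caller had to know whether copy_deref instructions were
 * still allowed and pass that knowledge down explicitly. */
static void
optimize_with_parameter(nir_shader *nir, bool allow_copies)
{
   if (allow_copies)
      nir_opt_find_array_copies(nir);   /* may introduce copy_derefs */
}

/* After: the shader remembers whether nir_lower_var_copies already ran,
 * so the parameter and its manual bookkeeping disappear. */
static void
optimize_with_shader_info(nir_shader *nir)
{
   if (!nir->info.var_copies_lowered)
      nir_opt_find_array_copies(nir);
}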

src/intel/compiler/brw_nir.c

@@ -633,7 +633,7 @@ brw_nir_lower_fs_outputs(nir_shader *nir)
 
 void
 brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
-                 bool is_scalar, bool allow_copies)
+                 bool is_scalar)
 {
    bool progress;
    unsigned lower_flrp =
@@ -656,10 +656,10 @@ brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
       if (OPT(nir_opt_memcpy))
          OPT(nir_split_var_copies);
       OPT(nir_lower_vars_to_ssa);
-      if (allow_copies) {
-         /* Only run this pass in the first call to brw_nir_optimize. Later
-          * calls assume that we've lowered away any copy_deref instructions
-          * and we don't want to introduce any more.
+      if (!nir->info.var_copies_lowered) {
+         /* Only run this pass if nir_lower_var_copies was not called
+          * yet. That would lower away any copy_deref instructions and we
+          * don't want to introduce any more.
           */
          OPT(nir_opt_find_array_copies);
       }
@@ -980,7 +980,7 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir,
    OPT(nir_split_var_copies);
    OPT(nir_split_struct_vars, nir_var_function_temp);
 
-   brw_nir_optimize(nir, compiler, is_scalar, true);
+   brw_nir_optimize(nir, compiler, is_scalar);
 
    OPT(nir_lower_doubles, opts->softfp64, nir->options->lower_doubles_options);
    if (OPT(nir_lower_int64)) {
@@ -1054,7 +1054,7 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir,
               nir_lower_direct_array_deref_of_vec_load);
 
    /* Get rid of split copies */
-   brw_nir_optimize(nir, compiler, is_scalar, false);
+   brw_nir_optimize(nir, compiler, is_scalar);
 }
 
 void
@@ -1090,12 +1090,12 @@ brw_nir_link_shaders(const struct brw_compiler *compiler,
    if (p_is_scalar && c_is_scalar) {
       NIR_PASS(_, producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
       NIR_PASS(_, consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
-      brw_nir_optimize(producer, compiler, p_is_scalar, false);
-      brw_nir_optimize(consumer, compiler, c_is_scalar, false);
+      brw_nir_optimize(producer, compiler, p_is_scalar);
+      brw_nir_optimize(consumer, compiler, c_is_scalar);
    }
 
    if (nir_link_opt_varyings(producer, consumer))
-      brw_nir_optimize(consumer, compiler, c_is_scalar, false);
+      brw_nir_optimize(consumer, compiler, c_is_scalar);
 
    NIR_PASS(_, producer, nir_remove_dead_variables, nir_var_shader_out, NULL);
    NIR_PASS(_, consumer, nir_remove_dead_variables, nir_var_shader_in, NULL);
@@ -1124,8 +1124,8 @@ brw_nir_link_shaders(const struct brw_compiler *compiler,
                                 brw_nir_no_indirect_mask(compiler, consumer->info.stage),
                                 UINT32_MAX);
 
-      brw_nir_optimize(producer, compiler, p_is_scalar, false);
-      brw_nir_optimize(consumer, compiler, c_is_scalar, false);
+      brw_nir_optimize(producer, compiler, p_is_scalar);
+      brw_nir_optimize(consumer, compiler, c_is_scalar);
    }
 
    NIR_PASS(_, producer, nir_lower_io_to_vector, nir_var_shader_out);
@@ -1301,21 +1301,21 @@ brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
    if (gl_shader_stage_can_set_fragment_shading_rate(nir->info.stage))
       NIR_PASS(_, nir, brw_nir_lower_shading_rate_output);
 
-   brw_nir_optimize(nir, compiler, is_scalar, false);
+   brw_nir_optimize(nir, compiler, is_scalar);
 
    if (is_scalar && nir_shader_has_local_variables(nir)) {
       OPT(nir_lower_vars_to_explicit_types, nir_var_function_temp,
           glsl_get_natural_size_align_bytes);
       OPT(nir_lower_explicit_io, nir_var_function_temp,
          nir_address_format_32bit_offset);
-      brw_nir_optimize(nir, compiler, is_scalar, false);
+      brw_nir_optimize(nir, compiler, is_scalar);
    }
 
    brw_vectorize_lower_mem_access(nir, compiler, is_scalar,
                                   robust_buffer_access);
 
    if (OPT(nir_lower_int64))
-      brw_nir_optimize(nir, compiler, is_scalar, false);
+      brw_nir_optimize(nir, compiler, is_scalar);
 
    if (devinfo->ver >= 6) {
       /* Try and fuse multiply-adds */
@@ -1402,7 +1402,7 @@ brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
       OPT(nir_lower_subgroups, &subgroups_options);
 
       if (OPT(nir_lower_int64))
-         brw_nir_optimize(nir, compiler, is_scalar, false);
+         brw_nir_optimize(nir, compiler, is_scalar);
    }
 
    /* Clean up LCSSA phis */
@@ -1560,7 +1560,7 @@ brw_nir_apply_key(nir_shader *nir,
       OPT(brw_nir_limit_trig_input_range_workaround);
 
    if (progress)
-      brw_nir_optimize(nir, compiler, is_scalar, false);
+      brw_nir_optimize(nir, compiler, is_scalar);
 }
 
 enum brw_conditional_mod

src/intel/compiler/brw_nir.h

@@ -188,8 +188,7 @@ bool brw_nir_clamp_per_vertex_loads(nir_shader *shader,
 
 void brw_nir_optimize(nir_shader *nir,
                       const struct brw_compiler *compiler,
-                      bool is_scalar,
-                      bool allow_copies);
+                      bool is_scalar);
 
 nir_shader *brw_nir_create_passthrough_tcs(void *mem_ctx,
                                            const struct brw_compiler *compiler,

src/intel/compiler/brw_nir_rt.c

@@ -534,7 +534,7 @@ brw_nir_create_raygen_trampoline(const struct brw_compiler *compiler,
 
    NIR_PASS_V(nir, brw_nir_lower_cs_intrinsics);
 
-   brw_nir_optimize(nir, compiler, true, false);
+   brw_nir_optimize(nir, compiler, true);
 
    return nir;
 }
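
For completeness, a hedged usage sketch from a driver's perspective (compile_shader is a hypothetical name and the is_scalar choice is arbitrary; it assumes, per this series, that nir_lower_var_copies sets info.var_copies_lowered as a side effect):

#include "nir.h"
#include "brw_nir.h"

/* Hypothetical caller: it no longer has to remember whether copies were
 * lowered before invoking the backend optimizer. */
static void
compile_shader(nir_shader *nir, const struct brw_compiler *compiler)
{
   NIR_PASS_V(nir, nir_lower_var_copies);   /* records var_copies_lowered */
   brw_nir_optimize(nir, compiler, true /* is_scalar */);
}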