amd: Swap from nir_opt_shrink_load() to nir_opt_shrink_vectors().

This should do much more trimming than shrink_load, and is a win on i965's
vec4 backend and on nir-to-tgsi.  For scalar backends like ACO that don't
need the ALU shrinking, it still covers more load intrinsics than shrink_load did.

Reviewed-by: Rhys Perry <pendingchaos02@gmail.com>
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/6050>
Author:    Eric Anholt
Date:      2020-07-22 22:00:57 -07:00
Committed: Marge Bot
Parent:    023e6669cc
Commit:    d8c2f896db

2 changed files with 3 additions and 2 deletions
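
The rationale above boils down to the ordering shown in the ACO hunk below: shrink vectors first, then scalarize whatever got narrowed. A minimal sketch of that pattern in plain C, assuming an already-lowered nir_shader — the helper name is illustrative and not part of this commit:

#include "nir.h"

/* Hypothetical helper (not Mesa code) mirroring the ordering this commit
 * sets up in ACO's setup_nir(). */
static void
shrink_then_scalarize(nir_shader *nir)
{
   bool lower_to_scalar = false;

   /* Trim unused components from vector ALU results and load intrinsics;
    * returns true if the pass made progress. */
   lower_to_scalar |= nir_opt_shrink_vectors(nir);

   /* Re-run ALU scalarization so the backend only sees the narrowed
    * operations. */
   if (lower_to_scalar)
      nir_lower_alu_to_scalar(nir, NULL, NULL);
}

The RADV hunk makes the same swap through NIR_PASS(), which folds the pass's boolean return into radv_optimize_nir()'s progress flag instead of a dedicated lower_to_scalar variable.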

@@ -1344,6 +1344,8 @@ setup_nir(isel_context *ctx, nir_shader *nir)
    if (nir->info.stage != MESA_SHADER_COMPUTE)
       nir_lower_io(nir, (nir_variable_mode)(nir_var_shader_in | nir_var_shader_out), type_size, (nir_lower_io_options)0);
 
+   lower_to_scalar |= nir_opt_shrink_vectors(nir);
+
    if (lower_to_scalar)
       nir_lower_alu_to_scalar(nir, NULL, NULL);
    if (lower_pack)
@@ -1385,7 +1387,6 @@ setup_nir(isel_context *ctx, nir_shader *nir)
 
    /* cleanup passes */
    nir_lower_load_const_to_scalar(nir);
-   nir_opt_shrink_load(nir);
    nir_move_options move_opts = (nir_move_options)(
       nir_move_const_undef | nir_move_load_ubo | nir_move_load_input |
       nir_move_comparisons | nir_move_copies);

@@ -284,7 +284,7 @@ radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively,
         } while (progress && !optimize_conservatively);
 
         NIR_PASS(progress, shader, nir_opt_conditional_discard);
-        NIR_PASS(progress, shader, nir_opt_shrink_load);
+        NIR_PASS(progress, shader, nir_opt_shrink_vectors);
         NIR_PASS(progress, shader, nir_opt_move, nir_move_load_ubo);
 }
 