nir/opt_shrink_vectors: add option to skip shrinking image stores
Some games declare the wrong format, so we might want to disable this
optimization in that case.
Signed-off-by: Rhys Perry <pendingchaos02@gmail.com>
Reviewed-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Fixes: e4d75c22 ("nir/opt_shrink_vectors: shrink image stores using the format")
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/9229>
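All callers in this commit pass true for the new shrink_image_store parameter, so behaviour is unchanged; a driver that cannot trust the format an application declared would pass false to leave image stores alone. A minimal sketch of both call forms (the trust_declared_formats flag is hypothetical, used only for illustration):

    /* Direct call: skip shrinking image stores when the declared
     * format may be wrong. "trust_declared_formats" is a hypothetical
     * per-driver flag, not part of this commit. */
    bool progress = nir_opt_shrink_vectors(shader, trust_declared_formats);

    /* Through the NIR_PASS macro, as in the hunks below: */
    NIR_PASS(progress, shader, nir_opt_shrink_vectors, true);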
@@ -2359,7 +2359,7 @@ radv_link_shaders(struct radv_pipeline *pipeline, nir_shader **shaders,
       if (nir_lower_io_to_scalar_early(ordered_shaders[i], mask)) {
          /* Optimize the new vector code and then remove dead vars */
          nir_copy_prop(ordered_shaders[i]);
-         nir_opt_shrink_vectors(ordered_shaders[i]);
+         nir_opt_shrink_vectors(ordered_shaders[i], true);
 
          if (ordered_shaders[i]->info.stage != last) {
             /* Optimize swizzled movs of load_const for
@@ -3351,7 +3351,7 @@ VkResult radv_create_shaders(struct radv_pipeline *pipeline,
 
          radv_lower_io(device, nir[i]);
 
-         lower_to_scalar |= nir_opt_shrink_vectors(nir[i]);
+         lower_to_scalar |= nir_opt_shrink_vectors(nir[i], true);
 
          if (lower_to_scalar)
             nir_lower_alu_to_scalar(nir[i], NULL, NULL);
@@ -243,7 +243,7 @@ radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively,
       }
 
       NIR_PASS(progress, shader, nir_opt_undef);
-      NIR_PASS(progress, shader, nir_opt_shrink_vectors);
+      NIR_PASS(progress, shader, nir_opt_shrink_vectors, true);
       if (shader->options->max_unroll_iterations) {
         NIR_PASS(progress, shader, nir_opt_loop_unroll, 0);
       }
@@ -5105,7 +5105,7 @@ bool nir_opt_rematerialize_compares(nir_shader *shader);
 bool nir_opt_remove_phis(nir_shader *shader);
 bool nir_opt_remove_phis_block(nir_block *block);
 
-bool nir_opt_shrink_vectors(nir_shader *shader);
+bool nir_opt_shrink_vectors(nir_shader *shader, bool shrink_image_store);
 
 bool nir_opt_trivial_continues(nir_shader *shader);
 
@@ -134,7 +134,7 @@ opt_shrink_vectors_image_store(nir_builder *b, nir_intrinsic_instr *instr)
 }
 
 static bool
-opt_shrink_vectors_intrinsic(nir_builder *b, nir_intrinsic_instr *instr)
+opt_shrink_vectors_intrinsic(nir_builder *b, nir_intrinsic_instr *instr, bool shrink_image_store)
 {
    switch (instr->intrinsic) {
    case nir_intrinsic_load_uniform:
@@ -160,7 +160,7 @@ opt_shrink_vectors_intrinsic(nir_builder *b, nir_intrinsic_instr *instr)
    case nir_intrinsic_bindless_image_store:
    case nir_intrinsic_image_deref_store:
    case nir_intrinsic_image_store:
-      return opt_shrink_vectors_image_store(b, instr);
+      return shrink_image_store && opt_shrink_vectors_image_store(b, instr);
    default:
       return false;
    }
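The rewritten return relies on C's short-circuit evaluation of &&: when shrink_image_store is false, opt_shrink_vectors_image_store() is never called, so the image store is left untouched and no progress is reported. An equivalent long-hand spelling of the same logic:

    /* Same behaviour as the one-line return above, written out. */
    if (!shrink_image_store)
       return false; /* leave the image store as-is; no progress */
    return opt_shrink_vectors_image_store(b, instr);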
@@ -209,7 +209,7 @@ opt_shrink_vectors_ssa_undef(nir_ssa_undef_instr *instr)
 }
 
 static bool
-opt_shrink_vectors_instr(nir_builder *b, nir_instr *instr)
+opt_shrink_vectors_instr(nir_builder *b, nir_instr *instr, bool shrink_image_store)
 {
    b->cursor = nir_before_instr(instr);
 
@@ -218,7 +218,7 @@ opt_shrink_vectors_instr(nir_builder *b, nir_instr *instr)
       return opt_shrink_vectors_alu(b, nir_instr_as_alu(instr));
 
    case nir_instr_type_intrinsic:
-      return opt_shrink_vectors_intrinsic(b, nir_instr_as_intrinsic(instr));
+      return opt_shrink_vectors_intrinsic(b, nir_instr_as_intrinsic(instr), shrink_image_store);
 
    case nir_instr_type_load_const:
       return opt_shrink_vectors_load_const(nir_instr_as_load_const(instr));
@@ -234,7 +234,7 @@ opt_shrink_vectors_instr(nir_builder *b, nir_instr *instr)
 }
 
 bool
-nir_opt_shrink_vectors(nir_shader *shader)
+nir_opt_shrink_vectors(nir_shader *shader, bool shrink_image_store)
 {
    bool progress = false;
 
@@ -247,7 +247,7 @@ nir_opt_shrink_vectors(nir_shader *shader)
 
       nir_foreach_block(block, function->impl) {
          nir_foreach_instr(instr, block) {
-            progress |= opt_shrink_vectors_instr(&b, instr);
+            progress |= opt_shrink_vectors_instr(&b, instr, shrink_image_store);
          }
       }
 
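Taken together, the nir_opt_shrink_vectors.c hunks only thread the new flag down the existing call chain; the pass is otherwise unchanged:

    /* Flag plumbing after this commit (sketch, not verbatim source):
     * nir_opt_shrink_vectors(shader, shrink_image_store)
     *   -> opt_shrink_vectors_instr(&b, instr, shrink_image_store)
     *      -> opt_shrink_vectors_intrinsic(b, intrin, shrink_image_store)
     *         image-store intrinsics honour the flag; every other
     *         instruction type behaves exactly as before. */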
@@ -2428,7 +2428,7 @@ ntt_optimize_nir(struct nir_shader *s, struct pipe_screen *screen)
          .robust_modes = 0,
       };
       NIR_PASS(progress, s, nir_opt_load_store_vectorize, &vectorize_opts);
-      NIR_PASS(progress, s, nir_opt_shrink_vectors);
+      NIR_PASS(progress, s, nir_opt_shrink_vectors, true);
       NIR_PASS(progress, s, nir_opt_trivial_continues);
       NIR_PASS(progress, s, nir_opt_vectorize, ntt_should_vectorize_instr, NULL);
       NIR_PASS(progress, s, nir_opt_undef);
@@ -146,7 +146,7 @@ etna_optimize_loop(nir_shader *s)
 
       NIR_PASS_V(s, nir_lower_vars_to_ssa);
       progress |= OPT(s, nir_opt_copy_prop_vars);
-      progress |= OPT(s, nir_opt_shrink_vectors);
+      progress |= OPT(s, nir_opt_shrink_vectors, true);
       progress |= OPT(s, nir_copy_prop);
       progress |= OPT(s, nir_opt_dce);
       progress |= OPT(s, nir_opt_cse);
@@ -594,7 +594,7 @@ brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
       if (is_scalar) {
          OPT(nir_lower_alu_to_scalar, NULL, NULL);
       } else {
-         OPT(nir_opt_shrink_vectors);
+         OPT(nir_opt_shrink_vectors, true);
       }
 
       OPT(nir_copy_prop);