treewide: Avoid nir_lower_regs_to_ssa calls

nir_registers are only supposed to be used temporarily. They may be created by a
producer, but then must be immediately lowered prior to optimizing the produced
shader. They may be created internally by an optimization pass that doesn't want
to deal with phis, but that pass needs to lower them back to phis immediately.
Finally, they may be created when going out-of-SSA if a backend chooses, but that
has to happen late.

Regardless, there should be no case where a backend sees a shader that comes in
with nir_registers needing to be lowered. The two frontend producers of
registers (tgsi_to_nir and mesa/st) both call nir_lower_regs_to_ssa to clean up
as they should. Some backends (like Intel) already depend on this behaviour.
There's no need for other backends to call nir_lower_regs_to_ssa too.

Drop the pointless calls as a baby step towards replacing nir_register.

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/23181>
This commit is contained in:
Alyssa Rosenzweig
2023-05-23 10:10:47 -04:00
committed by Marge Bot
parent 6875f97618
commit ecd295bb8b
14 changed files with 0 additions and 16 deletions

View File

@@ -2352,7 +2352,6 @@ agx_preprocess_nir(nir_shader *nir, bool support_lod_bias)
.allow_fp16 = true,
};
NIR_PASS_V(nir, nir_lower_regs_to_ssa);
NIR_PASS_V(nir, nir_lower_idiv, &idiv_options);
NIR_PASS_V(nir, nir_lower_frexp);
NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);

View File

@@ -356,7 +356,6 @@ ir3_finalize_nir(struct ir3_compiler *compiler, nir_shader *s)
NIR_PASS_V(s, nir_lower_frexp);
NIR_PASS_V(s, nir_lower_amul, ir3_glsl_type_size);
OPT_V(s, nir_lower_regs_to_ssa);
OPT_V(s, nir_lower_wrmasks, should_split_wrmask, s);
OPT_V(s, nir_lower_tex, &tex_options);

View File

@@ -3774,7 +3774,6 @@ const void *nir_to_tgsi_options(struct nir_shader *s,
NIR_PASS_V(s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
type_size, (nir_lower_io_options)0);
NIR_PASS_V(s, nir_lower_regs_to_ssa);
nir_to_tgsi_lower_txp(s);
NIR_PASS_V(s, nir_to_tgsi_lower_tex);

View File

@@ -1124,7 +1124,6 @@ etna_compile_shader(struct etna_shader_variant *v)
NIR_PASS_V(s, nir_lower_io, nir_var_shader_in | nir_var_uniform, etna_glsl_type_size,
(nir_lower_io_options)0);
NIR_PASS_V(s, nir_lower_regs_to_ssa);
NIR_PASS_V(s, nir_lower_vars_to_ssa);
NIR_PASS_V(s, nir_lower_indirect_derefs, nir_var_all, UINT32_MAX);
NIR_PASS_V(s, nir_lower_tex, &(struct nir_lower_tex_options) { .lower_txp = ~0u, .lower_invalid_implicit_lod = true, });

View File

@@ -121,7 +121,6 @@ ir2_optimize_nir(nir_shader *s, bool lower)
debug_printf("----------------------\n");
}
OPT_V(s, nir_lower_regs_to_ssa);
OPT_V(s, nir_lower_vars_to_ssa);
OPT_V(s, nir_lower_indirect_derefs, nir_var_shader_in | nir_var_shader_out,
UINT32_MAX);

View File

@@ -227,7 +227,6 @@ lima_program_optimize_fs_nir(struct nir_shader *s,
NIR_PASS_V(s, nir_lower_fragcoord_wtrans);
NIR_PASS_V(s, nir_lower_io,
nir_var_shader_in | nir_var_shader_out, type_size, 0);
NIR_PASS_V(s, nir_lower_regs_to_ssa);
NIR_PASS_V(s, nir_lower_tex, tex_options);
NIR_PASS_V(s, lima_nir_lower_txp);

View File

@@ -187,7 +187,6 @@ int r600_pipe_shader_create(struct pipe_context *ctx,
sel->nir = tgsi_to_nir(sel->tokens, ctx->screen, true);
/* Lower int64 ops because we have some r600 built-in shaders that use it */
if (nir_options->lower_int64_options) {
NIR_PASS_V(sel->nir, nir_lower_regs_to_ssa);
NIR_PASS_V(sel->nir, nir_lower_alu_to_scalar, r600_lower_to_scalar_instr_filter, NULL);
NIR_PASS_V(sel->nir, nir_lower_int64);
}

View File

@@ -756,7 +756,6 @@ r600_finalize_nir(pipe_screen *screen, void *shader)
nir_shader *nir = (nir_shader *)shader;
NIR_PASS_V(nir, nir_lower_regs_to_ssa);
const int nir_lower_flrp_mask = 16 | 32 | 64;
NIR_PASS_V(nir, nir_lower_flrp, nir_lower_flrp_mask, false);

View File

@@ -313,7 +313,6 @@ v3d_uncompiled_shader_create(struct pipe_context *pctx,
type_size, (nir_lower_io_options)0);
}
NIR_PASS(_, s, nir_lower_regs_to_ssa);
NIR_PASS(_, s, nir_normalize_cubemap_coords);
NIR_PASS(_, s, nir_lower_load_const_to_scalar);

View File

@@ -2536,7 +2536,6 @@ vc4_shader_state_create(struct pipe_context *pctx,
nir_var_shader_in | nir_var_shader_out | nir_var_uniform,
type_size, (nir_lower_io_options)0);
NIR_PASS_V(s, nir_lower_regs_to_ssa);
NIR_PASS_V(s, nir_normalize_cubemap_coords);
NIR_PASS_V(s, nir_lower_load_const_to_scalar);

View File

@@ -4835,7 +4835,6 @@ zink_shader_create(struct zink_screen *screen, struct nir_shader *nir,
NIR_PASS_V(nir, fixup_io_locations);
NIR_PASS_V(nir, lower_basevertex);
NIR_PASS_V(nir, nir_lower_regs_to_ssa);
NIR_PASS_V(nir, lower_baseinstance);
NIR_PASS_V(nir, lower_sparse);
NIR_PASS_V(nir, split_bitfields);
@@ -5287,7 +5286,6 @@ zink_shader_tcs_create(struct zink_screen *screen, nir_shader *tes, unsigned ver
nir->info.tess.tcs_vertices_out = vertices_per_patch;
nir_validate_shader(nir, "created");
NIR_PASS_V(nir, nir_lower_regs_to_ssa);
optimize_nir(nir, NULL);
NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
NIR_PASS_V(nir, nir_convert_from_ssa, true);

View File

@@ -3194,7 +3194,6 @@ Converter::run()
/* prepare for IO lowering */
NIR_PASS_V(nir, nir_lower_flrp, lower_flrp, false);
NIR_PASS_V(nir, nir_opt_deref);
NIR_PASS_V(nir, nir_lower_regs_to_ssa);
NIR_PASS_V(nir, nir_lower_vars_to_ssa);
/* codegen assumes vec4 alignment for memory */

View File

@@ -4736,7 +4736,6 @@ bifrost_preprocess_nir(nir_shader *nir, unsigned gpu_id)
NIR_PASS_V(nir, pan_lower_sample_pos);
NIR_PASS_V(nir, nir_lower_bit_size, bi_lower_bit_size, NULL);
NIR_PASS_V(nir, nir_lower_64bit_phis);
NIR_PASS_V(nir, nir_lower_regs_to_ssa);
NIR_PASS_V(nir, pan_nir_lower_64bit_intrin);
NIR_PASS_V(nir, pan_lower_helper_invocation);
NIR_PASS_V(nir, nir_lower_int64);

View File

@@ -369,10 +369,8 @@ midgard_preprocess_nir(nir_shader *nir, unsigned gpu_id)
NIR_PASS_V(nir, pan_nir_lower_64bit_intrin);
NIR_PASS_V(nir, nir_lower_frexp);
NIR_PASS_V(nir, midgard_nir_lower_global_load);
NIR_PASS_V(nir, nir_lower_regs_to_ssa);
nir_lower_idiv_options idiv_options = {
.allow_fp16 = true,
};