bi: Use nir_opt_load_store_vectorize

Ensure we vectorize loads and stores when possible. Also move
nir_lower_pack after loop optimization.

This drastically reduces the shader size of
"dEQP-VK.graphicsfuzz.spv-stable-maze-flatten-copy-composite", allowing
it to pass instead of timing out, and it might greatly help other
shaders as well.

Signed-off-by: Mary Guillemard <mary.guillemard@collabora.com>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/30484>
commit 3904cfabd6 (parent 33fef27356)
Author: Mary Guillemard
Date: 2024-08-02 09:42:32 +02:00


@@ -4530,11 +4530,28 @@ mem_access_size_align_cb(nir_intrinsic_op intrin, uint8_t bytes,
    };
 }
 
+static bool
+mem_vectorize_cb(unsigned align_mul, unsigned align_offset, unsigned bit_size,
+                 unsigned num_components, nir_intrinsic_instr *low,
+                 nir_intrinsic_instr *high, void *data)
+{
+   /* Must be aligned to the size of the load */
+   unsigned align = nir_combined_align(align_mul, align_offset);
+   if ((bit_size / 8) > align)
+      return false;
+
+   if (num_components > 4)
+      return false;
+
+   if (bit_size > 32)
+      return false;
+
+   return true;
+}
+
 static void
 bi_optimize_nir(nir_shader *nir, unsigned gpu_id, bool is_blend)
 {
-   NIR_PASS(_, nir, nir_lower_pack);
-
    bool progress;
 
    do {
@@ -4559,6 +4576,14 @@ bi_optimize_nir(nir_shader *nir, unsigned gpu_id, bool is_blend)
       NIR_PASS(progress, nir, nir_opt_loop_unroll);
    } while (progress);
 
+   NIR_PASS(
+      progress, nir, nir_opt_load_store_vectorize,
+      &(const nir_load_store_vectorize_options){
+         .modes = nir_var_mem_global | nir_var_mem_shared | nir_var_shader_temp,
+         .callback = mem_vectorize_cb,
+      });
+   NIR_PASS(progress, nir, nir_lower_pack);
+
    /* TODO: Why is 64-bit getting rematerialized?
     * KHR-GLES31.core.shader_image_load_store.basic-allTargets-atomicFS */
    NIR_PASS(progress, nir, nir_lower_int64);
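
For illustration, the acceptance rule in mem_vectorize_cb can be tried
stand-alone. The sketch below is not part of the commit:
would_vectorize() and combined_align() are hypothetical helpers, with
combined_align() mirroring nir_combined_align()'s reduction (a nonzero
align_offset caps the guaranteed alignment at its lowest set bit).

    #include <stdbool.h>
    #include <stdio.h>

    /* Same reduction as NIR's nir_combined_align(): when align_offset is
     * nonzero, the guaranteed alignment is its lowest set bit; otherwise
     * it is align_mul itself. */
    static unsigned
    combined_align(unsigned align_mul, unsigned align_offset)
    {
       return align_offset ? (align_offset & -align_offset) : align_mul;
    }

    /* Mirror of mem_vectorize_cb above, minus the low/high/data
     * parameters that the callback ignores. */
    static bool
    would_vectorize(unsigned align_mul, unsigned align_offset,
                    unsigned bit_size, unsigned num_components)
    {
       /* Must be aligned to the size of the load */
       if ((bit_size / 8) > combined_align(align_mul, align_offset))
          return false;
       if (num_components > 4)
          return false;
       if (bit_size > 32)
          return false;
       return true;
    }

    int
    main(void)
    {
       /* Two 32-bit loads at a 4-byte-aligned base -> vec2: accepted. */
       printf("%d\n", would_vectorize(4, 0, 32, 2)); /* 1 */
       /* 64-bit accesses are never merged (bit_size > 32). */
       printf("%d\n", would_vectorize(8, 0, 64, 2)); /* 0 */
       /* 32-bit access with only 2-byte guaranteed alignment: rejected. */
       printf("%d\n", would_vectorize(4, 2, 32, 1)); /* 0 */
       return 0;
    }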