iris: Calculate uses_atomic_load_store after all lowering
The lowering passes will soon be moved to another function, so computing this flag after lowering will be the only option. As a side benefit, this allows eliminating the uses_atomic_load_store **pointer** parameter from brw_nir_lower_storage_image. For some reason crocus was passing false instead of NULL. Reviewed-by: Kenneth Graunke <kenneth@whitecape.org> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/12858>
This commit is contained in:
@@ -2683,7 +2683,7 @@ crocus_create_uncompiled_shader(struct pipe_context *ctx,
|
||||
|
||||
brw_preprocess_nir(screen->compiler, nir, NULL);
|
||||
|
||||
NIR_PASS_V(nir, brw_nir_lower_storage_image, devinfo, false);
|
||||
NIR_PASS_V(nir, brw_nir_lower_storage_image, devinfo);
|
||||
NIR_PASS_V(nir, crocus_lower_storage_image_derefs);
|
||||
|
||||
nir_sweep(nir);
|
||||
|
@@ -300,6 +300,55 @@ iris_lower_storage_image_derefs(nir_shader *nir)
|
||||
}
|
||||
}
|
||||
|
||||
static bool
|
||||
iris_uses_image_atomic(const nir_shader *shader)
|
||||
{
|
||||
nir_foreach_function(function, shader) {
|
||||
if (function->impl == NULL)
|
||||
continue;
|
||||
|
||||
nir_foreach_block(block, function->impl) {
|
||||
nir_foreach_instr(instr, block) {
|
||||
if (instr->type != nir_instr_type_intrinsic)
|
||||
continue;
|
||||
|
||||
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
|
||||
switch (intrin->intrinsic) {
|
||||
case nir_intrinsic_image_deref_atomic_add:
|
||||
case nir_intrinsic_image_deref_atomic_imin:
|
||||
case nir_intrinsic_image_deref_atomic_umin:
|
||||
case nir_intrinsic_image_deref_atomic_imax:
|
||||
case nir_intrinsic_image_deref_atomic_umax:
|
||||
case nir_intrinsic_image_deref_atomic_and:
|
||||
case nir_intrinsic_image_deref_atomic_or:
|
||||
case nir_intrinsic_image_deref_atomic_xor:
|
||||
case nir_intrinsic_image_deref_atomic_exchange:
|
||||
case nir_intrinsic_image_deref_atomic_comp_swap:
|
||||
unreachable("Should have been lowered in "
|
||||
"iris_lower_storage_image_derefs");
|
||||
|
||||
case nir_intrinsic_image_atomic_add:
|
||||
case nir_intrinsic_image_atomic_imin:
|
||||
case nir_intrinsic_image_atomic_umin:
|
||||
case nir_intrinsic_image_atomic_imax:
|
||||
case nir_intrinsic_image_atomic_umax:
|
||||
case nir_intrinsic_image_atomic_and:
|
||||
case nir_intrinsic_image_atomic_or:
|
||||
case nir_intrinsic_image_atomic_xor:
|
||||
case nir_intrinsic_image_atomic_exchange:
|
||||
case nir_intrinsic_image_atomic_comp_swap:
|
||||
return true;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Undo nir_lower_passthrough_edgeflags but keep the inputs_read flag.
|
||||
*/
|
||||
@@ -2395,12 +2444,13 @@ iris_create_uncompiled_shader(struct iris_screen *screen,
|
||||
|
||||
brw_preprocess_nir(screen->compiler, nir, NULL);
|
||||
|
||||
NIR_PASS_V(nir, brw_nir_lower_storage_image, devinfo,
|
||||
&ish->uses_atomic_load_store);
|
||||
NIR_PASS_V(nir, brw_nir_lower_storage_image, devinfo);
|
||||
NIR_PASS_V(nir, iris_lower_storage_image_derefs);
|
||||
|
||||
nir_sweep(nir);
|
||||
|
||||
ish->uses_atomic_load_store = iris_uses_image_atomic(nir);
|
||||
|
||||
ish->program_id = get_new_program_id(screen);
|
||||
ish->nir = nir;
|
||||
if (so_info) {
|
||||
|
@@ -123,8 +123,7 @@ bool brw_nir_lower_conversions(nir_shader *nir);
|
||||
bool brw_nir_lower_scoped_barriers(nir_shader *nir);
|
||||
|
||||
bool brw_nir_lower_storage_image(nir_shader *nir,
|
||||
const struct intel_device_info *devinfo,
|
||||
bool *uses_atomic_load_store);
|
||||
const struct intel_device_info *devinfo);
|
||||
void brw_nir_rewrite_image_intrinsic(nir_intrinsic_instr *intrin,
|
||||
nir_ssa_def *index);
|
||||
void brw_nir_rewrite_bindless_image_intrinsic(nir_intrinsic_instr *intrin,
|
||||
|
@@ -671,8 +671,7 @@ lower_image_size_instr(nir_builder *b,
|
||||
|
||||
bool
|
||||
brw_nir_lower_storage_image(nir_shader *shader,
|
||||
const struct intel_device_info *devinfo,
|
||||
bool *uses_atomic_load_store)
|
||||
const struct intel_device_info *devinfo)
|
||||
{
|
||||
bool progress = false;
|
||||
|
||||
@@ -716,8 +715,6 @@ brw_nir_lower_storage_image(nir_shader *shader,
|
||||
case nir_intrinsic_image_deref_atomic_xor:
|
||||
case nir_intrinsic_image_deref_atomic_exchange:
|
||||
case nir_intrinsic_image_deref_atomic_comp_swap:
|
||||
if (uses_atomic_load_store)
|
||||
*uses_atomic_load_store = true;
|
||||
if (lower_image_atomic_instr(&b, devinfo, intrin))
|
||||
impl_progress = true;
|
||||
break;
|
||||
|
@@ -833,7 +833,7 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
|
||||
|
||||
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
|
||||
|
||||
NIR_PASS_V(nir, brw_nir_lower_storage_image, compiler->devinfo, NULL);
|
||||
NIR_PASS_V(nir, brw_nir_lower_storage_image, compiler->devinfo);
|
||||
|
||||
NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_global,
|
||||
nir_address_format_64bit_global);
|
||||
|
@@ -186,7 +186,7 @@ brw_nir_lower_resources(nir_shader *nir, struct gl_shader_program *shader_prog,
|
||||
BITSET_COPY(prog->info.textures_used, prog->nir->info.textures_used);
|
||||
BITSET_COPY(prog->info.textures_used_by_txf, prog->nir->info.textures_used_by_txf);
|
||||
|
||||
NIR_PASS_V(prog->nir, brw_nir_lower_storage_image, devinfo, NULL);
|
||||
NIR_PASS_V(prog->nir, brw_nir_lower_storage_image, devinfo);
|
||||
|
||||
if (prog->nir->info.stage == MESA_SHADER_COMPUTE &&
|
||||
shader_prog->data->spirv) {
|
||||
|
Reference in New Issue
Block a user