intel/nir: Stop adding redundant barriers
Now that both GLSL and SPIR-V are adding shared and tcs_patch barriers
(as appropriate) prior to the nir_intrinsic_barrier, we don't need to do
it ourselves in the back-end.  This reverts commit
26e950a5de01564e3b5f2148ae994454ae5205fe.

Reviewed-by: Caio Marcelo de Oliveira Filho <caio.oliveira@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/merge_requests/3307>
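For context, this means a GLSL barrier() in a compute shader now reaches
the backend as a shared memory barrier followed by the execution barrier,
both emitted by the frontend.  A minimal sketch of that pattern with
nir_builder, using the same intrinsics as the code removed below (the
helper name emit_cs_barrier is hypothetical; the real emission lives in
the GLSL and SPIR-V to NIR paths):

#include "nir_builder.h"

/* Illustrative sketch only: roughly how a frontend of this era emits
 * the barrier pair for a GLSL barrier() in a compute shader.
 */
static void
emit_cs_barrier(nir_builder *b)
{
   /* Shared memory barrier first, so shared-memory stores are visible
    * across the workgroup...
    */
   nir_intrinsic_instr *mem_bar =
      nir_intrinsic_instr_create(b->shader,
                                 nir_intrinsic_memory_barrier_shared);
   nir_builder_instr_insert(b, &mem_bar->instr);

   /* ...then the execution barrier itself. */
   nir_intrinsic_instr *exec_bar =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_barrier);
   nir_builder_instr_insert(b, &exec_bar->instr);
}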
committed by Marge Bot
parent ba43b66dc9
commit bd3ab75aef
@@ -55,20 +55,6 @@ lower_cs_intrinsics_convert_block(struct lower_intrinsics_state *state,
 
          nir_ssa_def *sysval;
          switch (intrinsic->intrinsic) {
-         case nir_intrinsic_barrier: {
-            /* Our HW barrier instruction doesn't do a memory barrier for us but
-             * the GLSL barrier() intrinsic does for shared memory.  Insert a
-             * shared memory barrier before every barrier().
-             */
-            b->cursor = nir_before_instr(&intrinsic->instr);
-
-            nir_intrinsic_instr *shared_barrier =
-               nir_intrinsic_instr_create(b->shader,
-                                          nir_intrinsic_memory_barrier_shared);
-            nir_builder_instr_insert(b, &shared_barrier->instr);
-            continue;
-         }
-
          case nir_intrinsic_load_local_invocation_index:
          case nir_intrinsic_load_local_invocation_id: {
             /* First time we are using those, so let's calculate them. */