intel/compiler: Use gl_shader_stage_uses_workgroup() helpers

Use them instead of checking for MESA_SHADER_COMPUTE (and
MESA_SHADER_KERNEL) explicitly.  Where appropriate, also use
gl_shader_stage_is_compute().

This allows most of the workgroup-related lowering to be applied to
Task and Mesh shaders.  These stages will be added later and will
"inherit" from the cs_prog_data structure.

Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/13629>
commit 858424bd2e
parent c4355d3f24
Author: Caio Oliveira
Date:   2021-05-18 10:01:49 -07:00

6 changed files with 16 additions and 13 deletions
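
For context, both helpers used in this change are declared in
src/compiler/shader_enums.h, which also provides the gl_shader_stage
type and the MESA_SHADER_* enums. The sketch below approximates what
they check; it is not the verbatim definition, and MESA_SHADER_TASK /
MESA_SHADER_MESH only factor in once those stages land:

/* Approximate sketch of the shader_enums.h helpers; see that header
 * for the authoritative definitions.
 */
static inline bool
gl_shader_stage_is_compute(gl_shader_stage stage)
{
   /* Compute shaders proper, plus OpenCL-style kernels. */
   return stage == MESA_SHADER_COMPUTE || stage == MESA_SHADER_KERNEL;
}

static inline bool
gl_shader_stage_uses_workgroup(gl_shader_stage stage)
{
   /* Any stage that executes as a workgroup: compute/kernel today,
    * Task and Mesh once those stages exist.
    */
   return gl_shader_stage_is_compute(stage) ||
          stage == MESA_SHADER_TASK ||
          stage == MESA_SHADER_MESH;
}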

@@ -1451,7 +1451,7 @@ DEFINE_PROG_DATA_DOWNCAST(tcs, prog_data->stage == MESA_SHADER_TESS_CTRL)
 DEFINE_PROG_DATA_DOWNCAST(tes, prog_data->stage == MESA_SHADER_TESS_EVAL)
 DEFINE_PROG_DATA_DOWNCAST(gs, prog_data->stage == MESA_SHADER_GEOMETRY)
 DEFINE_PROG_DATA_DOWNCAST(wm, prog_data->stage == MESA_SHADER_FRAGMENT)
-DEFINE_PROG_DATA_DOWNCAST(cs, prog_data->stage == MESA_SHADER_COMPUTE)
+DEFINE_PROG_DATA_DOWNCAST(cs, gl_shader_stage_uses_workgroup(prog_data->stage))
 DEFINE_PROG_DATA_DOWNCAST(bs, brw_shader_stage_is_bindless(prog_data->stage))
 DEFINE_PROG_DATA_DOWNCAST(vue, prog_data->stage == MESA_SHADER_VERTEX ||

@@ -10436,7 +10436,7 @@ brw_fs_test_dispatch_packing(const fs_builder &bld)
 unsigned
 fs_visitor::workgroup_size() const
 {
-   assert(stage == MESA_SHADER_COMPUTE);
+   assert(gl_shader_stage_uses_workgroup(stage));
    const struct brw_cs_prog_data *cs = brw_cs_prog_data(prog_data);
    return cs->local_size[0] * cs->local_size[1] * cs->local_size[2];
 }

@@ -192,8 +192,7 @@ emit_system_values_block(nir_block *block, fs_visitor *v)
       break;
    case nir_intrinsic_load_workgroup_id:
-      assert(v->stage == MESA_SHADER_COMPUTE ||
-             v->stage == MESA_SHADER_KERNEL);
+      assert(gl_shader_stage_uses_workgroup(v->stage));
       reg = &v->nir_system_values[SYSTEM_VALUE_WORKGROUP_ID];
       if (reg->file == BAD_FILE)
          *reg = *v->emit_cs_work_group_id_setup();
@@ -3802,7 +3801,7 @@ void
 fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
                                   nir_intrinsic_instr *instr)
 {
-   assert(stage == MESA_SHADER_COMPUTE || stage == MESA_SHADER_KERNEL);
+   assert(gl_shader_stage_uses_workgroup(stage));
    struct brw_cs_prog_data *cs_prog_data = brw_cs_prog_data(prog_data);
    fs_reg dest;
@@ -3885,7 +3884,6 @@ fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
    case nir_intrinsic_load_shared: {
       assert(devinfo->ver >= 7);
-      assert(stage == MESA_SHADER_COMPUTE || stage == MESA_SHADER_KERNEL);
       const unsigned bit_size = nir_dest_bit_size(instr->dest);
       fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
@@ -3922,7 +3920,6 @@ fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
    case nir_intrinsic_store_shared: {
       assert(devinfo->ver >= 7);
-      assert(stage == MESA_SHADER_COMPUTE || stage == MESA_SHADER_KERNEL);
       const unsigned bit_size = nir_src_bit_size(instr->src[0]);
       fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
@@ -3959,8 +3956,12 @@ fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
    }
    case nir_intrinsic_load_workgroup_size: {
-      assert(compiler->lower_variable_group_size);
-      assert(nir->info.workgroup_size_variable);
+      /* For non-variable case, this should've been lowered already. */
+      assert(nir->info.workgroup_size_variable);
+      assert(compiler->lower_variable_group_size);
+      assert(gl_shader_stage_is_compute(stage));
       for (unsigned i = 0; i < 3; i++) {
          bld.MOV(retype(offset(dest, bld, i), BRW_REGISTER_TYPE_UD),
                  group_size[i]);

@@ -1047,7 +1047,7 @@ void
 fs_visitor::emit_barrier()
 {
    /* We are getting the barrier ID from the compute shader header */
-   assert(stage == MESA_SHADER_COMPUTE || stage == MESA_SHADER_KERNEL);
+   assert(gl_shader_stage_uses_workgroup(stage));
    fs_reg payload = fs_reg(VGRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);
@@ -1062,6 +1062,8 @@ fs_visitor::emit_barrier()
                                0, 1, 0);
       bld.exec_all().group(2, 0).MOV(m0_10ub, r0_11ub);
    } else {
+      assert(gl_shader_stage_is_compute(stage));
       uint32_t barrier_id_mask;
       switch (devinfo->ver) {
       case 7:

@@ -1285,7 +1285,7 @@ get_subgroup_size(gl_shader_stage stage,
    case BRW_SUBGROUP_SIZE_REQUIRE_8:
    case BRW_SUBGROUP_SIZE_REQUIRE_16:
    case BRW_SUBGROUP_SIZE_REQUIRE_32:
-      assert(stage == MESA_SHADER_COMPUTE);
+      assert(gl_shader_stage_uses_workgroup(stage));
       /* These enum values are expressly chosen to be equal to the subgroup
        * size that they require.
        */

@@ -265,15 +265,15 @@ lower_cs_intrinsics_convert_impl(struct lower_intrinsics_state *state)
 bool
 brw_nir_lower_cs_intrinsics(nir_shader *nir)
 {
-   assert(nir->info.stage == MESA_SHADER_COMPUTE ||
-          nir->info.stage == MESA_SHADER_KERNEL);
+   assert(gl_shader_stage_uses_workgroup(nir->info.stage));
    struct lower_intrinsics_state state = {
       .nir = nir,
    };
    /* Constraints from NV_compute_shader_derivatives. */
-   if (!nir->info.workgroup_size_variable) {
+   if (gl_shader_stage_is_compute(nir->info.stage) &&
+       !nir->info.workgroup_size_variable) {
       if (nir->info.cs.derivative_group == DERIVATIVE_GROUP_QUADS) {
          assert(nir->info.workgroup_size[0] % 2 == 0);
          assert(nir->info.workgroup_size[1] % 2 == 0);