ac/radv: move lower_indirect_derefs() to ac_nir_to_llvm.c

Until LLVM handles indirects better, we will need to use these
workarounds in the radeonsi backend also.

Reviewed-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
This commit is contained in:
Timothy Arceri
2018-03-05 11:13:11 +11:00
parent eea20d59ab
commit 0f2c7341e8
5 changed files with 44 additions and 48 deletions

View File

@@ -7312,3 +7312,40 @@ void ac_create_gs_copy_shader(LLVMTargetMachineRef tm,
MESA_SHADER_VERTEX, MESA_SHADER_VERTEX,
dump_shader, options->supports_spill); dump_shader, options->supports_spill);
} }
void
ac_lower_indirect_derefs(struct nir_shader *nir, enum chip_class chip_class)
{
	/* Lower indirect variable dereferences that the LLVM backend cannot
	 * (or should not) handle, then hand the mask to the generic NIR pass.
	 *
	 * While it would be nice not to have this flag, we are constrained
	 * by the reality that LLVM 5.0 doesn't have working VGPR indexing
	 * on GFX9.
	 */
	const bool has_vgpr_indexing = chip_class <= VI;
	nir_variable_mode modes = 0;

	/* TODO: Indirect indexing of GS inputs is unimplemented.
	 *
	 * TCS and TES load inputs directly from LDS or offchip memory, so
	 * indirect indexing is trivial.
	 */
	const bool lower_inputs =
		nir->info.stage == MESA_SHADER_GEOMETRY ||
		(!has_vgpr_indexing &&
		 nir->info.stage != MESA_SHADER_TESS_CTRL &&
		 nir->info.stage != MESA_SHADER_TESS_EVAL);
	if (lower_inputs)
		modes |= nir_var_shader_in;

	/* TCS outputs live in LDS, so they never need lowering either. */
	if (!has_vgpr_indexing && nir->info.stage != MESA_SHADER_TESS_CTRL)
		modes |= nir_var_shader_out;

	/* TODO: We shouldn't need to do this, however LLVM isn't currently
	 * smart enough to handle indirects without causing excess spilling
	 * causing the gpu to hang.
	 *
	 * See the following thread for more details of the problem:
	 * https://lists.freedesktop.org/archives/mesa-dev/2017-July/162106.html
	 */
	modes |= nir_var_local;

	nir_lower_indirect_derefs(nir, modes);
}

View File

@@ -229,6 +229,8 @@ void ac_create_gs_copy_shader(LLVMTargetMachineRef tm,
const struct ac_nir_compiler_options *options, const struct ac_nir_compiler_options *options,
bool dump_shader); bool dump_shader);
void ac_lower_indirect_derefs(struct nir_shader *nir, enum chip_class);
void ac_nir_translate(struct ac_llvm_context *ac, struct ac_shader_abi *abi, void ac_nir_translate(struct ac_llvm_context *ac, struct ac_shader_abi *abi,
struct nir_shader *nir); struct nir_shader *nir);

View File

@@ -1527,14 +1527,14 @@ radv_link_shaders(struct radv_pipeline *pipeline, nir_shader **shaders)
if (progress) { if (progress) {
if (nir_lower_global_vars_to_local(ordered_shaders[i])) { if (nir_lower_global_vars_to_local(ordered_shaders[i])) {
radv_lower_indirect_derefs(ordered_shaders[i], ac_lower_indirect_derefs(ordered_shaders[i],
pipeline->device->physical_device); pipeline->device->physical_device->rad_info.chip_class);
} }
radv_optimize_nir(ordered_shaders[i]); radv_optimize_nir(ordered_shaders[i]);
if (nir_lower_global_vars_to_local(ordered_shaders[i - 1])) { if (nir_lower_global_vars_to_local(ordered_shaders[i - 1])) {
radv_lower_indirect_derefs(ordered_shaders[i - 1], ac_lower_indirect_derefs(ordered_shaders[i - 1],
pipeline->device->physical_device); pipeline->device->physical_device->rad_info.chip_class);
} }
radv_optimize_nir(ordered_shaders[i - 1]); radv_optimize_nir(ordered_shaders[i - 1]);
} }

View File

@@ -115,45 +115,6 @@ void radv_DestroyShaderModule(
vk_free2(&device->alloc, pAllocator, module); vk_free2(&device->alloc, pAllocator, module);
} }
bool
radv_lower_indirect_derefs(struct nir_shader *nir,
			   struct radv_physical_device *device)
{
	/* Build the set of variable modes whose indirect derefs must be
	 * lowered for this shader stage/GPU, then run the generic NIR pass.
	 * Returns true if the shader was changed.
	 *
	 * While it would be nice not to have this flag, we are constrained
	 * by the reality that LLVM 5.0 doesn't have working VGPR indexing
	 * on GFX9.
	 */
	const bool vgpr_indexing_works = device->rad_info.chip_class <= VI;

	/* TODO: We shouldn't need to lower locals, however LLVM isn't
	 * currently smart enough to handle indirects without causing excess
	 * spilling causing the gpu to hang.
	 *
	 * See the following thread for more details of the problem:
	 * https://lists.freedesktop.org/archives/mesa-dev/2017-July/162106.html
	 */
	nir_variable_mode mask = nir_var_local;

	/* TODO: Indirect indexing of GS inputs is unimplemented.
	 *
	 * TCS and TES load inputs directly from LDS or offchip memory, so
	 * indirect indexing is trivial.
	 */
	if (nir->info.stage == MESA_SHADER_GEOMETRY ||
	    (!vgpr_indexing_works &&
	     nir->info.stage != MESA_SHADER_TESS_CTRL &&
	     nir->info.stage != MESA_SHADER_TESS_EVAL))
		mask |= nir_var_shader_in;

	if (!vgpr_indexing_works &&
	    nir->info.stage != MESA_SHADER_TESS_CTRL)
		mask |= nir_var_shader_out;

	return nir_lower_indirect_derefs(nir, mask);
}
void void
radv_optimize_nir(struct nir_shader *shader) radv_optimize_nir(struct nir_shader *shader)
{ {
@@ -304,7 +265,7 @@ radv_shader_compile_to_nir(struct radv_device *device,
nir_lower_var_copies(nir); nir_lower_var_copies(nir);
nir_lower_global_vars_to_local(nir); nir_lower_global_vars_to_local(nir);
nir_remove_dead_variables(nir, nir_var_local); nir_remove_dead_variables(nir, nir_var_local);
radv_lower_indirect_derefs(nir, device->physical_device); ac_lower_indirect_derefs(nir, device->physical_device->rad_info.chip_class);
radv_optimize_nir(nir); radv_optimize_nir(nir);
return nir; return nir;

View File

@@ -104,10 +104,6 @@ void
radv_shader_variant_destroy(struct radv_device *device, radv_shader_variant_destroy(struct radv_device *device,
struct radv_shader_variant *variant); struct radv_shader_variant *variant);
bool
radv_lower_indirect_derefs(struct nir_shader *nir,
struct radv_physical_device *device);
const char * const char *
radv_get_shader_name(struct radv_shader_variant *var, gl_shader_stage stage); radv_get_shader_name(struct radv_shader_variant *var, gl_shader_stage stage);