radv: force using indirect descriptor sets for indirect compute pipelines
Emitting descriptors in DGC is a huge pain, but using indirect descriptor sets is much easier.

Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/29700>
This commit is contained in:

committed by
Marge Bot

parent
ef21df917f
commit
b1ba02e707
@@ -105,7 +105,7 @@ radv_compute_pipeline_init(struct radv_compute_pipeline *pipeline, const struct
|
||||
|
||||
struct radv_shader *
|
||||
radv_compile_cs(struct radv_device *device, struct vk_pipeline_cache *cache, struct radv_shader_stage *cs_stage,
|
||||
bool keep_executable_info, bool keep_statistic_info, bool is_internal,
|
||||
bool keep_executable_info, bool keep_statistic_info, bool is_internal, bool is_indirect_bindable,
|
||||
struct radv_shader_binary **cs_binary)
|
||||
{
|
||||
struct radv_shader *cs_shader;
|
||||
@@ -121,7 +121,7 @@ radv_compile_cs(struct radv_device *device, struct vk_pipeline_cache *cache, str
|
||||
/* Run the shader info pass. */
|
||||
radv_nir_shader_info_init(cs_stage->stage, MESA_SHADER_NONE, &cs_stage->info);
|
||||
radv_nir_shader_info_pass(device, cs_stage->nir, &cs_stage->layout, &cs_stage->key, NULL, RADV_PIPELINE_COMPUTE,
|
||||
false, &cs_stage->info);
|
||||
false, is_indirect_bindable, &cs_stage->info);
|
||||
|
||||
radv_declare_shader_args(device, NULL, &cs_stage->info, MESA_SHADER_COMPUTE, MESA_SHADER_NONE, &cs_stage->args);
|
||||
|
||||
@@ -217,11 +217,13 @@ radv_compute_pipeline_compile(const VkComputePipelineCreateInfo *pCreateInfo, st
|
||||
|
||||
const struct radv_shader_stage_key stage_key =
|
||||
radv_pipeline_get_shader_key(device, &pCreateInfo->stage, pipeline->base.create_flags, pCreateInfo->pNext);
|
||||
const bool is_indirect_bindable = !!(pipeline->base.create_flags & VK_PIPELINE_CREATE_INDIRECT_BINDABLE_BIT_NV);
|
||||
|
||||
radv_pipeline_stage_init(pStage, pipeline_layout, &stage_key, &cs_stage);
|
||||
|
||||
pipeline->base.shaders[MESA_SHADER_COMPUTE] = radv_compile_cs(
|
||||
device, cache, &cs_stage, keep_executable_info, keep_statistic_info, pipeline->base.is_internal, &cs_binary);
|
||||
pipeline->base.shaders[MESA_SHADER_COMPUTE] =
|
||||
radv_compile_cs(device, cache, &cs_stage, keep_executable_info, keep_statistic_info, pipeline->base.is_internal,
|
||||
is_indirect_bindable, &cs_binary);
|
||||
|
||||
cs_stage.feedback.duration += os_time_get_nano() - stage_start;
|
||||
|
||||
|
@@ -53,7 +53,8 @@ void radv_compute_pipeline_init(struct radv_compute_pipeline *pipeline, const st
|
||||
|
||||
struct radv_shader *radv_compile_cs(struct radv_device *device, struct vk_pipeline_cache *cache,
|
||||
struct radv_shader_stage *cs_stage, bool keep_executable_info,
|
||||
bool keep_statistic_info, bool is_internal, struct radv_shader_binary **cs_binary);
|
||||
bool keep_statistic_info, bool is_internal, bool is_indirect_bindable,
|
||||
struct radv_shader_binary **cs_binary);
|
||||
|
||||
VkResult radv_compute_pipeline_create(VkDevice _device, VkPipelineCache _cache,
|
||||
const VkComputePipelineCreateInfo *pCreateInfo,
|
||||
|
@@ -2066,7 +2066,7 @@ radv_fill_shader_info(struct radv_device *device, const enum radv_pipeline_type
|
||||
}
|
||||
|
||||
radv_nir_shader_info_pass(device, stages[i].nir, &stages[i].layout, &stages[i].key, gfx_state, pipeline_type,
|
||||
consider_force_vrs, &stages[i].info);
|
||||
consider_force_vrs, false, &stages[i].info);
|
||||
}
|
||||
|
||||
radv_nir_shader_info_link(device, gfx_state, stages);
|
||||
@@ -2147,7 +2147,7 @@ radv_create_gs_copy_shader(struct radv_device *device, struct vk_pipeline_cache
|
||||
};
|
||||
radv_nir_shader_info_init(gs_copy_stage.stage, MESA_SHADER_FRAGMENT, &gs_copy_stage.info);
|
||||
radv_nir_shader_info_pass(device, nir, &gs_stage->layout, &gs_stage->key, gfx_state, RADV_PIPELINE_GRAPHICS, false,
|
||||
&gs_copy_stage.info);
|
||||
false, &gs_copy_stage.info);
|
||||
gs_copy_stage.info.wave_size = 64; /* Wave32 not supported. */
|
||||
gs_copy_stage.info.workgroup_size = 64; /* HW VS: separate waves, no workgroups */
|
||||
gs_copy_stage.info.so = gs_info->so;
|
||||
|
@@ -361,7 +361,7 @@ radv_rt_nir_to_asm(struct radv_device *device, struct vk_pipeline_cache *cache,
|
||||
nir_shader_gather_info(stage->nir, nir_shader_get_entrypoint(stage->nir));
|
||||
radv_nir_shader_info_init(stage->stage, MESA_SHADER_NONE, &stage->info);
|
||||
radv_nir_shader_info_pass(device, stage->nir, &stage->layout, &stage->key, NULL, RADV_PIPELINE_RAY_TRACING, false,
|
||||
&stage->info);
|
||||
false, &stage->info);
|
||||
|
||||
/* Declare shader arguments. */
|
||||
radv_declare_shader_args(device, NULL, &stage->info, stage->stage, MESA_SHADER_NONE, &stage->args);
|
||||
|
@@ -899,7 +899,7 @@ radv_declare_shader_args(const struct radv_device *device, const struct radv_gra
|
||||
|
||||
uint32_t num_desc_set = util_bitcount(info->desc_set_used_mask);
|
||||
|
||||
if (info->merged_shader_compiled_separately || remaining_sgprs < num_desc_set) {
|
||||
if (info->force_indirect_desc_sets || remaining_sgprs < num_desc_set) {
|
||||
user_sgpr_info.indirect_all_descriptor_sets = true;
|
||||
user_sgpr_info.remaining_sgprs--;
|
||||
} else {
|
||||
|
@@ -1121,7 +1121,7 @@ void
|
||||
radv_nir_shader_info_pass(struct radv_device *device, const struct nir_shader *nir,
|
||||
const struct radv_shader_layout *layout, const struct radv_shader_stage_key *stage_key,
|
||||
const struct radv_graphics_state_key *gfx_state, const enum radv_pipeline_type pipeline_type,
|
||||
bool consider_force_vrs, struct radv_shader_info *info)
|
||||
bool consider_force_vrs, bool is_indirect_bindable, struct radv_shader_info *info)
|
||||
{
|
||||
const struct radv_physical_device *pdev = radv_device_physical(device);
|
||||
struct nir_function *func = (struct nir_function *)exec_list_get_head_const(&nir->functions);
|
||||
@@ -1232,6 +1232,7 @@ radv_nir_shader_info_pass(struct radv_device *device, const struct nir_shader *n
|
||||
|
||||
info->user_data_0 = radv_get_user_data_0(device, info);
|
||||
info->merged_shader_compiled_separately = radv_is_merged_shader_compiled_separately(device, info);
|
||||
info->force_indirect_desc_sets = info->merged_shader_compiled_separately || is_indirect_bindable;
|
||||
|
||||
switch (nir->info.stage) {
|
||||
case MESA_SHADER_COMPUTE:
|
||||
|
@@ -108,6 +108,7 @@ struct radv_shader_info {
|
||||
bool outputs_linked;
|
||||
bool has_epilog; /* Only for TCS or PS */
|
||||
bool merged_shader_compiled_separately; /* GFX9+ */
|
||||
bool force_indirect_desc_sets;
|
||||
|
||||
struct {
|
||||
uint8_t output_usage_mask[VARYING_SLOT_VAR31 + 1];
|
||||
@@ -316,7 +317,7 @@ void radv_nir_shader_info_pass(struct radv_device *device, const struct nir_shad
|
||||
const struct radv_shader_layout *layout, const struct radv_shader_stage_key *stage_key,
|
||||
const struct radv_graphics_state_key *gfx_state,
|
||||
const enum radv_pipeline_type pipeline_type, bool consider_force_vrs,
|
||||
struct radv_shader_info *info);
|
||||
bool is_indirect_bindable, struct radv_shader_info *info);
|
||||
|
||||
void gfx10_get_ngg_info(const struct radv_device *device, struct radv_shader_info *es_info,
|
||||
struct radv_shader_info *gs_info, struct gfx10_ngg_info *out);
|
||||
|
@@ -248,7 +248,7 @@ radv_shader_object_init_compute(struct radv_shader_object *shader_obj, struct ra
|
||||
|
||||
radv_shader_stage_init(pCreateInfo, &stage);
|
||||
|
||||
struct radv_shader *cs_shader = radv_compile_cs(device, NULL, &stage, true, false, false, &cs_binary);
|
||||
struct radv_shader *cs_shader = radv_compile_cs(device, NULL, &stage, true, false, false, false, &cs_binary);
|
||||
|
||||
ralloc_free(stage.nir);
|
||||
|
||||
|
Reference in New Issue
Block a user