radv: split radv_pipeline into radv_{graphics,compute,library}_pipeline

Instead of using a union in radv_pipeline, this introduces new
structures for graphics, compute and library pipelines which inherit
from radv_pipeline. This will ease the graphics pipeline library
implementation.

There is still no radv_raytracing_pipeline because RADV currently
uses a compute pipeline for everything, but one could be introduced
later when necessary.
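The pattern is ordinary C struct embedding: each derived pipeline holds the shared radv_pipeline as its first member named base, upcasts are spelled &derived->base, and checked downcasts come from the RADV_DECL_PIPELINE_DOWNCAST macro added in radv_private.h below. A minimal sketch of the idea, with the field lists trimmed to placeholders rather than the real driver definitions:

#include <assert.h>

enum radv_pipeline_type {
   RADV_PIPELINE_GRAPHICS,
   RADV_PIPELINE_COMPUTE,
   RADV_PIPELINE_LIBRARY,
};

struct radv_pipeline {
   enum radv_pipeline_type type;  /* discriminates the concrete pipeline type */
   /* ... state shared by all pipeline types ... */
};

struct radv_graphics_pipeline {
   struct radv_pipeline base;     /* first member, so the casts below are valid */
   /* ... graphics-only state ... */
};

/* What RADV_DECL_PIPELINE_DOWNCAST(graphics, RADV_PIPELINE_GRAPHICS) expands to:
 * a downcast that asserts on the discriminant before casting. */
static inline struct radv_graphics_pipeline *
radv_pipeline_to_graphics(struct radv_pipeline *pipeline)
{
   assert(pipeline->type == RADV_PIPELINE_GRAPHICS);
   return (struct radv_graphics_pipeline *)pipeline;
}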

Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Reviewed-by: Timur Kristóf <timur.kristof@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/16603>
Author:     Samuel Pitoiset
Date:       2022-05-18 11:26:53 +02:00
Committed:  Marge Bot
Commit:     23be0aad9c (parent fc3717df0c)
Stats:      9 changed files with 762 additions and 700 deletions

src/amd/vulkan/radv_cmd_buffer.c (diff suppressed: file too large)

src/amd/vulkan/radv_debug.c

@@ -462,9 +462,9 @@ radv_dump_shaders(struct radv_pipeline *pipeline, VkShaderStageFlagBits active_s
 }
 
 static void
-radv_dump_vertex_descriptors(struct radv_pipeline *pipeline, FILE *f)
+radv_dump_vertex_descriptors(struct radv_graphics_pipeline *pipeline, FILE *f)
 {
-   void *ptr = (uint64_t *)pipeline->device->trace_id_ptr;
+   void *ptr = (uint64_t *)pipeline->base.device->trace_id_ptr;
    uint32_t count = util_bitcount(pipeline->vb_desc_usage_mask);
    uint32_t *vb_ptr = &((uint32_t *)ptr)[3];
@@ -526,11 +526,13 @@ radv_dump_queue_state(struct radv_queue *queue, const char *dump_dir, FILE *f)
    pipeline = radv_get_saved_pipeline(queue->device, ring);
    if (pipeline) {
+      struct radv_graphics_pipeline *graphics_pipeline = radv_pipeline_to_graphics(pipeline);
+
       radv_dump_vs_prolog(pipeline, f);
       radv_dump_shaders(pipeline, pipeline->active_stages, dump_dir, f);
       if (!(queue->device->instance->debug_flags & RADV_DEBUG_NO_UMR))
          radv_dump_annotated_shaders(pipeline, pipeline->active_stages, f);
-      radv_dump_vertex_descriptors(pipeline, f);
+      radv_dump_vertex_descriptors(graphics_pipeline, f);
       radv_dump_descriptors(queue->device, f);
    }
 }

src/amd/vulkan/radv_meta.c

@@ -81,7 +81,7 @@ radv_meta_save(struct radv_meta_saved_state *state, struct radv_cmd_buffer *cmd_
    if (state->flags & RADV_META_SAVE_GRAPHICS_PIPELINE) {
       assert(!(state->flags & RADV_META_SAVE_COMPUTE_PIPELINE));
-      state->old_pipeline = cmd_buffer->state.pipeline;
+      state->old_graphics_pipeline = cmd_buffer->state.pipeline;
 
       /* Save all viewports. */
       state->dynamic.viewport.count = cmd_buffer->state.dynamic.viewport.count;
@@ -171,7 +171,7 @@ radv_meta_save(struct radv_meta_saved_state *state, struct radv_cmd_buffer *cmd_
    if (state->flags & RADV_META_SAVE_COMPUTE_PIPELINE) {
       assert(!(state->flags & RADV_META_SAVE_GRAPHICS_PIPELINE));
-      state->old_pipeline = cmd_buffer->state.compute_pipeline;
+      state->old_compute_pipeline = cmd_buffer->state.compute_pipeline;
    }
 
    if (state->flags & RADV_META_SAVE_DESCRIPTORS) {
@@ -204,7 +204,7 @@ radv_meta_restore(const struct radv_meta_saved_state *state, struct radv_cmd_buf
    if (state->flags & RADV_META_SAVE_GRAPHICS_PIPELINE) {
       radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_GRAPHICS,
-                           radv_pipeline_to_handle(state->old_pipeline));
+                           radv_pipeline_to_handle(&state->old_graphics_pipeline->base));
 
       cmd_buffer->state.dirty |= RADV_CMD_DIRTY_PIPELINE;
@@ -313,9 +313,9 @@ radv_meta_restore(const struct radv_meta_saved_state *state, struct radv_cmd_buf
    }
 
    if (state->flags & RADV_META_SAVE_COMPUTE_PIPELINE) {
-      if (state->old_pipeline) {
+      if (state->old_compute_pipeline) {
          radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE,
-                              radv_pipeline_to_handle(state->old_pipeline));
+                              radv_pipeline_to_handle(&state->old_compute_pipeline->base));
       }
    }
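
For reference, the call pattern around these helpers is unchanged by the split; a short sketch of a meta operation using the signatures visible above (example_meta_op is a hypothetical caller, and the comment in the middle stands in for any meta dispatch):

void
example_meta_op(struct radv_cmd_buffer *cmd_buffer)
{
   struct radv_meta_saved_state saved;

   /* Graphics and compute slots are mutually exclusive per save (see the
    * asserts above), which is what lets the old_pipeline field be split
    * into two typed fields. */
   radv_meta_save(&saved, cmd_buffer,
                  RADV_META_SAVE_GRAPHICS_PIPELINE | RADV_META_SAVE_DESCRIPTORS);

   /* ... bind the meta pipeline and emit the meta draw ... */

   radv_meta_restore(&saved, cmd_buffer);
}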

src/amd/vulkan/radv_meta.h

@@ -46,7 +46,8 @@ struct radv_meta_saved_state {
    uint32_t flags;
 
    struct radv_descriptor_set *old_descriptor_set0;
-   struct radv_pipeline *old_pipeline;
+   struct radv_graphics_pipeline *old_graphics_pipeline;
+   struct radv_compute_pipeline *old_compute_pipeline;
 
    struct radv_dynamic_state dynamic;
 
    char push_constants[MAX_PUSH_CONSTANTS_SIZE];
@@ -233,9 +234,9 @@ static inline bool
 radv_is_fmask_decompress_pipeline(struct radv_cmd_buffer *cmd_buffer)
 {
    struct radv_meta_state *meta_state = &cmd_buffer->device->meta_state;
-   struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
+   struct radv_graphics_pipeline *pipeline = cmd_buffer->state.pipeline;
 
-   return radv_pipeline_to_handle(pipeline) ==
+   return radv_pipeline_to_handle(&pipeline->base) ==
           meta_state->fast_clear_flush.fmask_decompress_pipeline;
 }
@@ -246,9 +247,10 @@ static inline bool
 radv_is_dcc_decompress_pipeline(struct radv_cmd_buffer *cmd_buffer)
 {
    struct radv_meta_state *meta_state = &cmd_buffer->device->meta_state;
-   struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
+   struct radv_graphics_pipeline *pipeline = cmd_buffer->state.pipeline;
 
-   return radv_pipeline_to_handle(pipeline) == meta_state->fast_clear_flush.dcc_decompress_pipeline;
+   return radv_pipeline_to_handle(&pipeline->base) ==
+          meta_state->fast_clear_flush.dcc_decompress_pipeline;
 }
 
 /* common nir builder helpers */

src/amd/vulkan/radv_pipeline.c (diff suppressed: file too large)

src/amd/vulkan/radv_pipeline_rt.c

@@ -41,9 +41,11 @@ radv_create_merged_rt_create_info(const VkRayTracingPipelineCreateInfoKHR *pCrea
    if (pCreateInfo->pLibraryInfo) {
       for (unsigned i = 0; i < pCreateInfo->pLibraryInfo->libraryCount; ++i) {
-         RADV_FROM_HANDLE(radv_pipeline, library, pCreateInfo->pLibraryInfo->pLibraries[i]);
-         total_stages += library->library.stage_count;
-         total_groups += library->library.group_count;
+         RADV_FROM_HANDLE(radv_pipeline, pipeline, pCreateInfo->pLibraryInfo->pLibraries[i]);
+         struct radv_library_pipeline *library_pipeline = radv_pipeline_to_library(pipeline);
+
+         total_stages += library_pipeline->stage_count;
+         total_groups += library_pipeline->group_count;
       }
    }
 
    VkPipelineShaderStageCreateInfo *stages = NULL;
@@ -66,12 +68,14 @@ radv_create_merged_rt_create_info(const VkRayTracingPipelineCreateInfoKHR *pCrea
    if (pCreateInfo->pLibraryInfo) {
       for (unsigned i = 0; i < pCreateInfo->pLibraryInfo->libraryCount; ++i) {
-         RADV_FROM_HANDLE(radv_pipeline, library, pCreateInfo->pLibraryInfo->pLibraries[i]);
-         for (unsigned j = 0; j < library->library.stage_count; ++j)
-            stages[total_stages + j] = library->library.stages[j];
-         for (unsigned j = 0; j < library->library.group_count; ++j) {
+         RADV_FROM_HANDLE(radv_pipeline, pipeline, pCreateInfo->pLibraryInfo->pLibraries[i]);
+         struct radv_library_pipeline *library_pipeline = radv_pipeline_to_library(pipeline);
+
+         for (unsigned j = 0; j < library_pipeline->stage_count; ++j)
+            stages[total_stages + j] = library_pipeline->stages[j];
+         for (unsigned j = 0; j < library_pipeline->group_count; ++j) {
             VkRayTracingShaderGroupCreateInfoKHR *dst = &groups[total_groups + j];
-            *dst = library->library.groups[j];
+            *dst = library_pipeline->groups[j];
             if (dst->generalShader != VK_SHADER_UNUSED_KHR)
                dst->generalShader += total_stages;
             if (dst->closestHitShader != VK_SHADER_UNUSED_KHR)
@@ -81,8 +85,8 @@ radv_create_merged_rt_create_info(const VkRayTracingPipelineCreateInfoKHR *pCrea
             if (dst->intersectionShader != VK_SHADER_UNUSED_KHR)
                dst->intersectionShader += total_stages;
          }
-         total_stages += library->library.stage_count;
-         total_groups += library->library.group_count;
+         total_stages += library_pipeline->stage_count;
+         total_groups += library_pipeline->group_count;
       }
    }
 
    return local_create_info;
@@ -94,14 +98,14 @@ radv_rt_pipeline_library_create(VkDevice _device, VkPipelineCache _cache,
                                 const VkAllocationCallbacks *pAllocator, VkPipeline *pPipeline)
 {
    RADV_FROM_HANDLE(radv_device, device, _device);
-   struct radv_pipeline *pipeline;
+   struct radv_library_pipeline *pipeline;
 
    pipeline = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*pipeline), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (pipeline == NULL)
       return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
 
-   radv_pipeline_init(device, pipeline, RADV_PIPELINE_LIBRARY);
+   radv_pipeline_init(device, &pipeline->base, RADV_PIPELINE_LIBRARY);
 
    VkRayTracingPipelineCreateInfoKHR local_create_info =
       radv_create_merged_rt_create_info(pCreateInfo);
@@ -109,40 +113,40 @@ radv_rt_pipeline_library_create(VkDevice _device, VkPipelineCache _cache,
       goto fail;
 
    if (local_create_info.stageCount) {
-      pipeline->library.stage_count = local_create_info.stageCount;
+      pipeline->stage_count = local_create_info.stageCount;
 
       size_t size = sizeof(VkPipelineShaderStageCreateInfo) * local_create_info.stageCount;
-      pipeline->library.stages = malloc(size);
-      if (!pipeline->library.stages)
+      pipeline->stages = malloc(size);
+      if (!pipeline->stages)
          goto fail;
 
-      memcpy(pipeline->library.stages, local_create_info.pStages, size);
+      memcpy(pipeline->stages, local_create_info.pStages, size);
 
       for (uint32_t i = 0; i < local_create_info.stageCount; i++) {
-         RADV_FROM_HANDLE(vk_shader_module, module, pipeline->library.stages[i].module);
+         RADV_FROM_HANDLE(vk_shader_module, module, pipeline->stages[i].module);
          struct vk_shader_module *new_module = vk_shader_module_clone(NULL, module);
-         pipeline->library.stages[i].module = vk_shader_module_to_handle(new_module);
+         pipeline->stages[i].module = vk_shader_module_to_handle(new_module);
       }
    }
 
    if (local_create_info.groupCount) {
       size_t size = sizeof(VkRayTracingShaderGroupCreateInfoKHR) * local_create_info.groupCount;
-      pipeline->library.group_count = local_create_info.groupCount;
-      pipeline->library.groups = malloc(size);
-      if (!pipeline->library.groups)
+      pipeline->group_count = local_create_info.groupCount;
+      pipeline->groups = malloc(size);
+      if (!pipeline->groups)
         goto fail;
 
-      memcpy(pipeline->library.groups, local_create_info.pGroups, size);
+      memcpy(pipeline->groups, local_create_info.pGroups, size);
    }
 
-   *pPipeline = radv_pipeline_to_handle(pipeline);
+   *pPipeline = radv_pipeline_to_handle(&pipeline->base);
 
    free((void *)local_create_info.pGroups);
    free((void *)local_create_info.pStages);
 
    return VK_SUCCESS;
 fail:
-   free(pipeline->library.groups);
-   free(pipeline->library.stages);
+   free(pipeline->groups);
+   free(pipeline->stages);
 
    free((void *)local_create_info.pGroups);
    free((void *)local_create_info.pStages);
 
    return VK_ERROR_OUT_OF_HOST_MEMORY;
@@ -1767,6 +1771,7 @@ radv_rt_pipeline_create(VkDevice _device, VkPipelineCache _cache,
    RADV_FROM_HANDLE(radv_device, device, _device);
    VkResult result;
    struct radv_pipeline *pipeline = NULL;
+   struct radv_compute_pipeline *compute_pipeline = NULL;
    struct radv_pipeline_shader_stack_size *stack_sizes = NULL;
    uint8_t hash[20];
    nir_shader *shader = NULL;
@@ -1830,32 +1835,33 @@ radv_rt_pipeline_create(VkDevice _device, VkPipelineCache _cache,
       goto shader_fail;
    }
 
    pipeline = radv_pipeline_from_handle(*pPipeline);
+   compute_pipeline = radv_pipeline_to_compute(pipeline);
 
-   pipeline->compute.rt_group_handles =
-      calloc(sizeof(*pipeline->compute.rt_group_handles), local_create_info.groupCount);
-   if (!pipeline->compute.rt_group_handles) {
+   compute_pipeline->rt_group_handles =
+      calloc(sizeof(*compute_pipeline->rt_group_handles), local_create_info.groupCount);
+   if (!compute_pipeline->rt_group_handles) {
       result = VK_ERROR_OUT_OF_HOST_MEMORY;
       goto shader_fail;
    }
 
-   pipeline->compute.dynamic_stack_size = radv_rt_pipeline_has_dynamic_stack_size(pCreateInfo);
+   compute_pipeline->dynamic_stack_size = radv_rt_pipeline_has_dynamic_stack_size(pCreateInfo);
 
    for (unsigned i = 0; i < local_create_info.groupCount; ++i) {
       const VkRayTracingShaderGroupCreateInfoKHR *group_info = &local_create_info.pGroups[i];
       switch (group_info->type) {
       case VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR:
          if (group_info->generalShader != VK_SHADER_UNUSED_KHR)
-            pipeline->compute.rt_group_handles[i].handles[0] = i + 2;
+            compute_pipeline->rt_group_handles[i].handles[0] = i + 2;
          break;
       case VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR:
          if (group_info->intersectionShader != VK_SHADER_UNUSED_KHR)
-            pipeline->compute.rt_group_handles[i].handles[1] = i + 2;
+            compute_pipeline->rt_group_handles[i].handles[1] = i + 2;
          FALLTHROUGH;
       case VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR:
          if (group_info->closestHitShader != VK_SHADER_UNUSED_KHR)
-            pipeline->compute.rt_group_handles[i].handles[0] = i + 2;
+            compute_pipeline->rt_group_handles[i].handles[0] = i + 2;
          if (group_info->anyHitShader != VK_SHADER_UNUSED_KHR)
-            pipeline->compute.rt_group_handles[i].handles[1] = i + 2;
+            compute_pipeline->rt_group_handles[i].handles[1] = i + 2;
          break;
       case VK_SHADER_GROUP_SHADER_MAX_ENUM_KHR:
          unreachable("VK_SHADER_GROUP_SHADER_MAX_ENUM_KHR");
@@ -1906,15 +1912,16 @@ radv_GetRayTracingShaderGroupHandlesKHR(VkDevice device, VkPipeline _pipeline, u
                                         uint32_t groupCount, size_t dataSize, void *pData)
 {
    RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
+   struct radv_compute_pipeline *compute_pipeline = radv_pipeline_to_compute(pipeline);
    char *data = pData;
 
-   STATIC_ASSERT(sizeof(*pipeline->compute.rt_group_handles) <= RADV_RT_HANDLE_SIZE);
+   STATIC_ASSERT(sizeof(*compute_pipeline->rt_group_handles) <= RADV_RT_HANDLE_SIZE);
 
    memset(data, 0, groupCount * RADV_RT_HANDLE_SIZE);
 
    for (uint32_t i = 0; i < groupCount; ++i) {
-      memcpy(data + i * RADV_RT_HANDLE_SIZE, &pipeline->compute.rt_group_handles[firstGroup + i],
-             sizeof(*pipeline->compute.rt_group_handles));
+      memcpy(data + i * RADV_RT_HANDLE_SIZE, &compute_pipeline->rt_group_handles[firstGroup + i],
+             sizeof(*compute_pipeline->rt_group_handles));
    }
 
    return VK_SUCCESS;
@@ -1925,8 +1932,9 @@ radv_GetRayTracingShaderGroupStackSizeKHR(VkDevice device, VkPipeline _pipeline,
                                           VkShaderGroupShaderKHR groupShader)
 {
    RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
+   struct radv_compute_pipeline *compute_pipeline = radv_pipeline_to_compute(pipeline);
    const struct radv_pipeline_shader_stack_size *stack_size =
-      &pipeline->compute.rt_stack_sizes[group];
+      &compute_pipeline->rt_stack_sizes[group];
 
    if (groupShader == VK_SHADER_GROUP_SHADER_ANY_HIT_KHR ||
        groupShader == VK_SHADER_GROUP_SHADER_INTERSECTION_KHR)
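
radv_GetRayTracingShaderGroupHandlesKHR above backs the standard entry point an application calls when filling a shader binding table. A hedged usage sketch from the API side (device, physical_device, rt_pipeline and group_count are assumed to exist; error handling elided):

VkPhysicalDeviceRayTracingPipelinePropertiesKHR rt_props = {
   .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR,
};
VkPhysicalDeviceProperties2 props2 = {
   .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
   .pNext = &rt_props,
};
vkGetPhysicalDeviceProperties2(physical_device, &props2);

/* One handle per group, each shaderGroupHandleSize bytes. */
size_t data_size = (size_t)group_count * rt_props.shaderGroupHandleSize;
uint8_t *handles = malloc(data_size);
vkGetRayTracingShaderGroupHandlesKHR(device, rt_pipeline, 0, group_count,
                                     data_size, handles);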

src/amd/vulkan/radv_private.h

@@ -1410,11 +1410,11 @@ struct radv_cmd_state {
    uint32_t prefetch_L2_mask;
 
-   struct radv_pipeline *pipeline;
-   struct radv_pipeline *emitted_pipeline;
-   struct radv_pipeline *compute_pipeline;
-   struct radv_pipeline *emitted_compute_pipeline;
-   struct radv_pipeline *rt_pipeline; /* emitted = emitted_compute_pipeline */
+   struct radv_graphics_pipeline *pipeline;
+   struct radv_graphics_pipeline *emitted_pipeline;
+   struct radv_compute_pipeline *compute_pipeline;
+   struct radv_compute_pipeline *emitted_compute_pipeline;
+   struct radv_compute_pipeline *rt_pipeline; /* emitted = emitted_compute_pipeline */
    struct vk_framebuffer *framebuffer;
    struct radv_render_pass *pass;
    const struct radv_subpass *subpass;
@@ -1915,7 +1915,6 @@ struct radv_pipeline {
    enum radv_pipeline_type type;
 
    struct radv_device *device;
-   struct radv_dynamic_state dynamic_state;
 
    struct radv_pipeline_slab *slab;
    struct radeon_winsys_bo *slab_bo;
@@ -1929,74 +1928,7 @@ struct radv_pipeline {
    uint32_t ctx_cs_hash;
    struct radeon_cmdbuf ctx_cs;
 
-   uint32_t binding_stride[MAX_VBS];
-
-   uint8_t attrib_bindings[MAX_VERTEX_ATTRIBS];
-   uint32_t attrib_ends[MAX_VERTEX_ATTRIBS];
-   uint32_t attrib_index_offset[MAX_VERTEX_ATTRIBS];
-
-   bool use_per_attribute_vb_descs;
-   bool can_use_simple_input;
-   uint8_t last_vertex_attrib_bit;
-   uint8_t next_vertex_stage : 8;
-   uint32_t vb_desc_usage_mask;
-   uint32_t vb_desc_alloc_size;
-
    uint32_t user_data_0[MESA_VULKAN_SHADER_STAGES];
-   union {
-      struct {
-         uint64_t dynamic_states;
-         struct radv_multisample_state ms;
-         struct radv_binning_state binning;
-         struct radv_vrs_state vrs;
-         uint32_t spi_baryc_cntl;
-         unsigned esgs_ring_size;
-         unsigned gsvs_ring_size;
-         uint32_t vtx_base_sgpr;
-         struct radv_ia_multi_vgt_param_helpers ia_multi_vgt_param;
-         uint8_t vtx_emit_num;
-         bool uses_drawid;
-         bool uses_baseinstance;
-         bool can_use_guardband;
-         uint64_t needed_dynamic_state;
-         bool disable_out_of_order_rast_for_occlusion;
-         unsigned tess_patch_control_points;
-         unsigned pa_su_sc_mode_cntl;
-         unsigned db_depth_control;
-         unsigned pa_cl_clip_cntl;
-         unsigned cb_color_control;
-         bool uses_dynamic_stride;
-         bool uses_conservative_overestimate;
-         bool negative_one_to_one;
-
-         /* Used for rbplus */
-         uint32_t col_format;
-         uint32_t cb_target_mask;
-
-         /* Whether the pipeline uses NGG (GFX10+). */
-         bool is_ngg;
-         bool has_ngg_culling;
-
-         /* Last pre-PS API stage */
-         gl_shader_stage last_vgt_api_stage;
-
-         /* Whether the pipeline forces per-vertex VRS (GFX10.3+). */
-         bool force_vrs_per_vertex;
-      } graphics;
-      struct {
-         struct radv_pipeline_group_handle *rt_group_handles;
-         struct radv_pipeline_shader_stack_size *rt_stack_sizes;
-         bool dynamic_stack_size;
-         uint32_t group_count;
-         bool cs_regalloc_hang_bug;
-      } compute;
-      struct {
-         unsigned stage_count;
-         VkPipelineShaderStageCreateInfo *stages;
-         unsigned group_count;
-         VkRayTracingShaderGroupCreateInfoKHR *groups;
-      } library;
-   };
 
    unsigned max_waves;
    unsigned scratch_bytes_per_wave;
@@ -2012,6 +1944,94 @@ struct radv_pipeline {
    uint32_t dynamic_offset_count;
 };
 
+struct radv_graphics_pipeline {
+   struct radv_pipeline base;
+
+   struct radv_dynamic_state dynamic_state;
+
+   uint64_t dynamic_states;
+   struct radv_multisample_state ms;
+   struct radv_binning_state binning;
+   struct radv_vrs_state vrs;
+   uint32_t spi_baryc_cntl;
+   unsigned esgs_ring_size;
+   unsigned gsvs_ring_size;
+   uint32_t vtx_base_sgpr;
+   struct radv_ia_multi_vgt_param_helpers ia_multi_vgt_param;
+   uint8_t vtx_emit_num;
+   uint64_t needed_dynamic_state;
+   unsigned tess_patch_control_points;
+   unsigned pa_su_sc_mode_cntl;
+   unsigned db_depth_control;
+   unsigned pa_cl_clip_cntl;
+   unsigned cb_color_control;
+
+   uint32_t binding_stride[MAX_VBS];
+
+   uint8_t attrib_bindings[MAX_VERTEX_ATTRIBS];
+   uint32_t attrib_ends[MAX_VERTEX_ATTRIBS];
+   uint32_t attrib_index_offset[MAX_VERTEX_ATTRIBS];
+
+   uint8_t last_vertex_attrib_bit;
+   uint8_t next_vertex_stage : 8;
+   uint32_t vb_desc_usage_mask;
+   uint32_t vb_desc_alloc_size;
+
+   /* Last pre-PS API stage */
+   gl_shader_stage last_vgt_api_stage;
+
+   /* Used for rbplus */
+   uint32_t col_format;
+   uint32_t cb_target_mask;
+
+   bool disable_out_of_order_rast_for_occlusion;
+   bool uses_drawid;
+   bool uses_baseinstance;
+   bool can_use_guardband;
+   bool uses_dynamic_stride;
+   bool uses_conservative_overestimate;
+   bool negative_one_to_one;
+   bool use_per_attribute_vb_descs;
+   bool can_use_simple_input;
+
+   /* Whether the pipeline forces per-vertex VRS (GFX10.3+). */
+   bool force_vrs_per_vertex;
+
+   /* Whether the pipeline uses NGG (GFX10+). */
+   bool is_ngg;
+   bool has_ngg_culling;
+};
+
+struct radv_compute_pipeline {
+   struct radv_pipeline base;
+
+   bool cs_regalloc_hang_bug;
+
+   /* Raytracing */
+   struct radv_pipeline_group_handle *rt_group_handles;
+   struct radv_pipeline_shader_stack_size *rt_stack_sizes;
+   bool dynamic_stack_size;
+   uint32_t group_count;
+};
+
+struct radv_library_pipeline {
+   struct radv_pipeline base;
+
+   unsigned stage_count;
+   VkPipelineShaderStageCreateInfo *stages;
+   unsigned group_count;
+   VkRayTracingShaderGroupCreateInfoKHR *groups;
+};
+
+#define RADV_DECL_PIPELINE_DOWNCAST(pipe_type, pipe_enum)         \
+   static inline struct radv_##pipe_type##_pipeline *             \
+   radv_pipeline_to_##pipe_type(struct radv_pipeline *pipeline)   \
+   {                                                              \
+      assert(pipeline->type == pipe_enum);                        \
+      return (struct radv_##pipe_type##_pipeline *)pipeline;      \
+   }
+
+RADV_DECL_PIPELINE_DOWNCAST(graphics, RADV_PIPELINE_GRAPHICS)
+RADV_DECL_PIPELINE_DOWNCAST(compute, RADV_PIPELINE_COMPUTE)
+RADV_DECL_PIPELINE_DOWNCAST(library, RADV_PIPELINE_LIBRARY)
 
 struct radv_pipeline_stage {
    gl_shader_stage stage;
@@ -2037,30 +2057,30 @@ struct radv_pipeline_stage {
 };
 
 static inline bool
-radv_pipeline_has_gs(const struct radv_pipeline *pipeline)
+radv_pipeline_has_gs(const struct radv_graphics_pipeline *pipeline)
 {
-   return pipeline->shaders[MESA_SHADER_GEOMETRY] ? true : false;
+   return pipeline->base.shaders[MESA_SHADER_GEOMETRY] ? true : false;
 }
 
 static inline bool
-radv_pipeline_has_tess(const struct radv_pipeline *pipeline)
+radv_pipeline_has_tess(const struct radv_graphics_pipeline *pipeline)
 {
-   return pipeline->shaders[MESA_SHADER_TESS_CTRL] ? true : false;
+   return pipeline->base.shaders[MESA_SHADER_TESS_CTRL] ? true : false;
 }
 
 static inline bool
-radv_pipeline_has_mesh(const struct radv_pipeline *pipeline)
+radv_pipeline_has_mesh(const struct radv_graphics_pipeline *pipeline)
 {
-   return !!pipeline->shaders[MESA_SHADER_MESH];
+   return !!pipeline->base.shaders[MESA_SHADER_MESH];
 }
 
 static inline bool
-radv_pipeline_has_task(const struct radv_pipeline *pipeline)
+radv_pipeline_has_task(const struct radv_graphics_pipeline *pipeline)
 {
-   return !!pipeline->shaders[MESA_SHADER_TASK];
+   return !!pipeline->base.shaders[MESA_SHADER_TASK];
 }
 
-bool radv_pipeline_has_ngg_passthrough(const struct radv_pipeline *pipeline);
+bool radv_pipeline_has_ngg_passthrough(const struct radv_graphics_pipeline *pipeline);
 
 bool radv_pipeline_has_gs_copy_shader(const struct radv_pipeline *pipeline);
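
As a usage note for the helpers above: a call site checks the type tag (the downcast macro additionally asserts it), then reads derived state directly and shared state through base. An illustrative sketch; example_bind and set_patch_control_points are hypothetical, not driver functions:

static void
example_bind(struct radv_pipeline *pipeline)
{
   if (pipeline->type == RADV_PIPELINE_GRAPHICS) {
      struct radv_graphics_pipeline *gfx = radv_pipeline_to_graphics(pipeline);

      /* Graphics-only state moved onto the derived struct... */
      if (radv_pipeline_has_tess(gfx))
         set_patch_control_points(gfx->tess_patch_control_points);

      /* ...while shared state is reached through the embedded base. */
      struct radv_device *device = gfx->base.device;
      (void)device;
   }
}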

src/amd/vulkan/radv_shader.h

@@ -532,6 +532,8 @@ void radv_nir_lower_abi(nir_shader *shader, enum amd_gfx_level gfx_level,
 void radv_init_shader_arenas(struct radv_device *device);
 void radv_destroy_shader_arenas(struct radv_device *device);
 
+struct radv_pipeline_shader_stack_size;
+
 VkResult radv_create_shaders(struct radv_pipeline *pipeline,
                              struct radv_pipeline_layout *pipeline_layout,
                              struct radv_device *device, struct radv_pipeline_cache *cache,
@@ -539,7 +541,10 @@ VkResult radv_create_shaders(struct radv_pipeline *pipeline,
                              const VkPipelineShaderStageCreateInfo *pStages,
                              uint32_t stageCount,
                              const VkPipelineCreateFlags flags, const uint8_t *custom_hash,
-                             const VkPipelineCreationFeedbackCreateInfo *creation_feedback);
+                             const VkPipelineCreationFeedbackCreateInfo *creation_feedback,
+                             struct radv_pipeline_shader_stack_size **stack_sizes,
+                             uint32_t *num_stack_sizes,
+                             gl_shader_stage *last_vgt_api_stage);
 
 struct radv_shader_args;

src/amd/vulkan/si_cmd_buffer.c

@@ -794,13 +794,13 @@ si_get_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer, bool instanced_dra
    bool ia_switch_on_eop = false;
    bool ia_switch_on_eoi = false;
    bool partial_vs_wave = false;
-   bool partial_es_wave = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.partial_es_wave;
+   bool partial_es_wave = cmd_buffer->state.pipeline->ia_multi_vgt_param.partial_es_wave;
    bool multi_instances_smaller_than_primgroup;
    struct radv_prim_vertex_count prim_vertex_count = prim_size_table[topology];
 
    if (radv_pipeline_has_tess(cmd_buffer->state.pipeline)) {
       if (topology == V_008958_DI_PT_PATCH) {
-         prim_vertex_count.min = cmd_buffer->state.pipeline->graphics.tess_patch_control_points;
+         prim_vertex_count.min = cmd_buffer->state.pipeline->tess_patch_control_points;
          prim_vertex_count.incr = 1;
       }
    }
@@ -808,12 +808,12 @@ si_get_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer, bool instanced_dra
       multi_instances_smaller_than_primgroup = indirect_draw;
       if (!multi_instances_smaller_than_primgroup && instanced_draw) {
          uint32_t num_prims = radv_prims_for_vertices(&prim_vertex_count, draw_vertex_count);
-         if (num_prims < cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.primgroup_size)
+         if (num_prims < cmd_buffer->state.pipeline->ia_multi_vgt_param.primgroup_size)
             multi_instances_smaller_than_primgroup = true;
       }
 
-      ia_switch_on_eoi = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.ia_switch_on_eoi;
-      partial_vs_wave = cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.partial_vs_wave;
+      ia_switch_on_eoi = cmd_buffer->state.pipeline->ia_multi_vgt_param.ia_switch_on_eoi;
+      partial_vs_wave = cmd_buffer->state.pipeline->ia_multi_vgt_param.partial_vs_wave;
 
       if (gfx_level >= GFX7) {
          /* WD_SWITCH_ON_EOP has no effect on GPUs with less than
@@ -896,7 +896,7 @@ si_get_ia_multi_vgt_param(struct radv_cmd_buffer *cmd_buffer, bool instanced_dra
             partial_vs_wave = true;
    }
 
-   return cmd_buffer->state.pipeline->graphics.ia_multi_vgt_param.base |
+   return cmd_buffer->state.pipeline->ia_multi_vgt_param.base |
          S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) | S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
          S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
          S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |