lavapipe: implement EXT_graphics_pipeline_library

Reviewed-by: Dave Airlie <airlied@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/15636>
Authored by Mike Blumenkrantz on 2022-03-29 08:02:30 -04:00; committed by Marge Bot.
parent 22fd70ca81
commit d4d5a7abba
5 changed files with 474 additions and 200 deletions

View File

@@ -267,10 +267,16 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreatePipelineLayout(
VK_OBJECT_TYPE_PIPELINE_LAYOUT); VK_OBJECT_TYPE_PIPELINE_LAYOUT);
layout->ref_cnt = 1; layout->ref_cnt = 1;
layout->num_sets = pCreateInfo->setLayoutCount; layout->num_sets = pCreateInfo->setLayoutCount;
if (pCreateInfo->flags & VK_PIPELINE_LAYOUT_CREATE_INDEPENDENT_SETS_BIT_EXT)
layout->independent_sets = true;
for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) { for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
LVP_FROM_HANDLE(lvp_descriptor_set_layout, set_layout, LVP_FROM_HANDLE(lvp_descriptor_set_layout, set_layout,
pCreateInfo->pSetLayouts[set]); pCreateInfo->pSetLayouts[set]);
if (layout->independent_sets && (!layout->num_sets || !set_layout)) {
layout->set[set].layout = NULL;
continue;
}
layout->set[set].layout = set_layout; layout->set[set].layout = set_layout;
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) { for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
layout->stage[i].uniform_block_size += set_layout->stage[i].uniform_block_size; layout->stage[i].uniform_block_size += set_layout->stage[i].uniform_block_size;
@@ -300,7 +306,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreatePipelineLayout(
uint16_t sampler_view_count = 0; uint16_t sampler_view_count = 0;
uint16_t image_count = 0; uint16_t image_count = 0;
for (unsigned j = 0; j < layout->num_sets; j++) { for (unsigned j = 0; j < layout->num_sets; j++) {
if (layout->set[j].layout->shader_stages & array[i]) { if (layout->set[j].layout && layout->set[j].layout->shader_stages & array[i]) {
const_buffer_count += layout->set[j].layout->stage[i].const_buffer_count; const_buffer_count += layout->set[j].layout->stage[i].const_buffer_count;
shader_buffer_count += layout->set[j].layout->stage[i].shader_buffer_count; shader_buffer_count += layout->set[j].layout->stage[i].shader_buffer_count;
sampler_count += layout->set[j].layout->stage[i].sampler_count; sampler_count += layout->set[j].layout->stage[i].sampler_count;

View File

@@ -1420,7 +1420,11 @@ static void handle_descriptor_sets(struct vk_cmd_queue_entry *cmd,
} }
for (i = 0; i < bds->descriptor_set_count; i++) { for (i = 0; i < bds->descriptor_set_count; i++) {
if (!layout->set[bds->first_set + i].layout)
continue;
const struct lvp_descriptor_set *set = lvp_descriptor_set_from_handle(bds->descriptor_sets[i]); const struct lvp_descriptor_set *set = lvp_descriptor_set_from_handle(bds->descriptor_sets[i]);
if (!set)
continue;
/* verify that there's enough total offsets */ /* verify that there's enough total offsets */
assert(set->layout->dynamic_offset_count <= dyn_info.dynamic_offset_count); assert(set->layout->dynamic_offset_count <= dyn_info.dynamic_offset_count);
/* verify there's either no offsets... */ /* verify there's either no offsets... */

View File

@@ -106,6 +106,8 @@ static nir_ssa_def *lower_vri_intrin_vri(struct nir_builder *b,
return nir_imm_ivec2(b, 0, 0); return nir_imm_ivec2(b, 0, 0);
for (unsigned s = 0; s < desc_set_idx; s++) { for (unsigned s = 0; s < desc_set_idx; s++) {
if (!layout->set[s].layout)
continue;
if (is_ubo) if (is_ubo)
value += layout->set[s].layout->stage[b->shader->info.stage].const_buffer_count; value += layout->set[s].layout->stage[b->shader->info.stage].const_buffer_count;
else else
@@ -165,6 +167,8 @@ lower_vri_instr_tex_deref(nir_tex_instr *tex,
struct lvp_descriptor_set_binding_layout *binding = &layout->set[desc_set_idx].layout->binding[binding_idx]; struct lvp_descriptor_set_binding_layout *binding = &layout->set[desc_set_idx].layout->binding[binding_idx];
nir_tex_instr_remove_src(tex, deref_src_idx); nir_tex_instr_remove_src(tex, deref_src_idx);
for (unsigned s = 0; s < desc_set_idx; s++) { for (unsigned s = 0; s < desc_set_idx; s++) {
if (!layout->set[s].layout)
continue;
if (deref_src_type == nir_tex_src_sampler_deref) if (deref_src_type == nir_tex_src_sampler_deref)
value += layout->set[s].layout->stage[stage].sampler_count; value += layout->set[s].layout->stage[stage].sampler_count;
else else
@@ -269,20 +273,29 @@ void lvp_lower_pipeline_layout(const struct lvp_device *device,
var->data.descriptor_set = 0; var->data.descriptor_set = 0;
if (base_type == GLSL_TYPE_SAMPLER || base_type == GLSL_TYPE_TEXTURE) { if (base_type == GLSL_TYPE_SAMPLER || base_type == GLSL_TYPE_TEXTURE) {
if (binding->type == VK_DESCRIPTOR_TYPE_SAMPLER) { if (binding->type == VK_DESCRIPTOR_TYPE_SAMPLER) {
for (unsigned s = 0; s < desc_set_idx; s++) for (unsigned s = 0; s < desc_set_idx; s++) {
if (!layout->set[s].layout)
continue;
value += layout->set[s].layout->stage[shader->info.stage].sampler_count; value += layout->set[s].layout->stage[shader->info.stage].sampler_count;
}
value += binding->stage[shader->info.stage].sampler_index; value += binding->stage[shader->info.stage].sampler_index;
} else { } else {
for (unsigned s = 0; s < desc_set_idx; s++) for (unsigned s = 0; s < desc_set_idx; s++) {
if (!layout->set[s].layout)
continue;
value += layout->set[s].layout->stage[shader->info.stage].sampler_view_count; value += layout->set[s].layout->stage[shader->info.stage].sampler_view_count;
}
value += binding->stage[shader->info.stage].sampler_view_index; value += binding->stage[shader->info.stage].sampler_view_index;
} }
var->data.binding = value; var->data.binding = value;
} }
if (base_type == GLSL_TYPE_IMAGE) { if (base_type == GLSL_TYPE_IMAGE) {
var->data.descriptor_set = 0; var->data.descriptor_set = 0;
for (unsigned s = 0; s < desc_set_idx; s++) for (unsigned s = 0; s < desc_set_idx; s++) {
if (!layout->set[s].layout)
continue;
value += layout->set[s].layout->stage[shader->info.stage].image_count; value += layout->set[s].layout->stage[shader->info.stage].image_count;
}
value += binding->stage[shader->info.stage].image_index; value += binding->stage[shader->info.stage].image_index;
var->data.binding = value; var->data.binding = value;
} }

View File

@@ -69,7 +69,8 @@ VKAPI_ATTR void VKAPI_CALL lvp_DestroyPipeline(
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) for (unsigned i = 0; i < MESA_SHADER_STAGES; i++)
ralloc_free(pipeline->pipeline_nir[i]); ralloc_free(pipeline->pipeline_nir[i]);
lvp_pipeline_layout_unref(device, pipeline->layout); if (pipeline->layout)
lvp_pipeline_layout_unref(device, pipeline->layout);
ralloc_free(pipeline->mem_ctx); ralloc_free(pipeline->mem_ctx);
vk_object_base_finish(&pipeline->base); vk_object_base_finish(&pipeline->base);
@@ -232,17 +233,64 @@ deep_copy_color_blend_state(void *mem_ctx,
static VkResult static VkResult
deep_copy_dynamic_state(void *mem_ctx, deep_copy_dynamic_state(void *mem_ctx,
VkPipelineDynamicStateCreateInfo *dst, VkPipelineDynamicStateCreateInfo *dst,
const VkPipelineDynamicStateCreateInfo *src) const VkPipelineDynamicStateCreateInfo *src,
VkGraphicsPipelineLibraryFlagsEXT stages)
{ {
dst->sType = src->sType; dst->sType = src->sType;
dst->pNext = NULL; dst->pNext = NULL;
dst->flags = src->flags; dst->flags = src->flags;
VkDynamicState *states = (void*)dst->pDynamicStates;
for (unsigned i = 0; i < src->dynamicStateCount; i++) {
switch (src->pDynamicStates[i]) {
case VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT:
case VK_DYNAMIC_STATE_VERTEX_INPUT_EXT:
if (stages & VK_GRAPHICS_PIPELINE_LIBRARY_VERTEX_INPUT_INTERFACE_BIT_EXT)
states[dst->dynamicStateCount++] = src->pDynamicStates[i];
break;
LVP_PIPELINE_DUP(dst->pDynamicStates, case VK_DYNAMIC_STATE_VIEWPORT:
src->pDynamicStates, case VK_DYNAMIC_STATE_SCISSOR:
VkDynamicState, case VK_DYNAMIC_STATE_LINE_WIDTH:
src->dynamicStateCount); case VK_DYNAMIC_STATE_LINE_STIPPLE_EXT:
dst->dynamicStateCount = src->dynamicStateCount; case VK_DYNAMIC_STATE_CULL_MODE_EXT:
case VK_DYNAMIC_STATE_FRONT_FACE_EXT:
case VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT:
case VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT:
case VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT:
case VK_DYNAMIC_STATE_PATCH_CONTROL_POINTS_EXT:
case VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT:
case VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE_EXT:
if (stages & VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT)
states[dst->dynamicStateCount++] = src->pDynamicStates[i];
break;
case VK_DYNAMIC_STATE_DEPTH_BIAS:
case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
case VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT:
case VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT:
case VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT:
case VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT:
case VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE_EXT:
case VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT:
case VK_DYNAMIC_STATE_STENCIL_OP_EXT:
if (stages & VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT)
states[dst->dynamicStateCount++] = src->pDynamicStates[i];
break;
case VK_DYNAMIC_STATE_LOGIC_OP_EXT:
case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
case VK_DYNAMIC_STATE_COLOR_WRITE_ENABLE_EXT:
if (stages & VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_OUTPUT_INTERFACE_BIT_EXT)
states[dst->dynamicStateCount++] = src->pDynamicStates[i];
break;
default:
unreachable("unknown dynamic state!");
}
}
assert(dst->dynamicStateCount <= 37);
return VK_SUCCESS; return VK_SUCCESS;
} }
@@ -278,139 +326,190 @@ deep_copy_rasterization_state(void *mem_ctx,
static VkResult static VkResult
deep_copy_graphics_create_info(void *mem_ctx, deep_copy_graphics_create_info(void *mem_ctx,
VkGraphicsPipelineCreateInfo *dst, VkGraphicsPipelineCreateInfo *dst,
const VkGraphicsPipelineCreateInfo *src) const VkGraphicsPipelineCreateInfo *src,
VkGraphicsPipelineLibraryFlagsEXT shaders)
{ {
int i; int i;
VkResult result; VkResult result;
VkPipelineShaderStageCreateInfo *stages; VkPipelineShaderStageCreateInfo *stages;
VkPipelineVertexInputStateCreateInfo *vertex_input; VkPipelineVertexInputStateCreateInfo *vertex_input;
VkPipelineRasterizationStateCreateInfo *rasterization_state; VkPipelineRasterizationStateCreateInfo *rasterization_state;
const VkPipelineRenderingCreateInfoKHR *rp_info = const VkPipelineRenderingCreateInfoKHR *rp_info = NULL;
vk_get_pipeline_rendering_create_info(src);
dst->sType = src->sType; dst->sType = src->sType;
dst->pNext = NULL; dst->pNext = NULL;
dst->flags = src->flags; dst->flags = src->flags;
dst->layout = src->layout; dst->layout = src->layout;
if (shaders & (VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT | VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_OUTPUT_INTERFACE_BIT_EXT)) {
assert(!dst->renderPass || !src->renderPass || dst->renderPass == src->renderPass);
assert(!dst->subpass || !src->subpass || dst->subpass == src->subpass);
dst->subpass = src->subpass;
dst->renderPass = src->renderPass;
rp_info = vk_get_pipeline_rendering_create_info(src);
}
dst->basePipelineHandle = src->basePipelineHandle; dst->basePipelineHandle = src->basePipelineHandle;
dst->basePipelineIndex = src->basePipelineIndex; dst->basePipelineIndex = src->basePipelineIndex;
/* pStages */ /* pStages */
VkShaderStageFlags stages_present = 0; VkShaderStageFlags stages_present = 0;
dst->stageCount = src->stageCount; stages = (void*)dst->pStages;
stages = ralloc_array(mem_ctx, VkPipelineShaderStageCreateInfo, dst->stageCount); if (!stages)
for (i = 0 ; i < dst->stageCount; i++) { stages = ralloc_array(mem_ctx, VkPipelineShaderStageCreateInfo, 5 /* max number of gfx stages */);
result = deep_copy_shader_stage(mem_ctx, &stages[i], &src->pStages[i]); for (i = 0 ; i < src->stageCount; i++) {
if (shaders & VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT) {
/* only vertex stages allowed */
if (!(src->pStages[i].stage & BITFIELD_MASK(VK_SHADER_STAGE_FRAGMENT_BIT)))
continue;
} else if (shaders & VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT) {
/* only fragment stages allowed */
if (src->pStages[i].stage != VK_SHADER_STAGE_FRAGMENT_BIT)
continue;
} else {
/* other partials don't consume shaders */
continue;
}
result = deep_copy_shader_stage(mem_ctx, &stages[dst->stageCount++], &src->pStages[i]);
if (result != VK_SUCCESS) if (result != VK_SUCCESS)
return result; return result;
stages_present |= src->pStages[i].stage; stages_present |= src->pStages[i].stage;
} }
dst->pStages = stages; dst->pStages = stages;
/* pVertexInputState */ if (shaders & VK_GRAPHICS_PIPELINE_LIBRARY_VERTEX_INPUT_INTERFACE_BIT_EXT) {
if (!dynamic_state_contains(src->pDynamicState, VK_DYNAMIC_STATE_VERTEX_INPUT_EXT)) { /* pVertexInputState */
vertex_input = ralloc(mem_ctx, VkPipelineVertexInputStateCreateInfo); if (!dynamic_state_contains(src->pDynamicState, VK_DYNAMIC_STATE_VERTEX_INPUT_EXT)) {
result = deep_copy_vertex_input_state(mem_ctx, vertex_input, vertex_input = ralloc(mem_ctx, VkPipelineVertexInputStateCreateInfo);
src->pVertexInputState); result = deep_copy_vertex_input_state(mem_ctx, vertex_input,
if (result != VK_SUCCESS) src->pVertexInputState);
return result; if (result != VK_SUCCESS)
dst->pVertexInputState = vertex_input; return result;
} else dst->pVertexInputState = vertex_input;
dst->pVertexInputState = NULL; } else
dst->pVertexInputState = NULL;
/* pInputAssemblyState */ /* pInputAssemblyState */
LVP_PIPELINE_DUP(dst->pInputAssemblyState, LVP_PIPELINE_DUP(dst->pInputAssemblyState,
src->pInputAssemblyState, src->pInputAssemblyState,
VkPipelineInputAssemblyStateCreateInfo, VkPipelineInputAssemblyStateCreateInfo,
1);
/* pTessellationState */
if (src->pTessellationState &&
(stages_present & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) ==
(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) {
LVP_PIPELINE_DUP(dst->pTessellationState,
src->pTessellationState,
VkPipelineTessellationStateCreateInfo,
1); 1);
} }
/* pViewportState */ bool rasterization_disabled = false;
bool rasterization_disabled = !dynamic_state_contains(src->pDynamicState, VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT) && if (shaders & VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT) {
src->pRasterizationState->rasterizerDiscardEnable; /* pTessellationState */
if (src->pViewportState && !rasterization_disabled) { if (src->pTessellationState &&
VkPipelineViewportStateCreateInfo *viewport_state; (stages_present & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) ==
viewport_state = ralloc(mem_ctx, VkPipelineViewportStateCreateInfo); (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) {
if (!viewport_state) LVP_PIPELINE_DUP(dst->pTessellationState,
return VK_ERROR_OUT_OF_HOST_MEMORY; src->pTessellationState,
deep_copy_viewport_state(mem_ctx, src->pDynamicState, VkPipelineTessellationStateCreateInfo,
viewport_state, src->pViewportState); 1);
dst->pViewportState = viewport_state;
} else
dst->pViewportState = NULL;
/* pRasterizationState */
rasterization_state = ralloc(mem_ctx, VkPipelineRasterizationStateCreateInfo);
if (!rasterization_state)
return VK_ERROR_OUT_OF_HOST_MEMORY;
deep_copy_rasterization_state(mem_ctx, rasterization_state, src->pRasterizationState);
dst->pRasterizationState = rasterization_state;
/* pMultisampleState */
if (src->pMultisampleState && !rasterization_disabled) {
VkPipelineMultisampleStateCreateInfo* ms_state;
ms_state = ralloc_size(mem_ctx, sizeof(VkPipelineMultisampleStateCreateInfo) + sizeof(VkSampleMask));
if (!ms_state)
return VK_ERROR_OUT_OF_HOST_MEMORY;
/* does samplemask need deep copy? */
memcpy(ms_state, src->pMultisampleState, sizeof(VkPipelineMultisampleStateCreateInfo));
if (src->pMultisampleState->pSampleMask) {
VkSampleMask *sample_mask = (VkSampleMask *)(ms_state + 1);
sample_mask[0] = src->pMultisampleState->pSampleMask[0];
ms_state->pSampleMask = sample_mask;
} }
dst->pMultisampleState = ms_state;
} else
dst->pMultisampleState = NULL;
/* pDepthStencilState */ /* pViewportState */
if (src->pDepthStencilState && !rasterization_disabled && rasterization_disabled = !dynamic_state_contains(src->pDynamicState, VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT) &&
(rp_info->depthAttachmentFormat != VK_FORMAT_UNDEFINED || src->pRasterizationState->rasterizerDiscardEnable;
rp_info->stencilAttachmentFormat != VK_FORMAT_UNDEFINED)) { if (src->pViewportState && !rasterization_disabled) {
LVP_PIPELINE_DUP(dst->pDepthStencilState, VkPipelineViewportStateCreateInfo *viewport_state;
src->pDepthStencilState, viewport_state = ralloc(mem_ctx, VkPipelineViewportStateCreateInfo);
VkPipelineDepthStencilStateCreateInfo, if (!viewport_state)
1); return VK_ERROR_OUT_OF_HOST_MEMORY;
} else deep_copy_viewport_state(mem_ctx, src->pDynamicState,
dst->pDepthStencilState = NULL; viewport_state, src->pViewportState);
dst->pViewportState = viewport_state;
} else
dst->pViewportState = NULL;
bool uses_color_att = false; /* pRasterizationState */
for (unsigned i = 0; i < rp_info->colorAttachmentCount; i++) { rasterization_state = ralloc(mem_ctx, VkPipelineRasterizationStateCreateInfo);
if (rp_info->pColorAttachmentFormats[i] != VK_FORMAT_UNDEFINED) { if (!rasterization_state)
uses_color_att = true; return VK_ERROR_OUT_OF_HOST_MEMORY;
break; deep_copy_rasterization_state(mem_ctx, rasterization_state, src->pRasterizationState);
} dst->pRasterizationState = rasterization_state;
} }
/* pColorBlendState */ if (shaders & VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT) {
if (src->pColorBlendState && !rasterization_disabled && uses_color_att) { assert(rp_info);
VkPipelineColorBlendStateCreateInfo* cb_state; /* pDepthStencilState */
if (src->pDepthStencilState && !rasterization_disabled &&
(rp_info->depthAttachmentFormat != VK_FORMAT_UNDEFINED ||
rp_info->stencilAttachmentFormat != VK_FORMAT_UNDEFINED)) {
LVP_PIPELINE_DUP(dst->pDepthStencilState,
src->pDepthStencilState,
VkPipelineDepthStencilStateCreateInfo,
1);
} else
dst->pDepthStencilState = NULL;
}
cb_state = ralloc(mem_ctx, VkPipelineColorBlendStateCreateInfo); if (shaders & VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_OUTPUT_INTERFACE_BIT_EXT) {
if (!cb_state) assert(rp_info);
return VK_ERROR_OUT_OF_HOST_MEMORY; /* pMultisampleState */
deep_copy_color_blend_state(mem_ctx, cb_state, src->pColorBlendState); if (src->pMultisampleState && !rasterization_disabled) {
dst->pColorBlendState = cb_state; VkPipelineMultisampleStateCreateInfo* ms_state;
} else ms_state = ralloc_size(mem_ctx, sizeof(VkPipelineMultisampleStateCreateInfo) + sizeof(VkSampleMask));
dst->pColorBlendState = NULL; if (!ms_state)
return VK_ERROR_OUT_OF_HOST_MEMORY;
/* does samplemask need deep copy? */
memcpy(ms_state, src->pMultisampleState, sizeof(VkPipelineMultisampleStateCreateInfo));
if (src->pMultisampleState->pSampleMask) {
VkSampleMask *sample_mask = (VkSampleMask *)(ms_state + 1);
sample_mask[0] = src->pMultisampleState->pSampleMask[0];
ms_state->pSampleMask = sample_mask;
}
dst->pMultisampleState = ms_state;
} else
dst->pMultisampleState = NULL;
bool uses_color_att = false;
for (unsigned i = 0; i < rp_info->colorAttachmentCount; i++) {
if (rp_info->pColorAttachmentFormats[i] != VK_FORMAT_UNDEFINED) {
uses_color_att = true;
break;
}
}
/* pColorBlendState */
if (src->pColorBlendState && !rasterization_disabled && uses_color_att) {
VkPipelineColorBlendStateCreateInfo* cb_state;
cb_state = ralloc(mem_ctx, VkPipelineColorBlendStateCreateInfo);
if (!cb_state)
return VK_ERROR_OUT_OF_HOST_MEMORY;
deep_copy_color_blend_state(mem_ctx, cb_state, src->pColorBlendState);
dst->pColorBlendState = cb_state;
if (!dynamic_state_contains(src->pDynamicState, VK_DYNAMIC_STATE_COLOR_WRITE_ENABLE_EXT)) {
const VkPipelineColorWriteCreateInfoEXT *cw_state =
vk_find_struct_const(src->pColorBlendState, PIPELINE_COLOR_WRITE_CREATE_INFO_EXT);
if (cw_state) {
assert(cw_state->attachmentCount <= src->pColorBlendState->attachmentCount);
for (unsigned i = 0; i < cw_state->attachmentCount; i++)
if (!cw_state->pColorWriteEnables[i]) {
VkPipelineColorBlendAttachmentState *att = (void*)&cb_state->pAttachments[i];
att->colorWriteMask = 0;
}
}
}
} else
dst->pColorBlendState = NULL;
}
if (src->pDynamicState) { if (src->pDynamicState) {
VkPipelineDynamicStateCreateInfo* dyn_state; VkPipelineDynamicStateCreateInfo* dyn_state;
/* pDynamicState */ /* pDynamicState */
dyn_state = ralloc(mem_ctx, VkPipelineDynamicStateCreateInfo); if (dst->pDynamicState) {
if (!dyn_state) dyn_state = (void*)dst->pDynamicState;
} else {
dyn_state = ralloc(mem_ctx, VkPipelineDynamicStateCreateInfo);
VkDynamicState *states = ralloc_array(mem_ctx, VkDynamicState, 37 /* current (1.3) number of dynamic states */);
dyn_state->pDynamicStates = states;
dyn_state->dynamicStateCount = 0;
}
if (!dyn_state || !dyn_state->pDynamicStates)
return VK_ERROR_OUT_OF_HOST_MEMORY; return VK_ERROR_OUT_OF_HOST_MEMORY;
deep_copy_dynamic_state(mem_ctx, dyn_state, src->pDynamicState); deep_copy_dynamic_state(mem_ctx, dyn_state, src->pDynamicState, shaders);
dst->pDynamicState = dyn_state; dst->pDynamicState = dyn_state;
} else } else
dst->pDynamicState = NULL; dst->pDynamicState = NULL;
@@ -669,16 +768,17 @@ optimize(nir_shader *nir)
static void static void
lvp_shader_compile_to_ir(struct lvp_pipeline *pipeline, lvp_shader_compile_to_ir(struct lvp_pipeline *pipeline,
struct vk_shader_module *module, uint32_t size,
const void *module,
const char *entrypoint_name, const char *entrypoint_name,
gl_shader_stage stage, gl_shader_stage stage,
const VkSpecializationInfo *spec_info) const VkSpecializationInfo *spec_info)
{ {
nir_shader *nir; nir_shader *nir;
const nir_shader_compiler_options *drv_options = pipeline->device->pscreen->get_compiler_options(pipeline->device->pscreen, PIPE_SHADER_IR_NIR, st_shader_stage_to_ptarget(stage)); const nir_shader_compiler_options *drv_options = pipeline->device->pscreen->get_compiler_options(pipeline->device->pscreen, PIPE_SHADER_IR_NIR, st_shader_stage_to_ptarget(stage));
uint32_t *spirv = (uint32_t *) module->data; const uint32_t *spirv = module;
assert(spirv[0] == SPIR_V_MAGIC_NUMBER); assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
assert(module->size % 4 == 0); assert(size % 4 == 0);
uint32_t num_spec_entries = 0; uint32_t num_spec_entries = 0;
struct nir_spirv_specialization *spec_entries = struct nir_spirv_specialization *spec_entries =
@@ -728,7 +828,7 @@ lvp_shader_compile_to_ir(struct lvp_pipeline *pipeline,
.shared_addr_format = nir_address_format_32bit_offset, .shared_addr_format = nir_address_format_32bit_offset,
}; };
nir = spirv_to_nir(spirv, module->size / 4, nir = spirv_to_nir(spirv, size / 4,
spec_entries, num_spec_entries, spec_entries, num_spec_entries,
stage, entrypoint_name, &spirv_options, drv_options); stage, entrypoint_name, &spirv_options, drv_options);
@@ -977,6 +1077,79 @@ lvp_pipeline_compile(struct lvp_pipeline *pipeline,
return VK_SUCCESS; return VK_SUCCESS;
} }
#ifndef NDEBUG
static bool
layouts_equal(const struct lvp_descriptor_set_layout *a, const struct lvp_descriptor_set_layout *b)
{
const uint8_t *pa = (const uint8_t*)a, *pb = (const uint8_t*)b;
uint32_t hash_start_offset = offsetof(struct lvp_descriptor_set_layout, ref_cnt) + sizeof(uint32_t);
uint32_t binding_offset = offsetof(struct lvp_descriptor_set_layout, binding);
/* base equal */
if (memcmp(pa + hash_start_offset, pb + hash_start_offset, binding_offset - hash_start_offset))
return false;
/* bindings equal */
if (a->binding_count != b->binding_count)
return false;
size_t binding_size = a->binding_count * sizeof(struct lvp_descriptor_set_binding_layout);
const struct lvp_descriptor_set_binding_layout *la = a->binding;
const struct lvp_descriptor_set_binding_layout *lb = b->binding;
if (memcmp(la, lb, binding_size)) {
for (unsigned i = 0; i < a->binding_count; i++) {
if (memcmp(&la[i], &lb[i], offsetof(struct lvp_descriptor_set_binding_layout, immutable_samplers)))
return false;
}
}
/* immutable sampler equal */
if (a->immutable_sampler_count != b->immutable_sampler_count)
return false;
if (a->immutable_sampler_count) {
size_t sampler_size = a->immutable_sampler_count * sizeof(struct lvp_sampler *);
if (memcmp(pa + binding_offset + binding_size, pb + binding_offset + binding_size, sampler_size)) {
struct lvp_sampler **sa = (struct lvp_sampler **)(pa + binding_offset);
struct lvp_sampler **sb = (struct lvp_sampler **)(pb + binding_offset);
for (unsigned i = 0; i < a->immutable_sampler_count; i++) {
if (memcmp(sa[i], sb[i], sizeof(struct lvp_sampler)))
return false;
}
}
}
return true;
}
#endif
static void
merge_layouts(struct lvp_pipeline *dst, struct lvp_pipeline_layout *src)
{
if (!src)
return;
if (!dst->layout) {
/* no layout created yet: copy onto ralloc ctx allocation for auto-free */
dst->layout = ralloc(dst->mem_ctx, struct lvp_pipeline_layout);
memcpy(dst->layout, src, sizeof(struct lvp_pipeline_layout));
return;
}
#ifndef NDEBUG
/* verify that layouts match */
const struct lvp_pipeline_layout *smaller = dst->layout->num_sets < src->num_sets ? dst->layout : src;
const struct lvp_pipeline_layout *bigger = smaller == dst->layout ? src : dst->layout;
for (unsigned i = 0; i < smaller->num_sets; i++) {
assert(!smaller->set[i].layout || !bigger->set[i].layout ||
!smaller->set[i].layout->binding_count || !bigger->set[i].layout->binding_count ||
smaller->set[i].layout == bigger->set[i].layout ||
layouts_equal(smaller->set[i].layout, bigger->set[i].layout));
}
#endif
for (unsigned i = 0; i < src->num_sets; i++) {
if (!dst->layout->set[i].layout)
dst->layout->set[i].layout = src->set[i].layout;
}
dst->layout->num_sets = MAX2(dst->layout->num_sets, src->num_sets);
dst->layout->push_constant_size += src->push_constant_size;
dst->layout->push_constant_stages |= src->push_constant_stages;
}
static VkResult static VkResult
lvp_graphics_pipeline_init(struct lvp_pipeline *pipeline, lvp_graphics_pipeline_init(struct lvp_pipeline *pipeline,
struct lvp_device *device, struct lvp_device *device,
@@ -984,93 +1157,114 @@ lvp_graphics_pipeline_init(struct lvp_pipeline *pipeline,
const VkGraphicsPipelineCreateInfo *pCreateInfo, const VkGraphicsPipelineCreateInfo *pCreateInfo,
const VkAllocationCallbacks *alloc) const VkAllocationCallbacks *alloc)
{ {
if (alloc == NULL) const VkGraphicsPipelineLibraryCreateInfoEXT *libinfo = vk_find_struct_const(pCreateInfo,
alloc = &device->vk.alloc; GRAPHICS_PIPELINE_LIBRARY_CREATE_INFO_EXT);
pipeline->device = device; const VkPipelineLibraryCreateInfoKHR *libstate = vk_find_struct_const(pCreateInfo,
pipeline->layout = lvp_pipeline_layout_from_handle(pCreateInfo->layout); PIPELINE_LIBRARY_CREATE_INFO_KHR);
lvp_pipeline_layout_ref(pipeline->layout); const VkGraphicsPipelineLibraryFlagsEXT layout_stages = VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT |
pipeline->force_min_sample = false; VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT;
if (libinfo)
pipeline->stages = libinfo->flags;
else if (!libstate)
pipeline->stages = VK_GRAPHICS_PIPELINE_LIBRARY_VERTEX_INPUT_INTERFACE_BIT_EXT |
VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT |
VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT |
VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_OUTPUT_INTERFACE_BIT_EXT;
pipeline->mem_ctx = ralloc_context(NULL); pipeline->mem_ctx = ralloc_context(NULL);
/* recreate createinfo */
deep_copy_graphics_create_info(pipeline->mem_ctx, &pipeline->graphics_create_info, pCreateInfo);
pipeline->is_compute_pipeline = false;
if (pCreateInfo->flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR) if (pCreateInfo->flags & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR)
pipeline->library = true; pipeline->library = true;
if (pipeline->graphics_create_info.pViewportState) { struct lvp_pipeline_layout *layout = lvp_pipeline_layout_from_handle(pCreateInfo->layout);
/* if pViewportState is null, it means rasterization is discarded, if (layout)
* so this is ignored lvp_pipeline_layout_ref(layout);
*/
const VkPipelineViewportDepthClipControlCreateInfoEXT *ccontrol = vk_find_struct_const(pCreateInfo->pViewportState, if (!layout || !layout->independent_sets)
PIPELINE_VIEWPORT_DEPTH_CLIP_CONTROL_CREATE_INFO_EXT); /* this is a regular pipeline with no partials: directly reuse */
if (ccontrol) pipeline->layout = layout;
pipeline->negative_one_to_one = !!ccontrol->negativeOneToOne; else if (pipeline->stages & layout_stages) {
if ((pipeline->stages & layout_stages) == layout_stages)
/* this has all the layout stages: directly reuse */
pipeline->layout = layout;
else {
/* this is a partial: copy for later merging to avoid modifying another layout */
merge_layouts(pipeline, layout);
}
} }
const VkPipelineRasterizationProvokingVertexStateCreateInfoEXT *pv_state = /* recreate createinfo */
vk_find_struct_const(pCreateInfo->pRasterizationState, if (!libstate || libinfo)
PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT); deep_copy_graphics_create_info(pipeline->mem_ctx, &pipeline->graphics_create_info, pCreateInfo, pipeline->stages);
pipeline->provoking_vertex_last = pv_state && pv_state->provokingVertexMode == VK_PROVOKING_VERTEX_MODE_LAST_VERTEX_EXT; if (libstate) {
for (unsigned i = 0; i < libstate->libraryCount; i++) {
const VkPipelineRasterizationLineStateCreateInfoEXT *line_state = LVP_FROM_HANDLE(lvp_pipeline, p, libstate->pLibraries[i]);
vk_find_struct_const(pCreateInfo->pRasterizationState, deep_copy_graphics_create_info(pipeline->mem_ctx, &pipeline->graphics_create_info, &p->graphics_create_info, p->stages);
PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT); if (p->stages & VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT) {
if (line_state) { pipeline->provoking_vertex_last = p->provoking_vertex_last;
/* always draw bresenham if !smooth */ pipeline->line_stipple_enable = p->line_stipple_enable;
pipeline->line_stipple_enable = line_state->stippledLineEnable; pipeline->line_smooth = p->line_smooth;
pipeline->line_smooth = line_state->lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT; pipeline->disable_multisample = p->disable_multisample;
pipeline->disable_multisample = line_state->lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT || pipeline->line_rectangular = p->line_rectangular;
line_state->lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT; pipeline->line_stipple_factor = p->line_stipple_factor;
pipeline->line_rectangular = line_state->lineRasterizationMode != VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT; pipeline->line_stipple_pattern = p->line_stipple_pattern;
if (pipeline->line_stipple_enable) { pipeline->negative_one_to_one = p->negative_one_to_one;
if (!dynamic_state_contains(pipeline->graphics_create_info.pDynamicState, VK_DYNAMIC_STATE_LINE_STIPPLE_EXT)) {
pipeline->line_stipple_factor = line_state->lineStippleFactor - 1;
pipeline->line_stipple_pattern = line_state->lineStipplePattern;
} else {
pipeline->line_stipple_factor = 0;
pipeline->line_stipple_pattern = UINT16_MAX;
} }
} if (p->stages & VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT)
} else pipeline->force_min_sample = p->force_min_sample;
pipeline->line_rectangular = true; if (p->stages & layout_stages) {
if (!layout || layout->independent_sets)
bool rasterization_disabled = !dynamic_state_contains(pipeline->graphics_create_info.pDynamicState, VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT) && merge_layouts(pipeline, p->layout);
pipeline->graphics_create_info.pRasterizationState->rasterizerDiscardEnable; }
if (!dynamic_state_contains(pipeline->graphics_create_info.pDynamicState, VK_DYNAMIC_STATE_COLOR_WRITE_ENABLE_EXT) && pipeline->stages |= p->stages;
!rasterization_disabled) {
const VkPipelineColorWriteCreateInfoEXT *cw_state =
vk_find_struct_const(pCreateInfo->pColorBlendState, PIPELINE_COLOR_WRITE_CREATE_INFO_EXT);
if (cw_state) {
assert(cw_state->attachmentCount <= pipeline->graphics_create_info.pColorBlendState->attachmentCount);
for (unsigned i = 0; i < cw_state->attachmentCount; i++)
if (!cw_state->pColorWriteEnables[i]) {
VkPipelineColorBlendAttachmentState *att = (void*)&pipeline->graphics_create_info.pColorBlendState->pAttachments[i];
att->colorWriteMask = 0;
}
} }
} }
if (alloc == NULL)
alloc = &device->vk.alloc;
pipeline->device = device;
for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) { for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
VK_FROM_HANDLE(vk_shader_module, module, VK_FROM_HANDLE(vk_shader_module, module,
pCreateInfo->pStages[i].module); pCreateInfo->pStages[i].module);
gl_shader_stage stage = lvp_shader_stage(pCreateInfo->pStages[i].stage); gl_shader_stage stage = lvp_shader_stage(pCreateInfo->pStages[i].stage);
lvp_shader_compile_to_ir(pipeline, module, if (stage == MESA_SHADER_FRAGMENT) {
pCreateInfo->pStages[i].pName, if (!(pipeline->stages & VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT))
stage, continue;
pCreateInfo->pStages[i].pSpecializationInfo); } else {
if (!(pipeline->stages & VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT))
continue;
}
if (module) {
lvp_shader_compile_to_ir(pipeline, module->size, module->data,
pCreateInfo->pStages[i].pName,
stage,
pCreateInfo->pStages[i].pSpecializationInfo);
} else {
const VkShaderModuleCreateInfo *info = vk_find_struct_const(pCreateInfo->pStages[i].pNext, SHADER_MODULE_CREATE_INFO);
assert(info);
lvp_shader_compile_to_ir(pipeline, info->codeSize, info->pCode,
pCreateInfo->pStages[i].pName,
stage,
pCreateInfo->pStages[i].pSpecializationInfo);
}
if (!pipeline->pipeline_nir[stage]) if (!pipeline->pipeline_nir[stage])
return VK_ERROR_FEATURE_NOT_PRESENT; return VK_ERROR_FEATURE_NOT_PRESENT;
}
if (pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]) { switch (stage) {
if (pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]->info.fs.uses_sample_qualifier || case MESA_SHADER_GEOMETRY:
BITSET_TEST(pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]->info.system_values_read, SYSTEM_VALUE_SAMPLE_ID) || pipeline->gs_output_lines = pipeline->pipeline_nir[MESA_SHADER_GEOMETRY] &&
BITSET_TEST(pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]->info.system_values_read, SYSTEM_VALUE_SAMPLE_POS)) pipeline->pipeline_nir[MESA_SHADER_GEOMETRY]->info.gs.output_primitive == SHADER_PRIM_LINES;
pipeline->force_min_sample = true; break;
case MESA_SHADER_FRAGMENT:
if (pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]->info.fs.uses_sample_qualifier ||
BITSET_TEST(pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]->info.system_values_read, SYSTEM_VALUE_SAMPLE_ID) ||
BITSET_TEST(pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]->info.system_values_read, SYSTEM_VALUE_SAMPLE_POS))
pipeline->force_min_sample = true;
break;
default: break;
}
} }
if (pipeline->pipeline_nir[MESA_SHADER_TESS_CTRL]) { if (pCreateInfo->stageCount && pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL]) {
nir_lower_patch_vertices(pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL], pipeline->pipeline_nir[MESA_SHADER_TESS_CTRL]->info.tess.tcs_vertices_out, NULL); nir_lower_patch_vertices(pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL], pipeline->pipeline_nir[MESA_SHADER_TESS_CTRL]->info.tess.tcs_vertices_out, NULL);
merge_tess_info(&pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL]->info, &pipeline->pipeline_nir[MESA_SHADER_TESS_CTRL]->info); merge_tess_info(&pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL]->info, &pipeline->pipeline_nir[MESA_SHADER_TESS_CTRL]->info);
const VkPipelineTessellationDomainOriginStateCreateInfo *domain_origin_state = const VkPipelineTessellationDomainOriginStateCreateInfo *domain_origin_state =
@@ -1079,29 +1273,81 @@ lvp_graphics_pipeline_init(struct lvp_pipeline *pipeline,
if (!domain_origin_state || domain_origin_state->domainOrigin == VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT) if (!domain_origin_state || domain_origin_state->domainOrigin == VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT)
pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL]->info.tess.ccw = !pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL]->info.tess.ccw; pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL]->info.tess.ccw = !pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL]->info.tess.ccw;
} }
if (libstate) {
pipeline->gs_output_lines = pipeline->pipeline_nir[MESA_SHADER_GEOMETRY] && for (unsigned i = 0; i < libstate->libraryCount; i++) {
pipeline->pipeline_nir[MESA_SHADER_GEOMETRY]->info.gs.output_primitive == SHADER_PRIM_LINES; LVP_FROM_HANDLE(lvp_pipeline, p, libstate->pLibraries[i]);
if (p->stages & VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT) {
if (p->pipeline_nir[MESA_SHADER_FRAGMENT])
bool has_fragment_shader = false; pipeline->pipeline_nir[MESA_SHADER_FRAGMENT] = nir_shader_clone(pipeline->mem_ctx, p->pipeline_nir[MESA_SHADER_FRAGMENT]);
for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) { }
gl_shader_stage stage = lvp_shader_stage(pCreateInfo->pStages[i].stage); if (p->stages & VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT) {
lvp_pipeline_compile(pipeline, stage); for (unsigned j = MESA_SHADER_VERTEX; j < MESA_SHADER_FRAGMENT; j++) {
if (stage == MESA_SHADER_FRAGMENT) if (p->pipeline_nir[j])
has_fragment_shader = true; pipeline->pipeline_nir[j] = nir_shader_clone(pipeline->mem_ctx, p->pipeline_nir[j]);
}
}
}
} }
if (has_fragment_shader == false) { if (pipeline->stages & VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT) {
/* create a dummy fragment shader for this pipeline. */ if (pipeline->graphics_create_info.pViewportState) {
nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_FRAGMENT, NULL, /* if pViewportState is null, it means rasterization is discarded,
"dummy_frag"); * so this is ignored
*/
const VkPipelineViewportDepthClipControlCreateInfoEXT *ccontrol = vk_find_struct_const(pCreateInfo->pViewportState,
PIPELINE_VIEWPORT_DEPTH_CLIP_CONTROL_CREATE_INFO_EXT);
if (ccontrol)
pipeline->negative_one_to_one = !!ccontrol->negativeOneToOne;
}
pipeline->pipeline_nir[MESA_SHADER_FRAGMENT] = b.shader; const VkPipelineRasterizationProvokingVertexStateCreateInfoEXT *pv_state =
struct pipe_shader_state shstate = {0}; vk_find_struct_const(pCreateInfo->pRasterizationState,
shstate.type = PIPE_SHADER_IR_NIR; PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT);
shstate.ir.nir = nir_shader_clone(NULL, pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]); pipeline->provoking_vertex_last = pv_state && pv_state->provokingVertexMode == VK_PROVOKING_VERTEX_MODE_LAST_VERTEX_EXT;
pipeline->shader_cso[PIPE_SHADER_FRAGMENT] = device->queue.ctx->create_fs_state(device->queue.ctx, &shstate);
const VkPipelineRasterizationLineStateCreateInfoEXT *line_state =
vk_find_struct_const(pCreateInfo->pRasterizationState,
PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT);
if (line_state) {
/* always draw bresenham if !smooth */
pipeline->line_stipple_enable = line_state->stippledLineEnable;
pipeline->line_smooth = line_state->lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT;
pipeline->disable_multisample = line_state->lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT ||
line_state->lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT;
pipeline->line_rectangular = line_state->lineRasterizationMode != VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT;
if (pipeline->line_stipple_enable) {
if (!dynamic_state_contains(pipeline->graphics_create_info.pDynamicState, VK_DYNAMIC_STATE_LINE_STIPPLE_EXT)) {
pipeline->line_stipple_factor = line_state->lineStippleFactor - 1;
pipeline->line_stipple_pattern = line_state->lineStipplePattern;
} else {
pipeline->line_stipple_factor = 0;
pipeline->line_stipple_pattern = UINT16_MAX;
}
}
} else
pipeline->line_rectangular = true;
}
if (!pipeline->library) {
bool has_fragment_shader = false;
for (uint32_t i = 0; i < pipeline->graphics_create_info.stageCount; i++) {
gl_shader_stage stage = lvp_shader_stage(pipeline->graphics_create_info.pStages[i].stage);
lvp_pipeline_compile(pipeline, stage);
if (stage == MESA_SHADER_FRAGMENT)
has_fragment_shader = true;
}
if (has_fragment_shader == false) {
/* create a dummy fragment shader for this pipeline. */
nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_FRAGMENT, NULL,
"dummy_frag");
pipeline->pipeline_nir[MESA_SHADER_FRAGMENT] = b.shader;
struct pipe_shader_state shstate = {0};
shstate.type = PIPE_SHADER_IR_NIR;
shstate.ir.nir = nir_shader_clone(NULL, pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]);
pipeline->shader_cso[PIPE_SHADER_FRAGMENT] = device->queue.ctx->create_fs_state(device->queue.ctx, &shstate);
}
} }
return VK_SUCCESS; return VK_SUCCESS;
} }
@@ -1202,7 +1448,7 @@ lvp_compute_pipeline_init(struct lvp_pipeline *pipeline,
&pipeline->compute_create_info, pCreateInfo); &pipeline->compute_create_info, pCreateInfo);
pipeline->is_compute_pipeline = true; pipeline->is_compute_pipeline = true;
lvp_shader_compile_to_ir(pipeline, module, lvp_shader_compile_to_ir(pipeline, module->size, module->data,
pCreateInfo->stage.pName, pCreateInfo->stage.pName,
MESA_SHADER_COMPUTE, MESA_SHADER_COMPUTE,
pCreateInfo->stage.pSpecializationInfo); pCreateInfo->stage.pSpecializationInfo);

View File

@@ -284,6 +284,7 @@ struct lvp_descriptor_set_layout {
/* Descriptor set layouts can be destroyed at almost any time */ /* Descriptor set layouts can be destroyed at almost any time */
uint32_t ref_cnt; uint32_t ref_cnt;
/* add new members after this */
uint32_t immutable_sampler_count; uint32_t immutable_sampler_count;
@@ -328,7 +329,9 @@ static inline void
lvp_descriptor_set_layout_unref(struct lvp_device *device, lvp_descriptor_set_layout_unref(struct lvp_device *device,
struct lvp_descriptor_set_layout *layout) struct lvp_descriptor_set_layout *layout)
{ {
assert(layout && layout->ref_cnt >= 1); if (!layout)
return;
assert(layout->ref_cnt >= 1);
if (p_atomic_dec_zero(&layout->ref_cnt)) if (p_atomic_dec_zero(&layout->ref_cnt))
lvp_descriptor_set_layout_destroy(device, layout); lvp_descriptor_set_layout_destroy(device, layout);
} }
@@ -406,6 +409,7 @@ struct lvp_pipeline_layout {
uint16_t uniform_block_count; uint16_t uniform_block_count;
uint16_t uniform_block_sizes[MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BLOCKS * MAX_SETS]; uint16_t uniform_block_sizes[MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BLOCKS * MAX_SETS];
} stage[MESA_SHADER_STAGES]; } stage[MESA_SHADER_STAGES];
bool independent_sets;
}; };
void lvp_pipeline_layout_destroy(struct lvp_device *device, void lvp_pipeline_layout_destroy(struct lvp_device *device,
@@ -447,6 +451,7 @@ struct lvp_pipeline {
void *shader_cso[PIPE_SHADER_TYPES]; void *shader_cso[PIPE_SHADER_TYPES];
VkGraphicsPipelineCreateInfo graphics_create_info; VkGraphicsPipelineCreateInfo graphics_create_info;
VkComputePipelineCreateInfo compute_create_info; VkComputePipelineCreateInfo compute_create_info;
VkGraphicsPipelineLibraryFlagsEXT stages;
uint32_t line_stipple_factor; uint32_t line_stipple_factor;
uint16_t line_stipple_pattern; uint16_t line_stipple_pattern;
bool line_stipple_enable; bool line_stipple_enable;