/*
 * Copyright © 2019 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "lvp_private.h"
#include "vk_util.h"
#include "glsl_types.h"
#include "spirv/nir_spirv.h"
#include "nir/nir_builder.h"
#include "lvp_lower_vulkan_resource.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "nir/nir_xfb_info.h"

#define SPIR_V_MAGIC_NUMBER 0x07230203
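
/* Duplicates a caller-owned array into the pipeline's ralloc context so the
 * deep-copied create-info stays valid after vkCreate*Pipelines returns.
 * `mem_ctx` must be in scope at the expansion site, and on allocation
 * failure the macro returns VK_ERROR_OUT_OF_HOST_MEMORY from the enclosing
 * function, so it may only be used in functions returning VkResult. */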
#define LVP_PIPELINE_DUP(dst, src, type, count) do {        \
      type *temp = ralloc_array(mem_ctx, type, count);      \
      if (!temp) return VK_ERROR_OUT_OF_HOST_MEMORY;        \
      memcpy(temp, (src), sizeof(type) * count);            \
      dst = temp;                                           \
   } while(0)
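
/* Releases the gallium CSO created for each shader stage, then frees the
 * ralloc context holding the deep-copied create-info and the pipeline
 * object itself. */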
VKAPI_ATTR void VKAPI_CALL lvp_DestroyPipeline(
   VkDevice                                     _device,
   VkPipeline                                   _pipeline,
   const VkAllocationCallbacks*                 pAllocator)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_pipeline, pipeline, _pipeline);

   if (!_pipeline)
      return;

   if (pipeline->shader_cso[PIPE_SHADER_VERTEX])
      device->queue.ctx->delete_vs_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_VERTEX]);
   if (pipeline->shader_cso[PIPE_SHADER_FRAGMENT])
      device->queue.ctx->delete_fs_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_FRAGMENT]);
   if (pipeline->shader_cso[PIPE_SHADER_GEOMETRY])
      device->queue.ctx->delete_gs_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_GEOMETRY]);
   if (pipeline->shader_cso[PIPE_SHADER_TESS_CTRL])
      device->queue.ctx->delete_tcs_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_TESS_CTRL]);
   if (pipeline->shader_cso[PIPE_SHADER_TESS_EVAL])
      device->queue.ctx->delete_tes_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_TESS_EVAL]);
   if (pipeline->shader_cso[PIPE_SHADER_COMPUTE])
      device->queue.ctx->delete_compute_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_COMPUTE]);

   ralloc_free(pipeline->mem_ctx);
   vk_object_base_finish(&pipeline->base);
   vk_free2(&device->vk.alloc, pAllocator, pipeline);
}
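
/* Copies one shader-stage description. The specialization info is packed
 * into a single ralloc'd block laid out as
 *   [VkSpecializationInfo][map entries][data bytes]
 * which is why the map-entry and data pointers are computed from the struct
 * address instead of being allocated separately. */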
static VkResult
deep_copy_shader_stage(void *mem_ctx,
                       struct VkPipelineShaderStageCreateInfo *dst,
                       const struct VkPipelineShaderStageCreateInfo *src)
{
   dst->sType = src->sType;
   dst->pNext = NULL;
   dst->flags = src->flags;
   dst->stage = src->stage;
   dst->module = src->module;
   dst->pName = src->pName;
   dst->pSpecializationInfo = NULL;

   if (src->pSpecializationInfo) {
      const VkSpecializationInfo *src_spec = src->pSpecializationInfo;
      VkSpecializationInfo *dst_spec = ralloc_size(mem_ctx, sizeof(VkSpecializationInfo) +
                                                   src_spec->mapEntryCount * sizeof(VkSpecializationMapEntry) +
                                                   src_spec->dataSize);
      /* mirror LVP_PIPELINE_DUP's out-of-memory handling */
      if (!dst_spec)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      VkSpecializationMapEntry *maps = (VkSpecializationMapEntry *)(dst_spec + 1);
      dst_spec->pMapEntries = maps;
      void *pdata = (void *)(dst_spec->pMapEntries + src_spec->mapEntryCount);
      dst_spec->pData = pdata;

      dst_spec->mapEntryCount = src_spec->mapEntryCount;
      dst_spec->dataSize = src_spec->dataSize;
      memcpy(pdata, src_spec->pData, src_spec->dataSize);
      memcpy(maps, src_spec->pMapEntries, src_spec->mapEntryCount * sizeof(VkSpecializationMapEntry));
      dst->pSpecializationInfo = dst_spec;
   }
   return VK_SUCCESS;
}
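
/* Copies vertex input state. The only pNext extension handled here is
 * VK_EXT_vertex_attribute_divisor; any other extension struct in the source
 * chain is silently dropped. */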
static VkResult
deep_copy_vertex_input_state(void *mem_ctx,
                             struct VkPipelineVertexInputStateCreateInfo *dst,
                             const struct VkPipelineVertexInputStateCreateInfo *src)
{
   dst->sType = src->sType;
   dst->pNext = NULL;
   dst->flags = src->flags;
   dst->vertexBindingDescriptionCount = src->vertexBindingDescriptionCount;

   LVP_PIPELINE_DUP(dst->pVertexBindingDescriptions,
                    src->pVertexBindingDescriptions,
                    VkVertexInputBindingDescription,
                    src->vertexBindingDescriptionCount);

   dst->vertexAttributeDescriptionCount = src->vertexAttributeDescriptionCount;

   LVP_PIPELINE_DUP(dst->pVertexAttributeDescriptions,
                    src->pVertexAttributeDescriptions,
                    VkVertexInputAttributeDescription,
                    src->vertexAttributeDescriptionCount);

   if (src->pNext) {
      vk_foreach_struct(ext, src->pNext) {
         switch (ext->sType) {
         case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT: {
            VkPipelineVertexInputDivisorStateCreateInfoEXT *ext_src = (VkPipelineVertexInputDivisorStateCreateInfoEXT *)ext;
            VkPipelineVertexInputDivisorStateCreateInfoEXT *ext_dst = ralloc(mem_ctx, VkPipelineVertexInputDivisorStateCreateInfoEXT);
            if (!ext_dst)
               return VK_ERROR_OUT_OF_HOST_MEMORY;

            ext_dst->sType = ext_src->sType;
            /* ralloc does not zero the allocation, so terminate the copied
             * chain explicitly. */
            ext_dst->pNext = NULL;
            ext_dst->vertexBindingDivisorCount = ext_src->vertexBindingDivisorCount;

            LVP_PIPELINE_DUP(ext_dst->pVertexBindingDivisors,
                             ext_src->pVertexBindingDivisors,
                             VkVertexInputBindingDivisorDescriptionEXT,
                             ext_src->vertexBindingDivisorCount);

            dst->pNext = ext_dst;
            break;
         }
         default:
            break;
         }
      }
   }
   return VK_SUCCESS;
}
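
/* Copies viewport/scissor state. pViewports and pScissors may legitimately
 * be NULL when the corresponding state is dynamic, so each array is only
 * duplicated when present. */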
static VkResult
deep_copy_viewport_state(void *mem_ctx,
                         VkPipelineViewportStateCreateInfo *dst,
                         const VkPipelineViewportStateCreateInfo *src)
{
   dst->sType = src->sType;
   dst->pNext = NULL;
   dst->flags = src->flags;

   if (src->pViewports) {
      LVP_PIPELINE_DUP(dst->pViewports,
                       src->pViewports,
                       VkViewport,
                       src->viewportCount);
   } else
      dst->pViewports = NULL;
   dst->viewportCount = src->viewportCount;

   if (src->pScissors) {
      LVP_PIPELINE_DUP(dst->pScissors,
                       src->pScissors,
                       VkRect2D,
                       src->scissorCount);
   } else
      dst->pScissors = NULL;
   dst->scissorCount = src->scissorCount;

   return VK_SUCCESS;
}

static VkResult
deep_copy_color_blend_state(void *mem_ctx,
                            VkPipelineColorBlendStateCreateInfo *dst,
                            const VkPipelineColorBlendStateCreateInfo *src)
{
   dst->sType = src->sType;
   dst->pNext = NULL;
   dst->flags = src->flags;
   dst->logicOpEnable = src->logicOpEnable;
   dst->logicOp = src->logicOp;

   LVP_PIPELINE_DUP(dst->pAttachments,
                    src->pAttachments,
                    VkPipelineColorBlendAttachmentState,
                    src->attachmentCount);
   dst->attachmentCount = src->attachmentCount;

   memcpy(&dst->blendConstants, &src->blendConstants, sizeof(float) * 4);

   return VK_SUCCESS;
}

static VkResult
deep_copy_dynamic_state(void *mem_ctx,
                        VkPipelineDynamicStateCreateInfo *dst,
                        const VkPipelineDynamicStateCreateInfo *src)
{
   dst->sType = src->sType;
   dst->pNext = NULL;
   dst->flags = src->flags;

   LVP_PIPELINE_DUP(dst->pDynamicStates,
                    src->pDynamicStates,
                    VkDynamicState,
                    src->dynamicStateCount);
   dst->dynamicStateCount = src->dynamicStateCount;
   return VK_SUCCESS;
}
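
/* Deep-copies a whole VkGraphicsPipelineCreateInfo into mem_ctx. The
 * application may free its create-info as soon as vkCreateGraphicsPipelines
 * returns, so every pointed-to sub-structure has to be duplicated into
 * memory the pipeline owns. */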
static VkResult
deep_copy_graphics_create_info(void *mem_ctx,
                               VkGraphicsPipelineCreateInfo *dst,
                               const VkGraphicsPipelineCreateInfo *src)
{
   int i;
   VkResult result;
   VkPipelineShaderStageCreateInfo *stages;
   VkPipelineVertexInputStateCreateInfo *vertex_input;

   dst->sType = src->sType;
   dst->pNext = NULL;
   dst->flags = src->flags;
   dst->layout = src->layout;
   dst->renderPass = src->renderPass;
   dst->subpass = src->subpass;
   dst->basePipelineHandle = src->basePipelineHandle;
   dst->basePipelineIndex = src->basePipelineIndex;

   /* pStages */
   dst->stageCount = src->stageCount;
   stages = ralloc_array(mem_ctx, VkPipelineShaderStageCreateInfo, dst->stageCount);
   if (!stages)
      return VK_ERROR_OUT_OF_HOST_MEMORY;
   for (i = 0; i < dst->stageCount; i++) {
      result = deep_copy_shader_stage(mem_ctx, &stages[i], &src->pStages[i]);
      if (result != VK_SUCCESS)
         return result;
   }
   dst->pStages = stages;

   /* pVertexInputState */
   vertex_input = ralloc(mem_ctx, VkPipelineVertexInputStateCreateInfo);
   if (!vertex_input)
      return VK_ERROR_OUT_OF_HOST_MEMORY;
   result = deep_copy_vertex_input_state(mem_ctx, vertex_input,
                                         src->pVertexInputState);
   if (result != VK_SUCCESS)
      return result;
   dst->pVertexInputState = vertex_input;

   /* pInputAssemblyState */
   LVP_PIPELINE_DUP(dst->pInputAssemblyState,
                    src->pInputAssemblyState,
                    VkPipelineInputAssemblyStateCreateInfo,
                    1);

   /* pTessellationState */
   if (src->pTessellationState) {
      LVP_PIPELINE_DUP(dst->pTessellationState,
                       src->pTessellationState,
                       VkPipelineTessellationStateCreateInfo,
                       1);
   }

   /* pViewportState */
   if (src->pViewportState) {
      VkPipelineViewportStateCreateInfo *viewport_state;
      viewport_state = ralloc(mem_ctx, VkPipelineViewportStateCreateInfo);
      if (!viewport_state)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      deep_copy_viewport_state(mem_ctx, viewport_state, src->pViewportState);
      dst->pViewportState = viewport_state;
   } else
      dst->pViewportState = NULL;

   /* pRasterizationState */
   LVP_PIPELINE_DUP(dst->pRasterizationState,
                    src->pRasterizationState,
                    VkPipelineRasterizationStateCreateInfo,
                    1);

   /* pMultisampleState */
   if (src->pMultisampleState) {
      VkPipelineMultisampleStateCreateInfo* ms_state;
      ms_state = ralloc_size(mem_ctx, sizeof(VkPipelineMultisampleStateCreateInfo) + sizeof(VkSampleMask));
      if (!ms_state)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      /* does samplemask need deep copy? */
      memcpy(ms_state, src->pMultisampleState, sizeof(VkPipelineMultisampleStateCreateInfo));
      if (src->pMultisampleState->pSampleMask) {
         VkSampleMask *sample_mask = (VkSampleMask *)(ms_state + 1);
         sample_mask[0] = src->pMultisampleState->pSampleMask[0];
         ms_state->pSampleMask = sample_mask;
      }
      dst->pMultisampleState = ms_state;
   } else
      dst->pMultisampleState = NULL;

   /* pDepthStencilState */
   if (src->pDepthStencilState) {
      LVP_PIPELINE_DUP(dst->pDepthStencilState,
                       src->pDepthStencilState,
                       VkPipelineDepthStencilStateCreateInfo,
                       1);
   } else
      dst->pDepthStencilState = NULL;

   /* pColorBlendState */
   if (src->pColorBlendState) {
      VkPipelineColorBlendStateCreateInfo* cb_state;

      cb_state = ralloc(mem_ctx, VkPipelineColorBlendStateCreateInfo);
      if (!cb_state)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      deep_copy_color_blend_state(mem_ctx, cb_state, src->pColorBlendState);
      dst->pColorBlendState = cb_state;
   } else
      dst->pColorBlendState = NULL;

   if (src->pDynamicState) {
      VkPipelineDynamicStateCreateInfo* dyn_state;

      /* pDynamicState */
      dyn_state = ralloc(mem_ctx, VkPipelineDynamicStateCreateInfo);
      if (!dyn_state)
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      deep_copy_dynamic_state(mem_ctx, dyn_state, src->pDynamicState);
      dst->pDynamicState = dyn_state;
   } else
      dst->pDynamicState = NULL;

   return VK_SUCCESS;
}
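
/* The compute create-info embeds a single shader stage, so this is a much
 * smaller copy than the graphics one. */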
static VkResult
deep_copy_compute_create_info(void *mem_ctx,
                              VkComputePipelineCreateInfo *dst,
                              const VkComputePipelineCreateInfo *src)
{
   VkResult result;
   dst->sType = src->sType;
   dst->pNext = NULL;
   dst->flags = src->flags;
   dst->layout = src->layout;
   dst->basePipelineHandle = src->basePipelineHandle;
   dst->basePipelineIndex = src->basePipelineIndex;

   result = deep_copy_shader_stage(mem_ctx, &dst->stage, &src->stage);
   if (result != VK_SUCCESS)
      return result;
   return VK_SUCCESS;
}

static inline unsigned
st_shader_stage_to_ptarget(gl_shader_stage stage)
{
   switch (stage) {
   case MESA_SHADER_VERTEX:
      return PIPE_SHADER_VERTEX;
   case MESA_SHADER_FRAGMENT:
      return PIPE_SHADER_FRAGMENT;
   case MESA_SHADER_GEOMETRY:
      return PIPE_SHADER_GEOMETRY;
   case MESA_SHADER_TESS_CTRL:
      return PIPE_SHADER_TESS_CTRL;
   case MESA_SHADER_TESS_EVAL:
      return PIPE_SHADER_TESS_EVAL;
   case MESA_SHADER_COMPUTE:
      return PIPE_SHADER_COMPUTE;
   default:
      break;
   }

   assert(!"should not be reached");
   return PIPE_SHADER_VERTEX;
}
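
/* Size/alignment callback used when lowering compute shared variables:
 * components are naturally aligned and booleans take 32 bits. */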
static void
shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
   assert(glsl_type_is_vector_or_scalar(type));

   uint32_t comp_size = glsl_type_is_boolean(type)
      ? 4 : glsl_get_bit_size(type) / 8;
   unsigned length = glsl_get_vector_elements(type);
   *size = comp_size * length;
   *align = comp_size;
}
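
/* Runs a NIR pass and ORs its progress into the enclosing loop's `progress`
 * flag; both `nir` and `progress` must be in scope at the expansion site. */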
#define OPT(pass, ...) do {                                  \
      bool this_progress = false;                            \
      NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);     \
      progress |= this_progress;                             \
   } while(0)
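
/* Translates a stage's SPIR-V into NIR and runs lavapipe's lowering and
 * optimization pipeline: specialization constants are resolved, a single
 * entrypoint is kept, descriptor access is lowered against the pipeline
 * layout, and I/O variables are assigned driver locations. On translation
 * failure the stage's pipeline_nir entry stays NULL, which callers treat as
 * a compile error. */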
static void
lvp_shader_compile_to_ir(struct lvp_pipeline *pipeline,
                         struct vk_shader_module *module,
                         const char *entrypoint_name,
                         gl_shader_stage stage,
                         const VkSpecializationInfo *spec_info)
{
   nir_shader *nir;
   const nir_shader_compiler_options *drv_options = pipeline->device->pscreen->get_compiler_options(pipeline->device->pscreen, PIPE_SHADER_IR_NIR, st_shader_stage_to_ptarget(stage));
   bool progress;
   uint32_t *spirv = (uint32_t *) module->data;
   assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
   assert(module->size % 4 == 0);

   uint32_t num_spec_entries = 0;
   struct nir_spirv_specialization *spec_entries = NULL;
   if (spec_info && spec_info->mapEntryCount > 0) {
      num_spec_entries = spec_info->mapEntryCount;
      spec_entries = calloc(num_spec_entries, sizeof(*spec_entries));
      for (uint32_t i = 0; i < num_spec_entries; i++) {
         VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
         const void *data =
            (char *)spec_info->pData + entry.offset;
         assert((const char *)((char *)data + entry.size) <=
                (char *)spec_info->pData + spec_info->dataSize);

         spec_entries[i].id = entry.constantID;
         switch (entry.size) {
         case 8:
            spec_entries[i].value.u64 = *(const uint64_t *)data;
            break;
         case 4:
            spec_entries[i].value.u32 = *(const uint32_t *)data;
            break;
         case 2:
            spec_entries[i].value.u16 = *(const uint16_t *)data;
            break;
         case 1:
            spec_entries[i].value.u8 = *(const uint8_t *)data;
            break;
         default:
            assert(!"Invalid spec constant size");
            break;
         }
      }
   }
   struct lvp_device *pdevice = pipeline->device;
   const struct spirv_to_nir_options spirv_options = {
      .environment = NIR_SPIRV_VULKAN,
      .caps = {
         .float64 = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_DOUBLES) == 1),
         .int16 = true,
         .int64 = (pdevice->pscreen->get_param(pdevice->pscreen, PIPE_CAP_INT64) == 1),
         .tessellation = true,
         .image_ms_array = true,
         .image_read_without_format = true,
         .image_write_without_format = true,
         .storage_image_ms = true,
         .geometry_streams = true,
         .storage_8bit = true,
         .storage_16bit = true,
         .variable_pointers = true,
         .stencil_export = true,
         .post_depth_coverage = true,
         .transform_feedback = true,
         .device_group = true,
         .draw_parameters = true,
         .shader_viewport_index_layer = true,
         .multiview = true,
         .physical_storage_buffer_address = true,
         .int64_atomics = true,
      },
      .ubo_addr_format = nir_address_format_32bit_index_offset,
      .ssbo_addr_format = nir_address_format_32bit_index_offset,
      .phys_ssbo_addr_format = nir_address_format_64bit_global,
      .push_const_addr_format = nir_address_format_logical,
      .shared_addr_format = nir_address_format_32bit_offset,
      .frag_coord_is_sysval = false,
   };

   nir = spirv_to_nir(spirv, module->size / 4,
                      spec_entries, num_spec_entries,
                      stage, entrypoint_name, &spirv_options, drv_options);

   if (!nir) {
      free(spec_entries);
      return;
   }
   nir_validate_shader(nir, NULL);

   free(spec_entries);

   NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
   NIR_PASS_V(nir, nir_lower_returns);
   NIR_PASS_V(nir, nir_inline_functions);
   NIR_PASS_V(nir, nir_copy_prop);
   NIR_PASS_V(nir, nir_opt_deref);

   /* Pick off the single entrypoint that we want */
   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
      if (!func->is_entrypoint)
         exec_node_remove(&func->node);
   }
   assert(exec_list_length(&nir->functions) == 1);

   NIR_PASS_V(nir, nir_lower_variable_initializers, ~0);
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_split_per_member_structs);

   NIR_PASS_V(nir, nir_remove_dead_variables,
              nir_var_shader_in | nir_var_shader_out | nir_var_system_value, NULL);

   if (stage == MESA_SHADER_FRAGMENT)
      lvp_lower_input_attachments(nir, false);
   NIR_PASS_V(nir, nir_lower_system_values);
   NIR_PASS_V(nir, nir_lower_compute_system_values, NULL);

   NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
   nir_remove_dead_variables(nir, nir_var_uniform, NULL);

   lvp_lower_pipeline_layout(pipeline->device, pipeline->layout, nir);

   NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_lower_global_vars_to_local);

   NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_push_const,
              nir_address_format_32bit_offset);

   NIR_PASS_V(nir, nir_lower_explicit_io,
              nir_var_mem_ubo | nir_var_mem_ssbo,
              nir_address_format_32bit_index_offset);

   NIR_PASS_V(nir, nir_lower_explicit_io,
              nir_var_mem_global,
              nir_address_format_64bit_global);

   if (nir->info.stage == MESA_SHADER_COMPUTE) {
      NIR_PASS_V(nir, nir_lower_vars_to_explicit_types, nir_var_mem_shared, shared_var_info);
      NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_shared, nir_address_format_32bit_offset);
   }

   NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_shader_temp, NULL);

   if (nir->info.stage == MESA_SHADER_VERTEX ||
       nir->info.stage == MESA_SHADER_GEOMETRY) {
      NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
   } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
      NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, true);
   }

   do {
      progress = false;

      OPT(nir_lower_flrp, 32|64, true);
      OPT(nir_split_array_vars, nir_var_function_temp);
      OPT(nir_shrink_vec_array_vars, nir_var_function_temp);
      OPT(nir_opt_deref);
      OPT(nir_lower_vars_to_ssa);

      progress |= nir_copy_prop(nir);
      progress |= nir_opt_dce(nir);
      progress |= nir_opt_dead_cf(nir);
      progress |= nir_opt_cse(nir);
      progress |= nir_opt_algebraic(nir);
      progress |= nir_opt_constant_folding(nir);
      progress |= nir_opt_undef(nir);

      progress |= nir_opt_deref(nir);
      progress |= nir_lower_alu_to_scalar(nir, NULL, NULL);
   } while (progress);

   nir_lower_var_copies(nir);
   nir_remove_dead_variables(nir, nir_var_function_temp, NULL);

   nir_validate_shader(nir, NULL);
   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   if (nir->info.stage != MESA_SHADER_VERTEX)
      nir_assign_io_var_locations(nir, nir_var_shader_in, &nir->num_inputs, nir->info.stage);
   else {
      nir->num_inputs = util_last_bit64(nir->info.inputs_read);
      nir_foreach_shader_in_variable(var, nir) {
         var->data.driver_location = var->data.location - VERT_ATTRIB_GENERIC0;
      }
   }
   nir_assign_io_var_locations(nir, nir_var_shader_out, &nir->num_outputs,
                               nir->info.stage);
   pipeline->pipeline_nir[stage] = nir;
}

static void fill_shader_prog(struct pipe_shader_state *state, gl_shader_stage stage, struct lvp_pipeline *pipeline)
{
   state->type = PIPE_SHADER_IR_NIR;
   state->ir.nir = pipeline->pipeline_nir[stage];
}

static void
merge_tess_info(struct shader_info *tes_info,
                const struct shader_info *tcs_info)
{
   /* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
    *
    *    "PointMode. Controls generation of points rather than triangles
    *     or lines. This functionality defaults to disabled, and is
    *     enabled if either shader stage includes the execution mode.
    *
    * and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
    * PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
    * and OutputVertices, it says:
    *
    *    "One mode must be set in at least one of the tessellation
    *     shader stages."
    *
    * So, the fields can be set in either the TCS or TES, but they must
    * agree if set in both. Our backend looks at TES, so bitwise-or in
    * the values from the TCS.
    */
   assert(tcs_info->tess.tcs_vertices_out == 0 ||
          tes_info->tess.tcs_vertices_out == 0 ||
          tcs_info->tess.tcs_vertices_out == tes_info->tess.tcs_vertices_out);
   tes_info->tess.tcs_vertices_out |= tcs_info->tess.tcs_vertices_out;

   assert(tcs_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
          tes_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
          tcs_info->tess.spacing == tes_info->tess.spacing);
   tes_info->tess.spacing |= tcs_info->tess.spacing;

   assert(tcs_info->tess.primitive_mode == 0 ||
          tes_info->tess.primitive_mode == 0 ||
          tcs_info->tess.primitive_mode == tes_info->tess.primitive_mode);
   tes_info->tess.primitive_mode |= tcs_info->tess.primitive_mode;
   tes_info->tess.ccw |= tcs_info->tess.ccw;
   tes_info->tess.point_mode |= tcs_info->tess.point_mode;
}

static gl_shader_stage
lvp_shader_stage(VkShaderStageFlagBits stage)
{
   switch (stage) {
   case VK_SHADER_STAGE_VERTEX_BIT:
      return MESA_SHADER_VERTEX;
   case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
      return MESA_SHADER_TESS_CTRL;
   case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
      return MESA_SHADER_TESS_EVAL;
   case VK_SHADER_STAGE_GEOMETRY_BIT:
      return MESA_SHADER_GEOMETRY;
   case VK_SHADER_STAGE_FRAGMENT_BIT:
      return MESA_SHADER_FRAGMENT;
   case VK_SHADER_STAGE_COMPUTE_BIT:
      return MESA_SHADER_COMPUTE;
   default:
      unreachable("invalid VkShaderStageFlagBits");
      return MESA_SHADER_NONE;
   }
}
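
/* Finalizes a stage's NIR and turns it into a gallium CSO. For the stages
 * that can emit transform feedback (VS, TES, GS), the gathered NIR xfb info
 * is translated into gallium's stream-output descriptor, remapping varying
 * slots to the driver locations assigned during compilation. */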
static VkResult
lvp_pipeline_compile(struct lvp_pipeline *pipeline,
                     gl_shader_stage stage)
{
   struct lvp_device *device = pipeline->device;
   device->physical_device->pscreen->finalize_nir(device->physical_device->pscreen, pipeline->pipeline_nir[stage], true);
   if (stage == MESA_SHADER_COMPUTE) {
      struct pipe_compute_state shstate = {0};
      shstate.prog = (void *)pipeline->pipeline_nir[MESA_SHADER_COMPUTE];
      shstate.ir_type = PIPE_SHADER_IR_NIR;
      shstate.req_local_mem = pipeline->pipeline_nir[MESA_SHADER_COMPUTE]->info.cs.shared_size;
      pipeline->shader_cso[PIPE_SHADER_COMPUTE] = device->queue.ctx->create_compute_state(device->queue.ctx, &shstate);
   } else {
      struct pipe_shader_state shstate = {0};
      fill_shader_prog(&shstate, stage, pipeline);

      nir_xfb_info *xfb_info = NULL;
      if (stage == MESA_SHADER_VERTEX ||
          stage == MESA_SHADER_GEOMETRY ||
          stage == MESA_SHADER_TESS_EVAL) {
         xfb_info = nir_gather_xfb_info(pipeline->pipeline_nir[stage], NULL);
         if (xfb_info) {
            uint8_t output_mapping[VARYING_SLOT_TESS_MAX];
            memset(output_mapping, 0, sizeof(output_mapping));

            nir_foreach_shader_out_variable(var, pipeline->pipeline_nir[stage]) {
               unsigned slots = var->data.compact ? DIV_ROUND_UP(glsl_get_length(var->type), 4)
                                                  : glsl_count_attribute_slots(var->type, false);
               for (unsigned i = 0; i < slots; i++)
                  output_mapping[var->data.location + i] = var->data.driver_location + i;
            }

            shstate.stream_output.num_outputs = xfb_info->output_count;
            for (unsigned i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
               if (xfb_info->buffers_written & (1 << i)) {
                  shstate.stream_output.stride[i] = xfb_info->buffers[i].stride / 4;
               }
            }
            for (unsigned i = 0; i < xfb_info->output_count; i++) {
               shstate.stream_output.output[i].output_buffer = xfb_info->outputs[i].buffer;
               shstate.stream_output.output[i].dst_offset = xfb_info->outputs[i].offset / 4;
               shstate.stream_output.output[i].register_index = output_mapping[xfb_info->outputs[i].location];
               shstate.stream_output.output[i].num_components = util_bitcount(xfb_info->outputs[i].component_mask);
               shstate.stream_output.output[i].start_component = ffs(xfb_info->outputs[i].component_mask) - 1;
               shstate.stream_output.output[i].stream = xfb_info->buffer_to_stream[xfb_info->outputs[i].buffer];
            }
         }
      }

      switch (stage) {
      case MESA_SHADER_FRAGMENT:
         pipeline->shader_cso[PIPE_SHADER_FRAGMENT] = device->queue.ctx->create_fs_state(device->queue.ctx, &shstate);
         break;
      case MESA_SHADER_VERTEX:
         pipeline->shader_cso[PIPE_SHADER_VERTEX] = device->queue.ctx->create_vs_state(device->queue.ctx, &shstate);
         break;
      case MESA_SHADER_GEOMETRY:
         pipeline->shader_cso[PIPE_SHADER_GEOMETRY] = device->queue.ctx->create_gs_state(device->queue.ctx, &shstate);
         break;
      case MESA_SHADER_TESS_CTRL:
         pipeline->shader_cso[PIPE_SHADER_TESS_CTRL] = device->queue.ctx->create_tcs_state(device->queue.ctx, &shstate);
         break;
      case MESA_SHADER_TESS_EVAL:
         pipeline->shader_cso[PIPE_SHADER_TESS_EVAL] = device->queue.ctx->create_tes_state(device->queue.ctx, &shstate);
         break;
      default:
         unreachable("illegal shader");
         break;
      }
   }
   return VK_SUCCESS;
}
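
/* Builds a graphics pipeline: deep-copies the create-info, compiles each
 * supplied stage to NIR, merges and patches tessellation info (flipping the
 * winding when the domain origin is the default upper-left), then creates
 * the CSOs. A pipeline without a fragment shader gets a dummy one so the
 * draw path always has a fragment CSO to bind. */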
static VkResult
lvp_graphics_pipeline_init(struct lvp_pipeline *pipeline,
                           struct lvp_device *device,
                           struct lvp_pipeline_cache *cache,
                           const VkGraphicsPipelineCreateInfo *pCreateInfo,
                           const VkAllocationCallbacks *alloc)
{
   if (alloc == NULL)
      alloc = &device->vk.alloc;
   pipeline->device = device;
   pipeline->layout = lvp_pipeline_layout_from_handle(pCreateInfo->layout);
   pipeline->force_min_sample = false;

   pipeline->mem_ctx = ralloc_context(NULL);
   /* recreate createinfo */
   deep_copy_graphics_create_info(pipeline->mem_ctx, &pipeline->graphics_create_info, pCreateInfo);
   pipeline->is_compute_pipeline = false;

   for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
      VK_FROM_HANDLE(vk_shader_module, module,
                     pCreateInfo->pStages[i].module);
      gl_shader_stage stage = lvp_shader_stage(pCreateInfo->pStages[i].stage);
      lvp_shader_compile_to_ir(pipeline, module,
                               pCreateInfo->pStages[i].pName,
                               stage,
                               pCreateInfo->pStages[i].pSpecializationInfo);
      if (!pipeline->pipeline_nir[stage])
         return VK_ERROR_FEATURE_NOT_PRESENT;
   }

   if (pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]) {
      if (pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]->info.fs.uses_sample_qualifier ||
          BITSET_TEST(pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]->info.system_values_read, SYSTEM_VALUE_SAMPLE_ID) ||
          BITSET_TEST(pipeline->pipeline_nir[MESA_SHADER_FRAGMENT]->info.system_values_read, SYSTEM_VALUE_SAMPLE_POS))
         pipeline->force_min_sample = true;
   }
   if (pipeline->pipeline_nir[MESA_SHADER_TESS_CTRL]) {
      nir_lower_patch_vertices(pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL], pipeline->pipeline_nir[MESA_SHADER_TESS_CTRL]->info.tess.tcs_vertices_out, NULL);
      merge_tess_info(&pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL]->info, &pipeline->pipeline_nir[MESA_SHADER_TESS_CTRL]->info);
      const VkPipelineTessellationDomainOriginStateCreateInfo *domain_origin_state =
         vk_find_struct_const(pCreateInfo->pTessellationState,
                              PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO);
      if (!domain_origin_state || domain_origin_state->domainOrigin == VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT)
         pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL]->info.tess.ccw = !pipeline->pipeline_nir[MESA_SHADER_TESS_EVAL]->info.tess.ccw;
   }

   bool has_fragment_shader = false;
   for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
      gl_shader_stage stage = lvp_shader_stage(pCreateInfo->pStages[i].stage);
      lvp_pipeline_compile(pipeline, stage);
      if (stage == MESA_SHADER_FRAGMENT)
         has_fragment_shader = true;
   }

   if (has_fragment_shader == false) {
      /* create a dummy fragment shader for this pipeline. */
      nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_FRAGMENT, NULL,
                                                     "dummy_frag");

      pipeline->pipeline_nir[MESA_SHADER_FRAGMENT] = b.shader;
      struct pipe_shader_state shstate = {0};
      shstate.type = PIPE_SHADER_IR_NIR;
      shstate.ir.nir = pipeline->pipeline_nir[MESA_SHADER_FRAGMENT];
      pipeline->shader_cso[PIPE_SHADER_FRAGMENT] = device->queue.ctx->create_fs_state(device->queue.ctx, &shstate);
   }
   return VK_SUCCESS;
}

static VkResult
lvp_graphics_pipeline_create(
   VkDevice _device,
   VkPipelineCache _cache,
   const VkGraphicsPipelineCreateInfo *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkPipeline *pPipeline)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_pipeline_cache, cache, _cache);
   struct lvp_pipeline *pipeline;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);

   pipeline = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*pipeline), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pipeline == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &pipeline->base,
                       VK_OBJECT_TYPE_PIPELINE);
   result = lvp_graphics_pipeline_init(pipeline, device, cache, pCreateInfo,
                                       pAllocator);
   if (result != VK_SUCCESS) {
      vk_free2(&device->vk.alloc, pAllocator, pipeline);
      return result;
   }

   *pPipeline = lvp_pipeline_to_handle(pipeline);

   return VK_SUCCESS;
}
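
/* Public entry point. Per the spec, creation keeps going after a failure:
 * failed entries are written as VK_NULL_HANDLE and an error result from a
 * failed creation is returned. */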
VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateGraphicsPipelines(
   VkDevice                                     _device,
   VkPipelineCache                              pipelineCache,
   uint32_t                                     count,
   const VkGraphicsPipelineCreateInfo*          pCreateInfos,
   const VkAllocationCallbacks*                 pAllocator,
   VkPipeline*                                  pPipelines)
{
   VkResult result = VK_SUCCESS;
   unsigned i = 0;

   for (; i < count; i++) {
      VkResult r;
      r = lvp_graphics_pipeline_create(_device,
                                       pipelineCache,
                                       &pCreateInfos[i],
                                       pAllocator, &pPipelines[i]);
      if (r != VK_SUCCESS) {
         result = r;
         pPipelines[i] = VK_NULL_HANDLE;
      }
   }

   return result;
}
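
/* Compute pipelines follow the same pattern as graphics ones: deep-copy the
 * create-info, compile the single compute stage to NIR, then create the
 * compute CSO. */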
static VkResult
lvp_compute_pipeline_init(struct lvp_pipeline *pipeline,
                          struct lvp_device *device,
                          struct lvp_pipeline_cache *cache,
                          const VkComputePipelineCreateInfo *pCreateInfo,
                          const VkAllocationCallbacks *alloc)
{
   VK_FROM_HANDLE(vk_shader_module, module,
                  pCreateInfo->stage.module);
   if (alloc == NULL)
      alloc = &device->vk.alloc;
   pipeline->device = device;
   pipeline->layout = lvp_pipeline_layout_from_handle(pCreateInfo->layout);
   pipeline->force_min_sample = false;

   pipeline->mem_ctx = ralloc_context(NULL);
   deep_copy_compute_create_info(pipeline->mem_ctx,
                                 &pipeline->compute_create_info, pCreateInfo);
   pipeline->is_compute_pipeline = true;

   lvp_shader_compile_to_ir(pipeline, module,
                            pCreateInfo->stage.pName,
                            MESA_SHADER_COMPUTE,
                            pCreateInfo->stage.pSpecializationInfo);
   if (!pipeline->pipeline_nir[MESA_SHADER_COMPUTE])
      return VK_ERROR_FEATURE_NOT_PRESENT;
   lvp_pipeline_compile(pipeline, MESA_SHADER_COMPUTE);
   return VK_SUCCESS;
}

static VkResult
lvp_compute_pipeline_create(
   VkDevice _device,
   VkPipelineCache _cache,
   const VkComputePipelineCreateInfo *pCreateInfo,
   const VkAllocationCallbacks *pAllocator,
   VkPipeline *pPipeline)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);
   LVP_FROM_HANDLE(lvp_pipeline_cache, cache, _cache);
   struct lvp_pipeline *pipeline;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO);

   pipeline = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*pipeline), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pipeline == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &pipeline->base,
                       VK_OBJECT_TYPE_PIPELINE);
   result = lvp_compute_pipeline_init(pipeline, device, cache, pCreateInfo,
                                      pAllocator);
   if (result != VK_SUCCESS) {
      vk_free2(&device->vk.alloc, pAllocator, pipeline);
      return result;
   }

   *pPipeline = lvp_pipeline_to_handle(pipeline);

   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateComputePipelines(
   VkDevice                                     _device,
   VkPipelineCache                              pipelineCache,
   uint32_t                                     count,
   const VkComputePipelineCreateInfo*           pCreateInfos,
   const VkAllocationCallbacks*                 pAllocator,
   VkPipeline*                                  pPipelines)
{
   VkResult result = VK_SUCCESS;
   unsigned i = 0;

   for (; i < count; i++) {
      VkResult r;
      r = lvp_compute_pipeline_create(_device,
                                      pipelineCache,
                                      &pCreateInfos[i],
                                      pAllocator, &pPipelines[i]);
      if (r != VK_SUCCESS) {
         result = r;
         pPipelines[i] = VK_NULL_HANDLE;
      }
   }

   return result;
}