lavapipe: Use the vk_pipeline_layout base struct
Reviewed-by: Mike Blumenkrantz <michael.blumenkrantz@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17286>

committed by Marge Bot
parent 003f401342
commit a579d33352
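
This switches lavapipe's pipeline-layout handling onto the common runtime object: struct lvp_pipeline_layout now embeds struct vk_pipeline_layout as its base, allocation goes through vk_pipeline_layout_zalloc() (which also takes references on the set layouts), and set layouts are reached via layout->vk.set_count and layout->vk.set_layouts[] plus a container_of() downcast. The hand-rolled ref_cnt, lvp_pipeline_layout_ref()/lvp_pipeline_layout_unref(), the set[MAX_SETS] array, num_sets, and the cached independent_sets flag are all deleted; independent-sets behavior is read from vk.create_flags, and lvp_pipeline_layout_destroy()/lvp_DestroyPipelineLayout() are dropped in favor of the runtime's implementation.
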
@@ -228,28 +228,16 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreatePipelineLayout(
    LVP_FROM_HANDLE(lvp_device, device, _device);
    struct lvp_pipeline_layout *layout;

    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);

-   layout = vk_zalloc(&device->vk.alloc, sizeof(*layout), 8,
-                      VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
-   if (layout == NULL)
-      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
-
-   vk_object_base_init(&device->vk, &layout->base,
-                       VK_OBJECT_TYPE_PIPELINE_LAYOUT);
-   layout->ref_cnt = 1;
-   layout->num_sets = pCreateInfo->setLayoutCount;
-   if (pCreateInfo->flags & VK_PIPELINE_LAYOUT_CREATE_INDEPENDENT_SETS_BIT_EXT)
-      layout->independent_sets = true;
-
-   for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
-      LVP_FROM_HANDLE(lvp_descriptor_set_layout, set_layout,
-                      pCreateInfo->pSetLayouts[set]);
-      if (layout->independent_sets && (!layout->num_sets || !set_layout)) {
-         layout->set[set].layout = NULL;
+   layout = vk_pipeline_layout_zalloc(&device->vk, sizeof(*layout),
+                                      pCreateInfo);
+   for (uint32_t set = 0; set < layout->vk.set_count; set++) {
+      if (layout->vk.set_layouts[set] == NULL)
          continue;
-      }
-      layout->set[set].layout = set_layout;
+
+      const struct lvp_descriptor_set_layout *set_layout =
+         vk_to_lvp_descriptor_set_layout(layout->vk.set_layouts[set]);
       for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
          layout->stage[i].uniform_block_size += set_layout->stage[i].uniform_block_size;
          for (unsigned j = 0; j < set_layout->stage[i].uniform_block_count; j++) {
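
The rewritten loop leans on the vk_to_lvp_descriptor_set_layout() helper added to lvp_private.h later in this diff. As a minimal standalone sketch of that container_of() downcast pattern (toy struct fields, not the driver's real definitions):

    #include <stddef.h>
    #include <stdint.h>

    /* container_of(): recover the enclosing struct from a pointer to one of
     * its members (simplified form of Mesa's macro). */
    #define container_of(ptr, type, member) \
       ((type *)((const char *)(ptr) - offsetof(type, member)))

    struct vk_descriptor_set_layout { uint32_t ref_cnt; };   /* toy base */

    struct lvp_descriptor_set_layout {
       struct vk_descriptor_set_layout vk;   /* embedded base struct */
       uint32_t binding_count;               /* toy driver-side field */
    };

    static inline const struct lvp_descriptor_set_layout *
    vk_to_lvp_descriptor_set_layout(const struct vk_descriptor_set_layout *layout)
    {
       /* 'layout' points at the vk member, so step back by its offset. */
       return container_of(layout, const struct lvp_descriptor_set_layout, vk);
    }
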
@@ -258,7 +246,6 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreatePipelineLayout(
          }
          layout->stage[i].uniform_block_count += set_layout->stage[i].uniform_block_count;
       }
-      vk_descriptor_set_layout_ref(&set_layout->vk);
    }

 #ifndef NDEBUG
@@ -277,13 +264,19 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreatePipelineLayout(
       uint16_t sampler_count = 0;
       uint16_t sampler_view_count = 0;
       uint16_t image_count = 0;
-      for (unsigned j = 0; j < layout->num_sets; j++) {
-         if (layout->set[j].layout && layout->set[j].layout->shader_stages & array[i]) {
-            const_buffer_count += layout->set[j].layout->stage[i].const_buffer_count;
-            shader_buffer_count += layout->set[j].layout->stage[i].shader_buffer_count;
-            sampler_count += layout->set[j].layout->stage[i].sampler_count;
-            sampler_view_count += layout->set[j].layout->stage[i].sampler_view_count;
-            image_count += layout->set[j].layout->stage[i].image_count;
+      for (unsigned j = 0; j < layout->vk.set_count; j++) {
+         if (layout->vk.set_layouts[j] == NULL)
+            continue;
+
+         const struct lvp_descriptor_set_layout *set_layout =
+            vk_to_lvp_descriptor_set_layout(layout->vk.set_layouts[j]);
+
+         if (set_layout->shader_stages & array[i]) {
+            const_buffer_count += set_layout->stage[i].const_buffer_count;
+            shader_buffer_count += set_layout->stage[i].shader_buffer_count;
+            sampler_count += set_layout->stage[i].sampler_count;
+            sampler_view_count += set_layout->stage[i].sampler_view_count;
+            image_count += set_layout->stage[i].image_count;
          }
       }
       assert(const_buffer_count <= device->physical_device->device_limits.maxPerStageDescriptorUniformBuffers);
@@ -307,32 +300,6 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreatePipelineLayout(
    return VK_SUCCESS;
 }

-void lvp_pipeline_layout_destroy(struct lvp_device *device,
-                                 struct lvp_pipeline_layout *pipeline_layout)
-{
-   assert(pipeline_layout->ref_cnt == 0);
-
-   for (uint32_t i = 0; i < pipeline_layout->num_sets; i++)
-      vk_descriptor_set_layout_unref(&device->vk, &pipeline_layout->set[i].layout->vk);
-
-   vk_object_base_finish(&pipeline_layout->base);
-   vk_free(&device->vk.alloc, pipeline_layout);
-}
-
-VKAPI_ATTR void VKAPI_CALL lvp_DestroyPipelineLayout(
-    VkDevice                                    _device,
-    VkPipelineLayout                            _pipelineLayout,
-    const VkAllocationCallbacks*                pAllocator)
-{
-   LVP_FROM_HANDLE(lvp_device, device, _device);
-   LVP_FROM_HANDLE(lvp_pipeline_layout, pipeline_layout, _pipelineLayout);
-
-   if (!_pipelineLayout)
-      return;
-
-   lvp_pipeline_layout_unref(device, pipeline_layout);
-}
-
 VkResult
 lvp_descriptor_set_create(struct lvp_device *device,
                           struct lvp_descriptor_set_layout *layout,
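
For reference, the deleted ref/unref helpers implemented the usual create-at-refcount-1 lifetime that vk_pipeline_layout now provides. A self-contained toy sketch of that model (plain C11 atomics, not the Mesa implementation):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct layout {
       atomic_uint ref_cnt;               /* set to 1 by create */
    };

    static struct layout *layout_create(void)
    {
       struct layout *l = calloc(1, sizeof(*l));
       if (l)
          atomic_init(&l->ref_cnt, 1);
       return l;
    }

    static void layout_ref(struct layout *l)
    {
       assert(atomic_load(&l->ref_cnt) >= 1);
       atomic_fetch_add(&l->ref_cnt, 1);
    }

    static void layout_unref(struct layout *l)
    {
       assert(atomic_load(&l->ref_cnt) >= 1);
       if (atomic_fetch_sub(&l->ref_cnt, 1) == 1)
          free(l);                        /* dropped the last reference */
    }
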
@@ -1498,16 +1498,15 @@ ref_pipeline_layout(struct vk_device *vk_device, VkPipelineLayout _layout)
 {
    LVP_FROM_HANDLE(lvp_pipeline_layout, layout, _layout);

-   lvp_pipeline_layout_ref(layout);
+   vk_pipeline_layout_ref(&layout->vk);
 }

 static void
-unref_pipeline_layout(struct vk_device *vk_device, VkPipelineLayout _layout)
+unref_pipeline_layout(struct vk_device *device, VkPipelineLayout _layout)
 {
-   struct lvp_device *device = container_of(vk_device, struct lvp_device, vk);
    LVP_FROM_HANDLE(lvp_pipeline_layout, layout, _layout);

-   lvp_pipeline_layout_unref(device, layout);
+   vk_pipeline_layout_unref(device, &layout->vk);
 }

 VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateDevice(
@@ -1387,8 +1387,12 @@ static void handle_set_stage(struct rendering_state *state,
 }

 static void increment_dyn_info(struct dyn_info *dyn_info,
-                               struct lvp_descriptor_set_layout *layout, bool inc_dyn)
+                               const struct vk_descriptor_set_layout *vk_layout,
+                               bool inc_dyn)
 {
+   const struct lvp_descriptor_set_layout *layout =
+      vk_to_lvp_descriptor_set_layout(vk_layout);
+
    for (gl_shader_stage stage = MESA_SHADER_VERTEX; stage < MESA_SHADER_STAGES; stage++) {
       dyn_info->stage[stage].const_buffer_count += layout->stage[stage].const_buffer_count;
       dyn_info->stage[stage].shader_buffer_count += layout->stage[stage].shader_buffer_count;
@@ -1410,14 +1414,14 @@ static void handle_compute_descriptor_sets(struct vk_cmd_queue_entry *cmd,
    int i;

    for (i = 0; i < bds->first_set; i++) {
-      increment_dyn_info(dyn_info, layout->set[i].layout, false);
+      increment_dyn_info(dyn_info, layout->vk.set_layouts[i], false);
    }
    for (i = 0; i < bds->descriptor_set_count; i++) {
       const struct lvp_descriptor_set *set = lvp_descriptor_set_from_handle(bds->descriptor_sets[i]);

       if (set->layout->shader_stages & VK_SHADER_STAGE_COMPUTE_BIT)
          handle_set_stage(state, dyn_info, set, MESA_SHADER_COMPUTE, PIPE_SHADER_COMPUTE);
-      increment_dyn_info(dyn_info, layout->set[bds->first_set + i].layout, true);
+      increment_dyn_info(dyn_info, layout->vk.set_layouts[bds->first_set + i], true);
    }
 }

@@ -1440,12 +1444,13 @@ static void handle_descriptor_sets(struct vk_cmd_queue_entry *cmd,
    }

    for (i = 0; i < bds->first_set; i++) {
-      increment_dyn_info(&dyn_info, layout->set[i].layout, false);
+      increment_dyn_info(&dyn_info, layout->vk.set_layouts[i], false);
    }

    for (i = 0; i < bds->descriptor_set_count; i++) {
-      if (!layout->set[bds->first_set + i].layout)
+      if (!layout->vk.set_layouts[bds->first_set + i])
          continue;
+
       const struct lvp_descriptor_set *set = lvp_descriptor_set_from_handle(bds->descriptor_sets[i]);
       if (!set)
          continue;
@@ -1471,7 +1476,7 @@ static void handle_descriptor_sets(struct vk_cmd_queue_entry *cmd,
       if (set->layout->shader_stages & VK_SHADER_STAGE_FRAGMENT_BIT)
          handle_set_stage(state, &dyn_info, set, MESA_SHADER_FRAGMENT, PIPE_SHADER_FRAGMENT);

-      increment_dyn_info(&dyn_info, layout->set[bds->first_set + i].layout, true);
+      increment_dyn_info(&dyn_info, layout->vk.set_layouts[bds->first_set + i], true);
    }
 }

@@ -3088,17 +3093,19 @@ static void handle_compute_push_descriptor_set(struct lvp_cmd_push_descriptor_se
                                                 struct dyn_info *dyn_info,
                                                 struct rendering_state *state)
 {
-   struct lvp_descriptor_set_layout *layout = pds->layout->set[pds->set].layout;
+   const struct lvp_descriptor_set_layout *layout =
+      vk_to_lvp_descriptor_set_layout(pds->layout->vk.set_layouts[pds->set]);

    if (!(layout->shader_stages & VK_SHADER_STAGE_COMPUTE_BIT))
       return;
    for (unsigned i = 0; i < pds->set; i++) {
-      increment_dyn_info(dyn_info, pds->layout->set[i].layout, false);
+      increment_dyn_info(dyn_info, pds->layout->vk.set_layouts[i], false);
    }
    unsigned info_idx = 0;
    for (unsigned i = 0; i < pds->descriptor_write_count; i++) {
       struct lvp_write_descriptor *desc = &pds->descriptors[i];
-      struct lvp_descriptor_set_binding_layout *binding = &layout->binding[desc->dst_binding];
+      const struct lvp_descriptor_set_binding_layout *binding =
+         &layout->binding[desc->dst_binding];

       if (!binding->valid)
          continue;
@@ -3195,13 +3202,11 @@ static struct lvp_cmd_push_descriptor_set *create_push_descriptor_set(struct vk_
 static void handle_push_descriptor_set_generic(struct vk_cmd_push_descriptor_set_khr *_pds,
                                                struct rendering_state *state)
 {
-   struct lvp_cmd_push_descriptor_set *pds;
-   struct lvp_descriptor_set_layout *layout;
+   struct lvp_cmd_push_descriptor_set *pds = create_push_descriptor_set(_pds);
+   const struct lvp_descriptor_set_layout *layout =
+      vk_to_lvp_descriptor_set_layout(pds->layout->vk.set_layouts[pds->set]);
    struct dyn_info dyn_info;

-   pds = create_push_descriptor_set(_pds);
-   layout = pds->layout->set[pds->set].layout;
-
    memset(&dyn_info.stage, 0, sizeof(dyn_info.stage));
    dyn_info.dyn_index = 0;
    if (pds->bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
@@ -3209,13 +3214,14 @@ static void handle_push_descriptor_set_generic(struct vk_cmd_push_descriptor_set
    }

    for (unsigned i = 0; i < pds->set; i++) {
-      increment_dyn_info(&dyn_info, pds->layout->set[i].layout, false);
+      increment_dyn_info(&dyn_info, pds->layout->vk.set_layouts[i], false);
    }

    unsigned info_idx = 0;
    for (unsigned i = 0; i < pds->descriptor_write_count; i++) {
       struct lvp_write_descriptor *desc = &pds->descriptors[i];
-      struct lvp_descriptor_set_binding_layout *binding = &layout->binding[desc->dst_binding];
+      const struct lvp_descriptor_set_binding_layout *binding =
+         &layout->binding[desc->dst_binding];

       if (!binding->valid)
          continue;
@@ -76,13 +76,28 @@ lower_uniform_block_access(const nir_instr *instr, const void *data_cb)
    return deref->modes == nir_var_mem_ubo;
 }

+static const struct lvp_descriptor_set_layout *
+get_set_layout(const struct lvp_pipeline_layout *layout, uint32_t set)
+{
+   return container_of(layout->vk.set_layouts[set],
+                       const struct lvp_descriptor_set_layout, vk);
+}
+
+static const struct lvp_descriptor_set_binding_layout *
+get_binding_layout(const struct lvp_pipeline_layout *layout,
+                   uint32_t set, uint32_t binding)
+{
+   return &get_set_layout(layout, set)->binding[binding];
+}
+
 static nir_ssa_def *
 lower_block_instr(nir_builder *b, nir_instr *instr, void *data_cb)
 {
    nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
    nir_binding nb = nir_chase_binding(intrin->src[0]);
-   struct lvp_pipeline_layout *layout = data_cb;
-   struct lvp_descriptor_set_binding_layout *binding = &layout->set[nb.desc_set].layout->binding[nb.binding];
+   const struct lvp_pipeline_layout *layout = data_cb;
+   const struct lvp_descriptor_set_binding_layout *binding =
+      get_binding_layout(layout, nb.desc_set, nb.binding);
    if (binding->type != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK)
       return NULL;
    if (!binding->array_size)
@@ -91,7 +106,7 @@ lower_block_instr(nir_builder *b, nir_instr *instr, void *data_cb)
    assert(intrin->src[0].ssa->num_components == 2);
    unsigned value = 0;
    for (unsigned s = 0; s < nb.desc_set; s++)
-      value += layout->set[s].layout->stage[b->shader->info.stage].uniform_block_size;
+      value += get_set_layout(layout, s)->stage[b->shader->info.stage].uniform_block_size;
    if (layout->push_constant_stages & BITFIELD_BIT(b->shader->info.stage))
       value += layout->push_constant_size;
    value += binding->stage[b->shader->info.stage].uniform_block_offset;
@@ -111,8 +126,9 @@ static nir_ssa_def *lower_vri_intrin_vri(struct nir_builder *b,
    nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
    unsigned desc_set_idx = nir_intrinsic_desc_set(intrin);
    unsigned binding_idx = nir_intrinsic_binding(intrin);
-   struct lvp_pipeline_layout *layout = data_cb;
-   struct lvp_descriptor_set_binding_layout *binding = &layout->set[desc_set_idx].layout->binding[binding_idx];
+   const struct lvp_pipeline_layout *layout = data_cb;
+   const struct lvp_descriptor_set_binding_layout *binding =
+      get_binding_layout(data_cb, desc_set_idx, binding_idx);
    int value = 0;
    bool is_ubo = (binding->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
                   binding->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
@@ -122,12 +138,12 @@ static nir_ssa_def *lower_vri_intrin_vri(struct nir_builder *b,
       return nir_imm_ivec2(b, 0, 0);

    for (unsigned s = 0; s < desc_set_idx; s++) {
-      if (!layout->set[s].layout)
+      if (!layout->vk.set_layouts[s])
          continue;
       if (is_ubo)
-         value += layout->set[s].layout->stage[b->shader->info.stage].const_buffer_count;
+         value += get_set_layout(layout, s)->stage[b->shader->info.stage].const_buffer_count;
       else
-         value += layout->set[s].layout->stage[b->shader->info.stage].shader_buffer_count;
+         value += get_set_layout(layout, s)->stage[b->shader->info.stage].shader_buffer_count;
    }
    if (is_ubo)
       value += binding->stage[b->shader->info.stage].const_buffer_index + 1;
@@ -180,15 +196,16 @@ lower_vri_instr_tex_deref(nir_tex_instr *tex,
    unsigned desc_set_idx = var->data.descriptor_set;
    unsigned binding_idx = var->data.binding;
    int value = 0;
-   struct lvp_descriptor_set_binding_layout *binding = &layout->set[desc_set_idx].layout->binding[binding_idx];
+   const struct lvp_descriptor_set_binding_layout *binding =
+      get_binding_layout(layout, desc_set_idx, binding_idx);
    nir_tex_instr_remove_src(tex, deref_src_idx);
    for (unsigned s = 0; s < desc_set_idx; s++) {
-      if (!layout->set[s].layout)
+      if (!layout->vk.set_layouts[s])
          continue;
       if (deref_src_type == nir_tex_src_sampler_deref)
-         value += layout->set[s].layout->stage[stage].sampler_count;
+         value += get_set_layout(layout, s)->stage[stage].sampler_count;
       else
-         value += layout->set[s].layout->stage[stage].sampler_view_count;
+         value += get_set_layout(layout, s)->stage[stage].sampler_view_count;
    }
    if (deref_src_type == nir_tex_src_sampler_deref)
       value += binding->stage[stage].sampler_index;
@@ -243,21 +260,21 @@ static void lower_vri_instr_tex(struct nir_builder *b,
 static void lower_vri_intrin_image(struct nir_builder *b,
                                    nir_intrinsic_instr *intrin, void *data_cb)
 {
-   struct lvp_pipeline_layout *layout = data_cb;
+   const struct lvp_pipeline_layout *layout = data_cb;
    gl_shader_stage stage = b->shader->info.stage;

    nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
    nir_variable *var = nir_deref_instr_get_variable(deref);
    unsigned desc_set_idx = var->data.descriptor_set;
    unsigned binding_idx = var->data.binding;
-   struct lvp_descriptor_set_binding_layout *binding =
-      &layout->set[desc_set_idx].layout->binding[binding_idx];
+   const struct lvp_descriptor_set_binding_layout *binding =
+      get_binding_layout(layout, desc_set_idx, binding_idx);

    int value = 0;
    for (unsigned s = 0; s < desc_set_idx; s++) {
-      if (!layout->set[s].layout)
+      if (!layout->vk.set_layouts[s])
          continue;
-      value += layout->set[s].layout->stage[stage].image_count;
+      value += get_set_layout(layout, s)->stage[stage].image_count;
    }
    value += binding->stage[stage].image_index;

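
Each of the lowering hunks above computes the same kind of flat per-stage index: skip sets whose layout is NULL, accumulate the resource counts of all lower-numbered sets, then add the index within the binding's own set. A toy model of that computation (hypothetical helper and parameter names):

    /* Toy model of the index flattening done above: per-set image counts,
     * with absent (NULL) set layouts marked as not present. */
    static int
    flatten_image_index(const unsigned *image_count, const int *set_present,
                        unsigned desc_set_idx, unsigned image_index_in_set)
    {
       int value = 0;
       for (unsigned s = 0; s < desc_set_idx; s++) {
          if (!set_present[s])   /* mirrors "if (!layout->vk.set_layouts[s]) continue;" */
             continue;
          value += image_count[s];
       }
       return value + image_index_in_set;
    }
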
@@ -344,22 +361,23 @@ void lvp_lower_pipeline_layout(const struct lvp_device *device,
       glsl_get_base_type(glsl_without_array(type));
    unsigned desc_set_idx = var->data.descriptor_set;
    unsigned binding_idx = var->data.binding;
-   struct lvp_descriptor_set_binding_layout *binding = &layout->set[desc_set_idx].layout->binding[binding_idx];
+   const struct lvp_descriptor_set_binding_layout *binding =
+      get_binding_layout(layout, desc_set_idx, binding_idx);
    int value = 0;
    var->data.descriptor_set = 0;
    if (base_type == GLSL_TYPE_SAMPLER || base_type == GLSL_TYPE_TEXTURE) {
       if (binding->type == VK_DESCRIPTOR_TYPE_SAMPLER) {
          for (unsigned s = 0; s < desc_set_idx; s++) {
-            if (!layout->set[s].layout)
+            if (!layout->vk.set_layouts[s])
                continue;
-            value += layout->set[s].layout->stage[shader->info.stage].sampler_count;
+            value += get_set_layout(layout, s)->stage[shader->info.stage].sampler_count;
          }
          value += binding->stage[shader->info.stage].sampler_index;
       } else {
          for (unsigned s = 0; s < desc_set_idx; s++) {
-            if (!layout->set[s].layout)
+            if (!layout->vk.set_layouts[s])
                continue;
-            value += layout->set[s].layout->stage[shader->info.stage].sampler_view_count;
+            value += get_set_layout(layout, s)->stage[shader->info.stage].sampler_view_count;
          }
          value += binding->stage[shader->info.stage].sampler_view_index;
       }
@@ -368,9 +386,9 @@ void lvp_lower_pipeline_layout(const struct lvp_device *device,
    if (base_type == GLSL_TYPE_IMAGE) {
       var->data.descriptor_set = 0;
       for (unsigned s = 0; s < desc_set_idx; s++) {
-         if (!layout->set[s].layout)
+         if (!layout->vk.set_layouts[s])
             continue;
-         value += layout->set[s].layout->stage[shader->info.stage].image_count;
+         value += get_set_layout(layout, s)->stage[shader->info.stage].image_count;
       }
       value += binding->stage[shader->info.stage].image_index;
       var->data.binding = value;
@@ -63,7 +63,7 @@ lvp_pipeline_destroy(struct lvp_device *device, struct lvp_pipeline *pipeline)
       ralloc_free(pipeline->pipeline_nir[i]);

    if (pipeline->layout)
-      lvp_pipeline_layout_unref(device, pipeline->layout);
+      vk_pipeline_layout_unref(&device->vk, &pipeline->layout->vk);

    ralloc_free(pipeline->mem_ctx);
    vk_object_base_finish(&pipeline->base);
@@ -1302,20 +1302,29 @@ merge_layouts(struct lvp_pipeline *dst, struct lvp_pipeline_layout *src)
    }
 #ifndef NDEBUG
    /* verify that layouts match */
-   const struct lvp_pipeline_layout *smaller = dst->layout->num_sets < src->num_sets ? dst->layout : src;
+   const struct lvp_pipeline_layout *smaller = dst->layout->vk.set_count < src->vk.set_count ? dst->layout : src;
    const struct lvp_pipeline_layout *bigger = smaller == dst->layout ? src : dst->layout;
-   for (unsigned i = 0; i < smaller->num_sets; i++) {
-      assert(!smaller->set[i].layout || !bigger->set[i].layout ||
-             !smaller->set[i].layout->binding_count || !bigger->set[i].layout->binding_count ||
-             smaller->set[i].layout == bigger->set[i].layout ||
-             layouts_equal(smaller->set[i].layout, bigger->set[i].layout));
+   for (unsigned i = 0; i < smaller->vk.set_count; i++) {
+      if (!smaller->vk.set_layouts[i] || !bigger->vk.set_layouts[i] ||
+          smaller->vk.set_layouts[i] == bigger->vk.set_layouts[i])
+         continue;
+
+      const struct lvp_descriptor_set_layout *smaller_set_layout =
+         vk_to_lvp_descriptor_set_layout(smaller->vk.set_layouts[i]);
+      const struct lvp_descriptor_set_layout *bigger_set_layout =
+         vk_to_lvp_descriptor_set_layout(bigger->vk.set_layouts[i]);
+
+      assert(!smaller_set_layout->binding_count ||
+             !bigger_set_layout->binding_count ||
+             layouts_equal(smaller_set_layout, bigger_set_layout));
    }
 #endif
-   for (unsigned i = 0; i < src->num_sets; i++) {
-      if (!dst->layout->set[i].layout)
-         dst->layout->set[i].layout = src->set[i].layout;
+   for (unsigned i = 0; i < src->vk.set_count; i++) {
+      if (!dst->layout->vk.set_layouts[i])
+         dst->layout->vk.set_layouts[i] = src->vk.set_layouts[i];
    }
-   dst->layout->num_sets = MAX2(dst->layout->num_sets, src->num_sets);
+   dst->layout->vk.set_count = MAX2(dst->layout->vk.set_count,
+                                    src->vk.set_count);
    dst->layout->push_constant_size += src->push_constant_size;
    dst->layout->push_constant_stages |= src->push_constant_stages;
 }
@@ -1346,9 +1355,9 @@ lvp_graphics_pipeline_init(struct lvp_pipeline *pipeline,

    struct lvp_pipeline_layout *layout = lvp_pipeline_layout_from_handle(pCreateInfo->layout);
    if (layout)
-      lvp_pipeline_layout_ref(layout);
+      vk_pipeline_layout_ref(&layout->vk);

-   if (!layout || !layout->independent_sets)
+   if (!layout || !(layout->vk.create_flags & VK_PIPELINE_LAYOUT_CREATE_INDEPENDENT_SETS_BIT_EXT))
       /* this is a regular pipeline with no partials: directly reuse */
       pipeline->layout = layout;
    else if (pipeline->stages & layout_stages) {
@@ -1381,7 +1390,7 @@ lvp_graphics_pipeline_init(struct lvp_pipeline *pipeline,
          if (p->stages & VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT)
             pipeline->force_min_sample = p->force_min_sample;
          if (p->stages & layout_stages) {
-            if (!layout || layout->independent_sets)
+            if (!layout || (layout->vk.create_flags & VK_PIPELINE_LAYOUT_CREATE_INDEPENDENT_SETS_BIT_EXT))
                merge_layouts(pipeline, p->layout);
          }
          pipeline->stages |= p->stages;
@@ -1607,7 +1616,7 @@ lvp_compute_pipeline_init(struct lvp_pipeline *pipeline,
                          pCreateInfo->stage.module);
    pipeline->device = device;
    pipeline->layout = lvp_pipeline_layout_from_handle(pCreateInfo->layout);
-   lvp_pipeline_layout_ref(pipeline->layout);
+   vk_pipeline_layout_ref(&pipeline->layout->vk);
    pipeline->force_min_sample = false;

    pipeline->mem_ctx = ralloc_context(NULL);
@@ -68,6 +68,7 @@ typedef uint32_t xcb_window_t;
 #include "vk_command_buffer.h"
 #include "vk_command_pool.h"
 #include "vk_descriptor_set_layout.h"
+#include "vk_pipeline_layout.h"
 #include "vk_queue.h"
 #include "vk_sync.h"
 #include "vk_sync_timeline.h"
@@ -320,6 +321,12 @@ struct lvp_descriptor_set_layout {
    struct lvp_descriptor_set_binding_layout binding[0];
 };

+static inline const struct lvp_descriptor_set_layout *
+vk_to_lvp_descriptor_set_layout(const struct vk_descriptor_set_layout *layout)
+{
+   return container_of(layout, const struct lvp_descriptor_set_layout, vk);
+}
+
 union lvp_descriptor_info {
    struct {
       struct lvp_sampler *sampler;
@@ -398,16 +405,8 @@ lvp_descriptor_set_destroy(struct lvp_device *device,
                            struct lvp_descriptor_set *set);

 struct lvp_pipeline_layout {
-   struct vk_object_base base;
+   struct vk_pipeline_layout vk;

-   /* Pipeline layouts can be destroyed at almost any time */
-   uint32_t ref_cnt;
-
-   struct {
-      struct lvp_descriptor_set_layout *layout;
-   } set[MAX_SETS];
-
-   uint32_t num_sets;
    uint32_t push_constant_size;
    VkShaderStageFlags push_constant_stages;
    struct {
@@ -415,28 +414,8 @@ struct lvp_pipeline_layout {
       uint16_t uniform_block_count;
       uint16_t uniform_block_sizes[MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BLOCKS * MAX_SETS];
    } stage[MESA_SHADER_STAGES];
-   bool independent_sets;
 };

-void lvp_pipeline_layout_destroy(struct lvp_device *device,
-                                 struct lvp_pipeline_layout *layout);
-
-static inline void
-lvp_pipeline_layout_ref(struct lvp_pipeline_layout *layout)
-{
-   assert(layout && layout->ref_cnt >= 1);
-   p_atomic_inc(&layout->ref_cnt);
-}
-
-static inline void
-lvp_pipeline_layout_unref(struct lvp_device *device,
-                          struct lvp_pipeline_layout *layout)
-{
-   assert(layout && layout->ref_cnt >= 1);
-   if (p_atomic_dec_zero(&layout->ref_cnt))
-      lvp_pipeline_layout_destroy(device, layout);
-}
-
 struct lvp_access_info {
    uint32_t images_read;
    uint32_t images_written;
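
The handle-cast change just below works because the vk_object_base now lives inside the embedded base struct, so the cast macro is pointed at vk.base instead of a directly embedded base. A rough sketch of the layering after this commit (toy definitions; the real fields live in vk_pipeline_layout.h):

    #include <stdint.h>

    struct vk_object_base { uint32_t type; };
    struct vk_descriptor_set_layout;

    struct vk_pipeline_layout {
       struct vk_object_base base;                        /* driver sees this as vk.base */
       uint32_t ref_cnt;                                  /* replaces lvp's ref_cnt */
       uint32_t create_flags;                             /* replaces independent_sets */
       uint32_t set_count;                                /* replaces num_sets */
       struct vk_descriptor_set_layout *set_layouts[8];   /* replaces set[MAX_SETS] */
    };

    struct lvp_pipeline_layout {
       struct vk_pipeline_layout vk;                      /* base struct, first member */
       /* ...lavapipe-specific per-stage counts follow, as in the hunk above... */
    };
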
@@ -571,7 +550,7 @@ VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_pipeline_cache, base, VkPipelineCache,
                                VK_OBJECT_TYPE_PIPELINE_CACHE)
 VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_pipeline, base, VkPipeline,
                                VK_OBJECT_TYPE_PIPELINE)
-VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_pipeline_layout, base, VkPipelineLayout,
+VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_pipeline_layout, vk.base, VkPipelineLayout,
                                VK_OBJECT_TYPE_PIPELINE_LAYOUT)
 VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_query_pool, base, VkQueryPool,
                                VK_OBJECT_TYPE_QUERY_POOL)