vulkan/runtime: Add a level field to vk_command_buffer
Looks like 3 implementations already have that field in their private command_buffer struct, and having it at the vk_command_buffer opens the door for generic (but suboptimal) secondary command buffer support.

Reviewed-by: Jason Ekstrand <jason.ekstrand@collabora.com>
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/14917>
This commit is contained in:

committed by
Marge Bot

parent
7b0e306854
commit
5e263cc324
@@ -448,7 +448,7 @@ radv_create_cmd_buffer(struct radv_device *device, struct radv_cmd_pool *pool,
|
||||
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
|
||||
|
||||
VkResult result =
|
||||
vk_command_buffer_init(&cmd_buffer->vk, &device->vk);
|
||||
vk_command_buffer_init(&cmd_buffer->vk, &device->vk, level);
|
||||
if (result != VK_SUCCESS) {
|
||||
vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
|
||||
return result;
|
||||
@@ -456,7 +456,6 @@ radv_create_cmd_buffer(struct radv_device *device, struct radv_cmd_pool *pool,
|
||||
|
||||
cmd_buffer->device = device;
|
||||
cmd_buffer->pool = pool;
|
||||
cmd_buffer->level = level;
|
||||
|
||||
list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
|
||||
cmd_buffer->queue_family_index = pool->queue_family_index;
|
||||
@@ -661,7 +660,7 @@ radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer)
|
||||
uint64_t va;
|
||||
|
||||
va = radv_buffer_get_va(device->trace_bo);
|
||||
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
|
||||
if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
|
||||
va += 4;
|
||||
|
||||
++cmd_buffer->state.trace_id;
|
||||
@@ -4355,10 +4354,9 @@ radv_AllocateCommandBuffers(VkDevice _device, const VkCommandBufferAllocateInfo
|
||||
list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
|
||||
|
||||
result = radv_reset_cmd_buffer(cmd_buffer);
|
||||
cmd_buffer->level = pAllocateInfo->level;
|
||||
vk_command_buffer_finish(&cmd_buffer->vk);
|
||||
VkResult init_result =
|
||||
vk_command_buffer_init(&cmd_buffer->vk, &device->vk);
|
||||
vk_command_buffer_init(&cmd_buffer->vk, &device->vk, pAllocateInfo->level);
|
||||
if (init_result != VK_SUCCESS)
|
||||
result = init_result;
|
||||
|
||||
@@ -4539,7 +4537,7 @@ radv_BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBegi
|
||||
cmd_buffer->state.mesh_shading = false;
|
||||
cmd_buffer->usage_flags = pBeginInfo->flags;
|
||||
|
||||
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
|
||||
if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
|
||||
(pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
|
||||
struct radv_subpass *subpass = NULL;
|
||||
|
||||
|
@@ -4382,7 +4382,7 @@ radv_queue_submit(struct vk_queue *vqueue, struct vk_queue_submit *submission)
|
||||
for (uint32_t j = 0; j < submission->command_buffer_count; j++) {
|
||||
struct radv_cmd_buffer *cmd_buffer =
|
||||
(struct radv_cmd_buffer *)submission->command_buffers[j];
|
||||
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
|
||||
assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
|
||||
|
||||
cs_array[j] = cmd_buffer->cs;
|
||||
if ((cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT))
|
||||
|
@@ -1504,7 +1504,6 @@ struct radv_cmd_buffer {
|
||||
struct list_head pool_link;
|
||||
|
||||
VkCommandBufferUsageFlags usage_flags;
|
||||
VkCommandBufferLevel level;
|
||||
enum radv_cmd_buffer_status status;
|
||||
struct radeon_cmdbuf *cs;
|
||||
struct radv_cmd_state state;
|
||||
|
@@ -82,8 +82,7 @@ v3dv_CreateCommandPool(VkDevice _device,
|
||||
static void
|
||||
cmd_buffer_init(struct v3dv_cmd_buffer *cmd_buffer,
|
||||
struct v3dv_device *device,
|
||||
struct v3dv_cmd_pool *pool,
|
||||
VkCommandBufferLevel level)
|
||||
struct v3dv_cmd_pool *pool)
|
||||
{
|
||||
/* Do not reset the base object! If we are calling this from a command
|
||||
* buffer reset that would reset the loader's dispatch table for the
|
||||
@@ -95,7 +94,6 @@ cmd_buffer_init(struct v3dv_cmd_buffer *cmd_buffer,
|
||||
|
||||
cmd_buffer->device = device;
|
||||
cmd_buffer->pool = pool;
|
||||
cmd_buffer->level = level;
|
||||
|
||||
list_inithead(&cmd_buffer->private_objs);
|
||||
list_inithead(&cmd_buffer->jobs);
|
||||
@@ -126,13 +124,13 @@ cmd_buffer_create(struct v3dv_device *device,
|
||||
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
|
||||
|
||||
VkResult result;
|
||||
result = vk_command_buffer_init(&cmd_buffer->vk, &device->vk);
|
||||
result = vk_command_buffer_init(&cmd_buffer->vk, &device->vk, level);
|
||||
if (result != VK_SUCCESS) {
|
||||
vk_free2(&device->vk.alloc, &pool->alloc, cmd_buffer);
|
||||
return result;
|
||||
}
|
||||
|
||||
cmd_buffer_init(cmd_buffer, device, pool, level);
|
||||
cmd_buffer_init(cmd_buffer, device, pool);
|
||||
|
||||
*pCommandBuffer = v3dv_cmd_buffer_to_handle(cmd_buffer);
|
||||
|
||||
@@ -351,7 +349,7 @@ cmd_buffer_can_merge_subpass(struct v3dv_cmd_buffer *cmd_buffer,
|
||||
const struct v3dv_physical_device *physical_device =
|
||||
&cmd_buffer->device->instance->physicalDevice;
|
||||
|
||||
if (cmd_buffer->level != VK_COMMAND_BUFFER_LEVEL_PRIMARY)
|
||||
if (cmd_buffer->vk.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY)
|
||||
return false;
|
||||
|
||||
if (!cmd_buffer->state.job)
|
||||
@@ -645,7 +643,7 @@ v3dv_cmd_buffer_finish_job(struct v3dv_cmd_buffer *cmd_buffer)
|
||||
* a transfer command. The only exception are secondary command buffers
|
||||
* inside a render pass.
|
||||
*/
|
||||
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY ||
|
||||
assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY ||
|
||||
v3dv_cl_offset(&job->bcl) > 0);
|
||||
|
||||
/* When we merge multiple subpasses into the same job we must only emit one
|
||||
@@ -684,7 +682,7 @@ v3dv_cmd_buffer_finish_job(struct v3dv_cmd_buffer *cmd_buffer)
|
||||
* that case we want to defer this until we finish recording the primary
|
||||
* job into which we execute the secondary.
|
||||
*/
|
||||
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY ||
|
||||
if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY ||
|
||||
!cmd_buffer->state.pass) {
|
||||
cmd_buffer_add_cpu_jobs_for_pending_state(cmd_buffer);
|
||||
}
|
||||
@@ -779,7 +777,7 @@ v3dv_job_init(struct v3dv_job *job,
|
||||
cmd_buffer->state.dirty_descriptor_stages = ~0;
|
||||
|
||||
/* Honor inheritance of occlussion queries in secondaries if requested */
|
||||
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
|
||||
if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
|
||||
cmd_buffer->state.inheritance.occlusion_query_enable) {
|
||||
cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_OCCLUSION_QUERY;
|
||||
}
|
||||
@@ -839,7 +837,6 @@ cmd_buffer_reset(struct v3dv_cmd_buffer *cmd_buffer,
|
||||
if (cmd_buffer->status != V3DV_CMD_BUFFER_STATUS_INITIALIZED) {
|
||||
struct v3dv_device *device = cmd_buffer->device;
|
||||
struct v3dv_cmd_pool *pool = cmd_buffer->pool;
|
||||
VkCommandBufferLevel level = cmd_buffer->level;
|
||||
|
||||
/* cmd_buffer_init below will re-add the command buffer to the pool
|
||||
* so remove it here so we don't end up adding it again.
|
||||
@@ -852,7 +849,7 @@ cmd_buffer_reset(struct v3dv_cmd_buffer *cmd_buffer,
|
||||
if (cmd_buffer->status != V3DV_CMD_BUFFER_STATUS_NEW)
|
||||
cmd_buffer_free_resources(cmd_buffer);
|
||||
|
||||
cmd_buffer_init(cmd_buffer, device, pool, level);
|
||||
cmd_buffer_init(cmd_buffer, device, pool);
|
||||
}
|
||||
|
||||
assert(cmd_buffer->status == V3DV_CMD_BUFFER_STATUS_INITIALIZED);
|
||||
@@ -1030,7 +1027,7 @@ cmd_buffer_begin_render_pass_secondary(
|
||||
struct v3dv_cmd_buffer *cmd_buffer,
|
||||
const VkCommandBufferInheritanceInfo *inheritance_info)
|
||||
{
|
||||
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
|
||||
assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
|
||||
assert(cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT);
|
||||
assert(inheritance_info);
|
||||
|
||||
@@ -1098,7 +1095,7 @@ v3dv_BeginCommandBuffer(VkCommandBuffer commandBuffer,
|
||||
|
||||
cmd_buffer->usage_flags = pBeginInfo->flags;
|
||||
|
||||
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
|
||||
if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
|
||||
if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
|
||||
result =
|
||||
cmd_buffer_begin_render_pass_secondary(cmd_buffer,
|
||||
@@ -1309,7 +1306,7 @@ cmd_buffer_state_set_attachments(struct v3dv_cmd_buffer *cmd_buffer,
|
||||
} else if (framebuffer) {
|
||||
state->attachments[i].image_view = framebuffer->attachments[i];
|
||||
} else {
|
||||
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
|
||||
assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
|
||||
state->attachments[i].image_view = NULL;
|
||||
}
|
||||
}
|
||||
@@ -1413,7 +1410,7 @@ v3dv_CmdNextSubpass2(VkCommandBuffer commandBuffer,
|
||||
static void
|
||||
cmd_buffer_emit_subpass_clears(struct v3dv_cmd_buffer *cmd_buffer)
|
||||
{
|
||||
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
|
||||
assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
|
||||
|
||||
assert(cmd_buffer->state.pass);
|
||||
assert(cmd_buffer->state.subpass_idx < cmd_buffer->state.pass->subpass_count);
|
||||
@@ -1615,7 +1612,7 @@ v3dv_cmd_buffer_subpass_start(struct v3dv_cmd_buffer *cmd_buffer,
|
||||
* attachment load clears, but we don't have any instances of that right
|
||||
* now.
|
||||
*/
|
||||
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY)
|
||||
if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY)
|
||||
cmd_buffer_emit_subpass_clears(cmd_buffer);
|
||||
|
||||
return job;
|
||||
@@ -1629,11 +1626,11 @@ v3dv_cmd_buffer_subpass_resume(struct v3dv_cmd_buffer *cmd_buffer,
|
||||
assert(subpass_idx < cmd_buffer->state.pass->subpass_count);
|
||||
|
||||
struct v3dv_job *job;
|
||||
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
|
||||
if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
|
||||
job = cmd_buffer_subpass_create_job(cmd_buffer, subpass_idx,
|
||||
V3DV_JOB_TYPE_GPU_CL);
|
||||
} else {
|
||||
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
|
||||
assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
|
||||
job = cmd_buffer_subpass_create_job(cmd_buffer, subpass_idx,
|
||||
V3DV_JOB_TYPE_GPU_CL_SECONDARY);
|
||||
}
|
||||
@@ -1695,7 +1692,7 @@ v3dv_EndCommandBuffer(VkCommandBuffer commandBuffer)
|
||||
* inside a render pass.
|
||||
*/
|
||||
if (cmd_buffer->state.job) {
|
||||
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
|
||||
assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
|
||||
cmd_buffer->state.pass);
|
||||
v3dv_cmd_buffer_finish_job(cmd_buffer);
|
||||
}
|
||||
@@ -2484,7 +2481,7 @@ cmd_buffer_restart_job_for_msaa_if_needed(struct v3dv_cmd_buffer *cmd_buffer)
|
||||
* draw calls in them, and then using that info to decide if we need to
|
||||
* restart the primary job into which they are being recorded.
|
||||
*/
|
||||
if (cmd_buffer->level != VK_COMMAND_BUFFER_LEVEL_PRIMARY)
|
||||
if (cmd_buffer->vk.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY)
|
||||
return;
|
||||
|
||||
/* Drop the current job and restart it with MSAA enabled */
|
||||
|
@@ -1424,7 +1424,6 @@ struct v3dv_cmd_buffer {
|
||||
struct list_head list_link;
|
||||
|
||||
VkCommandBufferUsageFlags usage_flags;
|
||||
VkCommandBufferLevel level;
|
||||
|
||||
enum v3dv_cmd_buffer_status status;
|
||||
|
||||
|
@@ -526,7 +526,7 @@ v3dv_write_uniforms_wg_offsets(struct v3dv_cmd_buffer *cmd_buffer,
|
||||
} else if (cmd_buffer->state.framebuffer) {
|
||||
num_layers = cmd_buffer->state.framebuffer->layers;
|
||||
} else {
|
||||
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
|
||||
assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
|
||||
num_layers = 2048;
|
||||
#if DEBUG
|
||||
fprintf(stderr, "Skipping gl_LayerID shader sanity check for "
|
||||
|
@@ -794,7 +794,7 @@ v3dX(cmd_buffer_emit_render_pass_rcl)(struct v3dv_cmd_buffer *cmd_buffer)
|
||||
* buffer.
|
||||
*/
|
||||
if (!framebuffer) {
|
||||
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
|
||||
assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -1460,7 +1460,7 @@ job_update_ez_state(struct v3dv_job *job,
|
||||
struct v3dv_framebuffer *fb = state->framebuffer;
|
||||
|
||||
if (!fb) {
|
||||
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
|
||||
assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
|
||||
perf_debug("Loading depth aspect in a secondary command buffer "
|
||||
"without framebuffer info disables early-z tests.\n");
|
||||
job->first_ez_state = V3D_EZ_DISABLED;
|
||||
|
@@ -75,7 +75,7 @@ tu6_lazy_emit_tessfactor_addr(struct tu_cmd_buffer *cmd)
|
||||
if (cmd->state.tessfactor_addr_set)
|
||||
return;
|
||||
|
||||
assert(cmd->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
|
||||
assert(cmd->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
|
||||
|
||||
tu_cs_emit_regs(&cmd->cs, A6XX_PC_TESSFACTOR_ADDR(.qword = cmd->device->tess_bo.iova));
|
||||
cmd->state.tessfactor_addr_set = true;
|
||||
@@ -1505,7 +1505,7 @@ tu_create_cmd_buffer(struct tu_device *device,
|
||||
if (cmd_buffer == NULL)
|
||||
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
|
||||
|
||||
VkResult result = vk_command_buffer_init(&cmd_buffer->vk, &device->vk);
|
||||
VkResult result = vk_command_buffer_init(&cmd_buffer->vk, &device->vk, level);
|
||||
if (result != VK_SUCCESS) {
|
||||
vk_free2(&device->vk.alloc, NULL, cmd_buffer);
|
||||
return result;
|
||||
@@ -1513,7 +1513,6 @@ tu_create_cmd_buffer(struct tu_device *device,
|
||||
|
||||
cmd_buffer->device = device;
|
||||
cmd_buffer->pool = pool;
|
||||
cmd_buffer->level = level;
|
||||
|
||||
if (pool) {
|
||||
list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
|
||||
@@ -1621,10 +1620,9 @@ tu_AllocateCommandBuffers(VkDevice _device,
|
||||
list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
|
||||
|
||||
result = tu_reset_cmd_buffer(cmd_buffer);
|
||||
cmd_buffer->level = pAllocateInfo->level;
|
||||
vk_command_buffer_finish(&cmd_buffer->vk);
|
||||
VkResult init_result =
|
||||
vk_command_buffer_init(&cmd_buffer->vk, &device->vk);
|
||||
vk_command_buffer_init(&cmd_buffer->vk, &device->vk, pAllocateInfo->level);
|
||||
if (init_result != VK_SUCCESS)
|
||||
result = init_result;
|
||||
|
||||
@@ -1725,7 +1723,7 @@ tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
|
||||
tu_cs_begin(&cmd_buffer->draw_epilogue_cs);
|
||||
|
||||
/* setup initial configuration into command buffer */
|
||||
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
|
||||
if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
|
||||
switch (cmd_buffer->queue_family_index) {
|
||||
case TU_QUEUE_GENERAL:
|
||||
tu6_init_hw(cmd_buffer, &cmd_buffer->cs);
|
||||
@@ -1733,7 +1731,7 @@ tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
|
||||
default:
|
||||
break;
|
||||
}
|
||||
} else if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
|
||||
} else if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
|
||||
assert(pBeginInfo->pInheritanceInfo);
|
||||
|
||||
vk_foreach_struct(ext, pBeginInfo->pInheritanceInfo) {
|
||||
@@ -2350,7 +2348,7 @@ tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
|
||||
/* Set up the tess factor address if this is the first tess pipeline bound
|
||||
* to the primary cmdbuf.
|
||||
*/
|
||||
if (cmd->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY)
|
||||
if (cmd->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY)
|
||||
tu6_lazy_emit_tessfactor_addr(cmd);
|
||||
|
||||
/* maximum number of patches that can fit in tess factor/param buffers */
|
||||
|
@@ -1132,7 +1132,6 @@ struct tu_cmd_buffer
|
||||
struct list_head renderpass_autotune_results;
|
||||
|
||||
VkCommandBufferUsageFlags usage_flags;
|
||||
VkCommandBufferLevel level;
|
||||
enum tu_cmd_buffer_status status;
|
||||
|
||||
struct tu_cmd_state state;
|
||||
|
@@ -38,7 +38,7 @@ static VkResult lvp_create_cmd_buffer(
|
||||
if (cmd_buffer == NULL)
|
||||
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
|
||||
|
||||
VkResult result = vk_command_buffer_init(&cmd_buffer->vk, &device->vk);
|
||||
VkResult result = vk_command_buffer_init(&cmd_buffer->vk, &device->vk, level);
|
||||
if (result != VK_SUCCESS) {
|
||||
vk_free(&pool->alloc, cmd_buffer);
|
||||
return result;
|
||||
@@ -94,10 +94,10 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_AllocateCommandBuffers(
|
||||
list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
|
||||
|
||||
result = lvp_reset_cmd_buffer(cmd_buffer);
|
||||
cmd_buffer->level = pAllocateInfo->level;
|
||||
vk_command_buffer_finish(&cmd_buffer->vk);
|
||||
VkResult init_result =
|
||||
vk_command_buffer_init(&cmd_buffer->vk, &device->vk);
|
||||
vk_command_buffer_init(&cmd_buffer->vk, &device->vk,
|
||||
pAllocateInfo->level);
|
||||
if (init_result != VK_SUCCESS)
|
||||
result = init_result;
|
||||
|
||||
|
@@ -558,7 +558,6 @@ struct lvp_cmd_buffer {
|
||||
|
||||
struct lvp_device * device;
|
||||
|
||||
VkCommandBufferLevel level;
|
||||
enum lvp_cmd_buffer_status status;
|
||||
struct lvp_cmd_pool * pool;
|
||||
struct list_head pool_link;
|
||||
|
@@ -964,7 +964,7 @@ anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
|
||||
{
|
||||
struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
|
||||
|
||||
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
|
||||
if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
|
||||
/* When we start a batch buffer, we subtract a certain amount of
|
||||
* padding from the end to ensure that we always have room to emit a
|
||||
* BATCH_BUFFER_START to chain to the next BO. We need to remove
|
||||
@@ -993,7 +993,7 @@ anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
|
||||
|
||||
cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
|
||||
} else {
|
||||
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
|
||||
assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
|
||||
/* If this is a secondary command buffer, we need to determine the
|
||||
* mode in which it will be executed with vkExecuteCommands. We
|
||||
* determine this statically here so that this stays in sync with the
|
||||
|
@@ -275,7 +275,7 @@ static VkResult anv_create_cmd_buffer(
|
||||
if (cmd_buffer == NULL)
|
||||
return vk_error(pool, VK_ERROR_OUT_OF_HOST_MEMORY);
|
||||
|
||||
result = vk_command_buffer_init(&cmd_buffer->vk, &device->vk);
|
||||
result = vk_command_buffer_init(&cmd_buffer->vk, &device->vk, level);
|
||||
if (result != VK_SUCCESS)
|
||||
goto fail_alloc;
|
||||
|
||||
@@ -283,7 +283,6 @@ static VkResult anv_create_cmd_buffer(
|
||||
|
||||
cmd_buffer->device = device;
|
||||
cmd_buffer->pool = pool;
|
||||
cmd_buffer->level = level;
|
||||
|
||||
result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
|
||||
if (result != VK_SUCCESS)
|
||||
|
@@ -147,7 +147,7 @@ anv_measure_start_snapshot(struct anv_cmd_buffer *cmd_buffer,
|
||||
uintptr_t framebuffer = (uintptr_t)cmd_buffer->state.framebuffer;
|
||||
|
||||
if (!measure->base.framebuffer &&
|
||||
cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
|
||||
cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
|
||||
/* secondary command buffer inherited the framebuffer from the primary */
|
||||
measure->base.framebuffer = framebuffer;
|
||||
|
||||
|
@@ -3201,7 +3201,6 @@ struct anv_cmd_buffer {
|
||||
struct anv_state_stream general_state_stream;
|
||||
|
||||
VkCommandBufferUsageFlags usage_flags;
|
||||
VkCommandBufferLevel level;
|
||||
|
||||
struct anv_query_pool *perf_query_pool;
|
||||
|
||||
|
@@ -1768,7 +1768,7 @@ genX(BeginCommandBuffer)(
|
||||
* secondary command buffer is considered to be entirely inside a render
|
||||
* pass. If this is a primary command buffer, then this bit is ignored.
|
||||
*/
|
||||
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY)
|
||||
if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY)
|
||||
cmd_buffer->usage_flags &= ~VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
|
||||
|
||||
trace_intel_begin_cmd_buffer(&cmd_buffer->trace, cmd_buffer);
|
||||
@@ -1892,7 +1892,7 @@ genX(BeginCommandBuffer)(
|
||||
}
|
||||
|
||||
#if GFX_VERx10 >= 75
|
||||
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
|
||||
if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
|
||||
const VkCommandBufferInheritanceConditionalRenderingInfoEXT *conditional_rendering_info =
|
||||
vk_find_struct_const(pBeginInfo->pInheritanceInfo->pNext, COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT);
|
||||
|
||||
@@ -1978,7 +1978,8 @@ genX(EndCommandBuffer)(
|
||||
|
||||
emit_isp_disable(cmd_buffer);
|
||||
|
||||
trace_intel_end_cmd_buffer(&cmd_buffer->trace, cmd_buffer, cmd_buffer->level);
|
||||
trace_intel_end_cmd_buffer(&cmd_buffer->trace, cmd_buffer,
|
||||
cmd_buffer->vk.level);
|
||||
|
||||
anv_cmd_buffer_end_batch_buffer(cmd_buffer);
|
||||
|
||||
@@ -1993,7 +1994,7 @@ genX(CmdExecuteCommands)(
|
||||
{
|
||||
ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer);
|
||||
|
||||
assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
|
||||
assert(primary->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
|
||||
|
||||
if (anv_batch_has_error(&primary->batch))
|
||||
return;
|
||||
@@ -2011,7 +2012,7 @@ genX(CmdExecuteCommands)(
|
||||
for (uint32_t i = 0; i < commandBufferCount; i++) {
|
||||
ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);
|
||||
|
||||
assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
|
||||
assert(secondary->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
|
||||
assert(!anv_batch_has_error(&secondary->batch));
|
||||
|
||||
#if GFX_VERx10 >= 75
|
||||
@@ -7296,7 +7297,7 @@ void genX(CmdNextSubpass2)(
|
||||
if (anv_batch_has_error(&cmd_buffer->batch))
|
||||
return;
|
||||
|
||||
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
|
||||
assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
|
||||
|
||||
uint32_t prev_subpass = anv_get_subpass_id(&cmd_buffer->state);
|
||||
cmd_buffer_end_subpass(cmd_buffer);
|
||||
|
@@ -90,7 +90,7 @@ gfx7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer)
|
||||
vp->x + vp->width - 1);
|
||||
|
||||
/* Do this math using int64_t so overflow gets clamped correctly. */
|
||||
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
|
||||
if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
|
||||
y_min = clamp_int64((uint64_t) y_min,
|
||||
cmd_buffer->state.render_area.offset.y, max);
|
||||
x_min = clamp_int64((uint64_t) x_min,
|
||||
|
@@ -654,7 +654,6 @@ struct panvk_cmd_buffer {
|
||||
struct list_head batches;
|
||||
|
||||
VkCommandBufferUsageFlags usage_flags;
|
||||
VkCommandBufferLevel level;
|
||||
enum panvk_cmd_buffer_status status;
|
||||
|
||||
struct panvk_cmd_state state;
|
||||
|
@@ -1011,14 +1011,13 @@ panvk_create_cmdbuf(struct panvk_device *device,
|
||||
if (!cmdbuf)
|
||||
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
|
||||
|
||||
VkResult result = vk_command_buffer_init(&cmdbuf->vk, &device->vk);
|
||||
VkResult result = vk_command_buffer_init(&cmdbuf->vk, &device->vk, level);
|
||||
if (result != VK_SUCCESS) {
|
||||
vk_free(&device->vk.alloc, cmdbuf);
|
||||
return result;
|
||||
}
|
||||
|
||||
cmdbuf->device = device;
|
||||
cmdbuf->level = level;
|
||||
cmdbuf->pool = pool;
|
||||
|
||||
if (pool) {
|
||||
@@ -1068,9 +1067,8 @@ panvk_per_arch(AllocateCommandBuffers)(VkDevice _device,
|
||||
list_del(&cmdbuf->pool_link);
|
||||
list_addtail(&cmdbuf->pool_link, &pool->active_cmd_buffers);
|
||||
|
||||
cmdbuf->level = pAllocateInfo->level;
|
||||
vk_command_buffer_finish(&cmdbuf->vk);
|
||||
result = vk_command_buffer_init(&cmdbuf->vk, &device->vk);
|
||||
result = vk_command_buffer_init(&cmdbuf->vk, &device->vk, pAllocateInfo->level);
|
||||
} else {
|
||||
result = panvk_create_cmdbuf(device, pool, pAllocateInfo->level, &cmdbuf);
|
||||
}
|
||||
|
@@ -25,12 +25,14 @@
|
||||
|
||||
VkResult
|
||||
vk_command_buffer_init(struct vk_command_buffer *command_buffer,
|
||||
struct vk_device *device)
|
||||
struct vk_device *device,
|
||||
VkCommandBufferLevel level)
|
||||
{
|
||||
memset(command_buffer, 0, sizeof(*command_buffer));
|
||||
vk_object_base_init(device, &command_buffer->base,
|
||||
VK_OBJECT_TYPE_COMMAND_BUFFER);
|
||||
|
||||
command_buffer->level = level;
|
||||
util_dynarray_init(&command_buffer->labels, NULL);
|
||||
command_buffer->region_begin = true;
|
||||
|
||||
|
@@ -34,6 +34,9 @@ extern "C" {
|
||||
struct vk_command_buffer {
|
||||
struct vk_object_base base;
|
||||
|
||||
/** VkCommandBufferAllocateInfo::level */
|
||||
VkCommandBufferLevel level;
|
||||
|
||||
/**
|
||||
* VK_EXT_debug_utils
|
||||
*
|
||||
@@ -80,7 +83,8 @@ VK_DEFINE_HANDLE_CASTS(vk_command_buffer, base, VkCommandBuffer,
|
||||
|
||||
VkResult MUST_CHECK
|
||||
vk_command_buffer_init(struct vk_command_buffer *command_buffer,
|
||||
struct vk_device *device);
|
||||
struct vk_device *device,
|
||||
VkCommandBufferLevel level);
|
||||
|
||||
void
|
||||
vk_command_buffer_reset(struct vk_command_buffer *command_buffer);
|
||||
|
Reference in New Issue
Block a user