pvr: Split out unioned structs from struct pvr_sub_cmd

This is a simple optimization to make type-specific uses of struct
pvr_sub_cmd slightly less verbose.

Signed-off-by: Matt Coster <matt.coster@imgtec.com>
Reviewed-by: Rajnesh Kanwal <rajnesh.kanwal@imgtec.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17458>
Matt Coster
2022-06-29 11:34:09 +01:00
parent 1df16b5f22
commit b9d6ed445d
7 changed files with 139 additions and 137 deletions
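
For readers skimming the diff below: a minimal, self-contained sketch of the pattern this change adopts. The type and function names here (sub_cmd, sub_cmd_gfx, init_gfx_before/after) are illustrative only, not the driver's; the real types are struct pvr_sub_cmd and the new struct pvr_sub_cmd_gfx / _compute / _transfer split out in the header diff below.

/* Toy sketch (C11 anonymous union): before the split, a type-specific helper
 * took the wrapper struct and repeated the union member on every access;
 * after the split, it takes the per-type struct and the caller selects the
 * union member once.
 */
#include <stdio.h>

struct sub_cmd_gfx {
   int hw_render_idx;
};

struct sub_cmd {
   int type;
   union {
      struct sub_cmd_gfx gfx;
   };
};

/* Before: wrapper parameter, ".gfx" spelled out on each access. */
static void init_gfx_before(struct sub_cmd *sub_cmd)
{
   sub_cmd->gfx.hw_render_idx = 0;
}

/* After: per-type parameter; the caller passes &sub_cmd->gfx. */
static void init_gfx_after(struct sub_cmd_gfx *sub_cmd)
{
   sub_cmd->hw_render_idx = 0;
}

int main(void)
{
   struct sub_cmd cmd = { .type = 0 };

   init_gfx_before(&cmd);
   init_gfx_after(&cmd.gfx);

   printf("%d\n", cmd.gfx.hw_render_idx);
   return 0;
}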


@@ -247,12 +247,12 @@ static void pvr_cmd_buffer_update_barriers(struct pvr_cmd_buffer *cmd_buffer,
 static VkResult pvr_cmd_buffer_upload_tables(struct pvr_device *device,
 struct pvr_cmd_buffer *cmd_buffer)
 {
-struct pvr_sub_cmd *sub_cmd = cmd_buffer->state.current_sub_cmd;
+struct pvr_sub_cmd_gfx *sub_cmd = &cmd_buffer->state.current_sub_cmd->gfx;
 const uint32_t cache_line_size =
 rogue_get_slc_cache_line_size(&device->pdevice->dev_info);
 VkResult result;
-assert(!sub_cmd->gfx.depth_bias_bo && !sub_cmd->gfx.scissor_bo);
+assert(!sub_cmd->depth_bias_bo && !sub_cmd->scissor_bo);
 if (cmd_buffer->depth_bias_array.size > 0) {
 result =
@@ -261,7 +261,7 @@ static VkResult pvr_cmd_buffer_upload_tables(struct pvr_device *device,
 util_dynarray_begin(&cmd_buffer->depth_bias_array),
 cmd_buffer->depth_bias_array.size,
 cache_line_size,
-&sub_cmd->gfx.depth_bias_bo);
+&sub_cmd->depth_bias_bo);
 if (result != VK_SUCCESS)
 return result;
 }
@@ -272,7 +272,7 @@ static VkResult pvr_cmd_buffer_upload_tables(struct pvr_device *device,
 util_dynarray_begin(&cmd_buffer->scissor_array),
 cmd_buffer->scissor_array.size,
 cache_line_size,
-&sub_cmd->gfx.scissor_bo);
+&sub_cmd->scissor_bo);
 if (result != VK_SUCCESS)
 goto err_free_depth_bias_bo;
 }
@@ -283,24 +283,24 @@ static VkResult pvr_cmd_buffer_upload_tables(struct pvr_device *device,
 return VK_SUCCESS;
 err_free_depth_bias_bo:
-pvr_bo_free(device, sub_cmd->gfx.depth_bias_bo);
-sub_cmd->gfx.depth_bias_bo = NULL;
+pvr_bo_free(device, sub_cmd->depth_bias_bo);
+sub_cmd->depth_bias_bo = NULL;
 return result;
 }
 static VkResult pvr_cmd_buffer_emit_ppp_state(struct pvr_cmd_buffer *cmd_buffer)
 {
-struct pvr_sub_cmd *sub_cmd = cmd_buffer->state.current_sub_cmd;
+struct pvr_sub_cmd_gfx *sub_cmd = &cmd_buffer->state.current_sub_cmd->gfx;
 struct pvr_framebuffer *framebuffer =
 cmd_buffer->state.render_pass_info.framebuffer;
-pvr_csb_emit (&sub_cmd->gfx.control_stream, VDMCTRL_PPP_STATE0, state0) {
+pvr_csb_emit (&sub_cmd->control_stream, VDMCTRL_PPP_STATE0, state0) {
 state0.addrmsb = framebuffer->ppp_state_bo->vma->dev_addr;
 state0.word_count = framebuffer->ppp_state_size;
 }
-pvr_csb_emit (&sub_cmd->gfx.control_stream, VDMCTRL_PPP_STATE1, state1) {
+pvr_csb_emit (&sub_cmd->control_stream, VDMCTRL_PPP_STATE1, state1) {
 state1.addrlsb = framebuffer->ppp_state_bo->vma->dev_addr;
 }
@@ -869,13 +869,13 @@ pvr_pass_get_pixel_output_width(const struct pvr_render_pass *pass,
 static VkResult pvr_sub_cmd_gfx_job_init(const struct pvr_device_info *dev_info,
 struct pvr_cmd_buffer *cmd_buffer,
-struct pvr_sub_cmd *sub_cmd)
+struct pvr_sub_cmd_gfx *sub_cmd)
 {
 struct pvr_render_pass_info *render_pass_info =
 &cmd_buffer->state.render_pass_info;
 const struct pvr_renderpass_hwsetup_render *hw_render =
-&render_pass_info->pass->hw_setup->renders[sub_cmd->gfx.hw_render_idx];
-struct pvr_render_job *job = &sub_cmd->gfx.job;
+&render_pass_info->pass->hw_setup->renders[sub_cmd->hw_render_idx];
+struct pvr_render_job *job = &sub_cmd->job;
 struct pvr_pds_upload pds_pixel_event_program;
 uint32_t pbe_cs_words[PVR_MAX_COLOR_ATTACHMENTS]
@@ -929,7 +929,7 @@ static VkResult pvr_sub_cmd_gfx_job_init(const struct pvr_device_info *dev_info,
 * when the pool gets emptied?
 */
 result = pvr_load_op_data_create_and_upload(cmd_buffer,
-sub_cmd->gfx.hw_render_idx,
+sub_cmd->hw_render_idx,
 &load_op_program);
 if (result != VK_SUCCESS)
 return result;
@@ -944,30 +944,29 @@ static VkResult pvr_sub_cmd_gfx_job_init(const struct pvr_device_info *dev_info,
 render_target = pvr_get_render_target(render_pass_info->pass,
 render_pass_info->framebuffer,
-sub_cmd->gfx.hw_render_idx);
+sub_cmd->hw_render_idx);
 job->rt_dataset = render_target->rt_dataset;
-job->ctrl_stream_addr =
-pvr_csb_get_start_address(&sub_cmd->gfx.control_stream);
+job->ctrl_stream_addr = pvr_csb_get_start_address(&sub_cmd->control_stream);
 /* FIXME: Need to set up the border color table at device creation
 * time. Set to invalid for the time being.
 */
 job->border_colour_table_addr = PVR_DEV_ADDR_INVALID;
-if (sub_cmd->gfx.depth_bias_bo)
-job->depth_bias_table_addr = sub_cmd->gfx.depth_bias_bo->vma->dev_addr;
+if (sub_cmd->depth_bias_bo)
+job->depth_bias_table_addr = sub_cmd->depth_bias_bo->vma->dev_addr;
 else
 job->depth_bias_table_addr = PVR_DEV_ADDR_INVALID;
-if (sub_cmd->gfx.scissor_bo)
-job->scissor_table_addr = sub_cmd->gfx.scissor_bo->vma->dev_addr;
+if (sub_cmd->scissor_bo)
+job->scissor_table_addr = sub_cmd->scissor_bo->vma->dev_addr;
 else
 job->scissor_table_addr = PVR_DEV_ADDR_INVALID;
 job->pixel_output_width =
 pvr_pass_get_pixel_output_width(render_pass_info->pass,
-sub_cmd->gfx.hw_render_idx,
+sub_cmd->hw_render_idx,
 dev_info);
 if (hw_render->ds_surface_id != -1) {
@@ -1041,15 +1040,15 @@ static VkResult pvr_sub_cmd_gfx_job_init(const struct pvr_device_info *dev_info,
 job->samples = 1;
 }
-if (sub_cmd->gfx.max_tiles_in_flight ==
+if (sub_cmd->max_tiles_in_flight ==
 PVR_GET_FEATURE_VALUE(dev_info, isp_max_tiles_in_flight, 1U)) {
 /* Use the default limit based on the partition store. */
 job->max_tiles_in_flight = 0U;
 } else {
-job->max_tiles_in_flight = sub_cmd->gfx.max_tiles_in_flight;
+job->max_tiles_in_flight = sub_cmd->max_tiles_in_flight;
 }
-job->frag_uses_atomic_ops = sub_cmd->gfx.frag_uses_atomic_ops;
+job->frag_uses_atomic_ops = sub_cmd->frag_uses_atomic_ops;
 job->disable_compute_overlap = false;
 job->max_shared_registers = cmd_buffer->state.max_shared_regs;
 job->run_frag = true;
@@ -1065,56 +1064,53 @@ static VkResult pvr_sub_cmd_gfx_job_init(const struct pvr_device_info *dev_info,
 static void pvr_sub_cmd_compute_job_init(const struct pvr_device_info *dev_info,
 struct pvr_cmd_buffer *cmd_buffer,
-struct pvr_sub_cmd *sub_cmd)
+struct pvr_sub_cmd_compute *sub_cmd)
 {
-if (sub_cmd->compute.uses_barrier) {
-sub_cmd->compute.submit_info.flags |=
-PVR_WINSYS_COMPUTE_FLAG_PREVENT_ALL_OVERLAP;
-}
+if (sub_cmd->uses_barrier)
+sub_cmd->submit_info.flags |= PVR_WINSYS_COMPUTE_FLAG_PREVENT_ALL_OVERLAP;
-pvr_csb_pack (&sub_cmd->compute.submit_info.regs.cdm_ctrl_stream_base,
+pvr_csb_pack (&sub_cmd->submit_info.regs.cdm_ctrl_stream_base,
 CR_CDM_CTRL_STREAM_BASE,
 value) {
-value.addr = pvr_csb_get_start_address(&sub_cmd->compute.control_stream);
+value.addr = pvr_csb_get_start_address(&sub_cmd->control_stream);
 }
 /* FIXME: Need to set up the border color table at device creation
 * time. Set to invalid for the time being.
 */
-pvr_csb_pack (&sub_cmd->compute.submit_info.regs.tpu_border_colour_table,
+pvr_csb_pack (&sub_cmd->submit_info.regs.tpu_border_colour_table,
 CR_TPU_BORDER_COLOUR_TABLE_CDM,
 value) {
 value.border_colour_table_address = PVR_DEV_ADDR_INVALID;
 }
-sub_cmd->compute.num_shared_regs = MAX2(PVR_IDF_WDF_IN_REGISTER_CONST_COUNT,
+sub_cmd->num_shared_regs = MAX2(PVR_IDF_WDF_IN_REGISTER_CONST_COUNT,
 cmd_buffer->state.max_shared_regs);
 cmd_buffer->state.max_shared_regs = 0U;
 if (PVR_HAS_FEATURE(dev_info, compute_morton_capable))
-sub_cmd->compute.submit_info.regs.cdm_item = 0;
+sub_cmd->submit_info.regs.cdm_item = 0;
-pvr_csb_pack (&sub_cmd->compute.submit_info.regs.tpu, CR_TPU, value) {
+pvr_csb_pack (&sub_cmd->submit_info.regs.tpu, CR_TPU, value) {
 value.tag_cem_4k_face_packing = true;
 }
 if (PVR_HAS_FEATURE(dev_info, cluster_grouping) &&
 PVR_HAS_FEATURE(dev_info, slc_mcu_cache_controls) &&
-rogue_get_num_phantoms(dev_info) > 1 &&
-sub_cmd->compute.uses_atomic_ops) {
+rogue_get_num_phantoms(dev_info) > 1 && sub_cmd->uses_atomic_ops) {
 /* Each phantom has its own MCU, so atomicity can only be guaranteed
 * when all work items are processed on the same phantom. This means we
 * need to disable all USCs other than those of the first phantom, which
 * has 4 clusters.
 */
-pvr_csb_pack (&sub_cmd->compute.submit_info.regs.compute_cluster,
+pvr_csb_pack (&sub_cmd->submit_info.regs.compute_cluster,
 CR_COMPUTE_CLUSTER,
 value) {
 value.mask = 0xFU;
 }
 } else {
-pvr_csb_pack (&sub_cmd->compute.submit_info.regs.compute_cluster,
+pvr_csb_pack (&sub_cmd->submit_info.regs.compute_cluster,
 CR_COMPUTE_CLUSTER,
 value) {
 value.mask = 0U;
@@ -1122,8 +1118,8 @@ static void pvr_sub_cmd_compute_job_init(const struct pvr_device_info *dev_info,
 }
 if (PVR_HAS_FEATURE(dev_info, gpu_multicore_support) &&
-sub_cmd->compute.uses_atomic_ops) {
-sub_cmd->compute.submit_info.flags |= PVR_WINSYS_COMPUTE_FLAG_SINGLE_CORE;
+sub_cmd->uses_atomic_ops) {
+sub_cmd->submit_info.flags |= PVR_WINSYS_COMPUTE_FLAG_SINGLE_CORE;
 }
 }
@@ -1390,7 +1386,7 @@ static VkResult pvr_cmd_buffer_end_sub_cmd(struct pvr_cmd_buffer *cmd_buffer)
 result = pvr_sub_cmd_gfx_job_init(&device->pdevice->dev_info,
 cmd_buffer,
-sub_cmd);
+&sub_cmd->gfx);
 if (result != VK_SUCCESS) {
 state->status = result;
 return result;
@@ -1409,7 +1405,7 @@ static VkResult pvr_cmd_buffer_end_sub_cmd(struct pvr_cmd_buffer *cmd_buffer)
 pvr_sub_cmd_compute_job_init(&device->pdevice->dev_info,
 cmd_buffer,
-sub_cmd);
+&sub_cmd->compute);
 break;
 case PVR_SUB_CMD_TYPE_TRANSFER:
@@ -2478,14 +2474,16 @@ VkResult pvr_BeginCommandBuffer(VkCommandBuffer commandBuffer,
 VkResult pvr_cmd_buffer_add_transfer_cmd(struct pvr_cmd_buffer *cmd_buffer,
 struct pvr_transfer_cmd *transfer_cmd)
 {
+struct pvr_sub_cmd_transfer *sub_cmd;
 VkResult result;
 result = pvr_cmd_buffer_start_sub_cmd(cmd_buffer, PVR_SUB_CMD_TYPE_TRANSFER);
 if (result != VK_SUCCESS)
 return result;
-list_addtail(&transfer_cmd->link,
-&cmd_buffer->state.current_sub_cmd->transfer.transfer_cmds);
+sub_cmd = &cmd_buffer->state.current_sub_cmd->transfer;
+list_addtail(&transfer_cmd->link, &sub_cmd->transfer_cmds);
 return VK_SUCCESS;
 }
@@ -3616,7 +3614,7 @@ pvr_setup_fragment_state_pointers(struct pvr_cmd_buffer *const cmd_buffer)
 &cmd_buffer->device->pdevice->dev_info;
 struct pvr_emit_state *const emit_state = &state->emit_state;
 struct pvr_ppp_state *const ppp_state = &state->ppp_state;
-struct pvr_sub_cmd *sub_cmd = state->current_sub_cmd;
+struct pvr_sub_cmd_gfx *sub_cmd = &state->current_sub_cmd->gfx;
 const uint32_t pds_uniform_size =
 DIV_ROUND_UP(uniform_shader_state->pds_info.data_size_in_dwords,
@@ -3647,8 +3645,8 @@ pvr_setup_fragment_state_pointers(struct pvr_cmd_buffer *const cmd_buffer)
 uint32_t size_info_mask;
 uint32_t size_info2;
-if (max_tiles_in_flight < sub_cmd->gfx.max_tiles_in_flight)
-sub_cmd->gfx.max_tiles_in_flight = max_tiles_in_flight;
+if (max_tiles_in_flight < sub_cmd->max_tiles_in_flight)
+sub_cmd->max_tiles_in_flight = max_tiles_in_flight;
 pvr_csb_pack (&ppp_state->pds.pixel_shader_base,
 TA_STATE_PDS_SHADERBASE,


@@ -36,14 +36,14 @@
 static void pvr_compute_job_ws_submit_info_init(
 struct pvr_compute_ctx *ctx,
-struct pvr_sub_cmd *sub_cmd,
+struct pvr_sub_cmd_compute *sub_cmd,
 struct vk_sync **waits,
 uint32_t wait_count,
 uint32_t *stage_flags,
 struct pvr_winsys_compute_submit_info *submit_info)
 {
 const struct pvr_compute_ctx_switch *const ctx_switch = &ctx->ctx_switch;
-uint32_t shared_regs = sub_cmd->compute.num_shared_regs;
+uint32_t shared_regs = sub_cmd->num_shared_regs;
 submit_info->frame_num = ctx->device->global_queue_present_count;
 submit_info->job_num = ctx->device->global_queue_job_count;
@@ -86,7 +86,7 @@ static void pvr_compute_job_ws_submit_info_init(
 }
 VkResult pvr_compute_job_submit(struct pvr_compute_ctx *ctx,
-struct pvr_sub_cmd *sub_cmd,
+struct pvr_sub_cmd_compute *sub_cmd,
 struct vk_sync **waits,
 uint32_t wait_count,
 uint32_t *stage_flags,
@@ -99,9 +99,9 @@ VkResult pvr_compute_job_submit(struct pvr_compute_ctx *ctx,
 waits,
 wait_count,
 stage_flags,
-&sub_cmd->compute.submit_info);
+&sub_cmd->submit_info);
 return device->ws->ops->compute_submit(ctx->ws_ctx,
-&sub_cmd->compute.submit_info,
+&sub_cmd->submit_info,
 signal_sync);
 }


@@ -28,11 +28,11 @@
 #include <vulkan/vulkan.h>
 struct pvr_compute_ctx;
-struct pvr_sub_cmd;
+struct pvr_sub_cmd_compute;
 struct vk_sync;
 VkResult pvr_compute_job_submit(struct pvr_compute_ctx *ctx,
-struct pvr_sub_cmd *sub_cmd,
+struct pvr_sub_cmd_compute *sub_cmd,
 struct vk_sync **waits,
 uint32_t wait_count,
 uint32_t *stage_flags,


@@ -38,7 +38,7 @@
 /* FIXME: Implement gpu based transfer support. */
 VkResult pvr_transfer_job_submit(struct pvr_device *device,
 struct pvr_transfer_ctx *ctx,
-struct pvr_sub_cmd *sub_cmd,
+struct pvr_sub_cmd_transfer *sub_cmd,
 struct vk_sync **waits,
 uint32_t wait_count,
 uint32_t *stage_flags,
@@ -61,7 +61,7 @@ VkResult pvr_transfer_job_submit(struct pvr_device *device,
 list_for_each_entry_safe (struct pvr_transfer_cmd,
 transfer_cmd,
-&sub_cmd->transfer.transfer_cmds,
+&sub_cmd->transfer_cmds,
 link) {
 bool src_mapped = false;
 bool dst_mapped = false;


@@ -28,13 +28,13 @@
 #include <vulkan/vulkan.h>
 struct pvr_device;
-struct pvr_sub_cmd;
+struct pvr_sub_cmd_transfer;
 struct pvr_transfer_ctx;
 struct vk_sync;
 VkResult pvr_transfer_job_submit(struct pvr_device *device,
 struct pvr_transfer_ctx *ctx,
-struct pvr_sub_cmd *sub_cmd,
+struct pvr_sub_cmd_transfer *sub_cmd,
 struct vk_sync **waits,
 uint32_t wait_count,
 uint32_t *stage_flags,


@@ -514,6 +514,75 @@ struct pvr_transfer_cmd {
 VkBufferCopy2 regions[0];
 };
+struct pvr_sub_cmd_gfx {
+const struct pvr_framebuffer *framebuffer;
+struct pvr_render_job job;
+struct pvr_bo *depth_bias_bo;
+struct pvr_bo *scissor_bo;
+/* Tracking how the loaded depth/stencil values are being used. */
+enum pvr_depth_stencil_usage depth_usage;
+enum pvr_depth_stencil_usage stencil_usage;
+/* Tracking whether the subcommand modifies depth/stencil. */
+bool modifies_depth;
+bool modifies_stencil;
+/* Control stream builder object */
+struct pvr_csb control_stream;
+uint32_t hw_render_idx;
+uint32_t max_tiles_in_flight;
+bool empty_cmd;
+/* True if any fragment shader used in this sub command uses atomic
+* operations.
+*/
+bool frag_uses_atomic_ops;
+bool disable_compute_overlap;
+/* True if any fragment shader used in this sub command has side
+* effects.
+*/
+bool frag_has_side_effects;
+/* True if any vertex shader used in this sub command contains both
+* texture reads and texture writes.
+*/
+bool vertex_uses_texture_rw;
+/* True if any fragment shader used in this sub command contains
+* both texture reads and texture writes.
+*/
+bool frag_uses_texture_rw;
+};
+struct pvr_sub_cmd_compute {
+/* Control stream builder object. */
+struct pvr_csb control_stream;
+struct pvr_winsys_compute_submit_info submit_info;
+uint32_t num_shared_regs;
+/* True if any shader used in this sub command uses atomic
+* operations.
+*/
+bool uses_atomic_ops;
+bool uses_barrier;
+};
+struct pvr_sub_cmd_transfer {
+/* List of pvr_transfer_cmd type structures. */
+struct list_head transfer_cmds;
+};
 struct pvr_sub_cmd {
 /* This links the subcommand in pvr_cmd_buffer:sub_cmds list. */
 struct list_head link;
@@ -521,74 +590,9 @@ struct pvr_sub_cmd {
 enum pvr_sub_cmd_type type;
 union {
-struct {
-const struct pvr_framebuffer *framebuffer;
-struct pvr_render_job job;
-struct pvr_bo *depth_bias_bo;
-struct pvr_bo *scissor_bo;
-/* Tracking how the loaded depth/stencil values are being used. */
-enum pvr_depth_stencil_usage depth_usage;
-enum pvr_depth_stencil_usage stencil_usage;
-/* Tracking whether the subcommand modifies depth/stencil. */
-bool modifies_depth;
-bool modifies_stencil;
-/* Control stream builder object */
-struct pvr_csb control_stream;
-uint32_t hw_render_idx;
-uint32_t max_tiles_in_flight;
-bool empty_cmd;
-/* True if any fragment shader used in this sub command uses atomic
-* operations.
-*/
-bool frag_uses_atomic_ops;
-bool disable_compute_overlap;
-/* True if any fragment shader used in this sub command has side
-* effects.
-*/
-bool frag_has_side_effects;
-/* True if any vertex shader used in this sub command contains both
-* texture reads and texture writes.
-*/
-bool vertex_uses_texture_rw;
-/* True if any fragment shader used in this sub command contains
-* both texture reads and texture writes.
-*/
-bool frag_uses_texture_rw;
-} gfx;
-struct {
-/* Control stream builder object. */
-struct pvr_csb control_stream;
-struct pvr_winsys_compute_submit_info submit_info;
-uint32_t num_shared_regs;
-/* True if any shader used in this sub command uses atomic
-* operations.
-*/
-bool uses_atomic_ops;
-bool uses_barrier;
-} compute;
-struct {
-/* List of pvr_transfer_cmd type structures. */
-struct list_head transfer_cmds;
-} transfer;
+struct pvr_sub_cmd_gfx gfx;
+struct pvr_sub_cmd_compute compute;
+struct pvr_sub_cmd_transfer transfer;
 };
 };


@@ -233,13 +233,13 @@ static VkResult
 pvr_process_graphics_cmd(struct pvr_device *device,
 struct pvr_queue *queue,
 struct pvr_cmd_buffer *cmd_buffer,
-struct pvr_sub_cmd *sub_cmd,
+struct pvr_sub_cmd_gfx *sub_cmd,
 struct vk_sync **waits,
 uint32_t wait_count,
 uint32_t *stage_flags,
 struct vk_sync *completions[static PVR_JOB_TYPE_MAX])
 {
-const struct pvr_framebuffer *framebuffer = sub_cmd->gfx.framebuffer;
+const struct pvr_framebuffer *framebuffer = sub_cmd->framebuffer;
 struct vk_sync *sync_geom;
 struct vk_sync *sync_frag;
 uint32_t bo_count = 0;
@@ -272,7 +272,7 @@ pvr_process_graphics_cmd(struct pvr_device *device,
 /* FIXME: If the framebuffer being rendered to has multiple layers then we
 * need to split submissions that run a fragment job into two.
 */
-if (sub_cmd->gfx.job.run_frag && framebuffer->layers > 1)
+if (sub_cmd->job.run_frag && framebuffer->layers > 1)
 pvr_finishme("Split job submission for framebuffers with > 1 layers");
 /* Get any imported buffers used in framebuffer attachments. */
@@ -287,7 +287,7 @@ pvr_process_graphics_cmd(struct pvr_device *device,
 /* This passes ownership of the wait fences to pvr_render_job_submit(). */
 result = pvr_render_job_submit(queue->gfx_ctx,
-&sub_cmd->gfx.job,
+&sub_cmd->job,
 bos,
 bo_count,
 waits,
@@ -321,7 +321,7 @@ pvr_process_graphics_cmd(struct pvr_device *device,
 static VkResult
 pvr_process_compute_cmd(struct pvr_device *device,
 struct pvr_queue *queue,
-struct pvr_sub_cmd *sub_cmd,
+struct pvr_sub_cmd_compute *sub_cmd,
 struct vk_sync **waits,
 uint32_t wait_count,
 uint32_t *stage_flags,
@@ -362,7 +362,7 @@ pvr_process_compute_cmd(struct pvr_device *device,
 static VkResult
 pvr_process_transfer_cmds(struct pvr_device *device,
 struct pvr_queue *queue,
-struct pvr_sub_cmd *sub_cmd,
+struct pvr_sub_cmd_transfer *sub_cmd,
 struct vk_sync **waits,
 uint32_t wait_count,
 uint32_t *stage_flags,
@@ -516,7 +516,7 @@ pvr_process_cmd_buffer(struct pvr_device *device,
 result = pvr_process_graphics_cmd(device,
 queue,
 cmd_buffer,
-sub_cmd,
+&sub_cmd->gfx,
 waits,
 wait_count,
 stage_flags,
@@ -526,7 +526,7 @@ pvr_process_cmd_buffer(struct pvr_device *device,
 case PVR_SUB_CMD_TYPE_COMPUTE:
 result = pvr_process_compute_cmd(device,
 queue,
-sub_cmd,
+&sub_cmd->compute,
 waits,
 wait_count,
 stage_flags,
@@ -536,7 +536,7 @@ pvr_process_cmd_buffer(struct pvr_device *device,
 case PVR_SUB_CMD_TYPE_TRANSFER:
 result = pvr_process_transfer_cmds(device,
 queue,
-sub_cmd,
+&sub_cmd->transfer,
 waits,
 wait_count,
 stage_flags,