tu: C++-proofing: various enum fixes
Signed-off-by: Danylo Piliaiev <dpiliaiev@igalia.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21931>
committed by Marge Bot

parent 5430d10d06
commit af3e075f05
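
Why this commit exists: C lets an int (or a value of another enum type) convert
implicitly to an enum, and lets enum variables be incremented and OR'd freely;
C++ rejects all of those conversions, so code that should also compile as C++
needs explicit casts. A minimal sketch of the difference (illustrative only,
not code from the commit):

   enum color { RED, GREEN, BLUE };

   void example(int i, enum color c)
   {
      enum color a = i;              /* C: OK;  C++: error (int -> enum) */
      enum color b = (enum color) i; /* OK in both */
      c++;                           /* C: OK;  C++: error (no operator++) */
      c = (enum color) (c + 1);      /* OK in both */
   }

The hunks below apply exactly this kind of cast, replace bare integer literals
with the proper enumerators, and switch some enum-typed flag variables to the
BITMASK_ENUM() wrapper so bitwise operations keep working in C++.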
@@ -60,7 +60,6 @@ format_to_ifmt(enum pipe_format format)
       return is_int ? R2D_INT32 : R2D_FLOAT32;
    default:
       unreachable("bad format");
-      return 0;
    }
 }
 
@@ -415,7 +414,7 @@ r2d_setup_common(struct tu_cmd_buffer *cmd,
 
    uint32_t blit_cntl = A6XX_RB_2D_BLIT_CNTL(
       .scissor = scissor,
-      .rotate = blit_param,
+      .rotate = (enum a6xx_rotation) blit_param,
       .solid_color = clear,
       .d24s8 = fmt == FMT6_Z24_UNORM_S8_UINT_AS_R8G8B8A8 && !clear,
      .color_format = fmt,
 
@@ -757,7 +756,7 @@ tu_init_clear_blit_shaders(struct tu_device *dev)
 
    for (uint32_t num_rts = 0; num_rts <= MAX_RTS; num_rts++) {
       compile_shader(dev, build_clear_fs_shader(num_rts), num_rts, &offset,
-                     GLOBAL_SH_FS_CLEAR0 + num_rts);
+                     (enum global_shader) (GLOBAL_SH_FS_CLEAR0 + num_rts));
    }
 }
 
@@ -789,7 +788,7 @@ r3d_common(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool blit,
 
    unsigned num_rts = util_bitcount(rts_mask);
    if (!blit)
-      fs_id = GLOBAL_SH_FS_CLEAR0 + num_rts;
+      fs_id = (enum global_shader) (GLOBAL_SH_FS_CLEAR0 + num_rts);
 
    struct ir3_shader_variant *fs = cmd->device->global_shader_variants[fs_id];
    uint64_t fs_iova = cmd->device->global_shader_va[fs_id];
 
@@ -1666,7 +1665,7 @@ tu6_blit_image(struct tu_cmd_buffer *cmd,
 
    ops->setup(cmd, cs, src_format, dst_format, info->dstSubresource.aspectMask,
               blit_param, false, dst_image->layout[0].ubwc,
-              dst_image->layout[0].nr_samples);
+              (VkSampleCountFlagBits) dst_image->layout[0].nr_samples);
 
    if (ops == &r3d_ops) {
       const float coords[] = { info->dstOffsets[0].x, info->dstOffsets[0].y,
 
@@ -1809,7 +1808,7 @@ tu_copy_buffer_to_image(struct tu_cmd_buffer *cmd,
 
    ops->setup(cmd, cs, src_format, dst_format,
              info->imageSubresource.aspectMask, 0, false, dst_image->layout[0].ubwc,
-             dst_image->layout[0].nr_samples);
+             (VkSampleCountFlagBits) dst_image->layout[0].nr_samples);
 
    struct fdl6_view dst;
    tu_image_view_copy(&dst, dst_image, dst_format, &info->imageSubresource, offset.z);
 
@@ -2087,7 +2086,7 @@ tu_copy_image_to_image(struct tu_cmd_buffer *cmd,
    }, false);
 
    ops->setup(cmd, cs, src_format, src_format, VK_IMAGE_ASPECT_COLOR_BIT, 0, false, false,
-             dst_image->layout[0].nr_samples);
+             (VkSampleCountFlagBits) dst_image->layout[0].nr_samples);
    coords(ops, cs, &staging_offset, &src_offset, &extent);
 
    for (uint32_t i = 0; i < layers_to_copy; i++) {
 
@@ -2116,7 +2115,7 @@ tu_copy_image_to_image(struct tu_cmd_buffer *cmd,
 
    ops->setup(cmd, cs, dst_format, dst_format, info->dstSubresource.aspectMask,
              0, false, dst_image->layout[0].ubwc,
-             dst_image->layout[0].nr_samples);
+             (VkSampleCountFlagBits) dst_image->layout[0].nr_samples);
    coords(ops, cs, &dst_offset, &staging_offset, &extent);
 
    for (uint32_t i = 0; i < layers_to_copy; i++) {
 
@@ -2130,7 +2129,7 @@ tu_copy_image_to_image(struct tu_cmd_buffer *cmd,
 
    ops->setup(cmd, cs, format, format, info->dstSubresource.aspectMask,
              0, false, dst_image->layout[0].ubwc,
-             dst_image->layout[0].nr_samples);
+             (VkSampleCountFlagBits) dst_image->layout[0].nr_samples);
    coords(ops, cs, &dst_offset, &src_offset, &extent);
 
    for (uint32_t i = 0; i < layers_to_copy; i++) {
 
@@ -2440,7 +2439,7 @@ clear_image(struct tu_cmd_buffer *cmd,
    const struct blit_ops *ops = image->layout[0].nr_samples > 1 ? &r3d_ops : &r2d_ops;
 
    ops->setup(cmd, cs, format, format, aspect_mask, 0, true, image->layout[0].ubwc,
-             image->layout[0].nr_samples);
+             (VkSampleCountFlagBits) image->layout[0].nr_samples);
    if (image->vk.format == VK_FORMAT_E5B9G9R9_UFLOAT_PACK32)
       ops->clear_value(cs, PIPE_FORMAT_R9G9B9E5_FLOAT, clear_value);
    else
 
@@ -3296,7 +3295,7 @@ static void
 store_3d_blit(struct tu_cmd_buffer *cmd,
               struct tu_cs *cs,
               const struct tu_image_view *iview,
-              uint32_t dst_samples,
+              VkSampleCountFlagBits dst_samples,
               bool separate_stencil,
               enum pipe_format src_format,
               enum pipe_format dst_format,
@@ -135,7 +135,7 @@ tu6_emit_flushes(struct tu_cmd_buffer *cmd_buffer,
                  struct tu_cs *cs,
                  struct tu_cache_state *cache)
 {
-   enum tu_cmd_flush_bits flushes = cache->flush_bits;
+   BITMASK_ENUM(tu_cmd_flush_bits) flushes = cache->flush_bits;
    cache->flush_bits = 0;
 
    if (TU_DEBUG(FLUSHALL))
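
A note on BITMASK_ENUM(): for flag sets like tu_cmd_flush_bits, the problem
case is `flushes |= bit` - in C++ the bitwise OR yields an int, which cannot be
implicitly converted back to the enum. The macro lets C keep the enum type
while C++ stores the flags in a plain integer type. One plausible definition
(an assumption for illustration; the exact macro in the Mesa tree may differ):

   #ifdef __cplusplus
   #include <type_traits>
   /* C++: use the enum's underlying integer type, so |= and &= compile. */
   #define BITMASK_ENUM(T) std::underlying_type_t<enum T>
   #else
   /* C: enums convert freely to/from int, so use the enum type directly. */
   #define BITMASK_ENUM(T) enum T
   #endif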
@@ -328,7 +328,7 @@ tu6_emit_mrt(struct tu_cmd_buffer *cmd,
 {
    const struct tu_framebuffer *fb = cmd->state.framebuffer;
 
-   enum a6xx_format mrt0_format = 0;
+   enum a6xx_format mrt0_format = FMT6_NONE;
 
    for (uint32_t i = 0; i < subpass->color_count; ++i) {
       uint32_t a = subpass->color_attachments[i].attachment;
 
@@ -368,7 +368,7 @@ tu6_emit_mrt(struct tu_cmd_buffer *cmd,
       tu_cs_image_flag_ref(cs, &iview->view, 0);
 
       if (i == 0)
-         mrt0_format = iview->view.SP_FS_MRT_REG & 0xff;
+         mrt0_format = (enum a6xx_format) (iview->view.SP_FS_MRT_REG & 0xff);
    }
 
    tu_cs_emit_regs(cs, A6XX_GRAS_LRZ_MRT_BUF_INFO_0(.color_format = mrt0_format));
 
@@ -1452,7 +1452,7 @@ tu6_tile_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
                   A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK(0x6));
 
    tu_cs_emit_regs(cs,
-                   A6XX_VFD_MODE_CNTL(0));
+                   A6XX_VFD_MODE_CNTL(RENDERING_PASS));
 
    tu_cs_emit_regs(cs,
                    A6XX_PC_POWER_CNTL(phys_dev->info->a6xx.magic.PC_POWER_CNTL));
 
@@ -2566,7 +2566,7 @@ tu_CmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer,
       /* note: FLUSH_BASE is always the same, so it could go in init_hw()? */
       tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_FLUSH_BASE(i), 2);
       tu_cs_emit_qw(cs, global_iova_arr(cmd, flush_base, i));
-      tu6_emit_event_write(cmd, cs, FLUSH_SO_0 + i);
+      tu6_emit_event_write(cmd, cs, (enum vgt_event_type) (FLUSH_SO_0 + i));
    }
 
    for (uint32_t i = 0; i < (pCounterBuffers ? counterBufferCount : 0); i++) {
 
@@ -3555,7 +3555,7 @@ tu_flush_for_access(struct tu_cache_state *cache,
                     enum tu_cmd_access_mask src_mask,
                     enum tu_cmd_access_mask dst_mask)
 {
-   enum tu_cmd_flush_bits flush_bits = 0;
+   BITMASK_ENUM(tu_cmd_flush_bits) flush_bits = 0;
 
    if (src_mask & TU_ACCESS_SYSMEM_WRITE) {
       cache->pending_flush_bits |= TU_CMD_FLAG_ALL_INVALIDATE;
 
@@ -3687,7 +3687,7 @@ gfx_write_access(VkAccessFlags2 flags, VkPipelineStageFlags2 stages,
 static enum tu_cmd_access_mask
 vk2tu_access(VkAccessFlags2 flags, VkPipelineStageFlags2 stages, bool image_only, bool gmem)
 {
-   enum tu_cmd_access_mask mask = 0;
+   BITMASK_ENUM(tu_cmd_access_mask) mask = 0;
 
    if (gfx_read_access(flags, stages,
                        VK_ACCESS_2_INDIRECT_COMMAND_READ_BIT |
 
@@ -4268,9 +4268,9 @@ tu_subpass_barrier(struct tu_cmd_buffer *cmd_buffer,
       sanitize_src_stage(barrier->src_stage_mask);
    VkPipelineStageFlags2 dst_stage_vk =
       sanitize_dst_stage(barrier->dst_stage_mask);
-   enum tu_cmd_access_mask src_flags =
+   BITMASK_ENUM(tu_cmd_access_mask) src_flags =
       vk2tu_access(barrier->src_access_mask, src_stage_vk, false, false);
-   enum tu_cmd_access_mask dst_flags =
+   BITMASK_ENUM(tu_cmd_access_mask) dst_flags =
       vk2tu_access(barrier->dst_access_mask, dst_stage_vk, false, false);
 
    if (barrier->incoherent_ccu_color)
 
@@ -4711,7 +4711,7 @@ tu6_const_size(struct tu_cmd_buffer *cmd,
       dwords += tu6_user_consts_size(pipeline, MESA_SHADER_COMPUTE);
    } else {
       for (uint32_t type = MESA_SHADER_VERTEX; type <= MESA_SHADER_FRAGMENT; type++)
-         dwords += tu6_user_consts_size(pipeline, type);
+         dwords += tu6_user_consts_size(pipeline, (gl_shader_stage) type);
    }
 
    return dwords;
 
@@ -4750,7 +4750,8 @@ tu6_emit_consts(struct tu_cmd_buffer *cmd,
      struct tu_descriptor_state *descriptors =
         tu_get_descriptors_state(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS);
      for (uint32_t type = MESA_SHADER_VERTEX; type <= MESA_SHADER_FRAGMENT; type++)
-        tu6_emit_user_consts(&cs, pipeline, type, descriptors, cmd->push_constants);
+        tu6_emit_user_consts(&cs, pipeline, (gl_shader_stage) type,
+                             descriptors, cmd->push_constants);
    }
 
    return tu_cs_end_draw_state(&cmd->sub_cs, &cs);
@@ -5187,12 +5188,12 @@ tu_draw_initiator(struct tu_cmd_buffer *cmd, enum pc_di_src_sel src_sel)
    enum pc_di_primtype primtype = cmd->state.primtype;
 
    if (primtype == DI_PT_PATCHES0)
-      primtype += cmd->state.patch_control_points;
+      primtype = (enum pc_di_primtype) (primtype + cmd->state.patch_control_points);
 
    uint32_t initiator =
       CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
       CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(src_sel) |
-      CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(cmd->state.index_size) |
+      CP_DRAW_INDX_OFFSET_0_INDEX_SIZE((enum a4xx_index_size) cmd->state.index_size) |
       CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY);
 
    if (pipeline->active_stages & VK_SHADER_STAGE_GEOMETRY_BIT)
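
The tu_draw_initiator() hunk above shows the arithmetic variant of the same
problem: `primtype += n` promotes to int and cannot be stored back into the
enum in C++. A sketch of the pattern (the enumerator value is made up for
illustration):

   enum pc_di_primtype { DI_PT_PATCHES0 = 0x20 };

   static enum pc_di_primtype
   patch_primtype(enum pc_di_primtype primtype, unsigned control_points)
   {
      /* primtype + control_points is an int; cast it back explicitly. */
      return (enum pc_di_primtype) (primtype + control_points);
   }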
@@ -5962,8 +5963,8 @@ tu_barrier(struct tu_cmd_buffer *cmd,
 {
    VkPipelineStageFlags2 srcStage = 0;
    VkPipelineStageFlags2 dstStage = 0;
-   enum tu_cmd_access_mask src_flags = 0;
-   enum tu_cmd_access_mask dst_flags = 0;
+   BITMASK_ENUM(tu_cmd_access_mask) src_flags = 0;
+   BITMASK_ENUM(tu_cmd_access_mask) dst_flags = 0;
 
    /* Inside a renderpass, we don't know yet whether we'll be using sysmem
     * so we have to use the sysmem flushes.
 
@@ -6247,7 +6248,7 @@ tu_CmdWriteBufferMarker2AMD(VkCommandBuffer commandBuffer,
     * Flush CCU in order to make the results of previous transfer
     * operation visible to CP.
     */
-   tu_flush_for_access(cache, 0, TU_ACCESS_SYSMEM_WRITE);
+   tu_flush_for_access(cache, TU_ACCESS_NONE, TU_ACCESS_SYSMEM_WRITE);
 
    /* Flags that only require a top-of-pipe event. DrawIndirect parameters are
     * read by the CP, so the draw indirect stage counts as top-of-pipe too.
 
@@ -6264,7 +6265,7 @@ tu_CmdWriteBufferMarker2AMD(VkCommandBuffer commandBuffer,
     * - There was a barrier to synchronize other writes with WriteBufferMarkerAMD
     *   and they had to include our pipelineStage which forces the WFI.
     */
-   if (cache->flush_bits != 0 && is_top_of_pipe) {
+   if (cache->flush_bits && is_top_of_pipe) {
       cache->flush_bits |= TU_CMD_FLAG_WAIT_FOR_IDLE;
    }
 
@@ -6287,5 +6288,5 @@ tu_CmdWriteBufferMarker2AMD(VkCommandBuffer commandBuffer,
    }
 
    /* Make sure the result of this write is visible to others. */
-   tu_flush_for_access(cache, TU_ACCESS_CP_WRITE, 0);
+   tu_flush_for_access(cache, TU_ACCESS_CP_WRITE, TU_ACCESS_NONE);
 }
@@ -80,6 +80,7 @@ enum tu_cmd_dirty_bits
  */
 
 enum tu_cmd_access_mask {
+   TU_ACCESS_NONE = 0,
    TU_ACCESS_UCHE_READ = 1 << 0,
    TU_ACCESS_UCHE_WRITE = 1 << 1,
    TU_ACCESS_CCU_COLOR_READ = 1 << 2,
@@ -252,9 +253,9 @@ struct tu_cache_state {
     * any users outside that cache domain, and caches which must be
     * invalidated eventually if there are any reads.
     */
-   enum tu_cmd_flush_bits pending_flush_bits;
+   BITMASK_ENUM(tu_cmd_flush_bits) pending_flush_bits;
    /* Pending flushes */
-   enum tu_cmd_flush_bits flush_bits;
+   BITMASK_ENUM(tu_cmd_flush_bits) flush_bits;
 };
 
 struct tu_vs_params {
@@ -313,6 +314,96 @@ struct tu_render_pass_state
    uint32_t drawcall_bandwidth_per_sample_sum;
 };
 
+/* These are the states of the suspend/resume state machine. In addition to
+ * tracking whether we're in the middle of a chain of suspending and
+ * resuming passes that will be merged, we need to track whether the
+ * command buffer begins in the middle of such a chain, for when it gets
+ * merged with other command buffers. We call such a chain that begins
+ * before the command buffer starts a "pre-chain".
+ *
+ * Note that when this command buffer is finished, this state is untouched
+ * but it gains a different meaning. For example, if we finish in state
+ * SR_IN_CHAIN, we finished in the middle of a suspend/resume chain, so
+ * there's a suspend/resume chain that extends past the end of the command
+ * buffer. In this sense it's the "opposite" of SR_AFTER_PRE_CHAIN, which
+ * means that there's a suspend/resume chain that extends before the
+ * beginning.
+ */
+enum tu_suspend_resume_state
+{
+   /* Either there are no suspend/resume chains, or they are entirely
+    * contained in the current command buffer.
+    *
+    *   BeginCommandBuffer() <- start of current command buffer
+    *      ...
+    *      // we are here
+    */
+   SR_NONE = 0,
+
+   /* We are in the middle of a suspend/resume chain that starts before the
+    * current command buffer. This happens when the command buffer begins
+    * with a resuming render pass and all of the passes up to the current
+    * one are suspending. In this state, our part of the chain is not saved
+    * and is in the current draw_cs/state.
+    *
+    *   BeginRendering() ... EndRendering(suspending)
+    *   BeginCommandBuffer() <- start of current command buffer
+    *      BeginRendering(resuming) ... EndRendering(suspending)
+    *      BeginRendering(resuming) ... EndRendering(suspending)
+    *      ...
+    *      // we are here
+    */
+   SR_IN_PRE_CHAIN,
+
+   /* We are currently outside of any suspend/resume chains, but there is a
+    * chain starting before the current command buffer. It is saved in
+    * pre_chain.
+    *
+    *   BeginRendering() ... EndRendering(suspending)
+    *   BeginCommandBuffer() <- start of current command buffer
+    *      // This part is stashed in pre_chain
+    *      BeginRendering(resuming) ... EndRendering(suspending)
+    *      BeginRendering(resuming) ... EndRendering(suspending)
+    *      ...
+    *      BeginRendering(resuming) ... EndRendering() // end of chain
+    *      ...
+    *      // we are here
+    */
+   SR_AFTER_PRE_CHAIN,
+
+   /* We are in the middle of a suspend/resume chain and there is no chain
+    * starting before the current command buffer.
+    *
+    *   BeginCommandBuffer() <- start of current command buffer
+    *      ...
+    *      BeginRendering() ... EndRendering(suspending)
+    *      BeginRendering(resuming) ... EndRendering(suspending)
+    *      BeginRendering(resuming) ... EndRendering(suspending)
+    *      ...
+    *      // we are here
+    */
+   SR_IN_CHAIN,
+
+   /* We are in the middle of a suspend/resume chain and there is another,
+    * separate, chain starting before the current command buffer.
+    *
+    *   BeginRendering() ... EndRendering(suspending)
+    *   CommandBufferBegin() <- start of current command buffer
+    *      // This part is stashed in pre_chain
+    *      BeginRendering(resuming) ... EndRendering(suspending)
+    *      BeginRendering(resuming) ... EndRendering(suspending)
+    *      ...
+    *      BeginRendering(resuming) ... EndRendering() // end of chain
+    *      ...
+    *      BeginRendering() ... EndRendering(suspending)
+    *      BeginRendering(resuming) ... EndRendering(suspending)
+    *      BeginRendering(resuming) ... EndRendering(suspending)
+    *      ...
+    *      // we are here
+    */
+   SR_IN_CHAIN_AFTER_PRE_CHAIN,
+};
+
 struct tu_cmd_state
 {
    uint32_t dirty;
@@ -439,94 +530,7 @@ struct tu_cmd_state
 
    bool prim_generated_query_running_before_rp;
 
-   /* These are the states of the suspend/resume state machine. In addition to
-    * tracking whether we're in the middle of a chain of suspending and
-    * resuming passes that will be merged, we need to track whether the
-    * command buffer begins in the middle of such a chain, for when it gets
-    * merged with other command buffers. We call such a chain that begins
-    * before the command buffer starts a "pre-chain".
-    *
-    * Note that when this command buffer is finished, this state is untouched
-    * but it gains a different meaning. For example, if we finish in state
-    * SR_IN_CHAIN, we finished in the middle of a suspend/resume chain, so
-    * there's a suspend/resume chain that extends past the end of the command
-    * buffer. In this sense it's the "opposite" of SR_AFTER_PRE_CHAIN, which
-    * means that there's a suspend/resume chain that extends before the
-    * beginning.
-    */
-   enum {
-      /* Either there are no suspend/resume chains, or they are entirely
-       * contained in the current command buffer.
-       *
-       *   BeginCommandBuffer() <- start of current command buffer
-       *      ...
-       *      // we are here
-       */
-      SR_NONE = 0,
-
-      /* We are in the middle of a suspend/resume chain that starts before the
-       * current command buffer. This happens when the command buffer begins
-       * with a resuming render pass and all of the passes up to the current
-       * one are suspending. In this state, our part of the chain is not saved
-       * and is in the current draw_cs/state.
-       *
-       *   BeginRendering() ... EndRendering(suspending)
-       *   BeginCommandBuffer() <- start of current command buffer
-       *      BeginRendering(resuming) ... EndRendering(suspending)
-       *      BeginRendering(resuming) ... EndRendering(suspending)
-       *      ...
-       *      // we are here
-       */
-      SR_IN_PRE_CHAIN,
-
-      /* We are currently outside of any suspend/resume chains, but there is a
-       * chain starting before the current command buffer. It is saved in
-       * pre_chain.
-       *
-       *   BeginRendering() ... EndRendering(suspending)
-       *   BeginCommandBuffer() <- start of current command buffer
-       *      // This part is stashed in pre_chain
-       *      BeginRendering(resuming) ... EndRendering(suspending)
-       *      BeginRendering(resuming) ... EndRendering(suspending)
-       *      ...
-       *      BeginRendering(resuming) ... EndRendering() // end of chain
-       *      ...
-       *      // we are here
-       */
-      SR_AFTER_PRE_CHAIN,
-
-      /* We are in the middle of a suspend/resume chain and there is no chain
-       * starting before the current command buffer.
-       *
-       *   BeginCommandBuffer() <- start of current command buffer
-       *      ...
-       *      BeginRendering() ... EndRendering(suspending)
-       *      BeginRendering(resuming) ... EndRendering(suspending)
-       *      BeginRendering(resuming) ... EndRendering(suspending)
-       *      ...
-       *      // we are here
-       */
-      SR_IN_CHAIN,
-
-      /* We are in the middle of a suspend/resume chain and there is another,
-       * separate, chain starting before the current command buffer.
-       *
-       *   BeginRendering() ... EndRendering(suspending)
-       *   CommandBufferBegin() <- start of current command buffer
-       *      // This part is stashed in pre_chain
-       *      BeginRendering(resuming) ... EndRendering(suspending)
-       *      BeginRendering(resuming) ... EndRendering(suspending)
-       *      ...
-       *      BeginRendering(resuming) ... EndRendering() // end of chain
-       *      ...
-       *      BeginRendering() ... EndRendering(suspending)
-       *      BeginRendering(resuming) ... EndRendering(suspending)
-       *      BeginRendering(resuming) ... EndRendering(suspending)
-       *      ...
-       *      // we are here
-       */
-      SR_IN_CHAIN_AFTER_PRE_CHAIN,
-   } suspend_resume;
+   enum tu_suspend_resume_state suspend_resume;
 
    bool suspending, resuming;
 
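
The suspend/resume enum move above is a different C++ hazard: in C, enumerators
declared inside a struct definition still land at file scope, but in C++ they
become members of the struct's scope, so existing unqualified references to
SR_NONE and friends would stop compiling. Hoisting the anonymous member enum to
a named file-scope enum keeps both languages happy:

   struct S {
      enum { A, B } field;   /* C:   A is usable as plain 'A' everywhere.
                              * C++: A becomes S::A - unqualified uses break. */
   };

The same hoisting is applied to the Android gralloc_type enum in the device
header further down.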
@@ -129,7 +129,9 @@ tu_cs_add_bo(struct tu_cs *cs, uint32_t size)
 
    VkResult result =
       tu_bo_init_new(cs->device, &new_bo, size * sizeof(uint32_t),
-                     TU_BO_ALLOC_GPU_READ_ONLY | TU_BO_ALLOC_ALLOW_DUMP, cs->name);
+                     (enum tu_bo_alloc_flags)(TU_BO_ALLOC_GPU_READ_ONLY |
+                                              TU_BO_ALLOC_ALLOW_DUMP),
+                     cs->name);
    if (result != VK_SUCCESS) {
       return result;
    }
@@ -1724,7 +1724,7 @@ tu_trace_create_ts_buffer(struct u_trace_context *utctx, uint32_t size)
       container_of(utctx, struct tu_device, trace_context);
 
    struct tu_bo *bo;
-   tu_bo_init_new(device, &bo, size, false, "trace");
+   tu_bo_init_new(device, &bo, size, TU_BO_ALLOC_NO_FLAGS, "trace");
 
    return bo;
 }
 
@@ -2194,10 +2194,11 @@ tu_CreateDevice(VkPhysicalDevice physicalDevice,
    if (custom_border_colors)
       global_size += TU_BORDER_COLOR_COUNT * sizeof(struct bcolor_entry);
 
-   tu_bo_suballocator_init(&device->pipeline_suballoc, device,
-                           128 * 1024, TU_BO_ALLOC_GPU_READ_ONLY | TU_BO_ALLOC_ALLOW_DUMP);
+   tu_bo_suballocator_init(
+      &device->pipeline_suballoc, device, 128 * 1024,
+      (enum tu_bo_alloc_flags) (TU_BO_ALLOC_GPU_READ_ONLY | TU_BO_ALLOC_ALLOW_DUMP));
    tu_bo_suballocator_init(&device->autotune_suballoc, device,
-                           128 * 1024, 0);
+                           128 * 1024, TU_BO_ALLOC_NO_FLAGS);
 
    result = tu_bo_init_new(device, &device->global_bo, global_size,
                            TU_BO_ALLOC_ALLOW_DUMP, "global");
 
@@ -2222,9 +2223,9 @@ tu_CreateDevice(VkPhysicalDevice physicalDevice,
    global->dbg_gmem_total_stores = 0;
    global->dbg_gmem_taken_stores = 0;
    for (int i = 0; i < TU_BORDER_COLOR_BUILTIN; i++) {
-      VkClearColorValue border_color = vk_border_color_value(i);
+      VkClearColorValue border_color = vk_border_color_value((VkBorderColor) i);
       tu6_pack_border_color(&global->bcolor_builtin[i], &border_color,
-                            vk_border_color_is_int(i));
+                            vk_border_color_is_int((VkBorderColor) i));
    }
 
    /* initialize to ones so ffs can be used to find unused slots */
 
@@ -2603,7 +2604,7 @@ tu_AllocateMemory(VkDevice _device,
       }
    } else {
       uint64_t client_address = 0;
-      enum tu_bo_alloc_flags alloc_flags = TU_BO_ALLOC_NO_FLAGS;
+      BITMASK_ENUM(tu_bo_alloc_flags) alloc_flags = TU_BO_ALLOC_NO_FLAGS;
 
       const VkMemoryOpaqueCaptureAddressAllocateInfo *replay_info =
          vk_find_struct_const(pAllocateInfo->pNext,
 
@@ -3084,7 +3085,7 @@ tu_init_sampler(struct tu_device *device,
      COND(miplinear, A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR) |
      A6XX_TEX_SAMP_0_XY_MAG(tu6_tex_filter(pCreateInfo->magFilter, aniso)) |
      A6XX_TEX_SAMP_0_XY_MIN(tu6_tex_filter(pCreateInfo->minFilter, aniso)) |
-     A6XX_TEX_SAMP_0_ANISO(aniso) |
+     A6XX_TEX_SAMP_0_ANISO((enum a6xx_tex_aniso) aniso) |
      A6XX_TEX_SAMP_0_WRAP_S(tu6_tex_wrap(pCreateInfo->addressModeU)) |
      A6XX_TEX_SAMP_0_WRAP_T(tu6_tex_wrap(pCreateInfo->addressModeV)) |
      A6XX_TEX_SAMP_0_WRAP_R(tu6_tex_wrap(pCreateInfo->addressModeW)) |
@@ -204,6 +204,15 @@ struct tu6_global
    (global_iova(cmd, member) + sizeof_field(struct tu6_global, member[0]) * (idx))
 
 
+#ifdef ANDROID
+enum tu_gralloc_type
+{
+   TU_GRALLOC_UNKNOWN,
+   TU_GRALLOC_CROS,
+   TU_GRALLOC_OTHER,
+};
+#endif
+
 struct tu_device
 {
    struct vk_device vk;
 
@@ -320,11 +329,7 @@ struct tu_device
 
 #ifdef ANDROID
    const void *gralloc;
-   enum {
-      TU_GRALLOC_UNKNOWN,
-      TU_GRALLOC_CROS,
-      TU_GRALLOC_OTHER,
-   } gralloc_type;
+   enum tu_gralloc_type gralloc_type;
 #endif
 
    uint32_t submit_count;
@@ -293,7 +293,7 @@ tu_physical_device_get_format_properties(
     * DEPTH_STENCIL_ATTACHMENT_BIT for the optimal features.
     */
    linear = optimal;
-   if (tu6_pipe2depth(vk_format) != (enum a6xx_depth_format)~0)
+   if (tu6_pipe2depth(vk_format) != DEPTH6_NONE)
      optimal |= VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
 
    if (!tiling_possible(vk_format) &&
 
@@ -422,7 +422,7 @@ tu_get_image_format_properties(
    VkExtent3D maxExtent;
    uint32_t maxMipLevels;
    uint32_t maxArraySize;
-   VkSampleCountFlags sampleCounts = VK_SAMPLE_COUNT_1_BIT;
+   BITMASK_ENUM(VkSampleCountFlagBits) sampleCounts = VK_SAMPLE_COUNT_1_BIT;
 
    tu_physical_device_get_format_properties(physical_device, info->format,
                                             &format_props);
 
@@ -600,7 +600,7 @@ tu_get_external_image_format_properties(
    VkExternalMemoryHandleTypeFlagBits handleType,
    VkExternalImageFormatProperties *external_properties)
 {
-   VkExternalMemoryFeatureFlagBits flags = 0;
+   BITMASK_ENUM(VkExternalMemoryFeatureFlagBits) flags = 0;
    VkExternalMemoryHandleTypeFlags export_flags = 0;
    VkExternalMemoryHandleTypeFlags compat_flags = 0;
 
@@ -771,7 +771,7 @@ tu_GetPhysicalDeviceExternalBufferProperties(
    const VkPhysicalDeviceExternalBufferInfo *pExternalBufferInfo,
    VkExternalBufferProperties *pExternalBufferProperties)
 {
-   VkExternalMemoryFeatureFlagBits flags = 0;
+   BITMASK_ENUM(VkExternalMemoryFeatureFlagBits) flags = 0;
    VkExternalMemoryHandleTypeFlags export_flags = 0;
    VkExternalMemoryHandleTypeFlags compat_flags = 0;
    switch (pExternalBufferInfo->handleType) {
@@ -163,7 +163,7 @@ tu_image_view_init(struct tu_device *device,
    TU_FROM_HANDLE(tu_image, image, pCreateInfo->image);
    const VkImageSubresourceRange *range = &pCreateInfo->subresourceRange;
    VkFormat vk_format = pCreateInfo->format;
-   VkImageAspectFlagBits aspect_mask = pCreateInfo->subresourceRange.aspectMask;
+   VkImageAspectFlags aspect_mask = pCreateInfo->subresourceRange.aspectMask;
 
    const struct VkSamplerYcbcrConversionInfo *ycbcr_conversion =
       vk_find_struct_const(pCreateInfo->pNext, SAMPLER_YCBCR_CONVERSION_INFO);
@@ -1166,13 +1166,11 @@ static const struct tu_knl msm_knl_funcs = {
 
 const struct vk_sync_type tu_timeline_sync_type = {
    .size = sizeof(struct tu_timeline_sync),
-   .features = VK_SYNC_FEATURE_BINARY |
-               VK_SYNC_FEATURE_GPU_WAIT |
-               VK_SYNC_FEATURE_GPU_MULTI_WAIT |
-               VK_SYNC_FEATURE_CPU_WAIT |
-               VK_SYNC_FEATURE_CPU_RESET |
-               VK_SYNC_FEATURE_WAIT_ANY |
-               VK_SYNC_FEATURE_WAIT_PENDING,
+   .features = (enum vk_sync_features)(
+      VK_SYNC_FEATURE_BINARY | VK_SYNC_FEATURE_GPU_WAIT |
+      VK_SYNC_FEATURE_GPU_MULTI_WAIT | VK_SYNC_FEATURE_CPU_WAIT |
+      VK_SYNC_FEATURE_CPU_RESET | VK_SYNC_FEATURE_WAIT_ANY |
+      VK_SYNC_FEATURE_WAIT_PENDING),
    .init = tu_timeline_sync_init,
    .finish = tu_timeline_sync_finish,
    .reset = tu_timeline_sync_reset,
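
The vk_sync_type hunk above casts an OR of enumerators back to
enum vk_sync_features: the OR itself is legal in C++, but its int result cannot
implicitly initialize the enum-typed .features field. Reduced illustration
(made-up names):

   enum flags { F_A = 1 << 0, F_B = 1 << 1 };
   struct cfg { enum flags features; };

   static const struct cfg c = {
      /* C++ rejects plain F_A | F_B here (int -> enum); the cast fixes it. */
      .features = (enum flags) (F_A | F_B),
   };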
@@ -218,7 +218,9 @@ get_kgsl_prop(int fd, unsigned int type, void *value, size_t size)
       .sizebytes = size,
    };
 
-   return safe_ioctl(fd, IOCTL_KGSL_DEVICE_GETPROPERTY, &getprop);
+   return safe_ioctl(fd, IOCTL_KGSL_DEVICE_GETPROPERTY, &getprop)
+             ? VK_ERROR_UNKNOWN
+             : VK_SUCCESS;
 }
 
 enum kgsl_syncobj_state {
 
@@ -814,13 +816,14 @@ vk_kgsl_sync_export_sync_file(struct vk_device *device,
 
 const struct vk_sync_type vk_kgsl_sync_type = {
    .size = sizeof(struct vk_kgsl_syncobj),
-   .features = VK_SYNC_FEATURE_BINARY |
-               VK_SYNC_FEATURE_GPU_WAIT |
-               VK_SYNC_FEATURE_GPU_MULTI_WAIT |
-               VK_SYNC_FEATURE_CPU_WAIT |
-               VK_SYNC_FEATURE_CPU_RESET |
-               VK_SYNC_FEATURE_WAIT_ANY |
-               VK_SYNC_FEATURE_WAIT_PENDING,
+   .features = (enum vk_sync_features)
+               (VK_SYNC_FEATURE_BINARY |
+                VK_SYNC_FEATURE_GPU_WAIT |
+                VK_SYNC_FEATURE_GPU_MULTI_WAIT |
+                VK_SYNC_FEATURE_CPU_WAIT |
+                VK_SYNC_FEATURE_CPU_RESET |
+                VK_SYNC_FEATURE_WAIT_ANY |
+                VK_SYNC_FEATURE_WAIT_PENDING),
    .init = vk_kgsl_sync_init,
    .finish = vk_kgsl_sync_finish,
    .reset = vk_kgsl_sync_reset,
@@ -557,8 +557,9 @@ static void
 tu_render_pass_gmem_config(struct tu_render_pass *pass,
                            const struct tu_physical_device *phys_dev)
 {
-   for (enum tu_gmem_layout layout = 0; layout < TU_GMEM_LAYOUT_COUNT;
-        layout++) {
+   for (enum tu_gmem_layout layout = (enum tu_gmem_layout) 0;
+        layout < TU_GMEM_LAYOUT_COUNT;
+        layout = (enum tu_gmem_layout)(layout + 1)) {
       /* log2(gmem_align/(tile_align_w*tile_align_h)) */
       uint32_t block_align_shift = 3;
       uint32_t tile_align_w = phys_dev->info->tile_align_w;
 
@@ -852,15 +853,15 @@ tu_CreateRenderPass2(VkDevice _device,
      subpass->color_count = desc->colorAttachmentCount;
      subpass->resolve_count = 0;
      subpass->resolve_depth_stencil = is_depth_stencil_resolve_enabled(ds_resolve);
-     subpass->samples = 0;
+     subpass->samples = (VkSampleCountFlagBits) 0;
      subpass->srgb_cntl = 0;
 
-     const VkSubpassDescriptionFlagBits raster_order_access_bits =
+     const BITMASK_ENUM(VkSubpassDescriptionFlagBits) raster_order_access_bits =
        VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_COLOR_ACCESS_BIT_EXT |
        VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_EXT |
        VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_EXT;
 
-     subpass->raster_order_attachment_access = desc->flags & raster_order_access_bits;
+     subpass->raster_order_attachment_access = raster_order_access_bits & desc->flags;
 
      subpass->multiview_mask = desc->viewMask;
 
 
@@ -985,7 +986,7 @@ tu_setup_dynamic_attachment(struct tu_render_pass_attachment *att,
                             struct tu_image_view *view)
 {
    att->format = view->vk.format;
-   att->samples = view->image->layout->nr_samples;
+   att->samples = (VkSampleCountFlagBits) view->image->layout->nr_samples;
 
    /* for d32s8, cpp is for the depth image, and
     * att->samples will be used as the cpp for the stencil image
 
@@ -1014,7 +1015,7 @@ tu_setup_dynamic_render_pass(struct tu_cmd_buffer *cmd_buffer,
    subpass->feedback_invalidate = false;
    subpass->feedback_loop_ds = subpass->feedback_loop_color = false;
    subpass->input_count = 0;
-   subpass->samples = 0;
+   subpass->samples = (VkSampleCountFlagBits) 0;
    subpass->srgb_cntl = 0;
    subpass->raster_order_attachment_access = false;
    subpass->multiview_mask = info->viewMask;
 
@@ -1034,11 +1035,12 @@ tu_setup_dynamic_render_pass(struct tu_cmd_buffer *cmd_buffer,
         tu_setup_dynamic_attachment(att, view);
         att->gmem = true;
         att->clear_views = info->viewMask;
-        attachment_set_ops(device, att, att_info->loadOp, 0,
-                           att_info->storeOp, 0);
+        attachment_set_ops(device, att, att_info->loadOp,
+                           VK_ATTACHMENT_LOAD_OP_DONT_CARE, att_info->storeOp,
+                           VK_ATTACHMENT_STORE_OP_DONT_CARE);
         subpass->color_attachments[i].attachment = a++;
 
-        subpass->samples = view->image->layout->nr_samples;
+        subpass->samples = (VkSampleCountFlagBits) view->image->layout->nr_samples;
 
         if (vk_format_is_srgb(view->vk.format))
           subpass->srgb_cntl |= 1 << i;
 
@@ -1048,9 +1050,10 @@ tu_setup_dynamic_render_pass(struct tu_cmd_buffer *cmd_buffer,
           TU_FROM_HANDLE(tu_image_view, resolve_view, att_info->resolveImageView);
           tu_setup_dynamic_attachment(resolve_att, resolve_view);
           resolve_att->gmem = false;
-          attachment_set_ops(device, resolve_att,
-                             VK_ATTACHMENT_LOAD_OP_DONT_CARE, 0,
-                             VK_ATTACHMENT_STORE_OP_STORE, 0);
+          attachment_set_ops(
+             device, resolve_att, VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+             VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_STORE,
+             VK_ATTACHMENT_STORE_OP_DONT_CARE);
          subpass->resolve_attachments[i].attachment = a++;
          att->will_be_resolved = true;
       } else {
 
@@ -1075,13 +1078,18 @@ tu_setup_dynamic_render_pass(struct tu_cmd_buffer *cmd_buffer,
      att->clear_views = info->viewMask;
      subpass->depth_stencil_attachment.attachment = a++;
 
-     attachment_set_ops(device, att,
-                        info->pDepthAttachment ? info->pDepthAttachment->loadOp : 0,
-                        info->pStencilAttachment ? info->pStencilAttachment->loadOp : 0,
-                        info->pDepthAttachment ? info->pDepthAttachment->storeOp : 0,
-                        info->pStencilAttachment ? info->pStencilAttachment->storeOp : 0);
+     attachment_set_ops(
+        device, att,
+        info->pDepthAttachment ? info->pDepthAttachment->loadOp
+                               : VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+        info->pStencilAttachment ? info->pStencilAttachment->loadOp
+                                 : VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+        info->pDepthAttachment ? info->pDepthAttachment->storeOp
+                               : VK_ATTACHMENT_STORE_OP_DONT_CARE,
+        info->pStencilAttachment ? info->pStencilAttachment->storeOp
+                                 : VK_ATTACHMENT_STORE_OP_DONT_CARE);
 
-     subpass->samples = view->image->layout->nr_samples;
+     subpass->samples = (VkSampleCountFlagBits) view->image->layout->nr_samples;
 
      if (common_info->resolveMode != VK_RESOLVE_MODE_NONE) {
         unsigned i = subpass->resolve_count++;
 
@@ -1134,7 +1142,7 @@ tu_setup_dynamic_inheritance(struct tu_cmd_buffer *cmd_buffer,
    subpass->feedback_invalidate = false;
    subpass->feedback_loop_ds = subpass->feedback_loop_color = false;
    subpass->input_count = 0;
-   subpass->samples = 0;
+   subpass->samples = (VkSampleCountFlagBits) 0;
    subpass->srgb_cntl = 0;
    subpass->raster_order_attachment_access = false;
    subpass->multiview_mask = info->viewMask;
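
In the render-pass hunks above, the bare `0` arguments to attachment_set_ops()
become real enumerators: C++ does not allow the integer literal 0 to initialize
an enum parameter. Reduced illustration:

   void set_ops(VkAttachmentLoadOp load, VkAttachmentStoreOp store);

   set_ops(0, 0);                             /* C: OK;  C++: error (int -> enum) */
   set_ops(VK_ATTACHMENT_LOAD_OP_DONT_CARE,
           VK_ATTACHMENT_STORE_OP_DONT_CARE); /* OK in both */

Note the replacements are the DONT_CARE ops rather than the zero-valued
LOAD/STORE ops - presumably fine because these arguments only matter for
aspects the attachment actually has.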
@@ -73,7 +73,7 @@ struct tu_subpass
 struct tu_render_pass_attachment
 {
    VkFormat format;
-   uint32_t samples;
+   VkSampleCountFlagBits samples;
    uint32_t cpp;
    VkImageAspectFlags clear_mask;
    uint32_t clear_views;
@@ -1003,7 +1003,7 @@ tu6_emit_link_map(struct tu_cs *cs,
                      producer->output_loc);
 }
 
-static uint16_t
+static enum a6xx_tess_output
 primitive_to_tess(enum shader_prim primitive) {
    switch (primitive) {
    case SHADER_PRIM_POINTS:
 
@@ -1069,7 +1069,8 @@ tu6_vpc_varying_mode(const struct ir3_shader_variant *fs,
      /* If the last geometry shader doesn't statically write these, they're
       * implicitly zero and the FS is supposed to read zero.
       */
-     if (ir3_find_output(last_shader, fs->inputs[index].slot) < 0 &&
+     const gl_varying_slot slot = (gl_varying_slot) fs->inputs[index].slot;
+     if (ir3_find_output(last_shader, slot) < 0 &&
         (compmask & 0x1)) {
        *interp_mode |= INTERP_ZERO;
      } else {
 
@@ -1395,7 +1396,7 @@ tu6_emit_vpc(struct tu_cs *cs,
      const struct ir3_shader_variant *tess =
        ds->tess.spacing == TESS_SPACING_UNSPECIFIED ? hs : ds;
      tu_cs_emit_pkt4(cs, REG_A6XX_PC_TESS_CNTL, 1);
-     uint32_t output;
+     enum a6xx_tess_output output;
      if (tess->tess.point_mode)
        output = TESS_POINTS;
      else if (tess->tess.primitive_mode == TESS_PRIMITIVE_ISOLINES)
 
@@ -1429,7 +1430,7 @@ tu6_emit_vpc(struct tu_cs *cs,
 
 
   if (gs) {
-     uint32_t vertices_out, invocations, output, vec4_size;
+     uint32_t vertices_out, invocations, vec4_size;
      uint32_t prev_stage_output_size = ds ? ds->output_size : vs->output_size;
 
      if (hs) {
 
@@ -1438,7 +1439,7 @@ tu6_emit_vpc(struct tu_cs *cs,
        tu6_emit_link_map(cs, vs, gs, SB6_GS_SHADER);
      }
      vertices_out = gs->gs.vertices_out - 1;
-     output = primitive_to_tess(gs->gs.output_primitive);
+     enum a6xx_tess_output output = primitive_to_tess((enum shader_prim) gs->gs.output_primitive);
      invocations = gs->gs.invocations - 1;
      /* Size of per-primitive alloction in ldlw memory in vec4s. */
      vec4_size = gs->gs.vertices_in *
 
@@ -1843,8 +1844,6 @@ static void
 tu6_emit_program_config(struct tu_cs *cs,
                         struct tu_pipeline_builder *builder)
 {
-   gl_shader_stage stage = MESA_SHADER_VERTEX;
-
    STATIC_ASSERT(MESA_SHADER_VERTEX == 0);
 
    bool shared_consts_enable = tu6_shared_constants_enable(&builder->layout,
 
@@ -1859,7 +1858,9 @@ tu6_emit_program_config(struct tu_cs *cs,
          .fs_state = true,
          .gfx_ibo = true,
          .gfx_shared_const = shared_consts_enable));
-   for (; stage < ARRAY_SIZE(builder->shader_iova); stage++) {
+   for (size_t stage_idx = MESA_SHADER_VERTEX;
+        stage_idx < ARRAY_SIZE(builder->shader_iova); stage_idx++) {
+      gl_shader_stage stage = (gl_shader_stage) stage_idx;
       tu6_emit_xs_config(cs, stage, builder->variants[stage]);
    }
 }
 
@@ -1886,10 +1887,11 @@ tu6_emit_program(struct tu_cs *cs,
      vs = bs;
      tu6_emit_xs(cs, stage, bs, &builder->pvtmem, builder->binning_vs_iova);
      tu6_emit_dynamic_offset(cs, bs, builder);
-     stage++;
+     stage = (gl_shader_stage) (stage + 1);
   }
 
-  for (; stage < ARRAY_SIZE(builder->shader_iova); stage++) {
+  for (; stage < ARRAY_SIZE(builder->shader_iova);
+       stage = (gl_shader_stage) (stage + 1)) {
     const struct ir3_shader_variant *xs = builder->variants[stage];
 
     if (stage == MESA_SHADER_FRAGMENT && binning_pass)
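
From here on, most hunks are the same mechanical rewrite of gl_shader_stage
loops: C++ has no built-in ++ for enums, so `stage++` becomes
`stage = (gl_shader_stage) (stage + 1)`. An alternative would be defining the
operator once (an illustrative sketch, not what this commit does):

   #ifdef __cplusplus
   static inline gl_shader_stage &
   operator++(gl_shader_stage &s)
   {
      s = (gl_shader_stage) (s + 1);
      return s;
   }
   #endif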
@@ -2696,8 +2698,8 @@ tu_link_shaders(struct tu_pipeline_builder *builder,
                 nir_shader **shaders, unsigned shaders_count)
 {
    nir_shader *consumer = NULL;
-   for (gl_shader_stage stage = shaders_count - 1;
-        stage >= MESA_SHADER_VERTEX; stage--) {
+   for (gl_shader_stage stage = (gl_shader_stage) (shaders_count - 1);
+        stage >= MESA_SHADER_VERTEX; stage = (gl_shader_stage) (stage - 1)) {
       if (!shaders[stage])
          continue;
 
 
@@ -3140,7 +3142,7 @@ tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder *builder,
 
    struct tu_shader_key keys[ARRAY_SIZE(stage_infos)] = { };
    for (gl_shader_stage stage = MESA_SHADER_VERTEX;
-        stage < ARRAY_SIZE(keys); stage++) {
+        stage < ARRAY_SIZE(keys); stage = (gl_shader_stage) (stage+1)) {
      tu_shader_key_init(&keys[stage], stage_infos[stage], builder->device);
    }
 
 
@@ -3236,8 +3238,8 @@ tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder *builder,
 
    struct tu_shader *shaders[ARRAY_SIZE(nir)] = { NULL };
 
-   for (gl_shader_stage stage = MESA_SHADER_VERTEX;
-        stage < ARRAY_SIZE(nir); stage++) {
+   for (gl_shader_stage stage = MESA_SHADER_VERTEX; stage < ARRAY_SIZE(nir);
+        stage = (gl_shader_stage) (stage + 1)) {
      const VkPipelineShaderStageCreateInfo *stage_info = stage_infos[stage];
      if (!stage_info)
        continue;
 
@@ -3265,14 +3267,15 @@ tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder *builder,
    }
 
    if (executable_info) {
-      for (gl_shader_stage stage = MESA_SHADER_VERTEX;
-           stage < ARRAY_SIZE(nir); stage++) {
+      for (gl_shader_stage stage = MESA_SHADER_VERTEX;
+           stage < ARRAY_SIZE(nir);
+           stage = (gl_shader_stage) (stage + 1)) {
        if (!nir[stage])
          continue;
 
        nir_initial_disasm[stage] =
          nir_shader_as_str(nir[stage], pipeline->executables_mem_ctx);
       }
    }
 }
 
 tu_link_shaders(builder, nir, ARRAY_SIZE(nir));
 
@@ -3282,7 +3285,7 @@ tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder *builder,
      nir_shaders =
        tu_nir_shaders_init(builder->device, &nir_sha1, sizeof(nir_sha1));
      for (gl_shader_stage stage = MESA_SHADER_VERTEX;
-          stage < ARRAY_SIZE(nir); stage++) {
+          stage < ARRAY_SIZE(nir); stage = (gl_shader_stage) (stage + 1)) {
        if (!nir[stage])
          continue;
 
 
@@ -3304,8 +3307,8 @@ tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder *builder,
    }
 
    uint32_t desc_sets = 0;
-   for (gl_shader_stage stage = MESA_SHADER_VERTEX;
-        stage < ARRAY_SIZE(nir); stage++) {
+   for (gl_shader_stage stage = MESA_SHADER_VERTEX; stage < ARRAY_SIZE(nir);
+        stage = (gl_shader_stage) (stage + 1)) {
      if (!nir[stage])
        continue;
 
 
@@ -3361,7 +3364,7 @@ tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder *builder,
    compiled_shaders->active_desc_sets = desc_sets;
 
    for (gl_shader_stage stage = MESA_SHADER_VERTEX;
-        stage < ARRAY_SIZE(shaders); stage++) {
+        stage < ARRAY_SIZE(shaders); stage = (gl_shader_stage) (stage + 1)) {
      if (!shaders[stage])
        continue;
 
 
@@ -3383,7 +3386,7 @@ tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder *builder,
    ir3_key.safe_constlen = true;
 
    for (gl_shader_stage stage = MESA_SHADER_VERTEX;
-        stage < ARRAY_SIZE(shaders); stage++) {
+        stage < ARRAY_SIZE(shaders); stage = (gl_shader_stage) (stage + 1)) {
      if (!shaders[stage])
        continue;
 
 
@@ -3413,8 +3416,8 @@ tu_pipeline_builder_compile_shaders(struct tu_pipeline_builder *builder,
 
    ir3_key.safe_constlen = false;
 
-   for (gl_shader_stage stage = MESA_SHADER_VERTEX;
-        stage < ARRAY_SIZE(nir); stage++) {
+   for (gl_shader_stage stage = MESA_SHADER_VERTEX; stage < ARRAY_SIZE(nir);
+        stage = (gl_shader_stage) (stage + 1)) {
      if (shaders[stage]) {
        tu_shader_destroy(builder->device, shaders[stage], builder->alloc);
      }
 
@@ -3430,7 +3433,7 @@ done:;
 
    if (compiled_shaders) {
      for (gl_shader_stage stage = MESA_SHADER_VERTEX;
-          stage < ARRAY_SIZE(nir); stage++) {
+          stage < ARRAY_SIZE(nir); stage = (gl_shader_stage) (stage + 1)) {
        if (compiled_shaders->variants[stage]) {
          tu_append_executable(pipeline, compiled_shaders->variants[stage],
                               nir_initial_disasm[stage]);
 
@@ -3445,7 +3448,7 @@ done:;
 
    if (nir_shaders) {
      for (gl_shader_stage stage = MESA_SHADER_VERTEX;
-          stage < ARRAY_SIZE(nir); stage++) {
+          stage < ARRAY_SIZE(nir); stage = (gl_shader_stage) (stage + 1)) {
        if (nir_shaders->nir[stage]) {
          post_link_nir[stage] = nir_shaders->nir[stage];
        }
 
@@ -3459,7 +3462,8 @@ done:;
    for (unsigned i = 0; i < builder->num_libraries; i++) {
      struct tu_pipeline *library = builder->libraries[i];
      for (gl_shader_stage stage = MESA_SHADER_VERTEX;
-          stage < ARRAY_SIZE(library->shaders); stage++) {
+          stage < ARRAY_SIZE(library->shaders);
+          stage = (gl_shader_stage) (stage + 1)) {
        if (!post_link_nir[stage] && library->shaders[stage].nir) {
          post_link_nir[stage] = library->shaders[stage].nir;
          keys[stage] = library->shaders[stage].key;
 
@@ -3472,7 +3476,8 @@ done:;
    for (unsigned i = 0; i < builder->num_libraries; i++) {
      struct tu_pipeline *library = builder->libraries[i];
      for (gl_shader_stage stage = MESA_SHADER_VERTEX;
-          stage < ARRAY_SIZE(library->shaders); stage++) {
+          stage < ARRAY_SIZE(library->shaders);
+          stage = (gl_shader_stage) (stage + 1)) {
        if (library->shaders[stage].variant) {
          assert(!builder->variants[stage]);
          builder->variants[stage] = library->shaders[stage].variant;
 
@@ -3490,7 +3495,8 @@ done:;
    if (builder->num_libraries > 0) {
      uint32_t safe_constlens = ir3_trim_constlen(builder->variants, compiler);
      for (gl_shader_stage stage = MESA_SHADER_VERTEX;
-          stage < ARRAY_SIZE(builder->variants); stage++) {
+          stage < ARRAY_SIZE(builder->variants);
+          stage = (gl_shader_stage) (stage + 1)) {
        if (safe_constlens & (1u << stage))
          builder->variants[stage] = safe_const_variants[stage];
      }
 
@@ -3536,7 +3542,8 @@ done:;
    pipeline->nir_shaders = nir_shaders;
    pipeline->ir3_key = ir3_key;
    for (gl_shader_stage stage = MESA_SHADER_VERTEX;
-        stage < ARRAY_SIZE(pipeline->shaders); stage++) {
+        stage < ARRAY_SIZE(pipeline->shaders);
+        stage = (gl_shader_stage) (stage + 1)) {
      pipeline->shaders[stage].nir = post_link_nir[stage];
      pipeline->shaders[stage].key = keys[stage];
      pipeline->shaders[stage].const_state = builder->const_state[stage];
 
@@ -3560,8 +3567,8 @@ done:;
    return VK_SUCCESS;
 
 fail:
-   for (gl_shader_stage stage = MESA_SHADER_VERTEX;
-        stage < ARRAY_SIZE(nir); stage++) {
+   for (gl_shader_stage stage = MESA_SHADER_VERTEX; stage < ARRAY_SIZE(nir);
+        stage = (gl_shader_stage) (stage + 1)) {
      if (shaders[stage]) {
        tu_shader_destroy(builder->device, shaders[stage], builder->alloc);
      }
 
@@ -4670,8 +4677,8 @@ tu_pipeline_builder_parse_rasterization_order(
     * when implemented in the future.
     */
 
-   uint32_t sysmem_prim_mode = NO_FLUSH;
-   uint32_t gmem_prim_mode = NO_FLUSH;
+   enum a6xx_single_prim_mode sysmem_prim_mode = NO_FLUSH;
+   enum a6xx_single_prim_mode gmem_prim_mode = NO_FLUSH;
 
    if (raster_order_attachment_access) {
      /* VK_EXT_rasterization_order_attachment_access:
@@ -27,7 +27,8 @@
 void
 tu_bo_suballocator_init(struct tu_suballocator *suballoc,
                         struct tu_device *dev,
-                        uint32_t default_size, uint32_t flags)
+                        uint32_t default_size,
+                        enum tu_bo_alloc_flags flags)
 {
    suballoc->dev = dev;
    suballoc->default_size = default_size;
@@ -41,7 +41,7 @@ void
 tu_bo_suballocator_init(struct tu_suballocator *suballoc,
                         struct tu_device *dev,
                         uint32_t default_size,
-                        uint32_t flags);
+                        enum tu_bo_alloc_flags flags);
 void
 tu_bo_suballocator_finish(struct tu_suballocator *suballoc);
 
@@ -322,7 +322,8 @@ tu_framebuffer_tiling_config(struct tu_framebuffer *fb,
 {
    for (int gmem_layout = 0; gmem_layout < TU_GMEM_LAYOUT_COUNT; gmem_layout++) {
       struct tu_tiling_config *tiling = &fb->tiling[gmem_layout];
-      tu_tiling_config_update_tile_layout(fb, device, pass, gmem_layout);
+      tu_tiling_config_update_tile_layout(fb, device, pass,
+                                          (enum tu_gmem_layout) gmem_layout);
       tu_tiling_config_update_pipe_layout(tiling, device);
       tu_tiling_config_update_pipes(tiling, device);
       tu_tiling_config_update_binning(tiling, device);
@@ -106,14 +106,15 @@ tu_framebuffer_tiling_config(struct tu_framebuffer *fb,
 
 #define tu_foreach_stage(stage, stage_bits) \
    for (gl_shader_stage stage, \
-        __tmp = (gl_shader_stage)((stage_bits) &TU_STAGE_MASK); \
-        stage = __builtin_ffs(__tmp) - 1, __tmp; __tmp &= ~(1 << (stage)))
+        __tmp = (gl_shader_stage) ((stage_bits) &TU_STAGE_MASK); \
+        stage = (gl_shader_stage) (__builtin_ffs(__tmp) - 1), __tmp; \
+        __tmp = (gl_shader_stage) (__tmp & ~(1 << (stage))))
 
 static inline enum a3xx_msaa_samples
 tu_msaa_samples(uint32_t samples)
 {
    assert(__builtin_popcount(samples) == 1);
-   return util_logbase2(samples);
+   return (enum a3xx_msaa_samples) util_logbase2(samples);
 }
 
 static inline uint32_t
 
@@ -127,20 +128,20 @@ tu6_stage2opcode(gl_shader_stage stage)
 static inline enum a6xx_state_block
 tu6_stage2texsb(gl_shader_stage stage)
 {
-   return SB6_VS_TEX + stage;
+   return (enum a6xx_state_block) (SB6_VS_TEX + stage);
 }
 
 static inline enum a6xx_state_block
 tu6_stage2shadersb(gl_shader_stage stage)
 {
-   return SB6_VS_SHADER + stage;
+   return (enum a6xx_state_block) (SB6_VS_SHADER + stage);
 }
 
 static inline enum a3xx_rop_code
 tu6_rop(VkLogicOp op)
 {
    /* note: hw enum matches the VK enum, but with the 4 bits reversed */
-   static const uint8_t lookup[] = {
+   static const enum a3xx_rop_code lookup[] = {
      [VK_LOGIC_OP_CLEAR] = ROP_CLEAR,
      [VK_LOGIC_OP_AND] = ROP_AND,
      [VK_LOGIC_OP_AND_REVERSE] = ROP_AND_REVERSE,
 
@@ -185,7 +186,7 @@ tu6_primtype_patches(enum pc_di_primtype type)
 static inline enum pc_di_primtype
 tu6_primtype(VkPrimitiveTopology topology)
 {
-   static const uint8_t lookup[] = {
+   static const enum pc_di_primtype lookup[] = {
      [VK_PRIMITIVE_TOPOLOGY_POINT_LIST] = DI_PT_POINTLIST,
      [VK_PRIMITIVE_TOPOLOGY_LINE_LIST] = DI_PT_LINELIST,
      [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP] = DI_PT_LINESTRIP,
 
@@ -218,7 +219,7 @@ tu6_stencil_op(VkStencilOp op)
 static inline enum adreno_rb_blend_factor
 tu6_blend_factor(VkBlendFactor factor)
 {
-   static const uint8_t lookup[] = {
+   static const enum adreno_rb_blend_factor lookup[] = {
      [VK_BLEND_FACTOR_ZERO] = FACTOR_ZERO,
      [VK_BLEND_FACTOR_ONE] = FACTOR_ONE,
      [VK_BLEND_FACTOR_SRC_COLOR] = FACTOR_SRC_COLOR,
 
@@ -285,7 +286,7 @@ tu6_tex_type(VkImageViewType type, bool storage)
 static inline enum a6xx_tex_clamp
 tu6_tex_wrap(VkSamplerAddressMode address_mode)
 {
-   uint8_t lookup[] = {
+   static const enum a6xx_tex_clamp lookup[] = {
      [VK_SAMPLER_ADDRESS_MODE_REPEAT] = A6XX_TEX_REPEAT,
      [VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT] = A6XX_TEX_MIRROR_REPEAT,
      [VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE] = A6XX_TEX_CLAMP_TO_EDGE,
 
@@ -332,7 +333,7 @@ tu6_pipe2depth(VkFormat format)
    case VK_FORMAT_S8_UINT:
       return DEPTH6_32;
    default:
-      return ~0;
+      return DEPTH6_NONE;
    }
 }
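
Finally, the lookup tables in the header change element type from uint8_t to
the enum they hold. Filling a uint8_t array from enumerators is legal in both
languages; the failure was on the way out, where `return lookup[op];`
implicitly converts an integer back to the function's enum return type. Typing
the table as the enum removes any need for a cast at the return (and
tu6_tex_wrap's table additionally becomes static const, so it is not rebuilt
on every call). Reduced illustration:

   #include <stdint.h>

   enum e { E0, E1 };

   static enum e
   lookup_narrow(unsigned i)
   {
      static const uint8_t narrow[] = { E0, E1 }; /* fine in C and C++ */
      return narrow[i];    /* C: OK;  C++: error (uint8_t -> enum e) */
   }

   static enum e
   lookup_fixed(unsigned i)
   {
      static const enum e table[] = { E0, E1 };
      return table[i];     /* OK in both: element already has type enum e */
   }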