radv: remove predication on cache flushes

This can lead to a situation where cache flushes could get conditionally
disabled while still clearing the flush_bits, and thus flushes due to
application pipeline barriers may never get executed.

Fixes: a6c2001ace ("radv: add support for cmd predication.")
Signed-off-by: Dave Airlie <airlied@redhat.com>
Author: Matthew Nicholls
Date:   2018-01-29 16:26:18 +00:00
Committed-by: Dave Airlie
parent 1ea9efd2f8
commit ef272b161e
4 changed files with 13 additions and 18 deletions

View File

@@ -446,7 +446,7 @@ radv_cmd_buffer_after_draw(struct radv_cmd_buffer *cmd_buffer,
} }
/* Force wait for graphics or compute engines to be idle. */ /* Force wait for graphics or compute engines to be idle. */
si_cs_emit_cache_flush(cmd_buffer->cs, false, si_cs_emit_cache_flush(cmd_buffer->cs,
cmd_buffer->device->physical_device->rad_info.chip_class, cmd_buffer->device->physical_device->rad_info.chip_class,
ptr, va, ptr, va,
radv_cmd_buffer_uses_mec(cmd_buffer), radv_cmd_buffer_uses_mec(cmd_buffer),

View File

@@ -1771,7 +1771,6 @@ radv_get_preamble_cs(struct radv_queue *queue,
if (i == 0) { if (i == 0) {
si_cs_emit_cache_flush(cs, si_cs_emit_cache_flush(cs,
false,
queue->device->physical_device->rad_info.chip_class, queue->device->physical_device->rad_info.chip_class,
NULL, 0, NULL, 0,
queue->queue_family_index == RING_COMPUTE && queue->queue_family_index == RING_COMPUTE &&
@@ -1783,7 +1782,6 @@ radv_get_preamble_cs(struct radv_queue *queue,
RADV_CMD_FLAG_INV_GLOBAL_L2); RADV_CMD_FLAG_INV_GLOBAL_L2);
} else if (i == 1) { } else if (i == 1) {
si_cs_emit_cache_flush(cs, si_cs_emit_cache_flush(cs,
false,
queue->device->physical_device->rad_info.chip_class, queue->device->physical_device->rad_info.chip_class,
NULL, 0, NULL, 0,
queue->queue_family_index == RING_COMPUTE && queue->queue_family_index == RING_COMPUTE &&

View File

@@ -1021,7 +1021,6 @@ void si_emit_wait_fence(struct radeon_winsys_cs *cs,
uint64_t va, uint32_t ref, uint64_t va, uint32_t ref,
uint32_t mask); uint32_t mask);
void si_cs_emit_cache_flush(struct radeon_winsys_cs *cs, void si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
bool predicated,
enum chip_class chip_class, enum chip_class chip_class,
uint32_t *fence_ptr, uint64_t va, uint32_t *fence_ptr, uint64_t va,
bool is_mec, bool is_mec,

View File

@@ -917,7 +917,6 @@ si_emit_acquire_mem(struct radeon_winsys_cs *cs,
void void
si_cs_emit_cache_flush(struct radeon_winsys_cs *cs, si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
bool predicated,
enum chip_class chip_class, enum chip_class chip_class,
uint32_t *flush_cnt, uint32_t *flush_cnt,
uint64_t flush_va, uint64_t flush_va,
@@ -948,7 +947,7 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
/* Necessary for DCC */ /* Necessary for DCC */
if (chip_class >= VI) { if (chip_class >= VI) {
si_cs_emit_write_event_eop(cs, si_cs_emit_write_event_eop(cs,
predicated, false,
chip_class, chip_class,
is_mec, is_mec,
V_028A90_FLUSH_AND_INV_CB_DATA_TS, V_028A90_FLUSH_AND_INV_CB_DATA_TS,
@@ -962,12 +961,12 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
} }
if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB_META) { if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_CB_META) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, predicated)); radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0)); radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
} }
if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB_META) { if (flush_bits & RADV_CMD_FLAG_FLUSH_AND_INV_DB_META) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, predicated)); radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0)); radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
} }
@@ -980,7 +979,7 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
} }
if (flush_bits & RADV_CMD_FLAG_CS_PARTIAL_FLUSH) { if (flush_bits & RADV_CMD_FLAG_CS_PARTIAL_FLUSH) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, predicated)); radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4)); radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
} }
@@ -1037,14 +1036,14 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
assert(flush_cnt); assert(flush_cnt);
uint32_t old_fence = (*flush_cnt)++; uint32_t old_fence = (*flush_cnt)++;
si_cs_emit_write_event_eop(cs, predicated, chip_class, false, cb_db_event, tc_flags, 1, si_cs_emit_write_event_eop(cs, false, chip_class, false, cb_db_event, tc_flags, 1,
flush_va, old_fence, *flush_cnt); flush_va, old_fence, *flush_cnt);
si_emit_wait_fence(cs, predicated, flush_va, *flush_cnt, 0xffffffff); si_emit_wait_fence(cs, false, flush_va, *flush_cnt, 0xffffffff);
} }
/* VGT state sync */ /* VGT state sync */
if (flush_bits & RADV_CMD_FLAG_VGT_FLUSH) { if (flush_bits & RADV_CMD_FLAG_VGT_FLUSH) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, predicated)); radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0)); radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
} }
@@ -1057,13 +1056,13 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
RADV_CMD_FLAG_INV_GLOBAL_L2 | RADV_CMD_FLAG_INV_GLOBAL_L2 |
RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2))) && RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2))) &&
!is_mec) { !is_mec) {
radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, predicated)); radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
radeon_emit(cs, 0); radeon_emit(cs, 0);
} }
if ((flush_bits & RADV_CMD_FLAG_INV_GLOBAL_L2) || if ((flush_bits & RADV_CMD_FLAG_INV_GLOBAL_L2) ||
(chip_class <= CIK && (flush_bits & RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2))) { (chip_class <= CIK && (flush_bits & RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2))) {
si_emit_acquire_mem(cs, is_mec, predicated, chip_class >= GFX9, si_emit_acquire_mem(cs, is_mec, false, chip_class >= GFX9,
cp_coher_cntl | cp_coher_cntl |
S_0085F0_TC_ACTION_ENA(1) | S_0085F0_TC_ACTION_ENA(1) |
S_0085F0_TCL1_ACTION_ENA(1) | S_0085F0_TCL1_ACTION_ENA(1) |
@@ -1077,7 +1076,7 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
* *
* WB doesn't work without NC. * WB doesn't work without NC.
*/ */
si_emit_acquire_mem(cs, is_mec, predicated, si_emit_acquire_mem(cs, is_mec, false,
chip_class >= GFX9, chip_class >= GFX9,
cp_coher_cntl | cp_coher_cntl |
S_0301F0_TC_WB_ACTION_ENA(1) | S_0301F0_TC_WB_ACTION_ENA(1) |
@@ -1086,7 +1085,7 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
} }
if (flush_bits & RADV_CMD_FLAG_INV_VMEM_L1) { if (flush_bits & RADV_CMD_FLAG_INV_VMEM_L1) {
si_emit_acquire_mem(cs, is_mec, si_emit_acquire_mem(cs, is_mec,
predicated, chip_class >= GFX9, false, chip_class >= GFX9,
cp_coher_cntl | cp_coher_cntl |
S_0085F0_TCL1_ACTION_ENA(1)); S_0085F0_TCL1_ACTION_ENA(1));
cp_coher_cntl = 0; cp_coher_cntl = 0;
@@ -1097,7 +1096,7 @@ si_cs_emit_cache_flush(struct radeon_winsys_cs *cs,
* Therefore, it should be last. Done in PFP. * Therefore, it should be last. Done in PFP.
*/ */
if (cp_coher_cntl) if (cp_coher_cntl)
si_emit_acquire_mem(cs, is_mec, predicated, chip_class >= GFX9, cp_coher_cntl); si_emit_acquire_mem(cs, is_mec, false, chip_class >= GFX9, cp_coher_cntl);
} }
void void
@@ -1127,7 +1126,6 @@ si_emit_cache_flush(struct radv_cmd_buffer *cmd_buffer)
ptr = &cmd_buffer->gfx9_fence_idx; ptr = &cmd_buffer->gfx9_fence_idx;
} }
si_cs_emit_cache_flush(cmd_buffer->cs, si_cs_emit_cache_flush(cmd_buffer->cs,
cmd_buffer->state.predicating,
cmd_buffer->device->physical_device->rad_info.chip_class, cmd_buffer->device->physical_device->rad_info.chip_class,
ptr, va, ptr, va,
radv_cmd_buffer_uses_mec(cmd_buffer), radv_cmd_buffer_uses_mec(cmd_buffer),