v3d: Add support for the TMUWT instruction.
This instruction ensures that outstanding TMU stores have been processed before execution continues. In particular, all TMU operations must have completed by the time the shader ends.
This commit is contained in:
@@ -402,7 +402,7 @@ calculate_deps(struct schedule_state *state, struct schedule_node *n)
|
||||
add_write_dep(state, &state->last_tmu_config, n);
|
||||
}
|
||||
|
||||
if (inst->sig.ldtmu) {
|
||||
if (v3d_qpu_waits_on_tmu(inst)) {
|
||||
/* TMU loads are coming from a FIFO, so ordering is important.
|
||||
*/
|
||||
add_write_dep(state, &state->last_tmu_write, n);
|
||||
@@ -564,7 +564,7 @@ get_instruction_priority(const struct v3d_qpu_instr *inst)
|
||||
next_score++;
|
||||
|
||||
/* Schedule texture read results collection late to hide latency. */
|
||||
if (inst->sig.ldtmu)
|
||||
if (v3d_qpu_waits_on_tmu(inst))
|
||||
return next_score;
|
||||
next_score++;
|
||||
|
||||
@@ -605,6 +605,9 @@ qpu_accesses_peripheral(const struct v3d_qpu_instr *inst)
|
||||
return true;
|
||||
}
|
||||
|
||||
if (inst->alu.add.op == V3D_QPU_A_TMUWT)
|
||||
return true;
|
||||
|
||||
if (inst->alu.mul.op != V3D_QPU_M_NOP &&
|
||||
inst->alu.mul.magic_write &&
|
||||
qpu_magic_waddr_is_periph(inst->alu.mul.waddr)) {
|
||||
@@ -910,7 +913,7 @@ static uint32_t magic_waddr_latency(enum v3d_qpu_waddr waddr,
|
||||
*
|
||||
* because we associate the first load_tmu0 with the *second* tmu0_s.
|
||||
*/
|
||||
if (v3d_qpu_magic_waddr_is_tmu(waddr) && after->sig.ldtmu)
|
||||
if (v3d_qpu_magic_waddr_is_tmu(waddr) && v3d_qpu_waits_on_tmu(after))
|
||||
return 100;
|
||||
|
||||
/* Assume that anything depending on us is consuming the SFU result. */
|
||||
|
@@ -940,6 +940,7 @@ VIR_A_ALU0(TIDX)
|
||||
VIR_A_ALU0(EIDX)
|
||||
VIR_A_ALU1(LDVPMV_IN)
|
||||
VIR_A_ALU1(LDVPMV_OUT)
|
||||
VIR_A_ALU0(TMUWT)
|
||||
|
||||
VIR_A_ALU0(FXCD)
|
||||
VIR_A_ALU0(XCD)
|
||||
|
@@ -98,6 +98,7 @@ vir_has_side_effects(struct v3d_compile *c, struct qinst *inst)
|
||||
case V3D_QPU_A_STVPMD:
|
||||
case V3D_QPU_A_STVPMP:
|
||||
case V3D_QPU_A_VPMWT:
|
||||
case V3D_QPU_A_TMUWT:
|
||||
return true;
|
||||
default:
|
||||
break;
|
||||
@@ -194,6 +195,11 @@ vir_is_tex(struct qinst *inst)
|
||||
if (inst->dst.file == QFILE_MAGIC)
|
||||
return v3d_qpu_magic_waddr_is_tmu(inst->dst.index);
|
||||
|
||||
if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
|
||||
inst->qpu.alu.add.op == V3D_QPU_A_TMUWT) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@@ -525,6 +525,14 @@ v3d_qpu_magic_waddr_is_tmu(enum v3d_qpu_waddr waddr)
|
||||
waddr <= V3D_QPU_WADDR_TMUHSLOD));
|
||||
}
|
||||
|
||||
bool
|
||||
v3d_qpu_waits_on_tmu(const struct v3d_qpu_instr *inst)
|
||||
{
|
||||
return (inst->sig.ldtmu ||
|
||||
(inst->type == V3D_QPU_INSTR_TYPE_ALU &&
|
||||
inst->alu.add.op == V3D_QPU_A_TMUWT));
|
||||
}
|
||||
|
||||
bool
|
||||
v3d_qpu_magic_waddr_is_tlb(enum v3d_qpu_waddr waddr)
|
||||
{
|
||||
|
@@ -452,6 +452,7 @@ bool v3d_qpu_writes_r4(const struct v3d_device_info *devinfo,
|
||||
const struct v3d_qpu_instr *instr) ATTRIBUTE_CONST;
|
||||
bool v3d_qpu_writes_r5(const struct v3d_device_info *devinfo,
|
||||
const struct v3d_qpu_instr *instr) ATTRIBUTE_CONST;
|
||||
bool v3d_qpu_waits_on_tmu(const struct v3d_qpu_instr *inst) ATTRIBUTE_CONST;
|
||||
bool v3d_qpu_uses_mux(const struct v3d_qpu_instr *inst, enum v3d_qpu_mux mux);
|
||||
bool v3d_qpu_uses_vpm(const struct v3d_qpu_instr *inst) ATTRIBUTE_CONST;
|
||||
bool v3d_qpu_reads_vpm(const struct v3d_qpu_instr *inst) ATTRIBUTE_CONST;
|
||||
|
Reference in New Issue
Block a user