v3d: line length style fixes

Reviewed-by: Eric Anholt <eric@anholt.net>
Iago Toral Quiroga
2019-08-07 08:32:58 +02:00
parent 99e9809cab
commit 62e0ca3064


@@ -192,7 +192,8 @@ ntq_emit_tmu_general(struct v3d_compile *c, nir_intrinsic_instr *instr,
          * need/can to do things slightly different, like not loading the
          * amount to add/sub, as that is implicit.
          */
-        bool atomic_add_replaced = ((instr->intrinsic == nir_intrinsic_ssbo_atomic_add ||
-                                     instr->intrinsic == nir_intrinsic_shared_atomic_add) &&
-                                    (tmu_op == V3D_TMU_OP_WRITE_AND_READ_INC ||
-                                     tmu_op == V3D_TMU_OP_WRITE_OR_READ_DEC));
+        bool atomic_add_replaced =
+                ((instr->intrinsic == nir_intrinsic_ssbo_atomic_add ||
+                  instr->intrinsic == nir_intrinsic_shared_atomic_add) &&
+                 (tmu_op == V3D_TMU_OP_WRITE_AND_READ_INC ||
+                  tmu_op == V3D_TMU_OP_WRITE_OR_READ_DEC));
@@ -240,7 +241,8 @@ ntq_emit_tmu_general(struct v3d_compile *c, nir_intrinsic_instr *instr,
                         /* Note that QUNIFORM_UBO_ADDR takes a UBO index shifted up by
                          * 1 (0 is gallium's constant buffer 0).
                          */
-                        base_offset = vir_uniform(c, QUNIFORM_UBO_ADDR,
-                                                  v3d_unit_data_create(index, const_offset));
+                        base_offset =
+                                vir_uniform(c, QUNIFORM_UBO_ADDR,
+                                            v3d_unit_data_create(index, const_offset));
                         const_offset = 0;
                 } else if (is_shared_or_scratch) {
@@ -261,6 +263,7 @@ ntq_emit_tmu_general(struct v3d_compile *c, nir_intrinsic_instr *instr,
                                                                    1 : 0]));
         }
 
+        struct qreg tmud = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUD);
         unsigned writemask = is_store ? nir_intrinsic_write_mask(instr) : 0;
         uint32_t base_const_offset = const_offset;
         int first_component = -1;
@@ -301,29 +304,28 @@ ntq_emit_tmu_general(struct v3d_compile *c, nir_intrinsic_instr *instr,
                                 BITFIELD_RANGE(first_component, tmu_writes - 1);
                         writemask &= ~written_mask;
                 } else if (!is_load && !atomic_add_replaced) {
-                        vir_MOV_dest(c,
-                                     vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUD),
-                                     ntq_get_src(c, instr->src[1 + has_index], 0));
+                        struct qreg data =
+                                ntq_get_src(c, instr->src[1 + has_index], 0);
+                        vir_MOV_dest(c, tmud, data);
                         tmu_writes++;
                         if (tmu_op == V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH) {
-                                vir_MOV_dest(c,
-                                             vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUD),
-                                             ntq_get_src(c, instr->src[2 + has_index],
-                                                         0));
+                                data = ntq_get_src(c, instr->src[2 + has_index],
+                                                   0);
+                                vir_MOV_dest(c, tmud, data);
                                 tmu_writes++;
                         }
                 }
 
-                /* Make sure we won't exceed the 16-entry TMU fifo if each thread is
-                 * storing at the same time.
+                /* Make sure we won't exceed the 16-entry TMU fifo if each
+                 * thread is storing at the same time.
                  */
                 while (tmu_writes > 16 / c->threads)
                         c->threads /= 2;
 
-                /* The spec says that for atomics, the TYPE field is ignored, but that
-                 * doesn't seem to be the case for CMPXCHG. Just use the number of
-                 * tmud writes we did to decide the type (or choose "32bit" for atomic
-                 * reads, which has been fine).
+                /* The spec says that for atomics, the TYPE field is ignored,
+                 * but that doesn't seem to be the case for CMPXCHG. Just use
+                 * the number of tmud writes we did to decide the type (or
+                 * choose "32bit" for atomic reads, which has been fine).
                  */
                 uint32_t num_components;
                 if (is_load || atomic_add_replaced) {
@@ -339,7 +341,8 @@ ntq_emit_tmu_general(struct v3d_compile *c, nir_intrinsic_instr *instr,
                 if (num_components == 1) {
                         config |= GENERAL_TMU_LOOKUP_TYPE_32BIT_UI;
                 } else {
-                        config |= GENERAL_TMU_LOOKUP_TYPE_VEC2 + num_components - 2;
+                        config |= GENERAL_TMU_LOOKUP_TYPE_VEC2 +
+                                  num_components - 2;
                 }
 
                 if (vir_in_nonuniform_control_flow(c)) {
@@ -360,8 +363,9 @@ ntq_emit_tmu_general(struct v3d_compile *c, nir_intrinsic_instr *instr,
                         offset = vir_ADD(c, offset,
                                          vir_uniform_ui(c, const_offset));
                 }
-                tmu = vir_ADD_dest(c, tmua, offset,
-                                   ntq_get_src(c, instr->src[offset_src], 0));
+                struct qreg data =
+                        ntq_get_src(c, instr->src[offset_src], 0);
+                tmu = vir_ADD_dest(c, tmua, offset, data);
         } else {
                 if (const_offset != 0) {
                         tmu = vir_ADD_dest(c, tmua, base_offset,
@@ -372,7 +376,8 @@ ntq_emit_tmu_general(struct v3d_compile *c, nir_intrinsic_instr *instr,
         }
 
         if (config != ~0) {
-                tmu->uniform = vir_get_uniform_index(c, QUNIFORM_CONSTANT,
-                                                     config);
+                tmu->uniform =
+                        vir_get_uniform_index(c, QUNIFORM_CONSTANT,
+                                              config);
         }
 
@@ -382,8 +387,10 @@ ntq_emit_tmu_general(struct v3d_compile *c, nir_intrinsic_instr *instr,
         vir_emit_thrsw(c);
 
         /* Read the result, or wait for the TMU op to complete. */
-        for (int i = 0; i < nir_intrinsic_dest_components(instr); i++)
-                ntq_store_dest(c, &instr->dest, i, vir_MOV(c, vir_LDTMU(c)));
+        for (int i = 0; i < nir_intrinsic_dest_components(instr); i++) {
+                ntq_store_dest(c, &instr->dest, i,
+                               vir_MOV(c, vir_LDTMU(c)));
+        }
 
         if (nir_intrinsic_dest_components(instr) == 0)
                 vir_TMUWT(c);