vc4: Add a "qir_for_each_inst_inorder" macro and use it in many places.

We have the prior list_for_each_entry() iteration over c->instructions all
over the code, but I need to move where instructions live as part of adding
support for control flow.  Start by just converting to a helper iterator
macro.  (The simpler "qir_for_each_inst()" name will be used for the
per-block for-each-inst iterator macro later.)
This commit is contained in:
Eric Anholt
2016-07-08 15:24:34 -07:00
parent 6858f05924
commit d3cdbf6fd8
12 changed files with 17 additions and 14 deletions

View File

@@ -143,7 +143,7 @@ qir_opt_algebraic(struct vc4_compile *c)
{ {
bool progress = false; bool progress = false;
list_for_each_entry(struct qinst, inst, &c->instructions, link) { qir_for_each_inst_inorder(inst, c) {
switch (inst->op) { switch (inst->op) {
case QOP_FMIN: case QOP_FMIN:
if (is_1f(c, inst->src[1]) && if (is_1f(c, inst->src[1]) &&

View File

@@ -99,7 +99,7 @@ qir_opt_constant_folding(struct vc4_compile *c)
{ {
bool progress = false; bool progress = false;
list_for_each_entry(struct qinst, inst, &c->instructions, link) { qir_for_each_inst_inorder(inst, c) {
if (constant_fold(c, inst)) if (constant_fold(c, inst))
progress = true; progress = true;
} }

View File

@@ -40,7 +40,7 @@ qir_opt_copy_propagation(struct vc4_compile *c)
bool progress = false; bool progress = false;
bool debug = false; bool debug = false;
list_for_each_entry(struct qinst, inst, &c->instructions, link) { qir_for_each_inst_inorder(inst, c) {
int nsrc = qir_get_op_nsrc(inst->op); int nsrc = qir_get_op_nsrc(inst->op);
for (int i = 0; i < nsrc; i++) { for (int i = 0; i < nsrc; i++) {
if (inst->src[i].file != QFILE_TEMP) if (inst->src[i].file != QFILE_TEMP)

View File

@@ -38,7 +38,7 @@ qir_opt_small_immediates(struct vc4_compile *c)
{ {
bool progress = false; bool progress = false;
list_for_each_entry(struct qinst, inst, &c->instructions, link) { qir_for_each_inst_inorder(inst, c) {
/* The small immediate value sits in the raddr B field, so we /* The small immediate value sits in the raddr B field, so we
* can't have 2 small immediates in one instruction (unless * can't have 2 small immediates in one instruction (unless
* they're the same value, but that should be optimized away * they're the same value, but that should be optimized away

View File

@@ -44,7 +44,7 @@ qir_opt_vpm(struct vc4_compile *c)
uint32_t vpm_write_count = 0; uint32_t vpm_write_count = 0;
memset(&use_count, 0, sizeof(use_count)); memset(&use_count, 0, sizeof(use_count));
list_for_each_entry(struct qinst, inst, &c->instructions, link) { qir_for_each_inst_inorder(inst, c) {
switch (inst->dst.file) { switch (inst->dst.file) {
case QFILE_VPM: case QFILE_VPM:
vpm_writes[vpm_write_count++] = inst; vpm_writes[vpm_write_count++] = inst;
@@ -64,7 +64,7 @@ qir_opt_vpm(struct vc4_compile *c)
/* For instructions reading from a temporary that contains a VPM read /* For instructions reading from a temporary that contains a VPM read
* result, try to move the instruction up in place of the VPM read. * result, try to move the instruction up in place of the VPM read.
*/ */
list_for_each_entry(struct qinst, inst, &c->instructions, link) { qir_for_each_inst_inorder(inst, c) {
if (!inst) if (!inst)
continue; continue;

View File

@@ -2003,7 +2003,7 @@ vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
bool input_live[c->num_input_slots]; bool input_live[c->num_input_slots];
memset(input_live, 0, sizeof(input_live)); memset(input_live, 0, sizeof(input_live));
list_for_each_entry(struct qinst, inst, &c->instructions, link) { qir_for_each_inst_inorder(inst, c) {
for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) { for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
if (inst->src[i].file == QFILE_VARY) if (inst->src[i].file == QFILE_VARY)
input_live[inst->src[i].index] = true; input_live[inst->src[i].index] = true;

View File

@@ -719,4 +719,7 @@ qir_LOAD_IMM(struct vc4_compile *c, uint32_t val)
qir_reg(QFILE_LOAD_IMM, val), c->undef)); qir_reg(QFILE_LOAD_IMM, val), c->undef));
} }
#define qir_for_each_inst_inorder(inst, c) \
list_for_each_entry(struct qinst, inst, &c->instructions, link)
#endif /* VC4_QIR_H */ #endif /* VC4_QIR_H */

View File

@@ -118,7 +118,7 @@ qir_lower_uniforms(struct vc4_compile *c)
* than one uniform referenced, and add those uniform values to the * than one uniform referenced, and add those uniform values to the
* ht. * ht.
*/ */
list_for_each_entry(struct qinst, inst, &c->instructions, link) { qir_for_each_inst_inorder(inst, c) {
uint32_t nsrc = qir_get_op_nsrc(inst->op); uint32_t nsrc = qir_get_op_nsrc(inst->op);
if (qir_get_instruction_uniform_count(inst) <= 1) if (qir_get_instruction_uniform_count(inst) <= 1)
@@ -154,7 +154,7 @@ qir_lower_uniforms(struct vc4_compile *c)
struct qinst *mov = qir_inst(QOP_MOV, temp, unif, c->undef); struct qinst *mov = qir_inst(QOP_MOV, temp, unif, c->undef);
list_add(&mov->link, &c->instructions); list_add(&mov->link, &c->instructions);
c->defs[temp.index] = mov; c->defs[temp.index] = mov;
list_for_each_entry(struct qinst, inst, &c->instructions, link) { qir_for_each_inst_inorder(inst, c) {
uint32_t nsrc = qir_get_op_nsrc(inst->op); uint32_t nsrc = qir_get_op_nsrc(inst->op);
uint32_t count = qir_get_instruction_uniform_count(inst); uint32_t count = qir_get_instruction_uniform_count(inst);

View File

@@ -53,7 +53,7 @@ void qir_validate(struct vc4_compile *c)
fail_instr(c, def, "SSA def with condition"); fail_instr(c, def, "SSA def with condition");
} }
list_for_each_entry(struct qinst, inst, &c->instructions, link) { qir_for_each_inst_inorder(inst, c) {
switch (inst->dst.file) { switch (inst->dst.file) {
case QFILE_TEMP: case QFILE_TEMP:
if (inst->dst.index >= c->num_temps) if (inst->dst.index >= c->num_temps)

View File

@@ -213,7 +213,7 @@ vc4_generate_code(struct vc4_context *vc4, struct vc4_compile *c)
break; break;
} }
list_for_each_entry(struct qinst, qinst, &c->instructions, link) { qir_for_each_inst_inorder(qinst, c) {
#if 0 #if 0
fprintf(stderr, "translating qinst to qpu: "); fprintf(stderr, "translating qinst to qpu: ");
qir_dump_inst(qinst); qir_dump_inst(qinst);

View File

@@ -198,7 +198,7 @@ vc4_register_allocate(struct vc4_context *vc4, struct vc4_compile *c)
/* Compute the live ranges so we can figure out interference. /* Compute the live ranges so we can figure out interference.
*/ */
uint32_t ip = 0; uint32_t ip = 0;
list_for_each_entry(struct qinst, inst, &c->instructions, link) { qir_for_each_inst_inorder(inst, c) {
if (inst->dst.file == QFILE_TEMP) { if (inst->dst.file == QFILE_TEMP) {
def[inst->dst.index] = MIN2(ip, def[inst->dst.index]); def[inst->dst.index] = MIN2(ip, def[inst->dst.index]);
use[inst->dst.index] = ip; use[inst->dst.index] = ip;
@@ -242,7 +242,7 @@ vc4_register_allocate(struct vc4_context *vc4, struct vc4_compile *c)
sizeof(class_bits)); sizeof(class_bits));
ip = 0; ip = 0;
list_for_each_entry(struct qinst, inst, &c->instructions, link) { qir_for_each_inst_inorder(inst, c) {
if (qir_writes_r4(inst)) { if (qir_writes_r4(inst)) {
/* This instruction writes r4 (and optionally moves /* This instruction writes r4 (and optionally moves
* its result to a temp), so nothing else can be * its result to a temp), so nothing else can be

View File

@@ -43,7 +43,7 @@ qir_reorder_uniforms(struct vc4_compile *c)
uint32_t uniform_index_size = 0; uint32_t uniform_index_size = 0;
uint32_t next_uniform = 0; uint32_t next_uniform = 0;
list_for_each_entry(struct qinst, inst, &c->instructions, link) { qir_for_each_inst_inorder(inst, c) {
uint32_t new = ~0; uint32_t new = ~0;
for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) { for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {