util: use C99 declaration in the for-loop set_foreach() macro
Signed-off-by: Eric Engestrom <eric@engestrom.ch>
Reviewed-by: Timothy Arceri <tarceri@itsqueeze.com>
committed by Eric Engestrom
parent bb84fa146f
commit e27902a261
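
With this change the set_foreach() macro declares its iteration variable itself, so callers can drop their local struct set_entry declarations, which is what the hunks below do. A minimal before/after sketch of a typical caller (illustrative only; the block->predecessors set and the nir_block cast mirror the NIR call sites touched by this diff, not any single hunk):

    /* before: every caller had to declare the entry variable by hand */
    struct set_entry *entry;
    set_foreach(block->predecessors, entry) {
       nir_block *pred = (nir_block *) entry->key;
       /* ... use pred ... */
    }

    /* after: set_foreach() declares 'entry' with C99 for-loop scope */
    set_foreach(block->predecessors, entry) {
       nir_block *pred = (nir_block *) entry->key;
       /* ... use pred ... */
    }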
@@ -111,7 +111,6 @@ public:
       /* TODO: Check write mask, and possibly not clear everything. */

       /* For any usage of our variable on the RHS, clear it out. */
-      struct set_entry *set_entry;
       set_foreach(entry->dsts, set_entry) {
          ir_variable *dst_var = (ir_variable *)set_entry->key;
          acp_entry *dst_entry = pull_acp(dst_var);
@@ -87,8 +87,6 @@ public:

    void remove_dead_variables()
    {
-      struct set_entry *entry;
-
       set_foreach(variables, entry) {
          ir_variable *ir = (ir_variable *) entry->key;
@@ -189,7 +189,6 @@ split_block_beginning(nir_block *block)
    new_block->cf_node.parent = block->cf_node.parent;
    exec_node_insert_node_before(&block->cf_node.node, &new_block->cf_node.node);

-   struct set_entry *entry;
    set_foreach(block->predecessors, entry) {
       nir_block *pred = (nir_block *) entry->key;
       replace_successor(pred, block, new_block);
@@ -42,7 +42,6 @@ init_block(nir_block *block, nir_function_impl *impl)
    block->imm_dom = NULL;
    block->num_dom_children = 0;

-   struct set_entry *entry;
    set_foreach(block->dom_frontier, entry) {
       _mesa_set_remove(block->dom_frontier, entry);
    }
@@ -72,7 +71,6 @@ static bool
 calc_dominance(nir_block *block)
 {
    nir_block *new_idom = NULL;
-   struct set_entry *entry;
    set_foreach(block->predecessors, entry) {
       nir_block *pred = (nir_block *) entry->key;

@@ -96,7 +94,6 @@ static bool
 calc_dom_frontier(nir_block *block)
 {
    if (block->predecessors->entries > 1) {
-      struct set_entry *entry;
       set_foreach(block->predecessors, entry) {
          nir_block *runner = (nir_block *) entry->key;

@@ -269,7 +266,6 @@ nir_dump_dom_frontier_impl(nir_function_impl *impl, FILE *fp)
 {
    nir_foreach_block(block, impl) {
       fprintf(fp, "DF(%u) = {", block->index);
-      struct set_entry *entry;
       set_foreach(block->dom_frontier, entry) {
          nir_block *df = (nir_block *) entry->key;
          fprintf(fp, "%u, ", df->index);
@@ -830,7 +830,6 @@ place_phi_read(nir_shader *shader, nir_register *reg,
    if (block != def->parent_instr->block) {
       /* Try to go up the single-successor tree */
       bool all_single_successors = true;
-      struct set_entry *entry;
       set_foreach(block->predecessors, entry) {
          nir_block *pred = (nir_block *)entry->key;
          if (pred->successors[0] && pred->successors[1]) {
@@ -62,7 +62,6 @@ nir_gs_count_vertices(const nir_shader *shader)
    /* set_vertex_count intrinsics only appear in predecessors of the
     * end block. So we don't need to walk all of them.
     */
-   struct set_entry *entry;
    set_foreach(function->impl->end_block->predecessors, entry) {
       nir_block *block = (nir_block *) entry->key;

@@ -218,7 +218,6 @@ nir_live_ssa_defs_impl(nir_function_impl *impl)
        * changed, add the predecessor to the work list so that we ensure
        * that the new information is used.
        */
-      struct set_entry *entry;
       set_foreach(block->predecessors, entry) {
          nir_block *pred = (nir_block *)entry->key;
          if (propagate_across_edge(pred, block, &state))
@@ -165,7 +165,6 @@ append_set_vertex_count(nir_block *end_block, struct state *state)
    /* Insert the new intrinsic in all of the predecessors of the end block,
     * but before any jump instructions (return).
     */
-   struct set_entry *entry;
    set_foreach(end_block->predecessors, entry) {
       nir_block *pred = (nir_block *) entry->key;
       b->cursor = nir_after_block_before_jump(pred);
@@ -98,7 +98,6 @@ emit_output_copies_impl(struct lower_io_state *state, nir_function_impl *impl)
       /* For all other shader types, we need to do the copies right before
        * the jumps to the end block.
        */
-      struct set_entry *block_entry;
       set_foreach(impl->end_block->predecessors, block_entry) {
          struct nir_block *block = (void *)block_entry->key;
          b.cursor = nir_after_block_before_jump(block);
@@ -460,7 +460,6 @@ lower_copies_to_load_store(struct deref_node *node,
    nir_builder b;
    nir_builder_init(&b, state->impl);

-   struct set_entry *copy_entry;
    set_foreach(node->copies, copy_entry) {
       nir_intrinsic_instr *copy = (void *)copy_entry->key;

@@ -727,7 +726,6 @@ nir_lower_vars_to_ssa_impl(nir_function_impl *impl)
       assert(node->path.path[0]->var->constant_initializer == NULL);

       if (node->stores) {
-         struct set_entry *store_entry;
          set_foreach(node->stores, store_entry) {
             nir_intrinsic_instr *store =
                (nir_intrinsic_instr *)store_entry->key;
@@ -40,7 +40,6 @@ find_continue_block(nir_loop *loop)

    assert(header_block->predecessors->entries == 2);

-   struct set_entry *pred_entry;
    set_foreach(header_block->predecessors, pred_entry) {
       if (pred_entry->key != prev_block)
          return (nir_block*)pred_entry->key;
@@ -129,7 +129,6 @@ nir_phi_builder_add_value(struct nir_phi_builder *pb, unsigned num_components,

       while (w_start != w_end) {
          nir_block *cur = pb->W[w_start++];
-         struct set_entry *dom_entry;
          set_foreach(cur->dom_frontier, dom_entry) {
             nir_block *next = (nir_block *) dom_entry->key;

@@ -276,7 +275,6 @@ nir_phi_builder_finish(struct nir_phi_builder *pb)
        * XXX: Calling qsort this many times seems expensive.
        */
       int num_preds = 0;
-      struct set_entry *entry;
       set_foreach(phi->instr.block->predecessors, entry)
          preds[num_preds++] = (nir_block *)entry->key;
       qsort(preds, num_preds, sizeof(*preds), compare_blocks);
@@ -1085,7 +1085,6 @@ print_block(nir_block *block, print_state *state, unsigned tabs)
    nir_block **preds =
       malloc(block->predecessors->entries * sizeof(nir_block *));

-   struct set_entry *entry;
    unsigned i = 0;
    set_foreach(block->predecessors, entry) {
       preds[i++] = (nir_block *) entry->key;
@@ -1200,7 +1200,6 @@ shrink_vec_var_list(struct exec_list *vars,
       if (!var_usage || !var_usage->vars_copied)
          continue;

-      struct set_entry *copy_entry;
       set_foreach(var_usage->vars_copied, copy_entry) {
          struct vec_var_usage *copy_usage = (void *)copy_entry->key;
          if (copy_usage->comps_kept != var_usage->comps_kept) {
@@ -122,7 +122,6 @@ convert_loop_exit_for_ssa(nir_ssa_def *def, void *void_state)
    /* Create a phi node with as many sources pointing to the same ssa_def as
     * the block has predecessors.
     */
-   struct set_entry *entry;
    set_foreach(block_after_loop->predecessors, entry) {
       nir_phi_src *phi_src = ralloc(phi, nir_phi_src);
       phi_src->src = nir_src_for_ssa(def);
@@ -182,8 +182,6 @@ batch_flush_reset_dependencies(struct fd_batch *batch, bool flush)
 static void
 batch_reset_resources_locked(struct fd_batch *batch)
 {
-   struct set_entry *entry;
-
    pipe_mutex_assert_locked(batch->ctx->screen->lock);

    set_foreach(batch->resources, entry) {
@@ -282,7 +282,6 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx, bool non
    for (unsigned i = 0; i < ARRAY_SIZE(cache->batches); i++) {
       batch = cache->batches[i];
       debug_printf("%d: needs_flush=%d, depends:", batch->idx, batch->needs_flush);
-      struct set_entry *entry;
       set_foreach(batch->dependencies, entry) {
          struct fd_batch *dep = (struct fd_batch *)entry->key;
          debug_printf(" %d", dep->idx);
@@ -2987,7 +2987,6 @@ get_block(struct ir3_context *ctx, const nir_block *nblock)
 {
    struct ir3_block *block;
    struct hash_entry *hentry;
-   struct set_entry *sentry;
    unsigned i;

    hentry = _mesa_hash_table_search(ctx->block_ht, nblock);
@@ -232,7 +232,6 @@ static int
 v3d_simulator_pin_bos(int fd, struct v3d_job *job)
 {
    struct v3d_simulator_file *file = v3d_get_simulator_file_for_fd(fd);
-   struct set_entry *entry;

    set_foreach(job->bos, entry) {
       struct v3d_bo *bo = (struct v3d_bo *)entry->key;
@@ -250,7 +249,6 @@ static int
 v3d_simulator_unpin_bos(int fd, struct v3d_job *job)
 {
    struct v3d_simulator_file *file = v3d_get_simulator_file_for_fd(fd);
-   struct set_entry *entry;

    set_foreach(job->bos, entry) {
       struct v3d_bo *bo = (struct v3d_bo *)entry->key;
@@ -138,7 +138,6 @@ brw_nir_apply_tcs_quads_workaround(nir_shader *nir)
    const unsigned num_end_preds = impl->end_block->predecessors->entries;
    nir_block *end_preds[num_end_preds];
    unsigned i = 0;
-   struct set_entry *entry;

    set_foreach(impl->end_block->predecessors, entry) {
       end_preds[i++] = (nir_block *) entry->key;
@@ -89,7 +89,6 @@ anv_reloc_list_init_clone(struct anv_reloc_list *list,
           list->array_length * sizeof(*list->relocs));
    memcpy(list->reloc_bos, other_list->reloc_bos,
           list->array_length * sizeof(*list->reloc_bos));
-   struct set_entry *entry;
    set_foreach(other_list->deps, entry) {
       _mesa_set_add_pre_hashed(list->deps, entry->hash, entry->key);
    }
@@ -205,7 +204,6 @@ anv_reloc_list_append(struct anv_reloc_list *list,

    list->num_relocs += other->num_relocs;

-   struct set_entry *entry;
    set_foreach(other->deps, entry) {
       _mesa_set_add_pre_hashed(list->deps, entry->hash, entry->key);
    }
@@ -1135,7 +1133,6 @@ anv_execbuf_add_bo(struct anv_execbuf *exec,
       if (bos == NULL)
          return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

-      struct set_entry *entry;
       struct anv_bo **bo = bos;
       set_foreach(relocs->deps, entry) {
          *bo++ = (void *)entry->key;
@@ -409,7 +409,6 @@ free_shared_state(struct gl_context *ctx, struct gl_shared_state *shared)
    _mesa_reference_buffer_object(ctx, &shared->NullBufferObj, NULL);

    if (shared->SyncObjects) {
-      struct set_entry *entry;
       set_foreach(shared->SyncObjects, entry) {
          _mesa_unref_sync_object(ctx, (struct gl_sync_object *) entry->key, 1);
       }
@@ -168,8 +168,6 @@ _mesa_set_destroy(struct set *ht, void (*delete_function)(struct set_entry *entr
       return;

    if (delete_function) {
-      struct set_entry *entry;
-
       set_foreach (ht, entry) {
          delete_function(entry);
       }
@@ -187,8 +185,6 @@ _mesa_set_destroy(struct set *ht, void (*delete_function)(struct set_entry *entr
 void
 _mesa_set_clear(struct set *set, void (*delete_function)(struct set_entry *entry))
 {
-   struct set_entry *entry;
-
    if (!set)
       return;

@@ -256,7 +252,7 @@ static void
 set_rehash(struct set *ht, unsigned new_size_index)
 {
    struct set old_ht;
-   struct set_entry *table, *entry;
+   struct set_entry *table;

    if (new_size_index >= ARRAY_SIZE(hash_sizes))
       return;
@@ -97,7 +97,7 @@ _mesa_set_random_entry(struct set *set,
  * pointer).
  */
 #define set_foreach(set, entry) \
-   for (entry = _mesa_set_next_entry(set, NULL); \
+   for (struct set_entry *entry = _mesa_set_next_entry(set, NULL); \
        entry != NULL; \
        entry = _mesa_set_next_entry(set, entry))
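
A side effect of the C99 declaration is that the entry variable now exists only for the duration of the loop rather than for the enclosing block. A small usage sketch of the updated macro (do_something() is a hypothetical placeholder, not a Mesa function; s is assumed to have been built with _mesa_set_create()):

    set_foreach(s, entry) {
       do_something(entry->key);   /* entry is valid inside the loop */
    }
    /* entry is out of scope here; nested loops simply pick distinct
     * names, e.g. set_entry or copy_entry, as several hunks above do. */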