util: rename list_empty() to list_is_empty()
This makes it clear that it's a boolean test and not an action (e.g. "empty the list").

Reviewed-by: Eric Engestrom <eric@engestrom.ch>
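For context, here is a minimal sketch of the helper being renamed, reconstructed from the list-header hunk near the end of this diff (the exact field order of struct list_head is an assumption; the body is taken verbatim from that hunk):

    /* An empty list is a sentinel node that points back at itself.
     * The new name reads as the predicate it is, not as a command.
     */
    struct list_head {
        struct list_head *prev;
        struct list_head *next;
    };

    static inline bool list_is_empty(const struct list_head *list)
    {
        return list->next == list;  /* pure test: nothing is modified */
    }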
@@ -3180,7 +3180,7 @@ VkResult radv_AllocateCommandBuffers(
 
    for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
 
-      if (!list_empty(&pool->free_cmd_buffers)) {
+      if (!list_is_empty(&pool->free_cmd_buffers)) {
         struct radv_cmd_buffer *cmd_buffer = list_first_entry(&pool->free_cmd_buffers, struct radv_cmd_buffer, pool_link);
 
         list_del(&cmd_buffer->pool_link);
@@ -429,7 +429,7 @@ ntq_store_dest(struct v3d_compile *c, nir_dest *dest, int chan,
                struct qreg result)
 {
        struct qinst *last_inst = NULL;
-       if (!list_empty(&c->cur_block->instructions))
+       if (!list_is_empty(&c->cur_block->instructions))
                last_inst = (struct qinst *)c->cur_block->instructions.prev;
 
        assert((result.file == QFILE_TEMP &&
@@ -1299,7 +1299,7 @@ schedule_instructions(struct v3d_compile *c,
        const struct v3d_device_info *devinfo = c->devinfo;
        uint32_t time = 0;
 
-       while (!list_empty(&scoreboard->dag->heads)) {
+       while (!list_is_empty(&scoreboard->dag->heads)) {
                struct schedule_node *chosen =
                        choose_instruction_to_schedule(devinfo,
                                                       scoreboard,
@@ -1439,7 +1439,7 @@ qpu_schedule_instructions_block(struct v3d_compile *c,
        list_inithead(&setup_list);
 
        /* Wrap each instruction in a scheduler structure. */
-       while (!list_empty(&block->instructions)) {
+       while (!list_is_empty(&block->instructions)) {
                struct qinst *qinst = (struct qinst *)block->instructions.next;
                struct schedule_node *n =
                        rzalloc(mem_ctx, struct schedule_node);
@@ -1034,7 +1034,7 @@ vir_compile_destroy(struct v3d_compile *c)
        c->cursor.link = NULL;
 
        vir_for_each_block(block, c) {
-               while (!list_empty(&block->instructions)) {
+               while (!list_is_empty(&block->instructions)) {
                        struct qinst *qinst =
                                list_first_entry(&block->instructions,
                                                 struct qinst, link);
@@ -1416,7 +1416,7 @@ nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest, nir_dest new_dest)
 {
    if (dest->is_ssa) {
       /* We can only overwrite an SSA destination if it has no uses. */
-      assert(list_empty(&dest->ssa.uses) && list_empty(&dest->ssa.if_uses));
+      assert(list_is_empty(&dest->ssa.uses) && list_is_empty(&dest->ssa.if_uses));
    } else {
       list_del(&dest->reg.def_link);
       if (dest->reg.indirect)
@@ -1547,7 +1547,7 @@ nir_ssa_def_components_read(const nir_ssa_def *def)
       }
    }
 
-   if (!list_empty(&def->if_uses))
+   if (!list_is_empty(&def->if_uses))
       read_mask |= 1;
 
    return read_mask;
@@ -1888,7 +1888,7 @@ nir_function_impl_lower_instructions(nir_function_impl *impl,
         list_for_each_entry_safe(nir_src, use_src, &old_if_uses, use_link)
            nir_if_rewrite_condition(use_src->parent_if, new_src);
 
-         if (list_empty(&old_def->uses) && list_empty(&old_def->if_uses)) {
+         if (list_is_empty(&old_def->uses) && list_is_empty(&old_def->if_uses)) {
            iter = nir_instr_remove(instr);
         } else {
            iter = nir_after_instr(instr);
@@ -629,7 +629,7 @@ fixup_phi_srcs(clone_state *state)
         list_addtail(&src->src.use_link, &src->src.reg.reg->uses);
      }
   }
-   assert(list_empty(&state->phi_srcs));
+   assert(list_is_empty(&state->phi_srcs));
 }
 
 void
@@ -669,7 +669,7 @@ clone_function_impl(clone_state *state, const nir_function_impl *fi)
   clone_reg_list(state, &nfi->registers, &fi->registers);
   nfi->reg_alloc = fi->reg_alloc;
 
-   assert(list_empty(&state->phi_srcs));
+   assert(list_is_empty(&state->phi_srcs));
 
   clone_cf_list(state, &nfi->body, &fi->body);
 
@@ -92,7 +92,7 @@ nir_deref_instr_remove_if_unused(nir_deref_instr *instr)
   for (nir_deref_instr *d = instr; d; d = nir_deref_instr_parent(d)) {
      /* If anyone is using this deref, leave it alone */
      assert(d->dest.is_ssa);
-      if (!list_empty(&d->dest.ssa.uses))
+      if (!list_is_empty(&d->dest.ssa.uses))
         break;
 
      nir_instr_remove(&d->instr);
@@ -855,7 +855,7 @@ opt_deref_cast(nir_builder *b, nir_deref_instr *cast)
   }
 
   /* If uses would be a bit crazy */
-   assert(list_empty(&cast->dest.ssa.if_uses));
+   assert(list_is_empty(&cast->dest.ssa.if_uses));
 
   nir_deref_instr_remove_if_unused(cast);
   return progress;
@@ -495,7 +495,7 @@ rewrite_ssa_def(nir_ssa_def *def, void *void_state)
   }
 
   nir_ssa_def_rewrite_uses(def, nir_src_for_reg(reg));
-   assert(list_empty(&def->uses) && list_empty(&def->if_uses));
+   assert(list_is_empty(&def->uses) && list_is_empty(&def->if_uses));
 
   if (def->parent_instr->type == nir_instr_type_ssa_undef) {
      /* If it's an ssa_undef instruction, remove it since we know we just got
@@ -961,7 +961,7 @@ ssa_def_is_local_to_block(nir_ssa_def *def, UNUSED void *state)
      }
   }
 
-   if (!list_empty(&def->if_uses))
+   if (!list_is_empty(&def->if_uses))
      return false;
 
   return true;
@@ -1191,8 +1191,8 @@ lower_explicit_io_deref(nir_builder *b, nir_deref_instr *deref,
    * one deref which could break our list walking since we walk the list
    * backwards.
    */
-   assert(list_empty(&deref->dest.ssa.if_uses));
-   if (list_empty(&deref->dest.ssa.uses)) {
+   assert(list_is_empty(&deref->dest.ssa.if_uses));
+   if (list_is_empty(&deref->dest.ssa.uses)) {
      nir_instr_remove(&deref->instr);
      return;
   }
@@ -300,9 +300,9 @@ nir_lower_regs_to_ssa_impl(nir_function_impl *impl)
 
   nir_foreach_register_safe(reg, &impl->registers) {
      if (state.values[reg->index]) {
-         assert(list_empty(&reg->uses));
-         assert(list_empty(&reg->if_uses));
-         assert(list_empty(&reg->defs));
+         assert(list_is_empty(&reg->uses));
+         assert(list_is_empty(&reg->if_uses));
+         assert(list_is_empty(&reg->defs));
         exec_node_remove(&reg->node);
      }
   }
@@ -122,8 +122,8 @@ nir_lower_to_source_mods_block(nir_block *block,
            alu->src[i].swizzle[j] = parent->src[0].swizzle[alu->src[i].swizzle[j]];
      }
 
-      if (list_empty(&parent->dest.dest.ssa.uses) &&
-          list_empty(&parent->dest.dest.ssa.if_uses))
+      if (list_is_empty(&parent->dest.dest.ssa.uses) &&
+          list_is_empty(&parent->dest.dest.ssa.if_uses))
         nir_instr_remove(&parent->instr);
 
      progress = true;
@@ -144,7 +144,7 @@ nir_lower_to_source_mods_block(nir_block *block,
      if (!(options & nir_lower_float_source_mods))
         continue;
 
-      if (!list_empty(&alu->dest.dest.ssa.if_uses))
+      if (!list_is_empty(&alu->dest.dest.ssa.if_uses))
         continue;
 
      bool all_children_are_sat = true;
@@ -140,7 +140,7 @@ try_coalesce(nir_alu_instr *vec, unsigned start_idx)
         return 0;
   }
 
-   if (!list_empty(&vec->src[start_idx].src.ssa->if_uses))
+   if (!list_is_empty(&vec->src[start_idx].src.ssa->if_uses))
      return 0;
 
   if (vec->src[start_idx].src.ssa->parent_instr->type != nir_instr_type_alu)
@@ -84,7 +84,7 @@ static struct combined_store *
 alloc_combined_store(struct combine_stores_state *state)
 {
   struct combined_store *result;
-   if (list_empty(&state->freelist)) {
+   if (list_is_empty(&state->freelist)) {
      result = linear_zalloc_child(state->lin_ctx, sizeof(*result));
   } else {
      result = list_first_entry(&state->freelist,
@@ -514,7 +514,7 @@ complex_unroll_single_terminator(nir_loop *loop)
 static bool
 wrapper_unroll(nir_loop *loop)
 {
-   if (!list_empty(&loop->info->loop_terminator_list)) {
+   if (!list_is_empty(&loop->info->loop_terminator_list)) {
 
      /* Unrolling a loop with a large number of exits can result in a
       * large inrease in register pressure. For now we just skip
@@ -156,7 +156,7 @@ block_check_for_allowed_instrs(nir_block *block, unsigned *count,
            return false;
 
         /* It cannot have any if-uses */
-         if (!list_empty(&mov->dest.dest.ssa.if_uses))
+         if (!list_is_empty(&mov->dest.dest.ssa.if_uses))
            return false;
 
         /* The only uses of this definition must be phis in the successor */
@@ -248,8 +248,8 @@ instr_try_combine(nir_instr *instr1, nir_instr *instr2)
      nir_if_rewrite_condition(src->parent_if, nir_src_for_ssa(new_alu1));
   }
 
-   assert(list_empty(&alu1->dest.dest.ssa.uses));
-   assert(list_empty(&alu1->dest.dest.ssa.if_uses));
+   assert(list_is_empty(&alu1->dest.dest.ssa.uses));
+   assert(list_is_empty(&alu1->dest.dest.ssa.if_uses));
 
   nir_foreach_use_safe(src, &alu2->dest.dest.ssa) {
      if (src->parent_instr->type == nir_instr_type_alu) {
@@ -285,8 +285,8 @@ instr_try_combine(nir_instr *instr1, nir_instr *instr2)
      nir_if_rewrite_condition(src->parent_if, nir_src_for_ssa(new_alu2));
   }
 
-   assert(list_empty(&alu2->dest.dest.ssa.uses));
-   assert(list_empty(&alu2->dest.dest.ssa.if_uses));
+   assert(list_is_empty(&alu2->dest.dest.ssa.uses));
+   assert(list_is_empty(&alu2->dest.dest.ssa.if_uses));
 
   nir_instr_remove(instr1);
   nir_instr_remove(instr2);
@@ -226,8 +226,8 @@ is_not_const_and_not_fsign(struct hash_table *ht, nir_alu_instr *instr, unsigned
 static inline bool
 is_used_once(nir_alu_instr *instr)
 {
-   bool zero_if_use = list_empty(&instr->dest.dest.ssa.if_uses);
-   bool zero_use = list_empty(&instr->dest.dest.ssa.uses);
+   bool zero_if_use = list_is_empty(&instr->dest.dest.ssa.if_uses);
+   bool zero_use = list_is_empty(&instr->dest.dest.ssa.uses);
 
   if (zero_if_use && zero_use)
      return false;
@@ -248,13 +248,13 @@ is_used_once(nir_alu_instr *instr)
 static inline bool
 is_used_by_if(nir_alu_instr *instr)
 {
-   return !list_empty(&instr->dest.dest.ssa.if_uses);
+   return !list_is_empty(&instr->dest.dest.ssa.if_uses);
 }
 
 static inline bool
 is_not_used_by_if(nir_alu_instr *instr)
 {
-   return list_empty(&instr->dest.dest.ssa.if_uses);
+   return list_is_empty(&instr->dest.dest.ssa.if_uses);
 }
 
 static inline bool
@@ -762,7 +762,7 @@ read_fixup_phis(read_ctx *ctx)
 
      list_addtail(&src->src.use_link, &src->src.ssa->uses);
   }
-   assert(list_empty(&ctx->phi_srcs));
+   assert(list_is_empty(&ctx->phi_srcs));
 }
 
 static void
@@ -498,7 +498,7 @@ validate_deref_instr(nir_deref_instr *instr, validate_state *state)
    * conditions expect well-formed Booleans. If you want to compare with
    * NULL, an explicit comparison operation should be used.
    */
-   validate_assert(state, list_empty(&instr->dest.ssa.if_uses));
+   validate_assert(state, list_is_empty(&instr->dest.ssa.if_uses));
 
   /* Only certain modes can be used as sources for phi instructions. */
   nir_foreach_use(use, &instr->dest.ssa) {
@@ -124,7 +124,7 @@ nir_instr_worklist_length(nir_instr_worklist *wl)
 }
 
 static inline bool
-nir_instr_worklist_empty(nir_instr_worklist *wl)
+nir_instr_worklist_is_empty(nir_instr_worklist *wl)
 {
   return nir_instr_worklist_length(wl) == 0;
 }
@@ -1002,7 +1002,7 @@ vtn_emit_cf_list(struct vtn_builder *b, struct list_head *cf_list,
 
         vtn_emit_cf_list(b, &vtn_loop->body, NULL, NULL, handler);
 
-         if (!list_empty(&vtn_loop->cont_body)) {
+         if (!list_is_empty(&vtn_loop->cont_body)) {
            /* If we have a non-trivial continue body then we need to put
             * it at the beginning of the loop with a flag to ensure that
             * it doesn't get executed in the first iteration.
@@ -2506,7 +2506,7 @@ emit_function(struct ir3_context *ctx, nir_function_impl *impl)
        /* at this point, we should have a single empty block,
         * into which we emit the 'end' instruction.
         */
-       compile_assert(ctx, list_empty(&ctx->block->instr_list));
+       compile_assert(ctx, list_is_empty(&ctx->block->instr_list));
 
        /* If stream-out (aka transform-feedback) enabled, emit the
         * stream-out instructions, followed by a new empty block (into
@@ -200,10 +200,10 @@ legalize_block(struct ir3_legalize_ctx *ctx, struct ir3_block *block)
                }
 
                /* need to be able to set (ss) on first instruction: */
-               if (list_empty(&block->instr_list) && (opc_cat(n->opc) >= 5))
+               if (list_is_empty(&block->instr_list) && (opc_cat(n->opc) >= 5))
                        ir3_NOP(block);
 
-               if (is_nop(n) && !list_empty(&block->instr_list)) {
+               if (is_nop(n) && !list_is_empty(&block->instr_list)) {
                        struct ir3_instruction *last = list_last_entry(&block->instr_list,
                                        struct ir3_instruction, node);
                        if (is_nop(last) && (last->repeat < 5)) {
@@ -410,7 +410,7 @@ resolve_dest_block(struct ir3_block *block)
         * (2) (block-is-empty || only-instr-is-jump)
         */
        if (block->successors[1] == NULL) {
-               if (list_empty(&block->instr_list)) {
+               if (list_is_empty(&block->instr_list)) {
                        return block->successors[0];
                } else if (list_length(&block->instr_list) == 1) {
                        struct ir3_instruction *instr = list_first_entry(
@@ -796,7 +796,7 @@ sched_block(struct ir3_sched_ctx *ctx, struct ir3_block *block)
                }
        }
 
-       while (!list_empty(&ctx->depth_list)) {
+       while (!list_is_empty(&ctx->depth_list)) {
                struct ir3_sched_notes notes = {0};
                struct ir3_instruction *instr;
 
@@ -1576,7 +1576,7 @@ tu_AllocateCommandBuffers(VkDevice _device,
 
   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
 
-      if (!list_empty(&pool->free_cmd_buffers)) {
+      if (!list_is_empty(&pool->free_cmd_buffers)) {
         struct tu_cmd_buffer *cmd_buffer = list_first_entry(
            &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);
 
@@ -623,7 +623,7 @@ dd_context_destroy(struct pipe_context *_pipe)
   mtx_destroy(&dctx->mutex);
   cnd_destroy(&dctx->cond);
 
-   assert(list_empty(&dctx->records));
+   assert(list_is_empty(&dctx->records));
 
   if (pipe->set_log_context) {
      pipe->set_log_context(pipe, NULL);
@@ -1103,7 +1103,7 @@ dd_thread_main(void *input)
      if (dctx->api_stalled)
         cnd_signal(&dctx->cond);
 
-      if (list_empty(&records)) {
+      if (list_is_empty(&records)) {
         if (dctx->kill_thread)
            break;
 
@@ -1184,7 +1184,7 @@ dd_add_record(struct dd_context *dctx, struct dd_draw_record *record)
      dctx->api_stalled = false;
   }
 
-   if (list_empty(&dctx->records))
+   if (list_is_empty(&dctx->records))
      cnd_signal(&dctx->cond);
 
   list_addtail(&record->list, &dctx->records);
@@ -150,7 +150,7 @@ etna_hw_begin_query(struct etna_context *ctx, struct etna_query *q)
   p->start(hq, ctx);
 
   /* add to active list */
-   assert(list_empty(&hq->node));
+   assert(list_is_empty(&hq->node));
   list_addtail(&hq->node, &ctx->active_hw_queries);
 
   return true;
@@ -91,7 +91,7 @@ fd_acc_begin_query(struct fd_context *ctx, struct fd_query *q)
        p->resume(aq, batch);
 
        /* add to active list: */
-       assert(list_empty(&aq->node));
+       assert(list_is_empty(&aq->node));
        list_addtail(&aq->node, &ctx->acc_active_queries);
 
        return true;
@@ -147,7 +147,7 @@ fd_hw_begin_query(struct fd_context *ctx, struct fd_query *q)
        resume_query(batch, hq, batch->draw);
 
        /* add to active list: */
-       assert(list_empty(&hq->list));
+       assert(list_is_empty(&hq->list));
        list_addtail(&hq->list, &ctx->hw_active_queries);
 
        return true;
@@ -454,12 +454,12 @@ void gpir_node_print_prog_seq(gpir_compiler *comp);
 
 static inline bool gpir_node_is_root(gpir_node *node)
 {
-   return list_empty(&node->succ_list);
+   return list_is_empty(&node->succ_list);
 }
 
 static inline bool gpir_node_is_leaf(gpir_node *node)
 {
-   return list_empty(&node->pred_list);
+   return list_is_empty(&node->pred_list);
 }
 
 #define gpir_node_to_alu(node) ((gpir_alu_node *)(node))
@@ -128,7 +128,7 @@ static void schedule_insert_ready_list(struct list_head *ready_list,
 
 static void schedule_ready_list(gpir_block *block, struct list_head *ready_list)
 {
-   if (list_empty(ready_list))
+   if (list_is_empty(ready_list))
      return;
 
   gpir_node *node = list_first_entry(ready_list, gpir_node, list);
@@ -1604,7 +1604,7 @@ static bool schedule_block(gpir_block *block)
   }
 
   list_inithead(&block->node_list);
-   while (!list_empty(&ctx.ready_list)) {
+   while (!list_is_empty(&ctx.ready_list)) {
      if (!schedule_one_instr(&ctx))
         return false;
   }
@@ -596,13 +596,13 @@ static void ppir_codegen_encode_branch(ppir_node *node, void *code)
   }
 
   target = branch->target;
-   while (list_empty(&target->instr_list)) {
+   while (list_is_empty(&target->instr_list)) {
      if (!target->list.next)
         break;
      target = LIST_ENTRY(ppir_block, target->list.next, list);
   }
 
-   assert(!list_empty(&target->instr_list));
+   assert(!list_is_empty(&target->instr_list));
 
   target_instr = list_first_entry(&target->instr_list, ppir_instr, list);
   b->branch.target = target_instr->offset - node->instr->offset;
@@ -398,12 +398,12 @@ ppir_node *ppir_node_insert_mov(ppir_node *node);
 
 static inline bool ppir_node_is_root(ppir_node *node)
 {
-   return list_empty(&node->succ_list);
+   return list_is_empty(&node->succ_list);
 }
 
 static inline bool ppir_node_is_leaf(ppir_node *node)
 {
-   return list_empty(&node->pred_list);
+   return list_is_empty(&node->pred_list);
 }
 
 static inline bool ppir_node_has_single_succ(ppir_node *node)
@@ -652,12 +652,12 @@ void ppir_instr_insert_mul_node(ppir_node *add, ppir_node *mul);
 
 static inline bool ppir_instr_is_root(ppir_instr *instr)
 {
-   return list_empty(&instr->succ_list);
+   return list_is_empty(&instr->succ_list);
 }
 
 static inline bool ppir_instr_is_leaf(ppir_instr *instr)
 {
-   return list_empty(&instr->pred_list);
+   return list_is_empty(&instr->pred_list);
 }
 
 bool ppir_lower_prog(ppir_compiler *comp);
@@ -688,7 +688,7 @@ bool ppir_regalloc_prog(ppir_compiler *comp)
   ppir_regalloc_update_reglist_ssa(comp);
 
   /* No registers? Probably shader consists of discard instruction */
-   if (list_empty(&comp->reg_list))
+   if (list_is_empty(&comp->reg_list))
      return true;
 
   /* this will most likely succeed in the first
@@ -118,7 +118,7 @@ static void ppir_insert_ready_list(struct list_head *ready_list,
 static void ppir_schedule_ready_list(ppir_block *block,
                                      struct list_head *ready_list)
 {
-   if (list_empty(ready_list))
+   if (list_is_empty(ready_list))
      return;
 
   ppir_instr *instr = list_first_entry(ready_list, ppir_instr, list);
@@ -44,7 +44,7 @@ lp_cs_tpool_worker(void *data)
   while (!pool->shutdown) {
      struct lp_cs_tpool_task *task;
 
-      while (list_empty(&pool->workqueue) && !pool->shutdown)
+      while (list_is_empty(&pool->workqueue) && !pool->shutdown)
         cnd_wait(&pool->new_work, &pool->m);
 
      if (pool->shutdown)
@@ -65,7 +65,7 @@ v3d_bo_dump_stats(struct v3d_screen *screen)
        fprintf(stderr, " BOs cached: %d\n", cache_count);
        fprintf(stderr, " BOs cached size: %dkb\n", cache_size / 1024);
 
-       if (!list_empty(&cache->time_list)) {
+       if (!list_is_empty(&cache->time_list)) {
                struct v3d_bo *first = list_first_entry(&cache->time_list,
                                                        struct v3d_bo,
                                                        time_list);
@@ -103,7 +103,7 @@ v3d_bo_from_cache(struct v3d_screen *screen, uint32_t size, const char *name)
 
        struct v3d_bo *bo = NULL;
        mtx_lock(&cache->lock);
-       if (!list_empty(&cache->size_list[page_index])) {
+       if (!list_is_empty(&cache->size_list[page_index])) {
                bo = list_first_entry(&cache->size_list[page_index],
                                      struct v3d_bo, size_list);
 
@@ -170,7 +170,7 @@ v3d_bo_alloc(struct v3d_screen *screen, uint32_t size, const char *name)
        bo->offset = create.offset;
 
        if (ret != 0) {
-               if (!list_empty(&screen->bo_cache.time_list) &&
+               if (!list_is_empty(&screen->bo_cache.time_list) &&
                    !cleared_and_retried) {
                        cleared_and_retried = true;
                        v3d_bo_cache_free_all(&screen->bo_cache);
@@ -300,7 +300,7 @@ v3d_bo_last_unreference_locked_timed(struct v3d_bo *bo, time_t time)
         */
        for (int i = 0; i < cache->size_list_size; i++) {
                struct list_head *old_head = &cache->size_list[i];
-               if (list_empty(old_head))
+               if (list_is_empty(old_head))
                        list_inithead(&new_list[i]);
                else {
                        new_list[i].next = old_head->next;
@@ -92,7 +92,7 @@ vc4_bo_dump_stats(struct vc4_screen *screen)
        fprintf(stderr, " BOs cached: %d\n", cache->bo_count);
        fprintf(stderr, " BOs cached size: %dkb\n", cache->bo_size / 1024);
 
-       if (!list_empty(&cache->time_list)) {
+       if (!list_is_empty(&cache->time_list)) {
                struct vc4_bo *first = LIST_ENTRY(struct vc4_bo,
                                                  cache->time_list.next,
                                                  time_list);
@@ -263,7 +263,7 @@ vc4_bo_alloc(struct vc4_screen *screen, uint32_t size, const char *name)
        bo->handle = create.handle;
 
        if (ret != 0) {
-               if (!list_empty(&screen->bo_cache.time_list) &&
+               if (!list_is_empty(&screen->bo_cache.time_list) &&
                    !cleared_and_retried) {
                        cleared_and_retried = true;
                        vc4_bo_cache_free_all(&screen->bo_cache);
@@ -185,7 +185,7 @@ ntq_store_dest(struct vc4_compile *c, nir_dest *dest, int chan,
                struct qreg result)
 {
        struct qinst *last_inst = NULL;
-       if (!list_empty(&c->cur_block->instructions))
+       if (!list_is_empty(&c->cur_block->instructions))
                last_inst = (struct qinst *)c->cur_block->instructions.prev;
 
        assert(result.file == QFILE_UNIF ||
@@ -832,7 +832,7 @@ ntq_src_is_only_ssa_def_user(nir_src *src)
        if (!src->is_ssa)
                return false;
 
-       if (!list_empty(&src->ssa->if_uses))
+       if (!list_is_empty(&src->ssa->if_uses))
                return false;
 
        return (src->ssa->uses.next == &src->use_link &&
@@ -738,7 +738,7 @@ void
 qir_compile_destroy(struct vc4_compile *c)
 {
        qir_for_each_block(block, c) {
-               while (!list_empty(&block->instructions)) {
+               while (!list_is_empty(&block->instructions)) {
                        struct qinst *qinst =
                                list_first_entry(&block->instructions,
                                                 struct qinst, link);
@@ -798,7 +798,7 @@ qir_SF(struct vc4_compile *c, struct qreg src)
 {
        struct qinst *last_inst = NULL;
 
-       if (!list_empty(&c->cur_block->instructions))
+       if (!list_is_empty(&c->cur_block->instructions))
                last_inst = (struct qinst *)c->cur_block->instructions.prev;
 
        /* We don't have any way to guess which kind of MOV is implied. */
@@ -622,7 +622,7 @@ schedule_instructions(struct vc4_compile *c,
        }
 
        state->time = 0;
-       while (!list_empty(&state->dag->heads)) {
+       while (!list_is_empty(&state->dag->heads)) {
                struct schedule_node *chosen = choose_instruction(state);
                struct qinst *inst = chosen->inst;
 
@@ -874,7 +874,7 @@ schedule_instructions(struct vc4_compile *c,
 {
        uint32_t time = 0;
 
-       while (!list_empty(&scoreboard->dag->heads)) {
+       while (!list_is_empty(&scoreboard->dag->heads)) {
                struct schedule_node *chosen =
                        choose_instruction_to_schedule(scoreboard,
                                                       schedule_list,
@@ -995,7 +995,7 @@ qpu_schedule_instructions_block(struct vc4_compile *c,
 
        /* Wrap each instruction in a scheduler structure. */
        uint32_t next_sched_uniform = *next_uniform;
-       while (!list_empty(&block->qpu_inst_list)) {
+       while (!list_is_empty(&block->qpu_inst_list)) {
                struct queued_qpu_inst *inst =
                        (struct queued_qpu_inst *)block->qpu_inst_list.next;
                struct schedule_node *n = rzalloc(scoreboard->dag,
@@ -961,7 +961,7 @@ static void amdgpu_bo_sparse_destroy(struct pb_buffer *_buf)
         fprintf(stderr, "amdgpu: clearing PRT VA region on destroy failed (%d)\n", r);
   }
 
-   while (!list_empty(&bo->u.sparse.backing)) {
+   while (!list_is_empty(&bo->u.sparse.backing)) {
      struct amdgpu_sparse_backing *dummy = NULL;
      sparse_free_backing_buffer(bo,
                                 container_of(bo->u.sparse.backing.next,
@@ -165,7 +165,7 @@ align_and_verify_space(struct gen_aux_map_context *ctx, uint32_t size,
 static void
 get_current_pos(struct gen_aux_map_context *ctx, uint64_t *gpu, uint64_t **map)
 {
-   assert(!list_empty(&ctx->buffers));
+   assert(!list_is_empty(&ctx->buffers));
   struct aux_map_buffer *tail =
      list_last_entry(&ctx->buffers, struct aux_map_buffer, link);
   if (gpu)
@@ -36,7 +36,7 @@
 static inline bool
 are_all_uses_fadd(nir_ssa_def *def)
 {
-   if (!list_empty(&def->if_uses))
+   if (!list_is_empty(&def->if_uses))
      return false;
 
   nir_foreach_use(use_src, def) {
@@ -153,7 +153,7 @@ any_alu_src_is_a_constant(nir_alu_src srcs[])
            nir_instr_as_load_const (srcs[i].src.ssa->parent_instr);
 
      if (list_is_singular(&load_const->def.uses) &&
-          list_empty(&load_const->def.if_uses)) {
+          list_is_empty(&load_const->def.if_uses)) {
         return true;
      }
   }
@@ -256,7 +256,7 @@ brw_nir_opt_peephole_ffma_block(nir_builder *b, nir_block *block)
                               nir_src_for_ssa(&ffma->dest.dest.ssa));
 
      nir_builder_instr_insert(b, &ffma->instr);
-      assert(list_empty(&add->dest.dest.ssa.uses));
+      assert(list_is_empty(&add->dest.dest.ssa.uses));
      nir_instr_remove(&add->instr);
 
      progress = true;
@@ -812,13 +812,13 @@ void
 anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
 {
   /* Delete all but the first batch bo */
-   assert(!list_empty(&cmd_buffer->batch_bos));
+   assert(!list_is_empty(&cmd_buffer->batch_bos));
   while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
      struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
      list_del(&bbo->link);
      anv_batch_bo_destroy(bbo, cmd_buffer);
   }
-   assert(!list_empty(&cmd_buffer->batch_bos));
+   assert(!list_is_empty(&cmd_buffer->batch_bos));
 
   anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
                      &cmd_buffer->batch,
@@ -68,7 +68,7 @@ compiler_debug_log(void *data, const char *fmt, ...)
   char str[MAX_DEBUG_MESSAGE_LENGTH];
   struct anv_device *device = (struct anv_device *)data;
 
-   if (list_empty(&device->instance->debug_report_callbacks.callbacks))
+   if (list_is_empty(&device->instance->debug_report_callbacks.callbacks))
      return;
 
   va_list args;
@@ -531,7 +531,7 @@ bo_alloc_internal(struct brw_bufmgr *bufmgr,
   /* Get a buffer out of the cache if available */
retry:
   alloc_from_cache = false;
-   if (bucket != NULL && !list_empty(&bucket->head)) {
+   if (bucket != NULL && !list_is_empty(&bucket->head)) {
      if (busy && !zeroed) {
         /* Allocate new render-target BOs from the tail (MRU)
          * of the list, as it will likely be hot in the GPU
@@ -77,11 +77,11 @@ static inline void list_addtail(struct list_head *item, struct list_head *list)
    list->prev = item;
 }
 
-static inline bool list_empty(const struct list_head *list);
+static inline bool list_is_empty(const struct list_head *list);
 
 static inline void list_replace(struct list_head *from, struct list_head *to)
 {
-   if (list_empty(from)) {
+   if (list_is_empty(from)) {
       list_inithead(to);
    } else {
       to->prev = from->prev;
@@ -106,7 +106,7 @@ static inline void list_delinit(struct list_head *item)
    item->prev = item;
 }
 
-static inline bool list_empty(const struct list_head *list)
+static inline bool list_is_empty(const struct list_head *list)
 {
    return list->next == list;
 }
@@ -130,7 +130,7 @@ static inline unsigned list_length(const struct list_head *list)
 
 static inline void list_splice(struct list_head *src, struct list_head *dst)
 {
-   if (list_empty(src))
+   if (list_is_empty(src))
      return;
 
   src->next->prev = dst;
@@ -141,7 +141,7 @@ static inline void list_splice(struct list_head *src, struct list_head *dst)
 
 static inline void list_splicetail(struct list_head *src, struct list_head *dst)
 {
-   if (list_empty(src))
+   if (list_is_empty(src))
      return;
 
   src->prev->next = dst;
@@ -493,7 +493,7 @@ static void destroy_swapchain_data(struct swapchain_data *data)
 struct overlay_draw *get_overlay_draw(struct swapchain_data *data)
 {
   struct device_data *device_data = data->device;
-   struct overlay_draw *draw = list_empty(&data->draws) ?
+   struct overlay_draw *draw = list_is_empty(&data->draws) ?
      NULL : list_first_entry(&data->draws, struct overlay_draw, link);
 
   VkSemaphoreCreateInfo sem_info = {};
@@ -2137,7 +2137,7 @@ static VkResult overlay_QueueSubmit(
          !cmd_buffer_data->timestamp_query_pool)
         continue;
 
-      if (list_empty(&cmd_buffer_data->link)) {
+      if (list_is_empty(&cmd_buffer_data->link)) {
         list_addtail(&cmd_buffer_data->link,
                      &queue_data->running_command_buffer);
      } else {
@@ -99,7 +99,7 @@ vk_debug_report(struct vk_debug_report_instance *instance,
                const char *pMessage)
 {
   /* Allow NULL for convinience, return if no callbacks registered. */
-   if (!instance || list_empty(&instance->callbacks))
+   if (!instance || list_is_empty(&instance->callbacks))
      return;
 
   pthread_mutex_lock(&instance->callbacks_mutex);