nir: Stop using apostrophes to pluralize.

Reviewed-by: Dylan Baker <dylan@pnwbakers.com>
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Matt Turner
2017-02-27 17:21:42 -08:00
parent 54f9f34181
commit d6e2bdfed3
21 changed files with 43 additions and 43 deletions

@@ -1,5 +1,5 @@
 New IR, or NIR, is an IR for Mesa intended to sit below GLSL IR and Mesa IR.
-Its design inherits from the various IR's that Mesa has used in the past, as
+Its design inherits from the various IRs that Mesa has used in the past, as
 well as Direct3D assembly, and it includes a few new ideas as well. It is a
 flat (in terms of using instructions instead of expressions), typeless IR,
 similar to TGSI and Mesa IR. It also supports SSA (although it doesn't require

@@ -345,7 +345,7 @@ nir_block_create(nir_shader *shader)
 _mesa_key_pointer_equal);
 block->imm_dom = NULL;
 /* XXX maybe it would be worth it to defer allocation? This
-* way it doesn't get allocated for shader ref's that never run
+* way it doesn't get allocated for shader refs that never run
 * nir_calc_dominance? For example, state-tracker creates an
 * initial IR, clones that, runs appropriate lowering pass, passes
 * to driver which does common lowering/opt, and then stores ref

@@ -368,13 +368,13 @@ typedef struct nir_register {
 */
 bool is_packed;
-/** set of nir_src's where this register is used (read from) */
+/** set of nir_srcs where this register is used (read from) */
 struct list_head uses;
-/** set of nir_dest's where this register is defined (written to) */
+/** set of nir_dests where this register is defined (written to) */
 struct list_head defs;
-/** set of nir_if's where this register is used as a condition */
+/** set of nir_ifs where this register is used as a condition */
 struct list_head if_uses;
 } nir_register;
@@ -453,10 +453,10 @@ typedef struct nir_ssa_def {
 nir_instr *parent_instr;
-/** set of nir_instr's where this register is used (read from) */
+/** set of nir_instrs where this register is used (read from) */
 struct list_head uses;
-/** set of nir_if's where this register is used as a condition */
+/** set of nir_ifs where this register is used as a condition */
 struct list_head if_uses;
 uint8_t num_components;
@@ -1422,7 +1422,7 @@ typedef struct {
 typedef struct {
 nir_instr instr;
-/* A list of nir_parallel_copy_entry's. The sources of all of the
+/* A list of nir_parallel_copy_entrys. The sources of all of the
 * entries are copied to the corresponding destinations "in parallel".
 * In other words, if we have two entries: a -> b and b -> a, the values
 * get swapped.
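
The "in parallel" wording above means every source is read before any destination is written, which is why the entry pair a -> b and b -> a swaps the two values. A minimal plain-C sketch of that semantics (illustrative only, not Mesa code; the array/index representation is an assumption):

    #include <stdio.h>

    /* Copy vals[src[i]] to vals[dst[i]] for all entries "at once":
     * snapshot every source first, then perform every write.
     * Assumes n <= 16 for brevity. */
    static void parallel_copy(int *vals, const int *src, const int *dst, int n)
    {
        int tmp[16];
        for (int i = 0; i < n; i++)
            tmp[i] = vals[src[i]];   /* read all sources first */
        for (int i = 0; i < n; i++)
            vals[dst[i]] = tmp[i];   /* then write all destinations */
    }

    int main(void)
    {
        int vals[2] = { 1, 2 };
        const int src[2] = { 0, 1 }, dst[2] = { 1, 0 };  /* a -> b, b -> a */
        parallel_copy(vals, src, dst, 2);
        printf("%d %d\n", vals[0], vals[1]);             /* prints "2 1" */
        return 0;
    }
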
@@ -1506,7 +1506,7 @@ typedef struct nir_block {
 unsigned num_dom_children;
 struct nir_block **dom_children;
-/* Set of nir_block's on the dominance frontier of this block */
+/* Set of nir_blocks on the dominance frontier of this block */
 struct set *dom_frontier;
 /*

@@ -495,7 +495,7 @@ nir_ssa_for_src(nir_builder *build, nir_src src, int num_components)
 }
 /**
-* Similar to nir_ssa_for_src(), but for alu src's, respecting the
+* Similar to nir_ssa_for_src(), but for alu srcs, respecting the
 * nir_alu_src's swizzle.
 */
 static inline nir_ssa_def *

@@ -140,7 +140,7 @@ nir_constant_clone(const nir_constant *c, nir_variable *nvar)
 return nc;
 }
-/* NOTE: for cloning nir_variable's, bypass nir_variable_create to avoid
+/* NOTE: for cloning nir_variables, bypass nir_variable_create to avoid
 * having to deal with locals and globals separately:
 */
 nir_variable *
@@ -185,7 +185,7 @@ clone_var_list(clone_state *state, struct exec_list *dst,
 }
 }
-/* NOTE: for cloning nir_register's, bypass nir_global/local_reg_create()
+/* NOTE: for cloning nir_registers, bypass nir_global/local_reg_create()
 * to avoid having to deal with locals and globals separately:
 */
 static nir_register *
@@ -724,7 +724,7 @@ clone_function(clone_state *state, const nir_function *fxn, nir_shader *ns)
 /* At first glance, it looks like we should clone the function_impl here.
 * However, call instructions need to be able to reference at least the
-* function and those will get processed as we clone the function_impl's.
+* function and those will get processed as we clone the function_impls.
 * We stop here and do function_impls as a second pass.
 */
@@ -752,7 +752,7 @@ nir_shader_clone(void *mem_ctx, const nir_shader *s)
 clone_function(&state, fxn, ns);
 /* Only after all functions are cloned can we clone the actual function
-* implementations. This is because nir_call_instr's need to reference the
+* implementations. This is because nir_call_instrs need to reference the
 * functions of other functions and we don't know what order the functions
 * will have in the list.
 */
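
The two-pass cloning described in these comments — create every function first, then clone the function_impls — is what lets call instructions reference functions that appear later in the list. A generic sketch of the pattern (hypothetical struct, not the actual nir_clone code):

    typedef struct fn {
        const char *name;
        struct fn *callee;   /* some other function this one calls, or NULL */
    } fn;

    static void clone_all(const fn *src, fn *dst, int n)
    {
        /* pass 1: create every clone so any cross-reference has a target */
        for (int i = 0; i < n; i++) {
            dst[i].name = src[i].name;
            dst[i].callee = NULL;
        }
        /* pass 2: all clones exist now, so calls can be pointed at the
         * cloned callee no matter where it sits in the list */
        for (int i = 0; i < n; i++) {
            if (src[i].callee)
                dst[i].callee = &dst[src[i].callee - src];
        }
    }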

@@ -203,7 +203,7 @@ split_block_beginning(nir_block *block)
 }
 /* Any phi nodes must stay part of the new block, or else their
-* sourcse will be messed up. This will reverse the order of the phi's, but
+* sourcse will be messed up. This will reverse the order of the phis, but
 * order shouldn't matter.
 */
 nir_foreach_instr_safe(instr, block) {

@@ -36,11 +36,11 @@ extern "C" {
 /** NIR Control Flow Modification
 *
-* This file contains various API's that make modifying control flow in NIR,
+* This file contains various APIs that make modifying control flow in NIR,
 * while maintaining the invariants checked by the validator, much easier.
 * There are two parts to this:
 *
-* 1. Inserting control flow (if's and loops) in various places, for creating
+* 1. Inserting control flow (ifs and loops) in various places, for creating
 * IR either from scratch or as part of some lowering pass.
 * 2. Taking existing pieces of the IR and either moving them around or
 * deleting them.
@@ -93,12 +93,12 @@ nir_cf_node_insert_end(struct exec_list *list, nir_cf_node *node)
 * predecessors:
 *
 * 1) After an if statement, if neither branch ends in a jump.
-* 2) After a loop, if there are multiple break's.
+* 2) After a loop, if there are multiple breaks.
 * 3) At the beginning of a loop.
 *
 * For #1, the phi node is considered to be part of the if, and for #2 and
 * #3 the phi node is considered to be part of the loop. This allows us to
-* keep phi's intact, but it means that phi nodes cannot be separated from
+* keep phis intact, but it means that phi nodes cannot be separated from
 * the control flow they come from. For example, extracting an if without
 * extracting all the phi nodes after it is not allowed, and neither is
 * extracting only some of the phi nodes at the beginning of a block. It

@@ -71,7 +71,7 @@ ssa_def_dominates(nir_ssa_def *a, nir_ssa_def *b)
 * Each SSA definition is associated with a merge_node and the association
 * is represented by a combination of a hash table and the "def" parameter
 * in the merge_node structure. The merge_set stores a linked list of
-* merge_node's in dominence order of the ssa definitions. (Since the
+* merge_nodes in dominence order of the ssa definitions. (Since the
 * liveness analysis pass indexes the SSA values in dominence order for us,
 * this is an easy thing to keep up.) It is assumed that no pair of the
 * nodes in a given set interfere. Merging two sets or checking for
@@ -313,7 +313,7 @@ isolate_phi_nodes_block(nir_block *block, void *dead_ctx)
 last_phi_instr = instr;
 }
-/* If we don't have any phi's, then there's nothing for us to do. */
+/* If we don't have any phis, then there's nothing for us to do. */
 if (last_phi_instr == NULL)
 return true;
@@ -558,7 +558,7 @@ emit_copy(nir_builder *b, nir_src src, nir_src dest_src)
 nir_builder_instr_insert(b, &mov->instr);
 }
-/* Resolves a single parallel copy operation into a sequence of mov's
+/* Resolves a single parallel copy operation into a sequence of movs
 *
 * This is based on Algorithm 1 from "Revisiting Out-of-SSA Translation for
 * Correctness, Code Quality, and Efficiency" by Boissinot et. al..
@@ -851,10 +851,10 @@ place_phi_read(nir_shader *shader, nir_register *reg,
 nir_instr_insert(nir_after_block_before_jump(block), &mov->instr);
 }
-/** Lower all of the phi nodes in a block to imov's to and from a register
+/** Lower all of the phi nodes in a block to imovs to and from a register
 *
 * This provides a very quick-and-dirty out-of-SSA pass that you can run on a
-* single block to convert all of it's phis to a register and some imov's.
+* single block to convert all of its phis to a register and some imovs.
 * The code that is generated, while not optimal for actual codegen in a
 * back-end, is easy to generate, correct, and will turn into the same set of
 * phis after you call regs_to_ssa and do some copy propagation.

@@ -218,7 +218,7 @@ compute_induction_information(loop_info_state *state)
 */
 assert(!var->in_control_flow && var->type != invariant);
-/* We are only interested in checking phi's for the basic induction
+/* We are only interested in checking phis for the basic induction
 * variable case as its simple to detect. All basic induction variables
 * have a phi node
 */
@@ -707,7 +707,7 @@ static void
 get_loop_info(loop_info_state *state, nir_function_impl *impl)
 {
 /* Initialize all variables to "outside_loop". This also marks defs
-* invariant and constant if they are nir_instr_type_load_const's
+* invariant and constant if they are nir_instr_type_load_consts
 */
 nir_foreach_block(block, impl) {
 nir_foreach_instr(instr, block)

@@ -31,7 +31,7 @@
 /* Generates the lowering code for user-clip-planes, generating CLIPDIST
 * from UCP[n] + CLIPVERTEX or POSITION. Additionally, an optional pass
-* for fragment shaders to insert conditional kill's based on the inter-
+* for fragment shaders to insert conditional kills based on the inter-
 * polated CLIPDIST
 *
 * NOTE: should be run after nir_lower_outputs_to_temporaries() (or at
@@ -163,7 +163,7 @@ lower_clip_vs(nir_function_impl *impl, unsigned ucp_enables,
 * should be only a single predecessor block to end_block, which
 * makes the perfect place to insert the clipdist calculations.
 *
-* NOTE: in case of early return's, these would have to be lowered
+* NOTE: in case of early returns, these would have to be lowered
 * to jumps to end_block predecessor in a previous pass. Not sure
 * if there is a good way to sanity check this, but for now the
 * users of this pass don't support sub-routines.
@@ -193,7 +193,7 @@ lower_clip_vs(nir_function_impl *impl, unsigned ucp_enables,
 nir_metadata_preserve(impl, nir_metadata_dominance);
 }
-/* ucp_enables is bitmask of enabled ucp's. Actual ucp values are
+/* ucp_enables is bitmask of enabled ucps. Actual ucp values are
 * passed in to shader via user_clip_plane system-values
 */
 void
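
Since ucp_enables is described as a bitmask of enabled user clip planes, a consumer would typically walk its set bits. A hedged sketch of that usage (the plane limit of 8 and the callback shape are illustrative assumptions, not taken from this pass):

    /* Visit each enabled plane in a ucp_enables-style bitmask. */
    static void for_each_enabled_ucp(unsigned ucp_enables, void (*fn)(int plane))
    {
        for (int plane = 0; plane < 8; plane++) {
            if (ucp_enables & (1u << plane))
                fn(plane);
        }
    }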

@@ -214,11 +214,11 @@ lower_drawpixels_block(lower_drawpixels_state *state, nir_block *block)
 nir_variable *var = dvar->var;
 if (var->data.location == VARYING_SLOT_COL0) {
-/* gl_Color should not have array/struct deref's: */
+/* gl_Color should not have array/struct derefs: */
 assert(dvar->deref.child == NULL);
 lower_color(state, intr);
 } else if (var->data.location == VARYING_SLOT_TEX0) {
-/* gl_TexCoord should not have array/struct deref's: */
+/* gl_TexCoord should not have array/struct derefs: */
 assert(dvar->deref.child == NULL);
 lower_texcoord(state, intr);
 }

@@ -49,7 +49,7 @@ nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
 nir_foreach_variable(var, var_list) {
 /*
-* UBO's have their own address spaces, so don't count them towards the
+* UBOs have their own address spaces, so don't count them towards the
 * number of global uniforms
 */
 if ((var->data.mode == nir_var_uniform || var->data.mode == nir_var_shader_storage) &&

@@ -68,7 +68,7 @@ get_new_var(struct lower_io_types_state *state, nir_variable *var,
 nvar->data = var->data;
 nvar->data.location += off;
-/* nir_variable_create is too clever for it's own good: */
+/* nir_variable_create is too clever for its own good: */
 exec_node_remove(&nvar->node);
 exec_node_self_link(&nvar->node); /* no delinit() :-( */

@@ -645,7 +645,7 @@ swizzle_result(nir_builder *b, nir_tex_instr *tex, const uint8_t swizzle[4])
 if (swizzle[0] < 4 && swizzle[1] < 4 &&
 swizzle[2] < 4 && swizzle[3] < 4) {
 unsigned swiz[4] = { swizzle[0], swizzle[1], swizzle[2], swizzle[3] };
-/* We have no 0's or 1's, just emit a swizzling MOV */
+/* We have no 0s or 1s, just emit a swizzling MOV */
 swizzled = nir_swizzle(b, &tex->dest.ssa, swiz, 4, false);
 } else {
 nir_ssa_def *srcs[4];
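
The "< 4" checks and the "no 0s or 1s" comment reflect the usual swizzle encoding in which values 0-3 select a source component and larger values stand for the constants zero and one. A CPU-side sketch of that convention (the exact encoding of the two constants is an assumption here, not taken from nir_lower_tex):

    /* Apply one channel of a swizzle: 0-3 pick .x/.y/.z/.w from src;
     * by assumption, 4 yields constant 0.0 and 5 yields constant 1.0. */
    static float swizzle_channel(const float src[4], unsigned char s)
    {
        if (s < 4)
            return src[s];
        return (s == 4) ? 0.0f : 1.0f;
    }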

@@ -71,7 +71,7 @@ lower_wpos_center_block(nir_builder *b, nir_block *block)
 if (var->data.mode == nir_var_shader_in &&
 var->data.location == VARYING_SLOT_POS) {
-/* gl_FragCoord should not have array/struct deref's: */
+/* gl_FragCoord should not have array/struct derefs: */
 assert(dvar->deref.child == NULL);
 add_half_to_fragcoord(b, intr);
 progress = true;

@@ -304,7 +304,7 @@ lower_wpos_ytransform_block(lower_wpos_ytransform_state *state, nir_block *block
 if (var->data.mode == nir_var_shader_in &&
 var->data.location == VARYING_SLOT_POS) {
-/* gl_FragCoord should not have array/struct deref's: */
+/* gl_FragCoord should not have array/struct derefs: */
 assert(dvar->deref.child == NULL);
 lower_fragcoord(state, intr);
 } else if (var->data.mode == nir_var_system_value &&

@@ -41,7 +41,7 @@
 * ssa_2 = fadd(ssa_1.x, ssa_1.y)
 *
 * While this is "worse" because it adds a bunch of unneeded dependencies, it
-* actually makes it much easier for vec4-based backends to coalesce the MOV's
+* actually makes it much easier for vec4-based backends to coalesce the MOVs
 * that result from the vec4 operation because it doesn't have to worry about
 * quite as many reads.
 */

@@ -33,7 +33,7 @@
 */
 /*
-* Visits and CSE's the given block and all its descendants in the dominance
+* Visits and CSEs the given block and all its descendants in the dominance
 * tree recursively. Note that the instr_set is guaranteed to only ever
 * contain instructions that dominate the current block.
 */

@@ -128,7 +128,7 @@ block_check_for_allowed_instrs(nir_block *block, unsigned *count, bool alu_ok)
 if (!list_empty(&mov->dest.dest.ssa.if_uses))
 return false;
-/* The only uses of this definition must be phi's in the successor */
+/* The only uses of this definition must be phis in the successor */
 nir_foreach_use(use, &mov->dest.dest.ssa) {
 if (use->parent_instr->type != nir_instr_type_phi ||
 use->parent_instr->block != block->successors[0])

@@ -115,11 +115,11 @@ remove_phis_block(nir_block *block, nir_builder *b)
 assert(def != NULL);
 if (mov) {
-/* If the sources were all mov's from the same source with the same
+/* If the sources were all movs from the same source with the same
 * swizzle, then we can't just pick a random move because it may not
 * dominate the phi node. Instead, we need to emit our own move after
 * the phi which uses the shared source, and rewrite uses of the phi
-* to use the move instead. This is ok, because while the mov's may
+* to use the move instead. This is ok, because while the movs may
 * not all dominate the phi node, their shared source does.
 */

@@ -41,7 +41,7 @@ is_pos_power_of_two(nir_alu_instr *instr, unsigned src, unsigned num_components,
 {
 nir_const_value *val = nir_src_as_const_value(instr->src[src].src);
-/* only constant src's: */
+/* only constant srcs: */
 if (!val)
 return false;
@@ -71,7 +71,7 @@ is_neg_power_of_two(nir_alu_instr *instr, unsigned src, unsigned num_components,
 {
 nir_const_value *val = nir_src_as_const_value(instr->src[src].src);
-/* only constant src's: */
+/* only constant srcs: */
 if (!val)
 return false;
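
A positive-power-of-two check of the kind these helpers perform usually reduces to the standard bit trick below; the real helpers additionally walk the constant value per component, which is omitted in this sketch (not the Mesa implementation):

    #include <stdbool.h>
    #include <stdint.h>

    /* Non-zero, positive, and exactly one bit set. */
    static bool is_pos_pow2(int64_t v)
    {
        return v > 0 && (v & (v - 1)) == 0;
    }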