diff --git a/src/intel/blorp/blorp_blit.c b/src/intel/blorp/blorp_blit.c
index f98b25931aa..f76c145b8be 100644
--- a/src/intel/blorp/blorp_blit.c
+++ b/src/intel/blorp/blorp_blit.c
@@ -1394,7 +1394,7 @@ brw_blorp_build_nir_shader(struct blorp_context *blorp,
nir_imm_float(&b, 0.5f));
color = blorp_nir_tex(&b, &v, key, src_pos);
} else {
- /* Gfx7+ hardware doesn't automaticaly blend. */
+ /* Gfx7+ hardware doesn't automatically blend. */
color = blorp_nir_combine_samples(&b, &v, src_pos, key->src_samples,
key->tex_aux_usage,
key->texture_data_type,
diff --git a/src/intel/blorp/blorp_clear.c b/src/intel/blorp/blorp_clear.c
index a721a0ad7c3..31060c210d8 100644
--- a/src/intel/blorp/blorp_clear.c
+++ b/src/intel/blorp/blorp_clear.c
@@ -395,7 +395,7 @@ get_fast_clear_rect(const struct isl_device *dev,
* Target(s)", beneath the "MSAA Compression" bullet (p326):
*
* Clear pass for this case requires that scaled down primitive
- * is sent down with upper left co-ordinate to coincide with
+ * is sent down with upper left coordinate to coincide with
* actual rectangle being cleared. For MSAA, clear rectangle’s
- *      height and width need to as show in the following table in
+ *      height and width need to be as shown in the following table in
* terms of (width,height) of the RT.
@@ -406,7 +406,7 @@ get_fast_clear_rect(const struct isl_device *dev,
* 8X Ceil(1/2*width) Ceil(1/2*height)
* 16X width Ceil(1/2*height)
*
- * The text "with upper left co-ordinate to coincide with actual
+ * The text "with upper left coordinate to coincide with actual
* rectangle being cleared" is a little confusing--it seems to imply
* that to clear a rectangle from (x,y) to (x+w,y+h), one needs to
* feed the pipeline using the rectangle (x,y) to
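As a concrete reading of the rows of that table visible in this hunk, here is a hedged C sketch; the helper and macro names are assumptions, not blorp's actual implementation:

```c
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Scale-downs for the rows quoted above: 8x -> (ceil(w/2), ceil(h/2)),
 * 16x -> (w, ceil(h/2)). The 2x/4x rows fall outside the quoted hunk,
 * so they are omitted here.
 */
static void
scale_fast_clear_rect(unsigned samples, unsigned *w, unsigned *h)
{
   switch (samples) {
   case 8:
      *w = DIV_ROUND_UP(*w, 2);
      *h = DIV_ROUND_UP(*h, 2);
      break;
   case 16:
      *h = DIV_ROUND_UP(*h, 2);
      break;
   default:
      break;
   }
}
```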
@@ -576,7 +576,7 @@ blorp_clear(struct blorp_batch *batch,
if (compute)
use_simd16_replicated_data = false;
- /* Constant color writes ignore everyting in blend and color calculator
+ /* Constant color writes ignore everything in blend and color calculator
* state. This is not documented.
*/
params.color_write_disable = color_write_disable & BITFIELD_MASK(4);
@@ -735,7 +735,7 @@ blorp_clear_stencil_as_rgba(struct blorp_batch *batch,
/* W-tiles and Y-tiles have the same layout as far as cache lines are
* concerned: both are 8x8 cache lines laid out Y-major. The difference is
- * entirely in how the data is arranged withing the cache line. W-tiling
+ * entirely in how the data is arranged within the cache line. W-tiling
* is 8x8 pixels in a swizzled pattern while Y-tiling is 16B by 4 rows
* regardless of image format size. As long as everything is aligned to 8,
* we can just treat the W-tiled image as Y-tiled, ignore the layout
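Since the comment pins down the shared structure (8x8 cache lines per 4KB tile, laid out Y-major), a small illustrative helper makes the claim concrete; the name and exact indexing are assumptions inferred from that description:

```c
#include <stdint.h>

/* Byte offset of a 64B cache line within a 4KB tile. Per the comment
 * above this is identical for W- and Y-tiling: 8x8 cache lines laid
 * out Y-major (y varies fastest); only the bytes inside the line
 * are arranged differently.
 */
static uint32_t
cacheline_offset_in_tile(uint32_t cl_x, uint32_t cl_y)
{
   return (cl_x * 8 + cl_y) * 64;
}
```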
diff --git a/src/intel/common/intel_decoder.c b/src/intel/common/intel_decoder.c
index 8c9d1eba56b..8290011764c 100644
--- a/src/intel/common/intel_decoder.c
+++ b/src/intel/common/intel_decoder.c
@@ -243,7 +243,7 @@ get_start_end_pos(int *start, int *end)
-   /* start value has to be mod with 32 as we need the relative
+   /* start value has to be reduced mod 32 as we need the relative
* start position in the first DWord. For the end position, add
* the length of the field to the start position to get the
- * relative postion in the 64 bit address.
+ * relative position in the 64 bit address.
*/
if (*end - *start > 32) {
int len = *end - *start;
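A sketch of the full helper this comment sits in; everything past the visible `if` is an assumption reconstructed from the comment, not necessarily the decoder's exact code:

```c
/* Reduce a field's absolute bit positions to positions relative to
 * the first DWord, per the comment above.
 */
static void
get_start_end_pos_sketch(int *start, int *end)
{
   if (*end - *start > 32) {
      int len = *end - *start;
      *start = *start % 32;   /* relative start within the first DWord */
      *end = *start + len;    /* relative end within the 64-bit value */
   } else {
      *start = *start % 32;
      *end = *end % 32;
   }
}
```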
diff --git a/src/intel/common/intel_gem.h b/src/intel/common/intel_gem.h
index 0f908797feb..c0cabcc879a 100644
--- a/src/intel/common/intel_gem.h
+++ b/src/intel/common/intel_gem.h
@@ -63,7 +63,7 @@ intel_48b_address(uint64_t v)
}
/**
- * Call ioctl, restarting if it is interupted
+ * Call ioctl, restarting if it is interrupted
*/
static inline int
intel_ioctl(int fd, unsigned long request, void *arg)
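The body is cut off by the hunk, but the comment describes the classic EINTR retry idiom; a minimal sketch follows (the real body in intel_gem.h may differ in detail):

```c
#include <errno.h>
#include <sys/ioctl.h>

/* Restart the ioctl while it fails because the call was interrupted
 * (EINTR) or must be retried (EAGAIN).
 */
static inline int
intel_ioctl_sketch(int fd, unsigned long request, void *arg)
{
   int ret;
   do {
      ret = ioctl(fd, request, arg);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
   return ret;
}
```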
diff --git a/src/intel/common/intel_urb_config.c b/src/intel/common/intel_urb_config.c
index 2f7d72cc31f..04f25490dda 100644
--- a/src/intel/common/intel_urb_config.c
+++ b/src/intel/common/intel_urb_config.c
@@ -82,7 +82,7 @@ intel_get_urb_config(const struct intel_device_info *devinfo,
* Engine, the actual URB space available for operation is only 60KB
* (per bank). Similarly when URB space programmed is 128KB (per bank)
* for render engine, the actual URB space available for operation is
- * only 124KB (per bank). More detailed descripton available in "L3
+ * only 124KB (per bank). More detailed description available in "L3
* Cache" section of the B-Spec."
*/
if (devinfo->verx10 == 120) {
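Read as arithmetic, the quoted note says each bank loses 4KB of the programmed URB space on verx10 == 120; a hypothetical helper capturing just that:

```c
/* Hypothetical name; captures only the quoted note: verx10 == 120
 * parts reserve 4KB of the programmed URB space per bank.
 */
static unsigned
usable_urb_kB_per_bank(unsigned programmed_kB)
{
   return programmed_kB - 4;   /* 64 -> 60, 128 -> 124 */
}
```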
diff --git a/src/intel/common/tests/mi_builder_test.cpp b/src/intel/common/tests/mi_builder_test.cpp
index 4d0f868da6d..51021acc23b 100644
--- a/src/intel/common/tests/mi_builder_test.cpp
+++ b/src/intel/common/tests/mi_builder_test.cpp
@@ -603,7 +603,7 @@ TEST_F(mi_builder_test, add_imm)
mi_store(&b, out_mem64(88),
mi_iadd(&b, mi_inot(&b, mi_imm(add)), in_mem64(0)));
- // And som add_imm just for good measure
+ // And some add_imm just for good measure
mi_store(&b, out_mem64(96), mi_iadd_imm(&b, in_mem64(0), 0));
mi_store(&b, out_mem64(104), mi_iadd_imm(&b, in_mem64(0), add));
diff --git a/src/intel/compiler/brw_clip_tri.c b/src/intel/compiler/brw_clip_tri.c
index f6f7e307ddc..30d057cf31a 100644
--- a/src/intel/compiler/brw_clip_tri.c
+++ b/src/intel/compiler/brw_clip_tri.c
@@ -136,7 +136,7 @@ void brw_clip_tri_init_vertices( struct brw_clip_compile *c )
struct brw_codegen *p = &c->func;
struct brw_reg tmp0 = c->reg.loopcount; /* handy temporary */
- /* Initial list of indices for incoming vertexes:
+ /* Initial list of indices for incoming vertices:
*/
brw_AND(p, tmp0, get_element_ud(c->reg.R0, 2), brw_imm_ud(PRIM_MASK));
brw_CMP(p,
diff --git a/src/intel/compiler/brw_compile_sf.c b/src/intel/compiler/brw_compile_sf.c
index 7d57553887d..b8c8d8fcad7 100644
--- a/src/intel/compiler/brw_compile_sf.c
+++ b/src/intel/compiler/brw_compile_sf.c
@@ -38,7 +38,7 @@ struct brw_sf_compile {
struct brw_reg dy0;
struct brw_reg dy2;
- /* z and 1/w passed in seperately:
+ /* z and 1/w passed in separately:
*/
struct brw_reg z[3];
struct brw_reg inv_w[3];
@@ -161,7 +161,7 @@ static void do_twoside_color( struct brw_sf_compile *c )
-   /* Need to use BRW_EXECUTE_4 and also do an 4-wide compare in order
+   /* Need to use BRW_EXECUTE_4 and also do a 4-wide compare in order
* to get all channels active inside the IF. In the clipping code
* we run with NoMask, so it's not an option and we can use
- * BRW_EXECUTE_1 for all comparisions.
+ * BRW_EXECUTE_1 for all comparisons.
*/
brw_CMP(p, vec4(brw_null_reg()), backface_conditional, c->det, brw_imm_f(0));
brw_IF(p, BRW_EXECUTE_4);
@@ -290,7 +290,7 @@ static void alloc_regs( struct brw_sf_compile *c )
c->dy0 = brw_vec1_grf(1, 5);
c->dy2 = brw_vec1_grf(1, 6);
- /* z and 1/w passed in seperately:
+ /* z and 1/w passed in separately:
*/
c->z[0] = brw_vec1_grf(2, 0);
c->inv_w[0] = brw_vec1_grf(2, 1);
@@ -376,7 +376,7 @@ calculate_masks(struct brw_sf_compile *c,
} else if (interp == INTERP_MODE_NOPERSPECTIVE)
*pc_linear = 0xf;
- /* Maybe only processs one attribute on the final round:
+ /* Maybe only process one attribute on the final round:
*/
if (vert_reg_to_varying(c, reg, 1) != BRW_VARYING_SLOT_COUNT) {
*pc |= 0xf0;
@@ -632,7 +632,7 @@ static void brw_emit_point_sprite_setup(struct brw_sf_compile *c, bool allocate)
*/
if (pc_coord_replace) {
set_predicate_control_flag_value(p, c, pc_coord_replace);
- /* Caculate 1.0/PointWidth */
+ /* Calculate 1.0/PointWidth */
gfx4_math(&c->func,
c->tmp,
BRW_MATH_FUNCTION_INV,
diff --git a/src/intel/compiler/brw_compiler.c b/src/intel/compiler/brw_compiler.c
index 0efb711c97a..601e60ed37b 100644
--- a/src/intel/compiler/brw_compiler.c
+++ b/src/intel/compiler/brw_compiler.c
@@ -151,7 +151,7 @@ brw_compiler_create(void *mem_ctx, const struct intel_device_info *devinfo)
if (!devinfo->has_64bit_int)
int64_options |= (nir_lower_int64_options)~0;
- /* The Bspec's section tittled "Instruction_multiply[DevBDW+]" claims that
+ /* The Bspec's section titled "Instruction_multiply[DevBDW+]" claims that
* destination type can be Quadword and source type Doubleword for Gfx8 and
-    * Gfx9. So, lower 64 bit multiply instruction on rest of the platforms.
+    * Gfx9. So, lower the 64-bit multiply instruction on the rest of the platforms.
*/
diff --git a/src/intel/compiler/brw_compiler.h b/src/intel/compiler/brw_compiler.h
index 62433320107..3f1743aaaec 100644
--- a/src/intel/compiler/brw_compiler.h
+++ b/src/intel/compiler/brw_compiler.h
@@ -311,7 +311,7 @@ struct brw_vs_prog_key {
*
* For each attribute, a combination of BRW_ATTRIB_WA_*.
*
- * For OpenGL, where we expose a maximum of 16 user input atttributes
+ * For OpenGL, where we expose a maximum of 16 user input attributes
* we only need up to VERT_ATTRIB_MAX slots, however, in Vulkan
* slots preceding VERT_ATTRIB_GENERIC0 are unused and we can
* expose up to 28 user input vertex attributes that are mapped to slots
@@ -1404,7 +1404,7 @@ struct brw_sf_prog_data {
uint32_t urb_read_length;
uint32_t total_grf;
-   /* Each vertex may have upto 12 attributes, 4 components each,
-    * except WPOS which requires only 2. (11*4 + 2) == 44 ==> 11
+   /* Each vertex may have up to 12 attributes, 4 components each,
+    * except WPOS which requires only 2. (11*4 + 2) == 46 ==> 12
    * rows.
*
diff --git a/src/intel/compiler/brw_eu_compact.c b/src/intel/compiler/brw_eu_compact.c
index 376eddd99da..e034d6f025f 100644
--- a/src/intel/compiler/brw_eu_compact.c
+++ b/src/intel/compiler/brw_eu_compact.c
@@ -2534,7 +2534,7 @@ brw_compact_instructions(struct brw_codegen *p, int start_offset,
dst = store + offset;
}
- /* If we didn't compact this intruction, we need to move it down into
+ /* If we didn't compact this instruction, we need to move it down into
* place.
*/
if (offset != src_offset) {
diff --git a/src/intel/compiler/brw_fs.cpp b/src/intel/compiler/brw_fs.cpp
index 2c426e3508e..ddb72c54b9a 100644
--- a/src/intel/compiler/brw_fs.cpp
+++ b/src/intel/compiler/brw_fs.cpp
@@ -1048,7 +1048,7 @@ unsigned
fs_inst::flags_written(const intel_device_info *devinfo) const
{
/* On Gfx4 and Gfx5, sel.l (for min) and sel.ge (for max) are implemented
-    * using a separte cmpn and sel instruction. This lowering occurs in
-    * fs_vistor::lower_minmax which is called very, very late.
+    * using a separate cmpn and sel instruction. This lowering occurs in
+    * fs_visitor::lower_minmax which is called very, very late.
*/
if ((conditional_mod && ((opcode != BRW_OPCODE_SEL || devinfo->ver <= 5) &&
@@ -1405,7 +1405,7 @@ fs_visitor::emit_sampleid_setup()
*/
/* SKL+ has an extra bit for the Starting Sample Pair Index to
- * accomodate 16x MSAA.
+ * accommodate 16x MSAA.
*/
abld.exec_all().group(1, 0)
.AND(t1, fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD)),
diff --git a/src/intel/compiler/brw_fs_bank_conflicts.cpp b/src/intel/compiler/brw_fs_bank_conflicts.cpp
index 500c1a99f8f..ec8d699d7db 100644
--- a/src/intel/compiler/brw_fs_bank_conflicts.cpp
+++ b/src/intel/compiler/brw_fs_bank_conflicts.cpp
@@ -227,7 +227,7 @@ namespace {
}
/**
- * Substract two vectors with saturation.
+ * Subtract two vectors with saturation.
*/
vector_type
subs(vector_type v, vector_type w)
diff --git a/src/intel/compiler/brw_fs_cmod_propagation.cpp b/src/intel/compiler/brw_fs_cmod_propagation.cpp
index ed0c5aa0c70..3b38d0925a7 100644
--- a/src/intel/compiler/brw_fs_cmod_propagation.cpp
+++ b/src/intel/compiler/brw_fs_cmod_propagation.cpp
@@ -111,9 +111,9 @@ cmod_propagate_cmp_to_add(const intel_device_info *devinfo, bblock_t *block,
*
-        * For floating and unsigned types there are two special cases,
+        * For floating and unsigned types there are two special cases,
* when we can remove inst even if scan_inst is saturated: G
- * and LE. Since conditional modifiers are just comparations
+ * and LE. Since conditional modifiers are just comparisons
* against zero, saturating positive values to the upper
- * limit never changes the result of comparation.
+ * limit never changes the result of comparison.
*
* For negative values:
* (sat(x) > 0) == (x > 0) --- false
diff --git a/src/intel/compiler/brw_fs_nir.cpp b/src/intel/compiler/brw_fs_nir.cpp
index 4117de8489e..a5aec283bc3 100644
--- a/src/intel/compiler/brw_fs_nir.cpp
+++ b/src/intel/compiler/brw_fs_nir.cpp
@@ -645,7 +645,7 @@ emit_find_msb_using_lzd(const fs_builder &bld,
* For all negative number cases, including 0x80000000 and
* 0xffffffff, the correct value is obtained from LZD if instead of
* negating the (already negative) value the logical-not is used. A
- * conditonal logical-not can be achieved in two instructions.
+ * conditional logical-not can be achieved in two instructions.
*/
   temp = bld.vgrf(BRW_REGISTER_TYPE_D);
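A scalar model of the trick, for intuition; the helper name and the use of __builtin_clz as the LZD stand-in are assumptions:

```c
#include <stdint.h>

/* findMSB via LZD: for negative inputs, compute on the bitwise
 * complement (the "conditional logical-not" mentioned above); counting
 * leading zeros then gives 31 - msb. Matches GLSL findMSB semantics,
 * including findMSB(0) == findMSB(-1) == -1.
 */
static int
find_msb_via_lzd(int32_t x)
{
   uint32_t v = x < 0 ? ~(uint32_t)x : (uint32_t)x;
   if (v == 0)
      return -1;                   /* no usable bit set */
   return 31 - __builtin_clz(v);   /* __builtin_clz models LZD */
}
```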
@@ -933,7 +933,7 @@ fs_visitor::emit_fsign(const fs_builder &bld, const nir_alu_instr *instr,
}
/**
- * Deteremine whether sources of a nir_op_fmul can be fused with a nir_op_fsign
+ * Determine whether sources of a nir_op_fmul can be fused with a nir_op_fsign
*
* Checks the operands of a \c nir_op_fmul to determine whether or not
* \c emit_fsign could fuse the multiplication with the \c sign() calculation.
@@ -1404,7 +1404,7 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr,
* turn that into a predicate. This leads us to an XOR.l instruction.
*
* Technically, according to the PRM, you're not allowed to use .l on a
- * XOR instruction. However, emperical experiments and Curro's reading
+ * XOR instruction. However, empirical experiments and Curro's reading
* of the simulator source both indicate that it's safe.
*/
fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_D);
@@ -3696,7 +3696,7 @@ fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld,
-         /* Make a loop that sends a message to the pixel interpolater
+         /* Make a loop that sends a message to the pixel interpolator
* for the sample number in each live channel. If there are
* multiple channels with the same sample number then these
- * will be handled simultaneously with a single interation of
+ * will be handled simultaneously with a single iteration of
* the loop.
*/
bld.emit(BRW_OPCODE_DO);
@@ -6667,7 +6667,7 @@ setup_imm_df(const fs_builder &bld, double v)
if (devinfo->ver >= 8)
return brw_imm_df(v);
-   /* gfx7.5 does not support DF immediates straighforward but the DIM
-    * instruction allows to set the 64-bit immediate value.
+   /* gfx7.5 does not support DF immediates straightforwardly, but the DIM
+    * instruction allows setting the 64-bit immediate value.
*/
if (devinfo->platform == INTEL_PLATFORM_HSW) {
diff --git a/src/intel/compiler/brw_fs_reg_allocate.cpp b/src/intel/compiler/brw_fs_reg_allocate.cpp
index 9045e35fb7e..eb5db21eb00 100644
--- a/src/intel/compiler/brw_fs_reg_allocate.cpp
+++ b/src/intel/compiler/brw_fs_reg_allocate.cpp
@@ -560,7 +560,7 @@ fs_reg_alloc::setup_inst_interference(const fs_inst *inst)
*
* We are avoiding using grf127 as part of the destination of send
* messages adding a node interference to the grf127_send_hack_node.
- * This node has a fixed asignment to grf127.
+ * This node has a fixed assignment to grf127.
*
* We don't apply it to SIMD16 instructions because previous code avoids
* any register overlap between sources and destination.
@@ -570,7 +570,7 @@ fs_reg_alloc::setup_inst_interference(const fs_inst *inst)
ra_add_node_interference(g, first_vgrf_node + inst->dst.nr,
grf127_send_hack_node);
-   /* Spilling instruction are genereated as SEND messages from MRF but as
-    * Gfx7+ supports sending from GRF the driver will maps assingn these
-    * MRF registers to a GRF. Implementations reuses the dest of the send
-    * message as source. So as we will have an overlap for sure, we create
+   /* Spilling instructions are generated as SEND messages from MRF, but as
+    * Gfx7+ supports sending from GRF, the driver will map these
+    * MRF registers to GRFs. The implementation reuses the dest of the send
+    * message as source, so we will have an overlap for sure and we create
@@ -1141,7 +1141,7 @@ fs_reg_alloc::spill_reg(unsigned spill_reg)
spill_max_size(fs));
/* Spills should only write data initialized by the instruction for
- * whichever channels are enabled in the excution mask. If that's
+ * whichever channels are enabled in the execution mask. If that's
* not possible we'll have to emit a matching unspill before the
* instruction and set force_writemask_all on the spill.
*/
diff --git a/src/intel/compiler/brw_ir_vec4.h b/src/intel/compiler/brw_ir_vec4.h
index ec0952b4c04..78d34729c0b 100644
--- a/src/intel/compiler/brw_ir_vec4.h
+++ b/src/intel/compiler/brw_ir_vec4.h
@@ -94,7 +94,7 @@ add_byte_offset(backend_reg *reg, unsigned bytes)
}
}
-} /* namepace detail */
+} /* namespace detail */
static inline src_reg
byte_offset(src_reg reg, unsigned bytes)
diff --git a/src/intel/compiler/brw_nir_lower_shader_calls.c b/src/intel/compiler/brw_nir_lower_shader_calls.c
index 23a09c54473..9a7f330e70c 100644
--- a/src/intel/compiler/brw_nir_lower_shader_calls.c
+++ b/src/intel/compiler/brw_nir_lower_shader_calls.c
@@ -266,7 +266,7 @@ brw_nir_lower_shader_calls(nir_shader *shader)
* return to the caller.
*
* By default, our HW has the ability to handle the fact that a shader is not
- * available and will execute the next folowing shader in the tracing call.
+ * available and will execute the following shader in the tracing call.
* For instance, a RAYGEN shader traces a ray, the tracing generates a hit,
* but there is no ANYHIT shader available. The HW should follow up by
- * execution the CLOSESTHIT shader.
+ * executing the CLOSESTHIT shader.
diff --git a/src/intel/compiler/brw_nir_lower_shading_rate_output.c b/src/intel/compiler/brw_nir_lower_shading_rate_output.c
index fc832b6cba1..7cecbe15b79 100644
--- a/src/intel/compiler/brw_nir_lower_shading_rate_output.c
+++ b/src/intel/compiler/brw_nir_lower_shading_rate_output.c
@@ -39,7 +39,7 @@
*
* According to the specification, the shading rate output can be read &
* written. A read after a write should report a different value if the
- * implemention decides on different primitive shading rate for some reason.
+ * implementation decides on different primitive shading rate for some reason.
* This is never the case in our implementation.
*/
diff --git a/src/intel/compiler/brw_nir_opt_peephole_ffma.c b/src/intel/compiler/brw_nir_opt_peephole_ffma.c
index fb5ff5c61f1..2d28daeddc7 100644
--- a/src/intel/compiler/brw_nir_opt_peephole_ffma.c
+++ b/src/intel/compiler/brw_nir_opt_peephole_ffma.c
@@ -79,7 +79,7 @@ get_mul_for_src(nir_alu_src *src, unsigned num_components,
nir_alu_instr *alu = nir_instr_as_alu(instr);
- /* We want to bail if any of the other ALU operations involved is labled
+ /* We want to bail if any of the other ALU operations involved is labeled
* exact. One reason for this is that, while the value that is changing is
* actually the result of the add and not the multiply, the intention of
* the user when they specify an exact multiply is that they want *that*
diff --git a/src/intel/compiler/brw_vec4.cpp b/src/intel/compiler/brw_vec4.cpp
index 7ee58fecad4..96978dbb317 100644
--- a/src/intel/compiler/brw_vec4.cpp
+++ b/src/intel/compiler/brw_vec4.cpp
@@ -2329,7 +2329,7 @@ vec4_visitor::apply_logical_swizzle(struct brw_reg *hw_reg,
* second half of a register and needs a vertical stride of 0 so we:
*
* 1. Don't violate register region restrictions.
- * 2. Activate the gfx7 instruction decompresion bug exploit when
+ * 2. Activate the gfx7 instruction decompression bug exploit when
* execsize > 4
*/
if (hw_reg->subnr % REG_SIZE == 16) {
@@ -2461,7 +2461,7 @@ vec4_visitor::run()
OPT(lower_64bit_mad_to_mul_add);
- /* Run this before payload setup because tesselation shaders
+ /* Run this before payload setup because tessellation shaders
* rely on it to prevent cross dvec2 regioning on DF attributes
* that are setup so that XY are on the second half of register and
* ZW are in the first half of the next.
diff --git a/src/intel/compiler/brw_vec4_copy_propagation.cpp b/src/intel/compiler/brw_vec4_copy_propagation.cpp
index 6c5dd28044b..717a7186b2c 100644
--- a/src/intel/compiler/brw_vec4_copy_propagation.cpp
+++ b/src/intel/compiler/brw_vec4_copy_propagation.cpp
@@ -389,7 +389,7 @@ try_copy_propagate(const struct intel_device_info *devinfo,
if (inst->is_send_from_grf())
return false;
- /* we can't generally copy-propagate UD negations becuse we
+ /* we can't generally copy-propagate UD negations because we
* end up accessing the resulting values as signed integers
* instead. See also resolve_ud_negate().
*/
diff --git a/src/intel/compiler/brw_vec4_generator.cpp b/src/intel/compiler/brw_vec4_generator.cpp
index 05b6f07f098..965216020fa 100644
--- a/src/intel/compiler/brw_vec4_generator.cpp
+++ b/src/intel/compiler/brw_vec4_generator.cpp
@@ -1992,7 +1992,7 @@ generate_code(struct brw_codegen *p,
brw_set_default_access_mode(p, BRW_ALIGN_1);
/* When converting from DF->F, we set destination's stride as 2 as an
- * aligment requirement. But in IVB/BYT, each DF implicitly writes
+ * alignment requirement. But in IVB/BYT, each DF implicitly writes
* two floats, being the first one the converted value. So we don't
* need to explicitly set stride 2, but 1.
*/
diff --git a/src/intel/compiler/brw_vec4_nir.cpp b/src/intel/compiler/brw_vec4_nir.cpp
index 71dede7348b..10ff8a01387 100644
--- a/src/intel/compiler/brw_vec4_nir.cpp
+++ b/src/intel/compiler/brw_vec4_nir.cpp
@@ -285,7 +285,7 @@ setup_imm_df(const vec4_builder &bld, double v)
const intel_device_info *devinfo = bld.shader->devinfo;
assert(devinfo->ver == 7);
-   /* gfx7.5 does not support DF immediates straighforward but the DIM
-    * instruction allows to set the 64-bit immediate value.
+   /* gfx7.5 does not support DF immediates straightforwardly, but the DIM
+    * instruction allows setting the 64-bit immediate value.
*/
if (devinfo->verx10 == 75) {
@@ -851,7 +851,7 @@ emit_find_msb_using_lzd(const vec4_builder &bld,
* For all negative number cases, including 0x80000000 and
* 0xffffffff, the correct value is obtained from LZD if instead of
* negating the (already negative) value the logical-not is used. A
- * conditonal logical-not can be achieved in two instructions.
+ * conditional logical-not can be achieved in two instructions.
*/
temp = src_reg(bld.vgrf(BRW_REGISTER_TYPE_D));
@@ -1302,7 +1302,7 @@ vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
* turn that into a predicate. This leads us to an XOR.l instruction.
*
* Technically, according to the PRM, you're not allowed to use .l on a
- * XOR instruction. However, emperical experiments and Curro's reading
+ * XOR instruction. However, empirical experiments and Curro's reading
* of the simulator source both indicate that it's safe.
*/
src_reg tmp = src_reg(this, glsl_type::ivec4_type);
diff --git a/src/intel/compiler/gfx6_gs_visitor.cpp b/src/intel/compiler/gfx6_gs_visitor.cpp
index ef605bcf075..825ff7988ba 100644
--- a/src/intel/compiler/gfx6_gs_visitor.cpp
+++ b/src/intel/compiler/gfx6_gs_visitor.cpp
@@ -111,7 +111,7 @@ gfx6_gs_visitor::emit_prolog()
-   /* PrimitveID is delivered in r0.1 of the thread payload. If the program
+   /* PrimitiveID is delivered in r0.1 of the thread payload. If the program
* needs it we have to move it to a separate register where we can map
- * the atttribute.
+ * the attribute.
*
* Notice that we cannot use a virtual register for this, because we need to
* map all input attributes to hardware registers in setup_payload(),
@@ -155,7 +155,7 @@ gfx6_gs_visitor::gs_emit_vertex(int stream_id)
* each will generate a scratch write with the same offset into
* scratch space (thus, each one overwriting the previous). This is
* not what we want. What we will do instead is emit PSIZ to a
- * a regular temporary register, then move that resgister into the
+       * regular temporary register, then move that register into the
* array. This way we only have one instruction with an array
* destination and we only produce a single scratch write.
*/
@@ -329,7 +329,7 @@ gfx6_gs_visitor::emit_thread_end()
}
/* Here we have to:
- * 1) Emit an FF_SYNC messsage to obtain an initial VUE handle.
+ * 1) Emit an FF_SYNC message to obtain an initial VUE handle.
* 2) Loop over all buffered vertex data and write it to corresponding
* URB entries.
* 3) Allocate new VUE handles for all vertices other than the first.
diff --git a/src/intel/dev/intel_device_info.c b/src/intel/dev/intel_device_info.c
index d807139b189..f97b11164eb 100644
--- a/src/intel/dev/intel_device_info.c
+++ b/src/intel/dev/intel_device_info.c
@@ -1190,7 +1190,7 @@ update_l3_banks(struct intel_device_info *devinfo)
/* At some point in time, some people decided to redefine what topology means,
* from useful HW related information (slice, subslice, etc...), to much less
- * useful generic stuff that noone cares about (a single slice with lots of
+ * useful generic stuff that no one cares about (a single slice with lots of
* subslices). Of course all of this was done without asking the people who
* defined the topology query in the first place, to solve a lack of
- * information Gfx10+. This function is here to workaround the fact it's not
+ * information on Gfx10+. This function is here to work around the fact it's not
diff --git a/src/intel/dev/intel_device_info.h b/src/intel/dev/intel_device_info.h
index 379bb451adb..abfddd08c32 100644
--- a/src/intel/dev/intel_device_info.h
+++ b/src/intel/dev/intel_device_info.h
@@ -355,7 +355,7 @@ struct intel_device_info
* SKL (or scale factor of 83.33333333) and a frequency of 19200000Hz for
* BXT.
*
- * For simplicty to fit with the current code scaling by a single constant
+ * For simplicity to fit with the current code scaling by a single constant
* to map from raw timestamps to nanoseconds we now do the conversion in
* floating point instead of integer arithmetic.
*
diff --git a/src/intel/genxml/gen11.xml b/src/intel/genxml/gen11.xml
index 3112654c1c7..cdf4a508682 100644
--- a/src/intel/genxml/gen11.xml
+++ b/src/intel/genxml/gen11.xml
@@ -5517,7 +5517,7 @@
-
+
diff --git a/src/intel/genxml/gen12.xml b/src/intel/genxml/gen12.xml
index 412adf1286e..b839605035d 100644
--- a/src/intel/genxml/gen12.xml
+++ b/src/intel/genxml/gen12.xml
@@ -5692,7 +5692,7 @@
-
+
diff --git a/src/intel/genxml/gen125.xml b/src/intel/genxml/gen125.xml
index 1125d0cbf65..ba3ba2a1d8f 100644
--- a/src/intel/genxml/gen125.xml
+++ b/src/intel/genxml/gen125.xml
@@ -6023,7 +6023,7 @@
-
+
diff --git a/src/intel/genxml/gen9.xml b/src/intel/genxml/gen9.xml
index a009fe28331..1ec968a0aad 100644
--- a/src/intel/genxml/gen9.xml
+++ b/src/intel/genxml/gen9.xml
@@ -5156,7 +5156,7 @@
-
+
diff --git a/src/intel/genxml/gen_sort_tags.py b/src/intel/genxml/gen_sort_tags.py
index fef63ac9ec1..b320b76310f 100755
--- a/src/intel/genxml/gen_sort_tags.py
+++ b/src/intel/genxml/gen_sort_tags.py
@@ -116,7 +116,7 @@ def process(filename):
enum_dict[e.attrib['name']] = e
# Structs are a bit annoying because they can refer to each other. We sort
- # them alphabetically and then build a graph of depedencies. Finally we go
+ # them alphabetically and then build a graph of dependencies. Finally we go
# through the alphabetically sorted list and print out dependencies first.
structs = sorted(xml.findall('./struct'), key=get_name)
wrapped_struct_dict = {}
diff --git a/src/intel/isl/README b/src/intel/isl/README
index a6573e10d18..7a12217b1dd 100644
--- a/src/intel/isl/README
+++ b/src/intel/isl/README
@@ -102,7 +102,7 @@ Errata
ISL acquired the term 'surface element' from the Broadwell PRM [1], which
defines it as follows:
- An element is defined as a pixel in uncompresed surface formats, and as
+ An element is defined as a pixel in uncompressed surface formats, and as
a compression block in compressed surface formats. For MSFMT_DEPTH_STENCIL
type multisampled surfaces, an element is a sample.
diff --git a/src/intel/isl/gen_format_layout.py b/src/intel/isl/gen_format_layout.py
index dfc8a7e1825..72c8ad8f038 100644
--- a/src/intel/isl/gen_format_layout.py
+++ b/src/intel/isl/gen_format_layout.py
@@ -165,7 +165,7 @@ class Channel(object):
class Format(object):
- """Class taht contains all values needed by the template."""
+ """Class that contains all values needed by the template."""
def __init__(self, line):
# pylint: disable=invalid-name
self.name = line[0].strip()
diff --git a/src/intel/isl/isl.c b/src/intel/isl/isl.c
index ca8d26b78c7..14a53493b1f 100644
--- a/src/intel/isl/isl.c
+++ b/src/intel/isl/isl.c
@@ -680,7 +680,7 @@ isl_surf_choose_tiling(const struct isl_device *dev,
#undef CHOOSE
- /* No tiling mode accomodates the inputs. */
+ /* No tiling mode accommodates the inputs. */
return false;
}
@@ -877,7 +877,7 @@ isl_choose_image_alignment_el(const struct isl_device *dev,
*image_align_el = isl_extent3d(1, 1, 1);
} else if (ISL_GFX_VER(dev) < 12) {
/* On gfx7+, HiZ surfaces are always aligned to 16x8 pixels in the
- * primary surface which works out to 2x2 HiZ elments.
+ * primary surface which works out to 2x2 HiZ elements.
*/
*image_align_el = isl_extent3d(2, 2, 1);
} else {
@@ -1268,11 +1268,11 @@ isl_calc_phys_slice0_extent_sa_gfx4_2d(
* alignment here is safe because we later align the row pitch and array
* pitch to the tile boundary. It is safe even for
* ISL_MSAA_LAYOUT_INTERLEAVED, because phys_level0_sa is already scaled
- * to accomodate the interleaved samples.
+ * to accommodate the interleaved samples.
*
* For linear surfaces, reducing the alignment here permits us to later
* choose an arbitrary, non-aligned row pitch. If the surface backs
- * a VkBuffer, then an arbitrary pitch may be needed to accomodate
+ * a VkBuffer, then an arbitrary pitch may be needed to accommodate
* VkBufferImageCopy::bufferRowLength.
*/
*phys_slice0_sa = (struct isl_extent2d) {
@@ -2062,7 +2062,7 @@ isl_surf_get_hiz_surf(const struct isl_device *dev,
* from Sandy Bridge through Broadwell, HiZ compresses samples in the
* primary depth surface. On Sky Lake and onward, HiZ compresses pixels.
*
- * There are a number of different ways that this discrepency could be
+ * There are a number of different ways that this discrepancy could be
* handled. The way we have chosen is to simply make MSAA HiZ have the
* same number of samples as the parent surface pre-Sky Lake and always be
* single-sampled on Sky Lake and above. Since the block sizes of
diff --git a/src/intel/isl/isl.h b/src/intel/isl/isl.h
index 9a401e5c323..238841c277b 100644
--- a/src/intel/isl/isl.h
+++ b/src/intel/isl/isl.h
@@ -1161,7 +1161,7 @@ typedef uint32_t isl_sample_count_mask_t;
*/
enum isl_msaa_layout {
/**
- * @brief Suface is single-sampled.
+ * @brief Surface is single-sampled.
*/
ISL_MSAA_LAYOUT_NONE,
@@ -1389,7 +1389,7 @@ struct isl_tile_info {
/**
* The physical size of the tile in bytes and rows of bytes
*
- * This field determines how the tiles of a surface are physically layed
+ * This field determines how the tiles of a surface are physically laid
* out in memory. The logical and physical tile extent are frequently the
* same but this is not always the case. For instance, a W-tile (which is
* always used with ISL_FORMAT_R8) has a logical size of 64el x 64el but
@@ -1642,7 +1642,7 @@ struct isl_surf_fill_state_info {
uint32_t mocs;
/**
- * The auxilary surface or NULL if no auxilary surface is to be used.
+ * The auxiliary surface or NULL if no auxiliary surface is to be used.
*/
const struct isl_surf *aux_surf;
enum isl_aux_usage aux_usage;
diff --git a/src/intel/isl/isl_emit_depth_stencil.c b/src/intel/isl/isl_emit_depth_stencil.c
index 7bd6375cde5..07f430112ad 100644
--- a/src/intel/isl/isl_emit_depth_stencil.c
+++ b/src/intel/isl/isl_emit_depth_stencil.c
@@ -217,7 +217,7 @@ isl_genX(emit_depth_stencil_hiz_s)(const struct isl_device *dev, void *batch,
* to match the depth-buffer value for `Depth`. It may be a
* documentation bug, since the other fields don't require this.
*
- * TODO: Confirm documentation and remove seeting of `Depth` if not
+ * TODO: Confirm documentation and remove setting of `Depth` if not
* required.
*/
sb.Depth = db.Depth;
@@ -274,7 +274,7 @@ isl_genX(emit_depth_stencil_hiz_s)(const struct isl_device *dev, void *batch,
* value of RENDER_SURFACE_STATE::AuxiliarySurfaceMode say:
*
* "If Number of multisamples > 1, programming this value means MSAA
- * compression is enabled for that surface. Auxillary surface is MSC
+ * compression is enabled for that surface. Auxiliary surface is MSC
* with tile y."
*
* Since this interpretation ignores whether the surface is
diff --git a/src/intel/isl/isl_gfx7.c b/src/intel/isl/isl_gfx7.c
index e949110ed52..ee3bf268a91 100644
--- a/src/intel/isl/isl_gfx7.c
+++ b/src/intel/isl/isl_gfx7.c
@@ -113,7 +113,7 @@ isl_gfx7_choose_msaa_layout(const struct isl_device *dev,
return false;
/* From the Ivybridge PRM, Volume 4 Part 1 p72, SURFACE_STATE, Multisampled
- * Suface Storage Format:
+ * Surface Storage Format:
*
* +---------------------+----------------------------------------------------------------+
- *    | MSFMT_MSS           | Multsampled surface was/is rendered as a render target  |
+ *    | MSFMT_MSS           | Multisampled surface was/is rendered as a render target |
@@ -128,7 +128,7 @@ isl_gfx7_choose_msaa_layout(const struct isl_device *dev,
require_interleaved = true;
/* From the Ivybridge PRM, Volume 4 Part 1 p72, SURFACE_STATE, Multisampled
- * Suface Storage Format:
+ * Surface Storage Format:
*
* If the surface’s Number of Multisamples is MULTISAMPLECOUNT_8, Width
* is >= 8192 (meaning the actual surface width is >= 8193 pixels), this
@@ -138,7 +138,7 @@ isl_gfx7_choose_msaa_layout(const struct isl_device *dev,
require_array = true;
/* From the Ivybridge PRM, Volume 4 Part 1 p72, SURFACE_STATE, Multisampled
- * Suface Storage Format:
+ * Surface Storage Format:
*
* If the surface’s Number of Multisamples is MULTISAMPLECOUNT_8,
* ((Depth+1) * (Height+1)) is > 4,194,304, OR if the surface’s Number
@@ -150,7 +150,7 @@ isl_gfx7_choose_msaa_layout(const struct isl_device *dev,
require_interleaved = true;
/* From the Ivybridge PRM, Volume 4 Part 1 p72, SURFACE_STATE, Multisampled
- * Suface Storage Format:
+ * Surface Storage Format:
*
* This field must be set to MSFMT_DEPTH_STENCIL if Surface Format is
* one of the following: I24X8_UNORM, L24X8_UNORM, A24X8_UNORM, or
diff --git a/src/intel/isl/isl_gfx9.c b/src/intel/isl/isl_gfx9.c
index 286daa77234..c5674ece017 100644
--- a/src/intel/isl/isl_gfx9.c
+++ b/src/intel/isl/isl_gfx9.c
@@ -132,7 +132,7 @@ isl_gfx9_choose_image_alignment_el(const struct isl_device *dev,
*
* - For Sampling Engine and Render Target Surfaces: This field
* specifies the vertical alignment requirement in elements for the
- * surface. [...] An element is defined as a pixel in uncompresed
+ * surface. [...] An element is defined as a pixel in uncompressed
* surface formats, and as a compression block in compressed surface
* formats. For MSFMT_DEPTH_STENCIL type multisampled surfaces, an
* element is a sample.
@@ -144,7 +144,7 @@ isl_gfx9_choose_image_alignment_el(const struct isl_device *dev,
*
* See the appropriate Alignment table in the "Surface Layout and
* Tiling" section under Common Surface Formats for the table of
- * alignment values for Tiled Resrouces.
+ * alignment values for Tiled Resources.
*
* - For uncompressed surfaces, the units of "j" are rows of pixels on
* the physical surface. For compressed texture formats, the units of
diff --git a/src/intel/isl/isl_surface_state.c b/src/intel/isl/isl_surface_state.c
index 914889d8a90..8ff98ffa14a 100644
--- a/src/intel/isl/isl_surface_state.c
+++ b/src/intel/isl/isl_surface_state.c
@@ -575,7 +575,7 @@ isl_genX(surf_fill_state_s)(const struct isl_device *dev, void *state,
* say:
*
* "If Number of multisamples > 1, programming this value means
- * MSAA compression is enabled for that surface. Auxillary surface
+ * MSAA compression is enabled for that surface. Auxiliary surface
* is MSC with tile y."
*
* Since this interpretation ignores whether the surface is
@@ -656,7 +656,7 @@ isl_genX(surf_fill_state_s)(const struct isl_device *dev, void *state,
#endif
}
- /* The auxiliary buffer info is filled when it's useable by the HW.
+ /* The auxiliary buffer info is filled when it's usable by the HW.
*
* Starting with Gfx12, the only form of compression that can be used
* with RENDER_SURFACE_STATE which requires an aux surface is MCS.
@@ -710,7 +710,7 @@ isl_genX(surf_fill_state_s)(const struct isl_device *dev, void *state,
/* From the SKL PRM, Programming Note under Sampler Output Channel
* Mapping:
*
- * If a surface has an associated HiZ Auxilliary surface, the
+ * If a surface has an associated HiZ Auxiliary surface, the
* Sampler L2 Bypass Mode Disable field in the RENDER_SURFACE_STATE
* must be set.
*/
@@ -796,7 +796,7 @@ isl_genX(buffer_fill_state_s)(const struct isl_device *dev, void *state,
uint64_t buffer_size = info->size_B;
-   /* Uniform and Storage buffers need to have surface size not less that the
-    * aligned 32-bit size of the buffer. To calculate the array lenght on
+   /* Uniform and Storage buffers need to have surface size not less than the
+    * aligned 32-bit size of the buffer. To calculate the array length on
    * unsized arrays in StorageBuffer the last 2 bits store the padding size
-    * added to the surface, so we can calculate latter the original buffer
+    * added to the surface, so we can calculate later the original buffer
* size to know the number of elements.
diff --git a/src/intel/isl/tests/isl_surf_get_image_offset_test.c b/src/intel/isl/tests/isl_surf_get_image_offset_test.c
index 80bc29864f1..f2ab12520b5 100644
--- a/src/intel/isl/tests/isl_surf_get_image_offset_test.c
+++ b/src/intel/isl/tests/isl_surf_get_image_offset_test.c
@@ -32,7 +32,7 @@
#define BDW_GT2_DEVID 0x161a
-// An asssert that works regardless of NDEBUG.
+// An assert that works regardless of NDEBUG.
#define t_assert(cond) \
do { \
if (!(cond)) { \
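A plausible completion of the truncated macro; the elided body is an assumption, but any report-and-abort body gives the same NDEBUG-proof behavior:

```c
#include <stdio.h>
#include <stdlib.h>

/* Fires even when NDEBUG disables the regular assert(): report the
 * failed condition with its location, then abort.
 */
#define t_assert_sketch(cond)                                   \
   do {                                                         \
      if (!(cond)) {                                            \
         fprintf(stderr, "%s:%d: assertion failed: %s\n",       \
                 __FILE__, __LINE__, #cond);                    \
         abort();                                               \
      }                                                         \
   } while (0)
```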
diff --git a/src/intel/perf/intel_perf.c b/src/intel/perf/intel_perf.c
index a5ed622439f..d70eccf7a15 100644
--- a/src/intel/perf/intel_perf.c
+++ b/src/intel/perf/intel_perf.c
@@ -1204,7 +1204,7 @@ intel_perf_query_result_accumulate_fields(struct intel_perf_query_result *result
start + field->location,
end + field->location);
/* no_oa_accumulate=true is used when doing GL perf queries, we
- * manually parse the OA reports from the OA buffer and substract
+ * manually parse the OA reports from the OA buffer and subtract
* unrelated deltas, so don't accumulate the begin/end reports here.
*/
if (!no_oa_accumulate) {
diff --git a/src/intel/perf/oa-bdw.xml b/src/intel/perf/oa-bdw.xml
index 247fec4f678..215167b75da 100644
--- a/src/intel/perf/oa-bdw.xml
+++ b/src/intel/perf/oa-bdw.xml
@@ -7705,7 +7705,7 @@
diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
--- a/src/intel/vulkan/anv_allocator.c
+++ b/src/intel/vulkan/anv_allocator.c
             0) {
/* First return divisor aligned and sized chunks. We start returning
- * larger blocks from the end fo the chunk, since they should already be
+ * larger blocks from the end of the chunk, since they should already be
* aligned to divisor. Also anv_state_pool_return_blocks() only accepts
* aligned chunks.
*/
@@ -1102,7 +1102,7 @@ anv_state_pool_alloc_no_vg(struct anv_state_pool *pool,
alloc_size,
pool->block_size,
&padding);
- /* Everytime we allocate a new state, add it to the state pool */
+ /* Every time we allocate a new state, add it to the state pool */
uint32_t idx;
UNUSED VkResult result = anv_state_table_add(&pool->table, &idx, 1);
assert(result == VK_SUCCESS);
diff --git a/src/intel/vulkan/anv_batch_chain.c b/src/intel/vulkan/anv_batch_chain.c
index 7b9faf94e49..246a92dce15 100644
--- a/src/intel/vulkan/anv_batch_chain.c
+++ b/src/intel/vulkan/anv_batch_chain.c
@@ -442,14 +442,14 @@ anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
static VkResult
anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
- struct anv_batch *batch, size_t aditional,
+ struct anv_batch *batch, size_t additional,
size_t batch_padding)
{
assert(batch->start == bbo->bo->map);
bbo->length = batch->next - batch->start;
size_t new_size = bbo->bo->size;
- while (new_size <= bbo->length + aditional + batch_padding)
+ while (new_size <= bbo->length + additional + batch_padding)
new_size *= 2;
if (new_size == bbo->bo->size)
@@ -571,7 +571,7 @@ static void
emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
struct anv_bo *bo, uint32_t offset)
{
- /* In gfx8+ the address field grew to two dwords to accomodate 48 bit
+ /* In gfx8+ the address field grew to two dwords to accommodate 48 bit
* offsets. The high 16 bits are in the last dword, so we can use the gfx8
* version in either case, as long as we set the instruction length in the
* header accordingly. This means that we always emit three dwords here
@@ -1689,7 +1689,7 @@ setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
/* Since we aren't in the softpin case, all of our STATE_BASE_ADDRESS BOs
* will get added automatically by processing relocations on the batch
* buffer. We have to add the surface state BO manually because it has
- * relocations of its own that we need to be sure are processsed.
+ * relocations of its own that we need to be sure are processed.
*/
result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
ss_pool->block_pool.bo,
@@ -2060,7 +2060,7 @@ anv_queue_exec_utrace_locked(struct anv_queue *queue,
* with our list of BOs out of sync with our list of gem handles.
*
* 2) The algorithm we use for building the list of unique buffers isn't
- * thread-safe. While the client is supposed to syncronize around
+ * thread-safe. While the client is supposed to synchronize around
* QueueSubmit, this would be extremely difficult to debug if it ever came
* up in the wild due to a broken app. It's better to play it safe and
* just lock around QueueSubmit.
diff --git a/src/intel/vulkan/anv_descriptor_set.c b/src/intel/vulkan/anv_descriptor_set.c
index 64a0f899c3f..3b0648ec0d9 100644
--- a/src/intel/vulkan/anv_descriptor_set.c
+++ b/src/intel/vulkan/anv_descriptor_set.c
@@ -436,7 +436,7 @@ VkResult anv_CreateDescriptorSetLayout(
immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
}
- /* We need to allocate decriptor set layouts off the device allocator
+ /* We need to allocate descriptor set layouts off the device allocator
* with DEVICE scope because they are reference counted and may not be
* destroyed when vkDestroyDescriptorSetLayout is called.
*/
@@ -846,11 +846,11 @@ void anv_DestroyPipelineLayout(
* view surface state. The spec allows us to fail to allocate due to
* fragmentation in all cases but two: 1) after pool reset, allocating up
* until the pool size with no freeing must succeed and 2) allocating and
- * freeing only descriptor sets with the same layout. Case 1) is easy enogh,
+ * freeing only descriptor sets with the same layout. Case 1) is easy enough,
- * and the free lists lets us recycle blocks for case 2).
+ * and the free lists let us recycle blocks for case 2).
*/
-/* The vma heap reserves 0 to mean NULL; we have to offset by some ammount to
+/* The vma heap reserves 0 to mean NULL; we have to offset by some amount to
* ensure we can allocate the entire BO without hitting zero. The actual
* amount doesn't matter.
*/
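A minimal sketch of that offsetting, assuming util_vma_heap_init from src/util/vma.h; the offset value and helper name are assumptions, since any nonzero amount works:

```c
#include <stdint.h>
#include "util/vma.h"

/* Start the vma heap at a nonzero base so that offset 0 (the heap's
 * NULL) is never handed out.
 */
#define POOL_HEAP_OFFSET 4096

static void
pool_heap_init_sketch(struct util_vma_heap *heap, uint64_t bo_size)
{
   util_vma_heap_init(heap, POOL_HEAP_OFFSET, bo_size - POOL_HEAP_OFFSET);
}
```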
diff --git a/src/intel/vulkan/anv_device.c b/src/intel/vulkan/anv_device.c
index 0bde811a1df..0d7d7dfface 100644
--- a/src/intel/vulkan/anv_device.c
+++ b/src/intel/vulkan/anv_device.c
@@ -4662,7 +4662,7 @@ VkResult anv_GetCalibratedTimestampsEXT(
* clock edges is when the sampled clock with the largest period is
* sampled at the end of that period but right at the beginning of the
* sampling interval and some other clock is sampled right at the
- * begining of its sampling period and right at the end of the
+ * beginning of its sampling period and right at the end of the
* sampling interval. Let's assume the GPU has the longest clock
* period and that the application is sampling GPU and monotonic:
*
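The scenario the comment sets up yields a simple bound; a sketch under the assumption that the derivation concludes with interval plus slowest period (names hypothetical):

```c
#include <stdint.h>

/* The two samples can be almost a full sampling interval apart, plus
 * up to one period of the slowest clock sampled.
 */
static uint64_t
max_deviation_sketch(uint64_t sample_interval_ns, uint64_t slowest_period_ns)
{
   return sample_interval_ns + slowest_period_ns;
}
```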
diff --git a/src/intel/vulkan/anv_image.c b/src/intel/vulkan/anv_image.c
index 124b547e807..75840c3a5ac 100644
--- a/src/intel/vulkan/anv_image.c
+++ b/src/intel/vulkan/anv_image.c
@@ -326,7 +326,7 @@ add_surface(struct anv_device *device,
*
* If hardware limitations force us to use a shadow surface, then the same
* limitations may also constrain the tiling of the primary surface; therefore
- * paramater @a inout_primary_tiling_flags.
+ * the parameter @a inout_primary_tiling_flags.
*
* If the image plane is a separate stencil plane and if the user provided
* VkImageStencilUsageCreateInfoEXT, then @a usage must be stencilUsage.
@@ -1804,7 +1804,7 @@ VkResult anv_BindImageMemory2(
}
case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR: {
/* Ignore this struct on Android, we cannot access swapchain
- * structures threre.
+ * structures there.
*/
#ifndef VK_USE_PLATFORM_ANDROID_KHR
const VkBindImageMemorySwapchainInfoKHR *swapchain_info =
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index db73bd9842b..7623c78a22e 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -476,7 +476,7 @@ struct anv_bo {
uint32_t refcount;
/* Index into the current validation list. This is used by the
- * validation list building alrogithm to track which buffers are already
+ * validation list building algorithm to track which buffers are already
* in the validation list so that we can ensure uniqueness.
*/
uint32_t exec_obj_index;
@@ -1824,7 +1824,7 @@ struct anv_descriptor_set_binding_layout {
*/
uint32_t array_size;
- /* Index into the flattend descriptor set */
+ /* Index into the flattened descriptor set */
uint32_t descriptor_index;
/* Index into the dynamic state array for a dynamic buffer */
@@ -2971,7 +2971,7 @@ struct anv_cmd_state {
*/
bool hiz_enabled;
- /* We ensure the registers for the gfx12 D16 fix are initalized at the
+ /* We ensure the registers for the gfx12 D16 fix are initialized at the
* first non-NULL depth stencil packet emission of every command buffer.
* For secondary command buffer execution, we transfer the state from the
* last command buffer to the primary (if known).
diff --git a/src/intel/vulkan/genX_blorp_exec.c b/src/intel/vulkan/genX_blorp_exec.c
index d216b2c55c8..eb749fde6b6 100644
--- a/src/intel/vulkan/genX_blorp_exec.c
+++ b/src/intel/vulkan/genX_blorp_exec.c
@@ -288,7 +288,7 @@ blorp_exec_on_render(struct blorp_batch *batch,
#if GFX_VER >= 11
/* The PIPE_CONTROL command description says:
*
- * "Whenever a Binding Table Index (BTI) used by a Render Taget Message
+ * "Whenever a Binding Table Index (BTI) used by a Render Target Message
* points to a different RENDER_SURFACE_STATE, SW must issue a Render
* Target Cache Flush by enabling this bit. When render target flush
* is set due to new association of BTI, PS Scoreboard Stall bit must
@@ -321,7 +321,7 @@ blorp_exec_on_render(struct blorp_batch *batch,
#if GFX_VER >= 11
/* The PIPE_CONTROL command description says:
*
- * "Whenever a Binding Table Index (BTI) used by a Render Taget Message
+ * "Whenever a Binding Table Index (BTI) used by a Render Target Message
* points to a different RENDER_SURFACE_STATE, SW must issue a Render
* Target Cache Flush by enabling this bit. When render target flush
* is set due to new association of BTI, PS Scoreboard Stall bit must
diff --git a/src/intel/vulkan/genX_cmd_buffer.c b/src/intel/vulkan/genX_cmd_buffer.c
index 2cafe874d3f..e1cb33380f2 100644
--- a/src/intel/vulkan/genX_cmd_buffer.c
+++ b/src/intel/vulkan/genX_cmd_buffer.c
@@ -120,7 +120,7 @@ genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
/* Emit a render target cache flush.
*
* This isn't documented anywhere in the PRM. However, it seems to be
- * necessary prior to changing the surface state base adress. Without
+ * necessary prior to changing the surface state base address. Without
* this, we get GPU hangs when using multi-level command buffers which
* clear depth, reset state base address, and then go render stuff.
*/
@@ -237,7 +237,7 @@ genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
#endif /* GFX_VERx10 < 125 */
/* After re-setting the surface state base address, we have to do some
- * cache flusing so that the sampler engine will pick up the new
+ * cache flushing so that the sampler engine will pick up the new
* SURFACE_STATE objects and binding tables. From the Broadwell PRM,
* Shared Function > 3D Sampler > State > State Caching (page 96):
*
@@ -1678,7 +1678,7 @@ genX(BeginCommandBuffer)(
}
/* We send an "Indirect State Pointers Disable" packet at
- * EndCommandBuffer, so all push contant packets are ignored during a
+ * EndCommandBuffer, so all push constant packets are ignored during a
* context restore. Documentation says after that command, we need to
* emit push constants again before any rendering operation. So we
* flag them dirty here to make sure they get emitted.
@@ -6926,7 +6926,7 @@ void genX(CmdBeginRendering)(
#if GFX_VER >= 11
/* The PIPE_CONTROL command description says:
*
- * "Whenever a Binding Table Index (BTI) used by a Render Taget Message
+ * "Whenever a Binding Table Index (BTI) used by a Render Target Message
* points to a different RENDER_SURFACE_STATE, SW must issue a Render
* Target Cache Flush by enabling this bit. When render target flush
* is set due to new association of BTI, PS Scoreboard Stall bit must
diff --git a/src/intel/vulkan/genX_pipeline.c b/src/intel/vulkan/genX_pipeline.c
index a71a71446a0..bcee8787a79 100644
--- a/src/intel/vulkan/genX_pipeline.c
+++ b/src/intel/vulkan/genX_pipeline.c
@@ -476,7 +476,7 @@ emit_3dstate_sbe(struct anv_graphics_pipeline *pipeline)
continue;
}
- /* We have to subtract two slots to accout for the URB entry output
+ /* We have to subtract two slots to account for the URB entry output
* read offset in the VS and GS stages.
*/
const int source_attr = slot - 2 * urb_entry_read_offset;