broadcom: fix typos

Reviewed-by: Alejandro Piñeiro <apinheiro@igalia.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/22591>
Author: Harri Nieminen
Date: 2023-04-20 13:15:05 +03:00
Committed by: Marge Bot
Parent: 177c92fe7c
Commit: c3c63cb1d8

28 changed files with 47 additions and 47 deletions

@@ -508,7 +508,7 @@ spec@oes_shader_io_blocks@compiler@layout-location-aliasing.vert,Fail
# https://gitlab.freedesktop.org/mesa/piglit/-/merge_requests/800
spec@!opengl es 3.0@gles-3.0-transform-feedback-uniform-buffer-object,Fail
-# Precission differences between expected and obtained; works if
+# Precision differences between expected and obtained; works if
# exporting V3D_DEBUG=tmu32.
spec@oes_texture_view@rendering-formats,Fail
spec@oes_texture_view@rendering-formats@clear GL_R8 as GL_R8I,Fail
@@ -521,7 +521,7 @@ spec@oes_texture_view@rendering-formats@clear GL_RGBA8 as GL_RG16F,Fail
spec@oes_texture_view@rendering-formats@clear GL_RGBA8 as GL_RG16I,Fail
spec@oes_texture_view@rendering-formats@clear GL_RGBA8 as GL_RGBA8I,Fail
-# Also related with precission issues
+# Also related with precision issues
spec@oes_texture_view@rendering-formats@clear GL_RGB10_A2 as GL_R32F,Fail
spec@oes_texture_view@rendering-formats@clear GL_RGB10_A2 as GL_R32I,Fail
spec@oes_texture_view@rendering-formats@clear GL_RGB10_A2 as GL_RG16F,Fail

@@ -299,7 +299,7 @@
<value name="packed complete patches" value="2"/>
</enum>
<enum name="Primitve counters" prefix="V3D_PRIM_COUNTS">
<enum name="Primitive counters" prefix="V3D_PRIM_COUNTS">
<value name="tf_words_buffer0" value="0"/>
<value name="tf_words_buffer1" value="1"/>
<value name="tf_words_buffer2" value="2"/>

@@ -49,7 +49,7 @@
#define V3D_MAX_BUFFER_RANGE (1 << 30)
-/* Sub-pixel precission bits in the rasterizer */
+/* Sub-pixel precision bits in the rasterizer */
#define V3D_COORD_SHIFT 6
/* Size of a cache line */

@@ -41,7 +41,7 @@ static const char *v3d_performance_counters[][3] = {
{"TLB", "TLB-quads-written-to-color-buffer", "[TLB] Quads with valid pixels written to colour buffer"},
{"PTB", "PTB-primitives-discarded-outside-viewport", "[PTB] Primitives discarded by being outside the viewport"},
{"PTB", "PTB-primitives-need-clipping", "[PTB] Primitives that need clipping"},
{"PTB", "PTB-primitives-discared-reversed", "[PTB] Primitives that are discarded because they are reversed"},
{"PTB", "PTB-primitives-discarded-reversed", "[PTB] Primitives that are discarded because they are reversed"},
{"QPU", "QPU-total-idle-clk-cycles", "[QPU] Total idle clock cycles for all QPUs"},
{"QPU", "QPU-total-active-clk-cycles-vertex-coord-shading", "[QPU] Total active clock cycles for all QPUs doing vertex/coordinate/user shading (counts only when QPU is not stalled)"},
{"QPU", "QPU-total-active-clk-cycles-fragment-shading", "[QPU] Total active clock cycles for all QPUs doing fragment shading (counts only when QPU is not stalled)"},

@@ -164,7 +164,7 @@ vir_emit_thrsw(struct v3d_compile *c)
c->last_thrsw->qpu.sig.thrsw = true;
c->last_thrsw_at_top_level = !c->in_control_flow;
-/* We need to lock the scoreboard before any tlb acess happens. If this
+/* We need to lock the scoreboard before any tlb access happens. If this
* thread switch comes after we have emitted a tlb load, then it means
* that we can't lock on the last thread switch any more.
*/
@@ -304,7 +304,7 @@ ntq_flush_tmu(struct v3d_compile *c)
/**
* Queues a pending thread switch + LDTMU/TMUWT for a TMU operation. The caller
-* is reponsible for ensuring that doing this doesn't overflow the TMU fifos,
+* is responsible for ensuring that doing this doesn't overflow the TMU fifos,
* and more specifically, the output fifo, since that can't stall.
*/
void
@@ -1741,7 +1741,7 @@ ntq_emit_alu(struct v3d_compile *c, nir_alu_instr *instr)
/* Each TLB read/write setup (a render target or depth buffer) takes an 8-bit
* specifier. They come from a register that's preloaded with 0xffffffff
-* (0xff gets you normal vec4 f16 RT0 writes), and when one is neaded the low
+* (0xff gets you normal vec4 f16 RT0 writes), and when one is needed the low
* 8 bits are shifted off the bottom and 0xff shifted in from the top.
*/
#define TLB_TYPE_F16_COLOR (3 << 6)
@@ -2555,7 +2555,7 @@ vir_emit_tlb_color_read(struct v3d_compile *c, nir_intrinsic_instr *instr)
*
* To fix that, we make sure we always emit a thread switch before the
* first tlb color read. If that happens to be the last thread switch
-* we emit, then everything is fine, but otherwsie, if any code after
+* we emit, then everything is fine, but otherwise, if any code after
* this point needs to emit additional thread switches, then we will
* switch the strategy to locking the scoreboard on the first thread
* switch instead -- see vir_emit_thrsw().

@@ -629,7 +629,7 @@ mux_read_stalls(struct choose_scoreboard *scoreboard,
}
/* We define a max schedule priority to allow negative priorities as result of
-* substracting this max when an instruction stalls. So instructions that
+* subtracting this max when an instruction stalls. So instructions that
* stall have lower priority than regular instructions. */
#define MAX_SCHEDULE_PRIORITY 16
@@ -1196,13 +1196,13 @@ retry:
if (pixel_scoreboard_too_soon(c, scoreboard, inst))
continue;
-/* When we succesfully pair up an ldvary we then try
+/* When we successfully pair up an ldvary we then try
* to merge it into the previous instruction if
* possible to improve pipelining. Don't pick up the
* ldvary now if the follow-up fixup would place
* it in the delay slots of a thrsw, which is not
* allowed and would prevent the fixup from being
-* successul.
+* successful.
*/
if (inst->sig.ldvary &&
scoreboard->last_thrsw_tick + 2 >= scoreboard->tick - 1) {
@@ -1687,7 +1687,7 @@ qpu_inst_after_thrsw_valid_in_delay_slot(struct v3d_compile *c,
assert(slot <= 2);
/* We merge thrsw instructions back into the instruction stream
-* manually, so any instructions scheduled after a thrsw shold be
+* manually, so any instructions scheduled after a thrsw should be
* in the actual delay slots and not in the same slot as the thrsw.
*/
assert(slot >= 1);
@@ -2122,7 +2122,7 @@ fixup_pipelined_ldvary(struct v3d_compile *c,
struct qblock *block,
struct v3d_qpu_instr *inst)
{
-/* We only call this if we have successfuly merged an ldvary into a
+/* We only call this if we have successfully merged an ldvary into a
* previous instruction.
*/
assert(inst->type == V3D_QPU_INSTR_TYPE_ALU);
@@ -2209,7 +2209,7 @@ fixup_pipelined_ldvary(struct v3d_compile *c,
/* By moving ldvary to the previous instruction we make it update
* r5 in the current one, so nothing else in it should write r5.
-* This should've been prevented by our depedency tracking, which
+* This should've been prevented by our dependency tracking, which
* would not allow ldvary to be paired up with an instruction that
* writes r5 (since our dependency tracking doesn't know that the
* ldvary write r5 happens in the next instruction).

@@ -128,7 +128,7 @@ qpu_validate_inst(struct v3d_qpu_validate_state *state, struct qinst *qinst)
*
* FIXME: This would not check correctly for V3D 4.2 versions lower
* than V3D 4.2.14, but that is not a real issue because the simulator
-* will still catch this, and we are not really targetting any such
+* will still catch this, and we are not really targeting any such
* versions anyway.
*/
if (state->c->devinfo->ver < 42) {

@@ -35,7 +35,7 @@ v3d33_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
/* FIXME: We don't bother implementing pipelining for texture reads
* for any pre 4.x hardware. It should be straight forward to do but
-* we are not really testing or even targetting this hardware at
+* we are not really testing or even targeting this hardware at
* present.
*/
ntq_flush_tmu(c);

@@ -87,7 +87,7 @@ enum qfile {
/** A physical register, such as the W coordinate payload. */
QFILE_REG,
-/** One of the regsiters for fixed function interactions. */
+/** One of the registers for fixed function interactions. */
QFILE_MAGIC,
/**
@@ -490,7 +490,7 @@ struct v3d_vs_key {
bool clamp_color;
};
-/** A basic block of VIR intructions. */
+/** A basic block of VIR instructions. */
struct qblock {
struct list_head link;

@@ -216,7 +216,7 @@ v3d_nir_lower_vpm_output(struct v3d_compile *c, nir_builder *b,
}
/* Scalarize outputs if it hasn't happened already, since we want to
-* schedule each VPM write individually. We can skip any outut
+* schedule each VPM write individually. We can skip any output
* components not read by the FS.
*/
for (int i = 0; i < intr->num_components; i++) {
@@ -304,7 +304,7 @@ v3d_nir_lower_end_primitive(struct v3d_compile *c, nir_builder *b,
* doesn't provide means to do that, so we need to apply the swizzle in the
* vertex shader.
*
-* This is required at least in Vulkan to support madatory vertex attribute
+* This is required at least in Vulkan to support mandatory vertex attribute
* format VK_FORMAT_B8G8R8A8_UNORM.
*/
static void
@@ -679,7 +679,7 @@ emit_gs_vpm_output_header_prolog(struct v3d_compile *c, nir_builder *b,
* have a variable just to keep track of the number of vertices we
* emitted and instead we can just compute it here from the header
* offset variable by removing the one generic header slot that always
-* goes at the begining of out header.
+* goes at the beginning of out header.
*/
nir_ssa_def *header_offset =
nir_load_var(b, state->gs.header_offset_var);

@@ -89,7 +89,7 @@ vir_has_side_effects(struct v3d_compile *c, struct qinst *inst)
* pointer, so each read has a side effect (we don't care for ldunif
* because we reconstruct the uniform stream buffer after compiling
* with the surviving uniforms), so allowing DCE to remove
-* one would break follow-up loads. We could fix this by emiting a
+* one would break follow-up loads. We could fix this by emitting a
* unifa for each ldunifa, but each unifa requires 3 delay slots
* before a ldunifa, so that would be quite expensive.
*/
@@ -1159,7 +1159,7 @@ v3d_instr_delay_cb(nir_instr *instr, void *data)
/* We should not use very large delays for TMU instructions. Typically,
* thread switches will be sufficient to hide all or most of the latency,
* so we typically only need a little bit of extra room. If we over-estimate
-* the latency here we may end up unnecesarily delaying the critical path in
+* the latency here we may end up unnecessarily delaying the critical path in
* the shader, which would have a negative effect in performance, so here
* we are trying to strike a balance based on empirical testing.
*/
@@ -1629,7 +1629,7 @@ v3d_attempt_compile(struct v3d_compile *c)
.threshold = c->threads == 4 ? 24 : 48,
/* Vertex shaders share the same memory for inputs and outputs,
-* fragement and geometry shaders do not.
+* fragment and geometry shaders do not.
*/
.stages_with_shared_io_memory =
(((1 << MESA_ALL_SHADER_STAGES) - 1) &
@@ -1727,7 +1727,7 @@ static const struct v3d_compiler_strategy strategies[] = {
/**
* If a particular optimization didn't make any progress during a compile
-* attempt disabling it alone won't allow us to compile the shader successfuly,
+* attempt disabling it alone won't allow us to compile the shader successfully,
* since we'll end up with the same code. Detect these scenarios so we can
* avoid wasting time with useless compiles. We should also consider if the
* gy changes other aspects of the compilation process though, like

@@ -364,7 +364,7 @@ handle_mmu_interruptions(struct v3d_hw *v3d,
uint64_t vio_addr = ((uint64_t)V3D_READ(V3D_MMU_VIO_ADDR) <<
(va_width - 32));
-/* Difference with the kernal: here were are going to abort after
+/* Difference with the kernel: here were are going to abort after
* logging, so we don't bother with some stuff that the kernel does,
* like restoring the MMU ctrl bits
*/

@@ -1434,7 +1434,7 @@ cmd_buffer_emit_subpass_clears(struct v3dv_cmd_buffer *cmd_buffer)
"VK_ATTACHMENT_LOAD_OP_CLEAR.\n");
} else if (subpass->do_depth_clear_with_draw ||
subpass->do_stencil_clear_with_draw) {
perf_debug("Subpass clears DEPTH but loads STENCIL (or viceversa), "
perf_debug("Subpass clears DEPTH but loads STENCIL (or vice versa), "
"falling back to vkCmdClearAttachments for "
"VK_ATTACHMENT_LOAD_OP_CLEAR.\n");
}

@@ -1174,7 +1174,7 @@ v3dv_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
.uniformAndStorageBuffer8BitAccess = true,
.uniformBufferStandardLayout = true,
/* V3D 4.2 wraps TMU vector accesses to 16-byte boundaries, so loads and
-* stores of vectors that cross these boundaries would not work correcly
+* stores of vectors that cross these boundaries would not work correctly
* with scalarBlockLayout and would need to be split into smaller vectors
* (and/or scalars) that don't cross these boundaries. For load/stores
* with dynamic offsets where we can't identify if the offset is
@@ -2436,7 +2436,7 @@ v3dv_AllocateMemory(VkDevice _device,
/* If this memory can be used via VK_KHR_buffer_device_address then we
* will need to manually add the BO to any job submit that makes use of
-* VK_KHR_buffer_device_address, since such jobs may produde buffer
+* VK_KHR_buffer_device_address, since such jobs may produce buffer
* load/store operations that may access any buffer memory allocated with
* this flag and we don't have any means to tell which buffers will be
* accessed through this mechanism since they don't even have to be bound

@@ -75,7 +75,7 @@ v3d_setup_plane_slices(struct v3dv_image *image, uint8_t plane,
uint32_t plane_offset)
{
assert(image->planes[plane].cpp > 0);
-/* Texture Base Adress needs to be 64-byte aligned */
+/* Texture Base Address needs to be 64-byte aligned */
assert(plane_offset % 64 == 0);
uint32_t width = image->planes[plane].width;

@@ -234,7 +234,7 @@ v3dv_CreateRenderPass2(VkDevice _device,
.layout = desc->pDepthStencilAttachment->layout,
};
-/* GFXH-1461: if depth is cleared but stencil is loaded (or viceversa),
+/* GFXH-1461: if depth is cleared but stencil is loaded (or vice versa),
* the clear might get lost. If a subpass has this then we can't emit
* the clear using the TLB and we have to do it as a draw call.
*

@@ -658,7 +658,7 @@ lower_tex_src(nir_builder *b,
uint32_t set = deref->var->data.descriptor_set;
uint32_t binding = deref->var->data.binding;
/* FIXME: this is a really simplified check for the precision to be used
-* for the sampling. Right now we are ony checking for the variables used
+* for the sampling. Right now we are only checking for the variables used
* on the operation itself, but there are other cases that we could use to
* infer the precision requirement.
*/
@@ -1790,7 +1790,7 @@ pipeline_stage_get_nir(struct v3dv_pipeline_stage *p_stage,
if (nir) {
assert(nir->info.stage == broadcom_shader_stage_to_gl(p_stage->stage));
-/* A NIR cach hit doesn't avoid the large majority of pipeline stage
+/* A NIR cache hit doesn't avoid the large majority of pipeline stage
* creation so the cache hit is not recorded in the pipeline feedback
* flags
*/

@@ -916,7 +916,7 @@ struct v3dv_framebuffer {
uint32_t layers;
/* Typically, edge tiles in the framebuffer have padding depending on the
-* underlying tiling layout. One consequnce of this is that when the
+* underlying tiling layout. One consequence of this is that when the
* framebuffer dimensions are not aligned to tile boundaries, tile stores
* would still write full tiles on the edges and write to the padded area.
* If the framebuffer is aliasing a smaller region of a larger image, then
@@ -1482,7 +1482,7 @@ struct v3dv_cmd_buffer_state {
/* FIXME: we have just one client-side BO for the push constants,
* independently of the stageFlags in vkCmdPushConstants, and the
* pipelineBindPoint in vkCmdBindPipeline. We could probably do more stage
-* tunning in the future if it makes sense.
+* tuning in the future if it makes sense.
*/
uint32_t push_constants_size;
uint32_t push_constants_data[MAX_PUSH_CONSTANTS_SIZE / 4];

@@ -757,7 +757,7 @@ handle_cl_job(struct v3dv_queue *queue,
if (job->tmu_dirty_rcl)
submit.flags |= DRM_V3D_SUBMIT_CL_FLUSH_CACHE;
-/* If the job uses VK_KHR_buffer_device_addess we need to ensure all
+/* If the job uses VK_KHR_buffer_device_address we need to ensure all
* buffers flagged with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_KHR
* are included.
*/
@@ -923,7 +923,7 @@ handle_csd_job(struct v3dv_queue *queue,
struct drm_v3d_submit_csd *submit = &job->csd.submit;
-/* If the job uses VK_KHR_buffer_device_addess we need to ensure all
+/* If the job uses VK_KHR_buffer_device_address we need to ensure all
* buffers flagged with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_KHR
* are included.
*/

@@ -87,7 +87,7 @@ push_constants_bo_free(VkDevice _device,
* This method checks if the ubo used for push constants is needed to be
* updated or not.
*
-* push contants ubo is only used for push constants accessed by a non-const
+* push constants ubo is only used for push constants accessed by a non-const
* index.
*/
static void

@@ -277,7 +277,7 @@ v3d_flush_jobs_reading_resource(struct v3d_context *v3d,
}
/**
-* Returns a v3d_job struture for tracking V3D rendering to a particular FBO.
+* Returns a v3d_job structure for tracking V3D rendering to a particular FBO.
*
* If we've already started rendering to this FBO, then return the same job,
* otherwise make a new one. If we're beginning rendering to an FBO, make

@@ -1410,7 +1410,7 @@ v3d_create_image_view_texture_shader_state(struct v3d_context *v3d,
#else /* V3D_VERSION < 40 */
/* V3D 3.x doesn't use support shader image load/store operations on
* textures, so it would get lowered in the shader to general memory
-* acceses.
+* accesses.
*/
#endif
}

@@ -163,7 +163,7 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
* our math.
*/
if (width > 4096 || height > 4096) {
DRM_ERROR("Surface dimesions (%d,%d) too large", width, height);
DRM_ERROR("Surface dimensions (%d,%d) too large", width, height);
return false;
}

@@ -156,7 +156,7 @@ vc4_flush_jobs_reading_resource(struct vc4_context *vc4,
}
/**
-* Returns a vc4_job struture for tracking V3D rendering to a particular FBO.
+* Returns a vc4_job structure for tracking V3D rendering to a particular FBO.
*
* If we've already started rendering to this FBO, then return old same job,
* otherwise make a new one. If we're beginning rendering to an FBO, make

@@ -350,7 +350,7 @@ struct vc4_vs_key {
bool per_vertex_point_size;
};
-/** A basic block of QIR intructions. */
+/** A basic block of QIR instructions. */
struct qblock {
struct list_head link;

@@ -161,7 +161,7 @@ qir_lower_uniforms(struct vc4_compile *c)
if (count <= 1)
continue;
-/* If the block doesn't have a load of hte
+/* If the block doesn't have a load of the
* uniform yet, add it. We could potentially
* do better and CSE MOVs from multiple blocks
* into dominating blocks, except that may

@@ -73,7 +73,7 @@ struct schedule_state {
enum direction { F, R };
/**
-* Marks a dependency between two intructions, that \p after must appear after
+* Marks a dependency between two instructions, that \p after must appear after
* \p before.
*
* Our dependencies are tracked as a DAG. Since we're scheduling bottom-up,

@@ -50,7 +50,7 @@ static const char *v3d_counter_names[] = {
"TLB-quads-written-to-color-buffer",
"PTB-primitives-discarded-outside-viewport",
"PTB-primitives-need-clipping",
"PTB-primitives-discared-reversed",
"PTB-primitives-discarded-reversed",
"QPU-total-idle-clk-cycles",
"QPU-total-clk-cycles-vertex-coord-shading",
"QPU-total-clk-cycles-fragment-shading",