agx: Defeature indirect vbufs
vb_mask can include garbage vbufs; we can't rely on it. This will prevent
a regression when switching to u_blitter-based clears. It is also simpler
and shrinks the VS shader key, so all in all a good thing.

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/18380>
commit 44d79d39b6
parent 0fccd564b6
committed by Marge Bot
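For context, a minimal standalone C sketch (not Mesa code; all names and structures below are illustrative assumptions) of the failure mode the message describes: a bound-buffer mask can keep bits set for slots that no longer hold a usable buffer, so sizing and walking the upload from the mask can touch garbage, whereas pushing one 64-bit base per VBO the draw actually fetches only ever touches live slots.

/* Standalone sketch, not Mesa code: why iterating a possibly-stale
 * bound-buffer mask is unsafe, while fetching only the buffers a draw
 * actually uses is not.  All names here are hypothetical. */
#include <assert.h>
#include <stdint.h>
#include <stddef.h>

#define MAX_VBUFS 16

struct vbuf {
   uint64_t gpu_base;   /* 64-bit GPU address of the buffer */
};

struct state {
   uint32_t vb_mask;                /* may contain stale ("garbage") bits */
   struct vbuf *vbufs[MAX_VBUFS];   /* NULL when the slot is not really bound */
};

/* Old-style scheme: size the upload from the mask and walk every set bit,
 * including slots the current draw never reads.  A stale bit dereferences
 * a garbage slot. */
static void
upload_all_bases(const struct state *st, uint64_t out[MAX_VBUFS])
{
   for (unsigned i = 0; i < MAX_VBUFS; ++i) {
      if (!(st->vb_mask & (1u << i)))
         continue;

      assert(st->vbufs[i] != NULL && "stale mask bit hits a garbage vbuf");
      out[i] = st->vbufs[i]->gpu_base;
   }
}

/* New-style scheme: push a single base per VBO that the vertex fetch
 * actually references, so garbage slots are never touched. */
static uint64_t
upload_used_base(const struct state *st, unsigned vbo)
{
   assert(st->vb_mask & (1u << vbo) && "oob");
   return st->vbufs[vbo]->gpu_base;
}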
@@ -311,10 +311,7 @@ agx_emit_load_attr(agx_builder *b, agx_index *dests, nir_intrinsic_instr *instr)
    agx_index offset = agx_imad(b, element_id, shifted_stride, src_offset, 0);
 
    /* Each VBO has a 64-bit = 4 x 16-bit address, lookup the base address as a sysval */
-   unsigned num_vbos = key->vs.num_vbufs;
-   unsigned base_length = (num_vbos * 4);
-   agx_index base = agx_indexed_sysval(b->shader,
-         AGX_PUSH_VBO_BASES, AGX_SIZE_64, buf * 4, base_length);
+   agx_index base = agx_vbo_base(b->shader, buf);
 
    /* Load the data */
    assert(instr->num_components <= 4);
@@ -33,11 +33,13 @@ enum agx_push_type {
     * 16-bit sizes for optional bounds checking (SIZES) */
    AGX_PUSH_UBO_BASES,
    AGX_PUSH_UBO_SIZES,
-   AGX_PUSH_VBO_BASES,
    AGX_PUSH_VBO_SIZES,
    AGX_PUSH_SSBO_BASES,
    AGX_PUSH_SSBO_SIZES,
 
+   /* 64-bit VBO base pointer */
+   AGX_PUSH_VBO_BASE,
+
    /* Push the attached constant memory */
    AGX_PUSH_CONSTANTS,
 
@@ -79,6 +81,8 @@ struct agx_push {
          uint16_t ubo;
          uint16_t offset;
       } ubo_data;
+
+      uint32_t vbo;
    };
 };
 
@@ -696,6 +696,9 @@ agx_index
 agx_indexed_sysval(agx_context *ctx, enum agx_push_type type, enum agx_size size,
       unsigned index, unsigned length);
 
+agx_index
+agx_vbo_base(agx_context *ctx, unsigned vbo);
+
 /* Routines defined for AIR */
 
 void agx_print_instr(agx_instr *I, FILE *fp);
@@ -62,3 +62,33 @@ agx_indexed_sysval(agx_context *ctx, enum agx_push_type type,
 
    return agx_uniform(base + index, size);
 }
+
+agx_index
+agx_vbo_base(agx_context *ctx, unsigned vbo)
+{
+   /* Check if we already pushed */
+   for (unsigned i = 0; i < ctx->out->push_ranges; ++i) {
+      struct agx_push push = ctx->out->push[i];
+
+      if (push.type == AGX_PUSH_VBO_BASE && push.vbo == vbo) {
+         return agx_uniform(push.base, AGX_SIZE_64);
+      }
+   }
+
+   /* Otherwise, push */
+   assert(ctx->out->push_ranges < AGX_MAX_PUSH_RANGES);
+
+   ctx->push_base = ALIGN_POT(ctx->push_base, 4);
+
+   unsigned base = ctx->push_base;
+   ctx->push_base += 4;
+
+   ctx->out->push[ctx->out->push_ranges++] = (struct agx_push) {
+      .type = AGX_PUSH_VBO_BASE,
+      .base = base,
+      .length = 4,
+      .vbo = vbo,
+   };
+
+   return agx_uniform(base, AGX_SIZE_64);
+}
@@ -1083,9 +1083,7 @@ agx_update_shader(struct agx_context *ctx, struct agx_compiled_shader **out,
 static bool
 agx_update_vs(struct agx_context *ctx)
 {
-   struct agx_vs_shader_key key = {
-      .num_vbufs = util_last_bit(ctx->vb_mask),
-   };
+   struct agx_vs_shader_key key = { 0 };
 
    memcpy(key.attributes, ctx->attributes,
           sizeof(key.attributes[0]) * AGX_MAX_ATTRIBS);
@@ -66,21 +66,19 @@ agx_push_location_direct(struct agx_context *ctx, struct agx_push push,
       return ptr.gpu;
    }
 
-   case AGX_PUSH_VBO_BASES: {
-      unsigned count = util_last_bit(ctx->vb_mask);
-      struct agx_ptr ptr = agx_pool_alloc_aligned(&batch->pool, count * sizeof(uint64_t), 8);
-      uint64_t *addresses = ptr.cpu;
-
-      u_foreach_bit(i, ctx->vb_mask) {
-         struct pipe_vertex_buffer vb = ctx->vertex_buffers[i];
-         assert(!vb.is_user_buffer);
-
-         struct agx_bo *bo = agx_resource(vb.buffer.resource)->bo;
-         agx_batch_add_bo(batch, bo);
-
-         addresses[i] = bo->ptr.gpu + vb.buffer_offset;
-      }
+   case AGX_PUSH_VBO_BASE: {
+      struct agx_ptr ptr = agx_pool_alloc_aligned(&batch->pool, sizeof(uint64_t), 8);
+      uint64_t *address = ptr.cpu;
+
+      assert(ctx->vb_mask & BITFIELD_BIT(push.vbo) && "oob");
+      struct pipe_vertex_buffer vb = ctx->vertex_buffers[push.vbo];
+      assert(!vb.is_user_buffer);
+
+      struct agx_bo *bo = agx_resource(vb.buffer.resource)->bo;
+      agx_batch_add_bo(batch, bo);
+
+      *address = bo->ptr.gpu + vb.buffer_offset;
 
       return ptr.gpu;
    }