radeonsi: don't use struct si_descriptors for vertex buffer descriptors

VBO descriptor code will change a lot one day.

Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
Author: Marek Olšák
Date:   2018-02-03 16:10:25 +01:00

parent 61d6ff3ba3
commit 8d6e6b1d7c
6 changed files with 46 additions and 33 deletions
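
In short, the commit drops the struct si_descriptors wrapper for vertex buffer descriptors and keeps that state in three plain si_context fields, deriving the descriptor list's GPU address on demand instead of caching it. A minimal sketch of the resulting access pattern (illustration only; the field declarations come from the si_pipe.h hunk below):

	/* New si_context state replacing struct si_descriptors vertex_buffers: */
	uint32_t *vb_descriptors_gpu_list;           /* CPU pointer into the mapped upload buffer */
	struct r600_resource *vb_descriptors_buffer; /* backing buffer returned by u_upload_alloc */
	unsigned vb_descriptors_offset;              /* byte offset of the list within that buffer */

	/* The descriptor list's GPU address, recomputed where needed: */
	uint64_t va = sctx->vb_descriptors_buffer->gpu_address +
		      sctx->vb_descriptors_offset;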

src/gallium/drivers/radeonsi/si_blit.c

@@ -86,7 +86,7 @@ void si_blitter_end(struct pipe_context *ctx)
 	/* Restore shader pointers because the VS blit shader changed all
 	 * non-global VS user SGPRs. */
 	sctx->shader_pointers_dirty |= SI_DESCS_SHADER_MASK(VERTEX);
-	sctx->vertex_buffer_pointer_dirty = true;
+	sctx->vertex_buffer_pointer_dirty = sctx->vb_descriptors_buffer != NULL;
 	si_mark_atom_dirty(sctx, &sctx->shader_pointers.atom);
 }

src/gallium/drivers/radeonsi/si_cp_dma.c

@@ -516,9 +516,8 @@ static void cik_prefetch_VBO_descriptors(struct si_context *sctx)
 	if (!sctx->vertex_elements)
 		return;
 
-	cik_prefetch_TC_L2_async(sctx, &sctx->vertex_buffers.buffer->b.b,
-				 sctx->vertex_buffers.gpu_address -
-				 sctx->vertex_buffers.buffer->gpu_address,
+	cik_prefetch_TC_L2_async(sctx, &sctx->vb_descriptors_buffer->b.b,
+				 sctx->vb_descriptors_offset,
 				 sctx->vertex_elements->desc_list_byte_size);
 }

src/gallium/drivers/radeonsi/si_debug.c

@@ -740,10 +740,20 @@ static void si_dump_descriptors(struct si_context *sctx,
 		enabled_images = sctx->images[processor].enabled_mask;
 	}
 
-	if (processor == PIPE_SHADER_VERTEX) {
+	if (processor == PIPE_SHADER_VERTEX &&
+	    sctx->vb_descriptors_buffer &&
+	    sctx->vb_descriptors_gpu_list &&
+	    sctx->vertex_elements) {
 		assert(info); /* only CS may not have an info struct */
 
-		si_dump_descriptor_list(sctx->screen, &sctx->vertex_buffers, name,
+		struct si_descriptors desc = {};
+		desc.buffer = sctx->vb_descriptors_buffer;
+		desc.list = sctx->vb_descriptors_gpu_list;
+		desc.gpu_list = sctx->vb_descriptors_gpu_list;
+		desc.element_dw_size = 4;
+		desc.num_active_slots = sctx->vertex_elements->desc_list_byte_size / 16;
+
+		si_dump_descriptor_list(sctx->screen, &desc, name,
 					" - Vertex buffer", 4, info->num_inputs,
 					si_identity, log);
 	}
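
A note on the constants in this hunk: each vertex-buffer resource descriptor is 4 dwords (16 bytes), so element_dw_size is 4 and the slot count falls out of the list size (sketch restating the hunk's arithmetic):

	desc.element_dw_size = 4;                   /* 4 dwords = 16 bytes per descriptor */
	desc.num_active_slots =
		sctx->vertex_elements->desc_list_byte_size / 16; /* bytes / 16 = slots */

The stack-local si_descriptors exists only because si_dump_descriptor_list still takes that struct; the authoritative state now lives in the si_context fields.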

src/gallium/drivers/radeonsi/si_descriptors.c

@@ -986,7 +986,6 @@ static void si_get_buffer_from_descriptors(struct si_buffer_resources *buffers,
 
 static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
 {
-	struct si_descriptors *desc = &sctx->vertex_buffers;
 	int count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
 	int i;
@@ -1003,17 +1002,16 @@ static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
 				      RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
 	}
 
-	if (!desc->buffer)
+	if (!sctx->vb_descriptors_buffer)
 		return;
 
 	radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
-				  desc->buffer, RADEON_USAGE_READ,
-				  RADEON_PRIO_DESCRIPTORS);
+				  sctx->vb_descriptors_buffer, RADEON_USAGE_READ,
+				  RADEON_PRIO_DESCRIPTORS);
 }
 
 bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
 {
 	struct si_vertex_elements *velems = sctx->vertex_elements;
-	struct si_descriptors *desc = &sctx->vertex_buffers;
 	unsigned i, count;
 	unsigned desc_list_byte_size;
 	unsigned first_vb_use_mask;
@@ -1034,22 +1032,22 @@ bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
 	 * directly through a staging buffer and don't go through
 	 * the fine-grained upload path.
 	 */
-	unsigned buffer_offset = 0;
 	u_upload_alloc(sctx->b.b.const_uploader, 0,
 		       desc_list_byte_size,
 		       si_optimal_tcc_alignment(sctx, desc_list_byte_size),
-		       &buffer_offset,
-		       (struct pipe_resource**)&desc->buffer, (void**)&ptr);
-	if (!desc->buffer) {
-		desc->gpu_address = 0;
+		       &sctx->vb_descriptors_offset,
+		       (struct pipe_resource**)&sctx->vb_descriptors_buffer,
+		       (void**)&ptr);
+	if (!sctx->vb_descriptors_buffer) {
+		sctx->vb_descriptors_offset = 0;
+		sctx->vb_descriptors_gpu_list = NULL;
 		return false;
 	}
 
-	desc->gpu_address = desc->buffer->gpu_address + buffer_offset;
-	desc->list = ptr;
+	sctx->vb_descriptors_gpu_list = ptr;
 	radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
-				  desc->buffer, RADEON_USAGE_READ,
-				  RADEON_PRIO_DESCRIPTORS);
+				  sctx->vb_descriptors_buffer, RADEON_USAGE_READ,
+				  RADEON_PRIO_DESCRIPTORS);
 
 	assert(count <= SI_MAX_ATTRIBS);
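
For context: u_upload_alloc, the gallium upload-manager helper used above, suballocates from a mapped upload buffer and returns the three things the commit now stores directly on si_context: the byte offset of the allocation, a reference to the backing buffer (left NULL on failure, which is what the !sctx->vb_descriptors_buffer check catches), and a CPU pointer to the mapped range. A hedged sketch of that contract, with size and alignment as placeholder names:

	unsigned offset;                  /* byte offset within the upload buffer */
	struct r600_resource *buf = NULL; /* backing buffer; stays NULL on failure */
	uint32_t *ptr;                    /* CPU pointer for writing the descriptors */

	u_upload_alloc(sctx->b.b.const_uploader, 0, size, alignment,
		       &offset, (struct pipe_resource **)&buf, (void **)&ptr);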
@@ -1926,7 +1924,7 @@ static void si_mark_shader_pointers_dirty(struct si_context *sctx,
 					     SI_NUM_SHADER_DESCS);
 
 	if (shader == PIPE_SHADER_VERTEX)
-		sctx->vertex_buffer_pointer_dirty = sctx->vertex_buffers.buffer != NULL;
+		sctx->vertex_buffer_pointer_dirty = sctx->vb_descriptors_buffer != NULL;
 
 	si_mark_atom_dirty(sctx, &sctx->shader_pointers.atom);
 }
@@ -1934,7 +1932,7 @@ static void si_mark_shader_pointers_dirty(struct si_context *sctx,
 static void si_shader_pointers_begin_new_cs(struct si_context *sctx)
 {
 	sctx->shader_pointers_dirty = u_bit_consecutive(0, SI_NUM_DESCS);
-	sctx->vertex_buffer_pointer_dirty = sctx->vertex_buffers.buffer != NULL;
+	sctx->vertex_buffer_pointer_dirty = sctx->vb_descriptors_buffer != NULL;
 	si_mark_atom_dirty(sctx, &sctx->shader_pointers.atom);
 	sctx->graphics_bindless_pointer_dirty = sctx->bindless_descriptors.buffer != NULL;
 	sctx->compute_bindless_pointer_dirty = sctx->bindless_descriptors.buffer != NULL;
@@ -2128,8 +2126,14 @@ void si_emit_graphics_shader_pointers(struct si_context *sctx,
 		~u_bit_consecutive(SI_DESCS_RW_BUFFERS, SI_DESCS_FIRST_COMPUTE);
 
 	if (sctx->vertex_buffer_pointer_dirty) {
-		si_emit_shader_pointer(sctx, &sctx->vertex_buffers,
-				       sh_base[PIPE_SHADER_VERTEX]);
+		struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+		unsigned sh_offset = sh_base[PIPE_SHADER_VERTEX] +
+				     SI_SGPR_VERTEX_BUFFERS * 4;
+
+		si_emit_shader_pointer_head(cs, sh_offset, 1);
+		si_emit_shader_pointer_body(sctx->screen, cs,
+					    sctx->vb_descriptors_buffer->gpu_address +
+					    sctx->vb_descriptors_offset);
 		sctx->vertex_buffer_pointer_dirty = false;
 	}
@@ -2665,11 +2669,6 @@ void si_init_all_descriptors(struct si_context *sctx)
 			   RADEON_PRIO_SHADER_RINGS, RADEON_PRIO_CONST_BUFFER);
 	sctx->descriptors[SI_DESCS_RW_BUFFERS].num_active_slots = SI_NUM_RW_BUFFERS;
 
-	si_init_descriptors(&sctx->vertex_buffers, SI_SGPR_VERTEX_BUFFERS,
-			    4, SI_NUM_VERTEX_BUFFERS);
-	FREE(sctx->vertex_buffers.list); /* not used */
-	sctx->vertex_buffers.list = NULL;
-
 	/* Initialize an array of 1024 bindless descriptors, when the limit is
 	 * reached, just make it larger and re-upload the whole array.
 	 */
@@ -2771,8 +2770,9 @@ void si_release_all_descriptors(struct si_context *sctx)
 	for (i = 0; i < SI_NUM_DESCS; ++i)
 		si_release_descriptors(&sctx->descriptors[i]);
-	sctx->vertex_buffers.list = NULL; /* points into a mapped buffer */
-	si_release_descriptors(&sctx->vertex_buffers);
+
+	r600_resource_reference(&sctx->vb_descriptors_buffer, NULL);
+	sctx->vb_descriptors_gpu_list = NULL; /* points into a mapped buffer */
+
 	si_release_bindless_descriptors(sctx);
 }

src/gallium/drivers/radeonsi/si_hw_context.c

@@ -216,7 +216,7 @@ void si_begin_new_cs(struct si_context *ctx)
 		ctx->prefetch_L2_mask |= SI_PREFETCH_VS;
 	if (ctx->queued.named.ps)
 		ctx->prefetch_L2_mask |= SI_PREFETCH_PS;
-	if (ctx->vertex_buffers.buffer && ctx->vertex_elements)
+	if (ctx->vb_descriptors_buffer && ctx->vertex_elements)
 		ctx->prefetch_L2_mask |= SI_PREFETCH_VBO_DESCRIPTORS;
 
 	/* CLEAR_STATE disables all colorbuffers, so only enable bound ones. */

src/gallium/drivers/radeonsi/si_pipe.h

@@ -497,8 +497,12 @@ struct si_context {
 	bool				flatshade;
 	bool				do_update_shaders;
 
+	/* vertex buffer descriptors */
+	uint32_t			*vb_descriptors_gpu_list;
+	struct r600_resource		*vb_descriptors_buffer;
+	unsigned			vb_descriptors_offset;
+
 	/* shader descriptors */
-	struct si_descriptors		vertex_buffers;
 	struct si_descriptors		descriptors[SI_NUM_DESCS];
 	unsigned			descriptors_dirty;
 	unsigned			shader_pointers_dirty;