winsys/amdgpu: add REWIND emulation via INDIRECT_BUFFER into cs_check_space

Acked-by: Nicolai Hähnle <nicolai.haehnle@amd.com>

Author: Marek Olšák
Date:   2019-04-04 10:02:27 -04:00
parent 4eb377d1c3
commit 187f1c999f

9 changed files with 29 additions and 18 deletions

@@ -382,7 +382,7 @@ static void r300_clear(struct pipe_context* pipe,
              r300_get_num_cs_end_dwords(r300);
    /* Reserve CS space. */
-   if (!r300->rws->cs_check_space(r300->cs, dwords)) {
+   if (!r300->rws->cs_check_space(r300->cs, dwords, false)) {
       r300_flush(&r300->context, PIPE_FLUSH_ASYNC, NULL);
    }

@@ -215,7 +215,7 @@ static boolean r300_reserve_cs_dwords(struct r300_context *r300,
    cs_dwords += r300_get_num_cs_end_dwords(r300);
    /* Reserve requested CS space. */
-   if (!r300->rws->cs_check_space(r300->cs, cs_dwords)) {
+   if (!r300->rws->cs_check_space(r300->cs, cs_dwords, false)) {
       r300_flush(&r300->context, PIPE_FLUSH_ASYNC, NULL);
       flushed = TRUE;
    }

@@ -84,7 +84,7 @@ void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
    num_dw += 10;
    /* Flush if there's not enough space. */
-   if (!ctx->b.ws->cs_check_space(ctx->b.gfx.cs, num_dw)) {
+   if (!ctx->b.ws->cs_check_space(ctx->b.gfx.cs, num_dw, false)) {
       ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
    }
 }

@@ -286,7 +286,7 @@ void r600_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
     * engine busy while uploads are being submitted.
     */
    num_dw++; /* for emit_wait_idle below */
-   if (!ctx->ws->cs_check_space(ctx->dma.cs, num_dw) ||
+   if (!ctx->ws->cs_check_space(ctx->dma.cs, num_dw, false) ||
       ctx->dma.cs->used_vram + ctx->dma.cs->used_gart > 64 * 1024 * 1024 ||
       !radeon_cs_memory_below_limit(ctx->screen, ctx->dma.cs, vram, gtt)) {
      ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);

@@ -572,8 +572,12 @@ struct radeon_winsys {
     *
     * \param cs A command stream.
     * \param dw Number of CS dwords requested by the caller.
+    * \param force_chaining Chain the IB into a new buffer now to discard
+    *                       the CP prefetch cache (to emulate PKT3_REWIND)
     * \return true if there is enough space
     */
-   bool (*cs_check_space)(struct radeon_cmdbuf *cs, unsigned dw);
+   bool (*cs_check_space)(struct radeon_cmdbuf *cs, unsigned dw,
+                          bool force_chaining);
    /**
     * Return the buffer list.

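As an illustration of the new parameter (not part of this commit): a caller that wants to invalidate the CP's prefetch of the current IB, i.e. emulate PKT3_REWIND, passes force_chaining = true so the winsys chains into a fresh IB immediately instead of merely reporting free space. The helper below is a minimal sketch; the name si_emulate_rewind and the radeonsi-style fields (ctx->gfx_cs, ctx->ws) are assumptions for illustration only.

/* Hypothetical caller sketch (not in this patch): emulate PKT3_REWIND by
 * forcing the winsys to chain into a new IB, which drops whatever the CP
 * has already prefetched from the tail of the current IB. */
static void si_emulate_rewind(struct si_context *ctx, unsigned num_dw)
{
   struct radeon_cmdbuf *cs = ctx->gfx_cs;

   /* force_chaining = true: chain now rather than just checking space. */
   if (!ctx->ws->cs_check_space(cs, num_dw, true)) {
      /* The winsys did not chain (e.g. no IB chaining support) and the
       * current IB is out of space; fall back to starting a fresh IB. */
      si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
   }
}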
@@ -164,7 +164,7 @@ void si_need_dma_space(struct si_context *ctx, unsigned num_dw,
     */
    num_dw++; /* for emit_wait_idle below */
    if (!ctx->sdma_uploads_in_progress &&
-       (!ws->cs_check_space(ctx->dma_cs, num_dw) ||
+       (!ws->cs_check_space(ctx->dma_cs, num_dw, false) ||
        ctx->dma_cs->used_vram + ctx->dma_cs->used_gart > 64 * 1024 * 1024 ||
        !radeon_cs_memory_below_limit(ctx->screen, ctx->dma_cs, vram, gtt))) {
       si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);

@@ -55,7 +55,7 @@ void si_need_gfx_cs_space(struct si_context *ctx)
    ctx->vram = 0;
    unsigned need_dwords = si_get_minimum_num_gfx_cs_dwords(ctx);
-   if (!ctx->ws->cs_check_space(cs, need_dwords))
+   if (!ctx->ws->cs_check_space(cs, need_dwords, false))
       si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
 }

@@ -1031,7 +1031,8 @@ static bool amdgpu_cs_validate(struct radeon_cmdbuf *rcs)
    return true;
 }
-static bool amdgpu_cs_check_space(struct radeon_cmdbuf *rcs, unsigned dw)
+static bool amdgpu_cs_check_space(struct radeon_cmdbuf *rcs, unsigned dw,
+                                  bool force_chaining)
 {
    struct amdgpu_ib *ib = amdgpu_ib(rcs);
    struct amdgpu_cs *cs = amdgpu_cs_from_ib(ib);
@@ -1048,16 +1049,21 @@ static bool amdgpu_cs_check_space(struct radeon_cmdbuf *rcs, unsigned dw)
    ib->max_check_space_size = MAX2(ib->max_check_space_size,
                                    safe_byte_size);
-   if (requested_size > amdgpu_ib_max_submit_dwords(ib->ib_type))
-      return false;
-   ib->max_ib_size = MAX2(ib->max_ib_size, requested_size);
-   if (rcs->current.max_dw - rcs->current.cdw >= dw)
-      return true;
-   if (!amdgpu_cs_has_chaining(cs))
+   /* If force_chaining is true, we can't return. We have to chain. */
+   if (!force_chaining) {
+      if (requested_size > amdgpu_ib_max_submit_dwords(ib->ib_type))
+         return false;
+      ib->max_ib_size = MAX2(ib->max_ib_size, requested_size);
+      if (rcs->current.max_dw - rcs->current.cdw >= dw)
+         return true;
+   }
+   if (!amdgpu_cs_has_chaining(cs)) {
+      assert(!force_chaining);
       return false;
+   }
    /* Allocate a new chunk */
    if (rcs->num_prev >= rcs->max_prev) {

@@ -424,7 +424,8 @@ static bool radeon_drm_cs_validate(struct radeon_cmdbuf *rcs)
    return status;
 }
-static bool radeon_drm_cs_check_space(struct radeon_cmdbuf *rcs, unsigned dw)
+static bool radeon_drm_cs_check_space(struct radeon_cmdbuf *rcs, unsigned dw,
+                                      bool force_chaining)
 {
    assert(rcs->current.cdw <= rcs->current.max_dw);
    return rcs->current.max_dw - rcs->current.cdw >= dw;