freedreno: fence_server_sync() fixes

Two potential problems: batch re-ordering doesn't really play nicely
with fence_server_sync(), so when we switch away from one batch, detect
the case that we need to sync, and if so flush.  The alternative of
trying to track that later batches depend on an earlier batch that had
an in-fence is hairy, and the normal use-case would be to sync at the
beginning of the frame.

But this brings up the second problem, which is that typically we'll get
told to sync on an in-fence before the first draw, which means before
mesa/st flushes down the framebuffer state to the driver.  Which means
we don't yet have the correct batch to attach the fence to.  So we need
to track the in-fence on the context, and transfer it to the batch
before draws, etc.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/6575>
This commit is contained in:
Rob Clark
2020-09-01 15:24:38 -07:00
committed by Marge Bot
parent aae1e68637
commit bf23ff83e6
5 changed files with 64 additions and 2 deletions

View File

@@ -317,6 +317,13 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx, bool non
struct fd_batch *batch;
uint32_t idx;
/* For normal draw batches, pctx->set_framebuffer_state() handles
* this, but for nondraw batches, this is a nice central location
* to handle them all.
*/
if (nondraw)
fd_context_switch_from(ctx);
fd_screen_lock(ctx->screen);
while ((idx = ffs(~cache->batch_mask)) == 0) {
@@ -382,6 +389,9 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx, bool non
debug_assert(cache->batches[idx] == NULL);
cache->batches[idx] = batch;
if (nondraw)
fd_context_switch_to(ctx, batch);
out:
fd_screen_unlock(ctx->screen);

View File

@@ -220,6 +220,9 @@ fd_context_destroy(struct pipe_context *pctx)
fd_fence_ref(&ctx->last_fence, NULL);
if (ctx->in_fence_fd != -1)
close(ctx->in_fence_fd);
util_copy_framebuffer_state(&ctx->framebuffer, NULL);
fd_batch_reference(&ctx->batch, NULL); /* unref current batch */
fd_bc_invalidate_context(ctx);
@@ -400,6 +403,8 @@ fd_context_init(struct fd_context *ctx, struct pipe_screen *pscreen,
ctx->screen = screen;
ctx->pipe = fd_pipe_new2(screen->dev, FD_PIPE_3D, prio);
ctx->in_fence_fd = -1;
if (fd_device_version(screen->dev) >= FD_VERSION_ROBUSTNESS) {
ctx->context_reset_count = fd_get_reset_count(ctx, true);
ctx->global_reset_count = fd_get_reset_count(ctx, false);

View File

@@ -27,6 +27,8 @@
#ifndef FREEDRENO_CONTEXT_H_
#define FREEDRENO_CONTEXT_H_
#include <libsync.h>
#include "pipe/p_context.h"
#include "indices/u_primconvert.h"
#include "util/u_blitter.h"
@@ -259,6 +261,21 @@ struct fd_context {
*/
struct pipe_fence_handle *last_fence;
/* Fence fd we are told to wait on via ->fence_server_sync() (or -1
* if none). The in-fence is transferred over to the batch on the
* next draw/blit/grid.
*
* The reason for this extra complexity is that apps will typically
* do eglWaitSyncKHR()/etc at the beginning of the frame, before the
* first draw. But mesa/st doesn't flush down framebuffer state
* change until we hit a draw, so at ->fence_server_sync() time, we
* don't yet have the correct batch. If we created a batch at that
* point, it would be the wrong one, and we'd have to flush it pre-
* maturely, causing us to stall early in the frame where we could
* be building up cmdstream.
*/
int in_fence_fd;
/* track last known reset status globally and per-context to
* determine if more resets occurred since then. If global reset
* count increases, it means some other context crashed. If
@@ -478,6 +495,34 @@ fd_supported_prim(struct fd_context *ctx, unsigned prim)
return (1 << prim) & ctx->primtype_mask;
}
/**
* If we have a pending fence_server_sync() (GPU side sync), flush now.
* The alternative to try to track this with batch dependencies gets
* hairy quickly.
*
* Call this before switching to a different batch, to handle this case.
*/
static inline void
fd_context_switch_from(struct fd_context *ctx)
{
if (ctx->batch && (ctx->batch->in_fence_fd != -1))
fd_batch_flush(ctx->batch);
}
/**
 * If the context holds a pending fence-fd from fence_server_sync(),
 * transfer that sync obligation to the batch we are about to render to.
 */
static inline void
fd_context_switch_to(struct fd_context *ctx, struct fd_batch *batch)
{
   if (ctx->in_fence_fd == -1)
      return;

   /* Merge the context's in-fence into the batch's in-fence and
    * clear it on the context so it is only consumed once.
    */
   sync_accumulate("freedreno", &batch->in_fence_fd, ctx->in_fence_fd);
   ctx->in_fence_fd = -1;
}
static inline struct fd_batch *
fd_context_batch(struct fd_context *ctx)
{
@@ -488,6 +533,7 @@ fd_context_batch(struct fd_context *ctx)
ctx->batch = batch;
fd_context_all_dirty(ctx);
}
fd_context_switch_to(ctx, ctx->batch);
return ctx->batch;
}

View File

@@ -155,7 +155,6 @@ void fd_fence_server_sync(struct pipe_context *pctx,
struct pipe_fence_handle *fence)
{
struct fd_context *ctx = fd_context(pctx);
struct fd_batch *batch = fd_context_batch(ctx);
fence_flush(fence);
@@ -163,7 +162,7 @@ void fd_fence_server_sync(struct pipe_context *pctx,
if (fence->fence_fd == -1)
return;
if (sync_accumulate("freedreno", &batch->in_fence_fd, fence->fence_fd)) {
if (sync_accumulate("freedreno", &ctx->in_fence_fd, fence->fence_fd)) {
/* error */
}
}

View File

@@ -220,6 +220,8 @@ fd_set_framebuffer_state(struct pipe_context *pctx,
framebuffer->width, framebuffer->height,
framebuffer->layers, framebuffer->samples);
fd_context_switch_from(ctx);
cso = &ctx->framebuffer;
if (util_framebuffer_state_equal(cso, framebuffer))