freedreno/drm: Return fence from submit flush
This moves away from embedding the submit fence inside the pipe fence,
which lets us start refcnt'ing the fence. This will enable several
cleanups and improvements:

1. Get rid of fd_bo_fence, and just have fd_bo hold pending fd_fence
   refs instead, which will be needed for the cpu_prep implementation
   of sub-allocated buffers.

2. For merged submits, we can just return a new reference to an
   existing fence.

Note that this temporarily defeats submit-merging, which will be fixed
(and improved) in a following commit.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/20263>
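At the API level, the change replaces the caller-provided embedded fence with a refcounted fence returned from flush. A minimal sketch of the new caller pattern, using only names that appear in the diff below (error handling elided):

    static void
    flush_and_wait(struct fd_submit *submit)
    {
       /* flush now returns a new reference to a refcounted fence: */
       struct fd_fence *fence = fd_submit_flush(submit, -1, false);

       /* block until the submit is actually flushed to the kernel: */
       fd_fence_flush(fence);

       /* drop the caller's reference: */
       fd_fence_del(fence);
    }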
@@ -281,12 +281,10 @@ main(int argc, char **argv)
    backend->emit_grid(kernel, grid, submit);
 
-   struct fd_fence fence = {};
-   util_queue_fence_init(&fence.ready);
-
-   fd_submit_flush(submit, -1, &fence);
-
-   util_queue_fence_wait(&fence.ready);
+   struct fd_fence *fence = fd_submit_flush(submit, -1, false);
+
+   fd_fence_flush(fence);
+   fd_fence_del(fence);
 
    for (int i = 0; i < kernel->num_bufs; i++) {
       fd_bo_cpu_prep(kernel->bufs[i], pipe, FD_BO_PREP_READ);
@@ -99,6 +99,14 @@ fd_fence_after(uint32_t a, uint32_t b)
  * fd_pipe_wait(). So this struct encapsulates the two.
  */
 struct fd_fence {
+   /**
+    * Note refcnt is *not* atomic, but protected by fence_lock, since the
+    * fence_lock is held in fd_bo_add_fence(), which is the hotpath.
+    */
+   int32_t refcnt;
+
+   struct fd_pipe *pipe;
+
    /**
     * The ready fence is signaled once the submit is actually flushed down
     * to the kernel, and fence/fence_fd are populated. You must wait for
@@ -116,6 +124,13 @@ struct fd_fence {
    bool use_fence_fd;
 };
 
+struct fd_fence *fd_fence_new(struct fd_pipe *pipe, bool use_fence_fd);
+struct fd_fence *fd_fence_ref(struct fd_fence *f);
+struct fd_fence *fd_fence_ref_locked(struct fd_fence *f);
+void fd_fence_del(struct fd_fence *f);
+void fd_fence_del_locked(struct fd_fence *f);
+void fd_fence_flush(struct fd_fence *f);
+
 /*
  * bo flags:
  */
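The refcnt is deliberately not atomic: every ref/unref happens under fence_lock, and the _locked variants let code that already holds the lock avoid re-taking it. A hypothetical sketch of cleanup #1 from the commit message (fd_bo holding pending fd_fence refs; the bo->fence field is invented for illustration and is not part of this commit):

    static void
    bo_add_fence(struct fd_bo *bo, struct fd_fence *fence)
    {
       simple_mtx_lock(&fence_lock);
       bo->fence = fd_fence_ref_locked(fence);  /* hypothetical field */
       simple_mtx_unlock(&fence_lock);
    }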
@@ -211,3 +211,73 @@ fd_pipe_emit_fence(struct fd_pipe *pipe, struct fd_ringbuffer *ring)
 
    return fence;
 }
+
+struct fd_fence *
+fd_fence_new(struct fd_pipe *pipe, bool use_fence_fd)
+{
+   struct fd_fence *f = calloc(1, sizeof(*f));
+
+   f->refcnt = 1;
+   f->pipe = fd_pipe_ref(pipe);
+   util_queue_fence_init(&f->ready);
+   f->use_fence_fd = use_fence_fd;
+   f->fence_fd = -1;
+
+   return f;
+}
+
+struct fd_fence *
+fd_fence_ref(struct fd_fence *f)
+{
+   simple_mtx_lock(&fence_lock);
+   fd_fence_ref_locked(f);
+   simple_mtx_unlock(&fence_lock);
+
+   return f;
+}
+
+struct fd_fence *
+fd_fence_ref_locked(struct fd_fence *f)
+{
+   simple_mtx_assert_locked(&fence_lock);
+   f->refcnt++;
+   return f;
+}
+
+void
+fd_fence_del(struct fd_fence *f)
+{
+   simple_mtx_lock(&fence_lock);
+   fd_fence_del_locked(f);
+   simple_mtx_unlock(&fence_lock);
+}
+
+void
+fd_fence_del_locked(struct fd_fence *f)
+{
+   simple_mtx_assert_locked(&fence_lock);
+
+   if (--f->refcnt)
+      return;
+
+   fd_pipe_del_locked(f->pipe);
+
+   if (f->use_fence_fd && (f->fence_fd != -1))
+      close(f->fence_fd);
+
+   free(f);
+}
+
+/**
+ * Wait until corresponding submit is flushed to kernel
+ */
+void
+fd_fence_flush(struct fd_fence *f)
+{
+   /*
+    * TODO we could simplify this to remove the flush_sync part of
+    * fd_pipe_sp_flush() and just rely on the util_queue_fence_wait()
+    */
+   fd_pipe_flush(f->pipe, f->ufence);
+   util_queue_fence_wait(&f->ready);
+}
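Note that a fence tracks two separate events: fd_fence_flush() only waits for the CPU-side event (the submit reaching the kernel, at which point kfence/fence_fd are valid); waiting for the GPU to finish is a separate step. A sketch of a full CPU wait, following the pattern the perfcntr hunk below uses:

    static void
    wait_gpu_idle(struct fd_pipe *pipe, struct fd_fence *fence)
    {
       fd_fence_flush(fence);     /* kfence/fence_fd are now populated */
       fd_pipe_wait(pipe, fence); /* wait for the GPU to retire it */
    }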
@@ -292,8 +292,8 @@ struct fd_submit_funcs {
    struct fd_ringbuffer *(*new_ringbuffer)(struct fd_submit *submit,
                                            uint32_t size,
                                            enum fd_ringbuffer_flags flags);
-   int (*flush)(struct fd_submit *submit, int in_fence_fd,
-                struct fd_fence *out_fence);
+   struct fd_fence *(*flush)(struct fd_submit *submit, int in_fence_fd,
+                             bool use_fence_fd);
    void (*destroy)(struct fd_submit *submit);
 };
 
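Each backend's flush now allocates and returns the fence itself. A rough sketch of the shape every backend below follows (do_kernel_submit() is a made-up placeholder for the backend-specific ioctl path):

    static struct fd_fence *
    example_submit_flush(struct fd_submit *submit, int in_fence_fd,
                         bool use_fence_fd)
    {
       struct fd_fence *out_fence = fd_fence_new(submit->pipe, use_fence_fd);

       if (do_kernel_submit(submit, in_fence_fd, out_fence)) {
          /* failure is now signaled by returning NULL: */
          fd_fence_del(out_fence);
          return NULL;
       }

       return out_fence;
    }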
@@ -60,12 +60,11 @@ fd_submit_ref(struct fd_submit *submit)
    return submit;
 }
 
-int
-fd_submit_flush(struct fd_submit *submit, int in_fence_fd,
-                struct fd_fence *out_fence)
+struct fd_fence *
+fd_submit_flush(struct fd_submit *submit, int in_fence_fd, bool use_fence_fd)
 {
    submit->fence = fd_pipe_emit_fence(submit->pipe, submit->primary);
-   return submit->funcs->flush(submit, in_fence_fd, out_fence);
+   return submit->funcs->flush(submit, in_fence_fd, use_fence_fd);
 }
 
 struct fd_ringbuffer *
@@ -93,10 +93,10 @@ struct fd_ringbuffer *fd_submit_new_ringbuffer(struct fd_submit *submit,
                                                enum fd_ringbuffer_flags flags);
 
 /* in_fence_fd: -1 for no in-fence, else fence fd
- * out_fence can be NULL if no output fence is required
+ * if use_fence_fd is true the output fence will be dma_fence fd backed
  */
-int fd_submit_flush(struct fd_submit *submit, int in_fence_fd,
-                    struct fd_fence *out_fence);
+struct fd_fence *fd_submit_flush(struct fd_submit *submit, int in_fence_fd,
+                                 bool use_fence_fd);
 
 struct fd_ringbuffer;
 struct fd_reloc;
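Callers that previously checked an int return code now check for a NULL fence, e.g. (the pattern the tool hunk further down uses):

    struct fd_fence *fence = fd_submit_flush(submit, -1, false);
    if (!fence)
       errx(1, "submit failed");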
@@ -186,7 +186,7 @@ fd_submit_sp_flush_prep(struct fd_submit *submit, int in_fence_fd,
    }
    simple_mtx_unlock(&fence_lock);
 
-   fd_submit->out_fence = out_fence;
+   fd_submit->out_fence = fd_fence_ref(out_fence);
    fd_submit->in_fence_fd = (in_fence_fd == -1) ?
          -1 : os_dupfd_cloexec(in_fence_fd);
 
@@ -218,7 +218,7 @@ fd_submit_sp_flush_cleanup(void *job, void *gdata, int thread_index)
    fd_submit_del(submit);
 }
 
-static int
+static void
 enqueue_submit_list(struct list_head *submit_list)
 {
    struct fd_submit *submit = last_submit(submit_list);
@@ -227,13 +227,7 @@ enqueue_submit_list(struct list_head *submit_list)
    list_replace(submit_list, &fd_submit->submit_list);
    list_inithead(submit_list);
 
-   struct util_queue_fence *fence;
-   if (fd_submit->out_fence) {
-      fence = &fd_submit->out_fence->ready;
-   } else {
-      util_queue_fence_init(&fd_submit->fence);
-      fence = &fd_submit->fence;
-   }
+   struct util_queue_fence *fence = &fd_submit->out_fence->ready;
 
    DEBUG_MSG("enqueue: %u", submit->fence);
 
@@ -242,8 +236,6 @@ enqueue_submit_list(struct list_head *submit_list)
                       fd_submit_sp_flush_execute,
                       fd_submit_sp_flush_cleanup,
                       0);
-
-   return 0;
 }
 
 static bool
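The separate fd_submit->fence fallback could be dropped because a flush now always produces an out_fence, whose ready fence doubles as the util_queue job fence. Assuming mesa's util_queue semantics, the queue signals that fence when the deferred flush job completes, which is exactly what fd_fence_flush() waits on; a sketch of the enqueue call with the fence spelled out (queue name assumed from context):

    util_queue_add_job(&dev->submit_queue, fd_submit,
                       &fd_submit->out_fence->ready,
                       fd_submit_sp_flush_execute,
                       fd_submit_sp_flush_cleanup, 0);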
@@ -266,9 +258,8 @@ should_defer(struct fd_submit *submit)
    return true;
 }
 
-static int
-fd_submit_sp_flush(struct fd_submit *submit, int in_fence_fd,
-                   struct fd_fence *out_fence)
+static struct fd_fence *
+fd_submit_sp_flush(struct fd_submit *submit, int in_fence_fd, bool use_fence_fd)
 {
    struct fd_device *dev = submit->pipe->dev;
    struct fd_pipe *pipe = submit->pipe;
@@ -295,6 +286,8 @@ fd_submit_sp_flush(struct fd_submit *submit, int in_fence_fd,
 
    list_addtail(&fd_submit_ref(submit)->node, &dev->deferred_submits);
 
+   struct fd_fence *out_fence = fd_fence_new(submit->pipe, use_fence_fd);
+
    bool has_shared = fd_submit_sp_flush_prep(submit, in_fence_fd, out_fence);
 
    /* The rule about skipping submit merging with shared buffers is only
@@ -318,7 +311,7 @@ fd_submit_sp_flush(struct fd_submit *submit, int in_fence_fd,
       assert(dev->deferred_cmds == fd_dev_count_deferred_cmds(dev));
       simple_mtx_unlock(&dev->submit_lock);
 
-      return 0;
+      return out_fence;
    }
 
    struct list_head submit_list;
@@ -329,7 +322,9 @@ fd_submit_sp_flush(struct fd_submit *submit, int in_fence_fd,
 
    simple_mtx_unlock(&dev->submit_lock);
 
-   return enqueue_submit_list(&submit_list);
+   enqueue_submit_list(&submit_list);
+
+   return out_fence;
 }
 
 void
@@ -400,6 +395,9 @@ fd_submit_sp_destroy(struct fd_submit *submit)
 
    fd_bo_del_array(fd_submit->bos, fd_submit->nr_bos);
 
+   if (fd_submit->out_fence)
+      fd_fence_del(fd_submit->out_fence);
+
    free(fd_submit->bos);
    free(fd_submit);
 }
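For reference, the resulting ref accounting in the sp backend, as a sketch derived from the hunks above:

    /*
     * fd_fence_new()                    refcnt = 1 (owned by flush caller)
     * fd_submit_sp_flush_prep() ref     refcnt = 2 (the submit's reference)
     * fd_submit_sp_destroy() del        refcnt = 1
     * caller's fd_fence_del()           freed
     */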
@@ -73,11 +73,7 @@ struct fd_submit_sp {
     */
    struct list_head submit_list;   /* includes this submit as last element */
 
-   /* Used in case out_fence==NULL: */
-   struct util_queue_fence fence;
-
    /* Used by retire_queue, if used by backend: */
-   int out_fence_fd;
    struct util_queue_fence retire_fence;
 
    flush_submit_list_fn flush_submit_list;
@@ -265,9 +265,8 @@ handle_stateobj_relocs(struct msm_submit *submit, struct msm_ringbuffer *ring)
    return relocs;
 }
 
-static int
-msm_submit_flush(struct fd_submit *submit, int in_fence_fd,
-                 struct fd_fence *out_fence)
+static struct fd_fence *
+msm_submit_flush(struct fd_submit *submit, int in_fence_fd, bool use_fence_fd)
 {
    struct msm_submit *msm_submit = to_msm_submit(submit);
    struct msm_pipe *msm_pipe = to_msm_pipe(submit->pipe);
@@ -342,6 +341,8 @@ msm_submit_flush(struct fd_submit *submit, int in_fence_fd,
       }
    }
 
+   struct fd_fence *out_fence = fd_fence_new(submit->pipe, use_fence_fd);
+
    simple_mtx_lock(&fence_lock);
    for (unsigned j = 0; j < msm_submit->nr_bos; j++) {
       fd_bo_add_fence(msm_submit->bos[j], submit->pipe, submit->fence);
@@ -353,7 +354,7 @@ msm_submit_flush(struct fd_submit *submit, int in_fence_fd,
       req.fence_fd = in_fence_fd;
    }
 
-   if (out_fence && out_fence->use_fence_fd) {
+   if (out_fence->use_fence_fd) {
       req.flags |= MSM_SUBMIT_FENCE_FD_OUT;
    }
 
@@ -368,6 +369,8 @@ msm_submit_flush(struct fd_submit *submit, int in_fence_fd,
                              sizeof(req));
    if (ret) {
       ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno));
+      fd_fence_del(out_fence);
+      out_fence = NULL;
       msm_dump_submit(&req);
    } else if (!ret && out_fence) {
       out_fence->kfence = req.fence;
@@ -378,7 +381,7 @@ msm_submit_flush(struct fd_submit *submit, int in_fence_fd,
    for (unsigned o = 0; o < nr_objs; o++)
       free(obj_relocs[o]);
 
-   return ret;
+   return out_fence;
 }
 
 static void
@@ -112,7 +112,7 @@ flush_submit_list(struct list_head *submit_list)
       req.flags |= MSM_SUBMIT_NO_IMPLICIT;
    }
 
-   if (fd_submit->out_fence && fd_submit->out_fence->use_fence_fd) {
+   if (fd_submit->out_fence->use_fence_fd) {
       req.flags |= MSM_SUBMIT_FENCE_FD_OUT;
    }
 
@@ -150,7 +150,7 @@ flush_submit_list(struct list_head *submit_list)
    if (ret) {
       ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno));
      msm_dump_submit(&req);
-   } else if (!ret && fd_submit->out_fence) {
+   } else if (!ret) {
      fd_submit->out_fence->kfence = req.fence;
      fd_submit->out_fence->ufence = fd_submit->base.fence;
      fd_submit->out_fence->fence_fd = req.fence_fd;
@@ -38,8 +38,7 @@ retire_execute(void *job, void *gdata, int thread_index)
 
    MESA_TRACE_FUNC();
 
-   sync_wait(fd_submit->out_fence_fd, -1);
-   close(fd_submit->out_fence_fd);
+   fd_fence_wait(fd_submit->out_fence);
 }
 
 static void
@@ -175,24 +174,15 @@ flush_submit_list(struct list_head *submit_list)
    memcpy(req->payload + bos_len, cmds, cmd_len);
 
    struct fd_fence *out_fence = fd_submit->out_fence;
-   int *out_fence_fd = NULL;
 
-   if (out_fence) {
-      out_fence->kfence = kfence;
-      out_fence->ufence = fd_submit->base.fence;
-      /* Even if gallium driver hasn't requested a fence-fd, request one.
-       * This way, if we have to block waiting for the fence, we can do
-       * it in the guest, rather than in the single-threaded host.
-       */
-      out_fence->use_fence_fd = true;
-      out_fence_fd = &out_fence->fence_fd;
-   } else {
-      /* we are using retire_queue, so we need an out-fence for each
-       * submit.. we can just re-use fd_submit->out_fence_fd for temporary
-       * storage.
-       */
-      out_fence_fd = &fd_submit->out_fence_fd;
-   }
+   out_fence->kfence = kfence;
+   out_fence->ufence = fd_submit->base.fence;
+
+   /* Even if gallium driver hasn't requested a fence-fd, request one.
+    * This way, if we have to block waiting for the fence, we can do
+    * it in the guest, rather than in the single-threaded host.
+    */
+   out_fence->use_fence_fd = true;
 
    if (fd_submit->in_fence_fd != -1) {
       pipe->no_implicit_sync = true;
@@ -203,7 +193,7 @@ flush_submit_list(struct list_head *submit_list)
    }
 
    virtio_execbuf_fenced(dev, &req->hdr, guest_handles, req->nr_bos,
-                         fd_submit->in_fence_fd, out_fence_fd,
+                         fd_submit->in_fence_fd, &out_fence->fence_fd,
                          virtio_pipe->ring_idx);
 
    free(req);
@@ -216,9 +206,6 @@ flush_submit_list(struct list_head *submit_list)
    if (fd_submit->in_fence_fd != -1)
       close(fd_submit->in_fence_fd);
 
-   if (out_fence_fd != &fd_submit->out_fence_fd)
-      fd_submit->out_fence_fd = os_dupfd_cloexec(*out_fence_fd);
-
    fd_submit_ref(&fd_submit->base);
 
    util_queue_fence_init(&fd_submit->retire_fence);
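Since virtio now unconditionally requests a fence fd, a CPU wait can be done guest-side instead of round-tripping to the single-threaded host; a sketch, assuming libsync's sync_wait() as used elsewhere in this commit:

    fd_fence_flush(fence);          /* fence->fence_fd is now valid */
    sync_wait(fence->fence_fd, -1); /* block in the guest */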
@@ -412,18 +412,16 @@ FreedrenoDriver::configure_counters(bool reset, bool wait)
    for (const auto &countable : countables)
       countable.configure(ring, reset);
 
-   struct fd_fence fence = {};
-   util_queue_fence_init(&fence.ready);
+   struct fd_fence *fence = fd_submit_flush(submit, -1, false);
 
-   fd_submit_flush(submit, -1, &fence);
-
-   util_queue_fence_wait(&fence.ready);
+   fd_fence_flush(fence);
+   fd_fence_del(fence);
 
    fd_ringbuffer_del(ring);
    fd_submit_del(submit);
 
    if (wait)
-      fd_pipe_wait(pipe, &fence);
+      fd_pipe_wait(pipe, fence);
 }
 
 /**
@@ -189,19 +189,16 @@ find_device(void)
 static void
 flush_ring(void)
 {
-   int ret;
-
    if (!dev.submit)
       return;
 
-   struct fd_fence fence = {};
-   util_queue_fence_init(&fence.ready);
+   struct fd_fence *fence = fd_submit_flush(dev.submit, -1, false);
 
-   ret = fd_submit_flush(dev.submit, -1, &fence);
+   if (!fence)
+      errx(1, "submit failed");
 
-   if (ret)
-      errx(1, "submit failed: %d", ret);
-
-   util_queue_fence_wait(&fence.ready);
+   fd_fence_flush(fence);
+   fd_fence_del(fence);
 
    fd_ringbuffer_del(dev.ring);
    fd_submit_del(dev.submit);
 
@@ -121,7 +121,7 @@ fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
    fd_pipe_fence_ref(&fence, batch->fence);
 
    if (flags & PIPE_FLUSH_FENCE_FD)
-      fence->submit_fence.use_fence_fd = true;
+      fence->use_fence_fd = true;
 
    fd_bc_dump(ctx, "%p: flushing %p<%u>, flags=0x%x, pending:\n", ctx,
               batch, batch->seqno, flags);
@@ -59,7 +59,8 @@ fence_flush(struct pipe_context *pctx, struct pipe_fence_handle *fence,
          }
       }
 
-      util_queue_fence_wait(&fence->submit_fence.ready);
+      if (fence->fence)
+         fd_fence_flush(fence->fence);
 
       /* We've already waited for batch to be flushed and fence->batch
        * to be cleared:
@@ -71,7 +72,8 @@ fence_flush(struct pipe_context *pctx, struct pipe_fence_handle *fence,
    if (fence->batch)
       fd_batch_flush(fence->batch);
 
-   util_queue_fence_wait(&fence->submit_fence.ready);
+   if (fence->fence)
+      fd_fence_flush(fence->fence);
 
    assert(!fence->batch);
 
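The NULL check is needed because fence->fence is only assigned once the batch is actually flushed (via fd_pipe_fence_set_submit_fence(), added below); the ordering, as a sketch:

    if (fence->batch)
       fd_batch_flush(fence->batch); /* hands a fd_fence to the pipe fence */
    if (fence->fence)
       fd_fence_flush(fence->fence); /* wait for it to reach the kernel */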
@@ -88,7 +90,7 @@ fd_pipe_fence_repopulate(struct pipe_fence_handle *fence,
    /* The fence we are re-populating must not be an fd-fence (but last_fince
     * might have been)
     */
-   assert(!fence->submit_fence.use_fence_fd);
+   assert(!fence->use_fence_fd);
    assert(!last_fence->batch);
 
    fd_pipe_fence_ref(&fence->last_fence, last_fence);
@@ -106,16 +108,11 @@ fd_fence_destroy(struct pipe_fence_handle *fence)
 
    tc_unflushed_batch_token_reference(&fence->tc_token, NULL);
 
-   /* If the submit is enqueued to the submit_queue, we need to wait until
-    * the fence_fd is valid before cleaning up.
-    */
-   util_queue_fence_wait(&fence->submit_fence.ready);
-
-   if (fence->submit_fence.use_fence_fd)
-      close(fence->submit_fence.fence_fd);
    if (fence->syncobj)
       drmSyncobjDestroy(fd_device_fd(fence->screen->dev), fence->syncobj);
    fd_pipe_del(fence->pipe);
+   if (fence->fence)
+      fd_fence_del(fence->fence);
 
    FREE(fence);
 }
@@ -147,12 +144,13 @@ fd_pipe_fence_finish(struct pipe_screen *pscreen, struct pipe_context *pctx,
    if (fence->last_fence)
       fence = fence->last_fence;
 
-   if (fence->submit_fence.use_fence_fd) {
-      int ret = sync_wait(fence->submit_fence.fence_fd, timeout / 1000000);
+   if (fence->use_fence_fd) {
+      assert(fence->fence);
+      int ret = sync_wait(fence->fence->fence_fd, timeout / 1000000);
       return ret == 0;
    }
 
-   if (fd_pipe_wait_timeout(fence->pipe, &fence->submit_fence, timeout))
+   if (fd_pipe_wait_timeout(fence->pipe, fence->fence, timeout))
       return false;
 
    return true;
@@ -170,16 +168,19 @@ fence_create(struct fd_context *ctx, struct fd_batch *batch, int fence_fd,
 
    pipe_reference_init(&fence->reference, 1);
    util_queue_fence_init(&fence->ready);
-   util_queue_fence_init(&fence->submit_fence.ready);
 
    fence->ctx = ctx;
    fd_pipe_fence_set_batch(fence, batch);
    fence->pipe = fd_pipe_ref(ctx->pipe);
    fence->screen = ctx->screen;
-   fence->submit_fence.fence_fd = fence_fd;
-   fence->submit_fence.use_fence_fd = (fence_fd != -1);
+   fence->use_fence_fd = (fence_fd != -1);
    fence->syncobj = syncobj;
 
+   if (fence_fd != -1) {
+      fence->fence = fd_fence_new(fence->pipe, fence->use_fence_fd);
+      fence->fence->fence_fd = fence_fd;
+   }
+
    return fence;
 }
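Imported fence fds are now wrapped in a backing fd_fence, so external and driver-created fences flow through the same paths. A hypothetical import call (the fence_create() argument list is abbreviated from the hunk above, and external_fd is illustrative):

    struct pipe_fence_handle *f =
       fence_create(ctx, NULL, os_dupfd_cloexec(external_fd), 0);
    assert(f->use_fence_fd && f->fence);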
@@ -227,10 +228,11 @@ fd_pipe_fence_server_sync(struct pipe_context *pctx, struct pipe_fence_handle *f
    }
 
    /* if not an external fence, then nothing more to do without preemption: */
-   if (!fence->submit_fence.use_fence_fd)
+   if (!fence->use_fence_fd)
       return;
 
-   if (sync_accumulate("freedreno", &ctx->in_fence_fd, fence->submit_fence.fence_fd)) {
+   assert(fence->fence);
+   if (sync_accumulate("freedreno", &ctx->in_fence_fd, fence->fence->fence_fd)) {
       /* error */
    }
 }
@@ -252,20 +254,21 @@ fd_pipe_fence_get_fd(struct pipe_screen *pscreen, struct pipe_fence_handle *fenc
    /* We don't expect deferred flush to be combined with fence-fd: */
    assert(!fence->last_fence);
 
-   assert(fence->submit_fence.use_fence_fd);
+   assert(fence->use_fence_fd);
 
    /* NOTE: in the deferred fence case, the pctx we want is the threaded-ctx
    * but if TC is not used, this will be null. Which is fine, we won't call
    * threaded_context_flush() in that case
    */
    fence_flush(&fence->ctx->tc->base, fence, PIPE_TIMEOUT_INFINITE);
-   return os_dupfd_cloexec(fence->submit_fence.fence_fd);
+   assert(fence->fence);
+   return os_dupfd_cloexec(fence->fence->fence_fd);
 }
 
 bool
 fd_pipe_fence_is_fd(struct pipe_fence_handle *fence)
 {
-   return fence->submit_fence.use_fence_fd;
+   return fence->use_fence_fd;
 }
 
 struct pipe_fence_handle *
@@ -294,6 +297,16 @@ fd_pipe_fence_set_batch(struct pipe_fence_handle *fence, struct fd_batch *batch)
    }
 }
 
+void
+fd_pipe_fence_set_submit_fence(struct pipe_fence_handle *fence,
+                               struct fd_fence *submit_fence)
+{
+   /* Take ownership of the drm fence after batch/submit is flushed: */
+   assert(!fence->fence);
+   fence->fence = submit_fence;
+   fd_pipe_fence_set_batch(fence, NULL);
+}
+
 struct pipe_fence_handle *
 fd_pipe_fence_create_unflushed(struct pipe_context *pctx,
                                struct tc_unflushed_batch_token *tc_token)
|
@@ -30,7 +30,7 @@
|
||||
#include "pipe/p_context.h"
|
||||
#include "util/u_queue.h"
|
||||
|
||||
#include "drm/freedreno_ringbuffer.h"
|
||||
#include "drm/freedreno_drmif.h"
|
||||
|
||||
struct pipe_fence_handle {
|
||||
struct pipe_reference reference;
|
||||
@@ -73,7 +73,9 @@ struct pipe_fence_handle {
    struct fd_context *ctx;
    struct fd_pipe *pipe;
    struct fd_screen *screen;
-   struct fd_fence submit_fence;
+   struct fd_fence *fence;
+
+   bool use_fence_fd;
    uint32_t syncobj;
 };
 
||||
@@ -99,6 +101,8 @@ struct pipe_fence_handle *fd_pipe_fence_create(struct fd_batch *batch);
|
||||
|
||||
void fd_pipe_fence_set_batch(struct pipe_fence_handle *fence,
|
||||
struct fd_batch *batch);
|
||||
void fd_pipe_fence_set_submit_fence(struct pipe_fence_handle *fence,
|
||||
struct fd_fence *submit_fence);
|
||||
|
||||
struct tc_unflushed_batch_token;
|
||||
struct pipe_fence_handle *
|
||||
|
@@ -671,11 +671,18 @@ flush_ring(struct fd_batch *batch)
    if (FD_DBG(NOHW))
       return;
 
-   fd_submit_flush(batch->submit, batch->in_fence_fd,
-                   batch->fence ? &batch->fence->submit_fence : NULL);
-
+   bool use_fence_fd = false;
    if (batch->fence)
-      fd_pipe_fence_set_batch(batch->fence, NULL);
+      use_fence_fd = batch->fence->use_fence_fd;
+
+   struct fd_fence *fence =
+      fd_submit_flush(batch->submit, batch->in_fence_fd, use_fence_fd);
+
+   if (batch->fence) {
+      fd_pipe_fence_set_submit_fence(batch->fence, fence);
+   } else {
+      fd_fence_del(fence);
+   }
 }
 
 void
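Putting the gallium pieces together, the ownership flow for a batch flush, as a sketch derived from the hunks above:

    /*
     * fd_submit_flush()                 returns a fence; that ref is owned
     *                                   by flush_ring()
     * fd_pipe_fence_set_submit_fence()  the pipe fence takes over the ref
     *                                   (or fd_fence_del() if there is no
     *                                   pipe fence)
     * fd_fence_destroy()                drops it via fd_fence_del()
     */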