gallium/u_queue: add optional cleanup callback
Adds a second optional cleanup callback, called after the fence is signaled. This is needed if, for example, the queue has the last reference to the object that embeds the util_queue_fence. In this case we cannot drop the ref in the main callback, since that would result in the fence being destroyed before it is signaled.

Signed-off-by: Rob Clark <robdclark@gmail.com>
Reviewed-by: Marek Olšák <marek.olsak@amd.com>
Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
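For illustration, a minimal sketch of how a caller can use the new callback (the foo_job type, its refcounting, and the queue variable below are hypothetical, not part of this commit; only the util_queue calls are real): the last reference to the object that embeds the fence is dropped in the cleanup callback, which the worker thread invokes only after util_queue_fence_signal(), so the fence is never destroyed before it is signaled.

    #include <stdlib.h>
    #include "util/u_queue.h"

    /* Hypothetical job object that embeds its completion fence and may be
     * kept alive only by the reference the queue holds. */
    struct foo_job {
       int refcount;
       struct util_queue_fence fence;
       /* ... payload ... */
    };

    static void foo_job_unref(struct foo_job *job)
    {
       /* hypothetical (non-atomic) refcounting, for illustration only */
       if (--job->refcount == 0)
          free(job);   /* frees the storage of the embedded fence too */
    }

    static void foo_job_execute(void *data, int thread_index)
    {
       struct foo_job *job = data;
       /* do the actual work here; must NOT drop the last reference,
        * because the fence inside *job has not been signaled yet */
       (void)job;
    }

    static void foo_job_cleanup(void *data, int thread_index)
    {
       /* runs after util_queue_fence_signal(), so dropping the last
        * reference (and freeing the fence's storage) is safe here */
       foo_job_unref(data);
    }

    /* queuing side (queue and job are hypothetical): */
    /*    util_queue_add_job(&queue, job, &job->fence,
     *                       foo_job_execute, foo_job_cleanup);          */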
@@ -91,6 +91,8 @@ static PIPE_THREAD_ROUTINE(util_queue_thread_func, input)
       if (job.job) {
          job.execute(job.job, thread_index);
          util_queue_fence_signal(job.fence);
+         if (job.cleanup)
+            job.cleanup(job.job, thread_index);
       }
    }
 
@@ -213,7 +215,8 @@ void
 util_queue_add_job(struct util_queue *queue,
                    void *job,
                    struct util_queue_fence *fence,
-                   util_queue_execute_func execute)
+                   util_queue_execute_func execute,
+                   util_queue_execute_func cleanup)
 {
    struct util_queue_job *ptr;
 
@@ -232,6 +235,7 @@ util_queue_add_job(struct util_queue *queue,
    ptr->job = job;
    ptr->fence = fence;
    ptr->execute = execute;
+   ptr->cleanup = cleanup;
    queue->write_idx = (queue->write_idx + 1) % queue->max_jobs;
 
    queue->num_queued++;
@@ -50,6 +50,7 @@ struct util_queue_job {
    void *job;
    struct util_queue_fence *fence;
    util_queue_execute_func execute;
+   util_queue_execute_func cleanup;
 };
 
 /* Put this into your context. */
@@ -75,10 +76,13 @@ void util_queue_destroy(struct util_queue *queue);
 void util_queue_fence_init(struct util_queue_fence *fence);
 void util_queue_fence_destroy(struct util_queue_fence *fence);
 
+/* optional cleanup callback is called after fence is signaled: */
 void util_queue_add_job(struct util_queue *queue,
                         void *job,
                         struct util_queue_fence *fence,
-                        util_queue_execute_func execute);
+                        util_queue_execute_func execute,
+                        util_queue_execute_func cleanup);
+
 void util_queue_job_wait(struct util_queue_fence *fence);
 
 /* util_queue needs to be cleared to zeroes for this to work */
@@ -1330,7 +1330,8 @@ static void *si_create_shader_selector(struct pipe_context *ctx,
 		si_init_shader_selector_async(sel, -1);
 	else
 		util_queue_add_job(&sscreen->shader_compiler_queue, sel,
-				   &sel->ready, si_init_shader_selector_async);
+				   &sel->ready, si_init_shader_selector_async,
+				   NULL);
 
 	return sel;
 }
@@ -1058,7 +1058,7 @@ static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
    if ((flags & RADEON_FLUSH_ASYNC) &&
        util_queue_is_initialized(&ws->cs_queue)) {
       util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
-                         amdgpu_cs_submit_ib);
+                         amdgpu_cs_submit_ib, NULL);
    } else {
       amdgpu_cs_submit_ib(cs, 0);
       error_code = cs->cst->error_code;
@@ -587,7 +587,7 @@ static int radeon_drm_cs_flush(struct radeon_winsys_cs *rcs,
 
     if (util_queue_is_initialized(&cs->ws->cs_queue)) {
         util_queue_add_job(&cs->ws->cs_queue, cs, &cs->flush_completed,
-                           radeon_drm_cs_emit_ioctl_oneshot);
+                           radeon_drm_cs_emit_ioctl_oneshot, NULL);
         if (!(flags & RADEON_FLUSH_ASYNC))
             radeon_drm_cs_sync_flush(rcs);
     } else {