util: Replace all usage of PIPE_TIMEOUT_INFINITE with OS_TIMEOUT_INFINITE
They are exactly the same, so it's safe to do the replacement. Also generate the OS_TIMEOUT_INFINITE var with rusticl_mesa_bindings_rs via the OS_ prefix, and include "util/os_time.h" in rusticl/rusticl_mesa_bindings.h.

Reviewed-by: Jesse Natalie <jenatali@microsoft.com>
Signed-off-by: Yonggang Luo <luoyonggang@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/23401>
This commit is contained in:
@@ -339,7 +339,7 @@ v3dv_bo_map(struct v3dv_device *device, struct v3dv_bo *bo, uint32_t size)
|
||||
if (!ok)
|
||||
return false;
|
||||
|
||||
ok = v3dv_bo_wait(device, bo, PIPE_TIMEOUT_INFINITE);
|
||||
ok = v3dv_bo_wait(device, bo, OS_TIMEOUT_INFINITE);
|
||||
if (!ok) {
|
||||
fprintf(stderr, "memory wait for map failed\n");
|
||||
return false;
|
||||
|
@@ -3873,7 +3873,7 @@ v3dv_cmd_buffer_rewrite_indirect_csd_job(
|
||||
/* Make sure the GPU is not currently accessing the indirect CL for this
|
||||
* job, since we are about to overwrite some of the uniform data.
|
||||
*/
|
||||
v3dv_bo_wait(job->device, job->indirect.bo, PIPE_TIMEOUT_INFINITE);
|
||||
v3dv_bo_wait(job->device, job->indirect.bo, OS_TIMEOUT_INFINITE);
|
||||
|
||||
for (uint32_t i = 0; i < 3; i++) {
|
||||
if (info->wg_uniform_offsets[i]) {
|
||||
|
@@ -135,7 +135,7 @@ handle_reset_query_cpu_job(struct v3dv_queue *queue, struct v3dv_job *job,
|
||||
* we handle those in the CPU.
|
||||
*/
|
||||
if (info->pool->query_type == VK_QUERY_TYPE_OCCLUSION)
|
||||
v3dv_bo_wait(job->device, info->pool->occlusion.bo, PIPE_TIMEOUT_INFINITE);
|
||||
v3dv_bo_wait(job->device, info->pool->occlusion.bo, OS_TIMEOUT_INFINITE);
|
||||
|
||||
if (info->pool->query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
|
||||
struct vk_sync_wait waits[info->count];
|
||||
@@ -392,7 +392,7 @@ handle_csd_indirect_cpu_job(struct v3dv_queue *queue,
|
||||
|
||||
/* Make sure the GPU is no longer using the indirect buffer*/
|
||||
assert(info->buffer && info->buffer->mem && info->buffer->mem->bo);
|
||||
v3dv_bo_wait(queue->device, info->buffer->mem->bo, PIPE_TIMEOUT_INFINITE);
|
||||
v3dv_bo_wait(queue->device, info->buffer->mem->bo, OS_TIMEOUT_INFINITE);
|
||||
|
||||
/* Map the indirect buffer and read the dispatch parameters */
|
||||
assert(info->buffer && info->buffer->mem && info->buffer->mem->bo);
|
||||
|
@@ -223,7 +223,7 @@ haiku_swap_buffers(_EGLDisplay *disp, _EGLSurface *surf)
|
||||
struct pipe_fence_handle *new_fence = NULL;
|
||||
st_context_flush(st, ST_FLUSH_FRONT, &new_fence, NULL, NULL);
|
||||
if (hgl_surf->throttle_fence) {
|
||||
screen->fence_finish(screen, NULL, hgl_surf->throttle_fence, PIPE_TIMEOUT_INFINITE);
|
||||
screen->fence_finish(screen, NULL, hgl_surf->throttle_fence, OS_TIMEOUT_INFINITE);
|
||||
screen->fence_reference(screen, &hgl_surf->throttle_fence, NULL);
|
||||
}
|
||||
hgl_surf->throttle_fence = new_fence;
|
||||
|
@@ -73,7 +73,7 @@ msm_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
|
||||
.op = op,
|
||||
};
|
||||
|
||||
get_abs_timeout(&req.timeout, PIPE_TIMEOUT_INFINITE);
|
||||
get_abs_timeout(&req.timeout, OS_TIMEOUT_INFINITE);
|
||||
|
||||
return drmCommandWrite(bo->dev->fd, DRM_MSM_GEM_CPU_PREP, &req, sizeof(req));
|
||||
}
|
||||
|
@@ -106,7 +106,7 @@ get_abs_timeout(struct drm_msm_timespec *tv, uint64_t ns)
|
||||
{
|
||||
struct timespec t;
|
||||
|
||||
if (ns == PIPE_TIMEOUT_INFINITE)
|
||||
if (ns == OS_TIMEOUT_INFINITE)
|
||||
ns = 3600ULL * NSEC_PER_SEC; /* 1 hour timeout is almost infinite */
|
||||
|
||||
clock_gettime(CLOCK_MONOTONIC, &t);
|
||||
|
@@ -145,7 +145,7 @@ virtio_pipe_wait(struct fd_pipe *pipe, const struct fd_fence *fence, uint64_t ti
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if ((timeout != PIPE_TIMEOUT_INFINITE) &&
|
||||
if ((timeout != OS_TIMEOUT_INFINITE) &&
|
||||
(os_time_get_nano() >= end_time))
|
||||
break;
|
||||
|
||||
|
@@ -338,7 +338,7 @@ util_wait_for_idle(struct pipe_context *ctx)
|
||||
struct pipe_fence_handle *fence = NULL;
|
||||
|
||||
ctx->flush(ctx, &fence, 0);
|
||||
ctx->screen->fence_finish(ctx->screen, NULL, fence, PIPE_TIMEOUT_INFINITE);
|
||||
ctx->screen->fence_finish(ctx->screen, NULL, fence, OS_TIMEOUT_INFINITE);
|
||||
}
|
||||
|
||||
void
|
||||
@@ -424,7 +424,7 @@ util_throttle_memory_usage(struct pipe_context *pipe,
|
||||
|
||||
/* Wait for the fence to decrease memory usage. */
|
||||
if (fence) {
|
||||
screen->fence_finish(screen, pipe, *fence, PIPE_TIMEOUT_INFINITE);
|
||||
screen->fence_finish(screen, pipe, *fence, OS_TIMEOUT_INFINITE);
|
||||
screen->fence_reference(screen, fence, NULL);
|
||||
}
|
||||
|
||||
@@ -452,7 +452,7 @@ util_throttle_memory_usage(struct pipe_context *pipe,
|
||||
t->wait_index = (t->wait_index + 1) % ring_size;
|
||||
|
||||
assert(*fence);
|
||||
screen->fence_finish(screen, pipe, *fence, PIPE_TIMEOUT_INFINITE);
|
||||
screen->fence_finish(screen, pipe, *fence, OS_TIMEOUT_INFINITE);
|
||||
screen->fence_reference(screen, fence, NULL);
|
||||
}
|
||||
|
||||
|
@@ -181,7 +181,7 @@ d3d12_reset_batch(struct d3d12_context *ctx, struct d3d12_batch *batch, uint64_t
|
||||
void
|
||||
d3d12_destroy_batch(struct d3d12_context *ctx, struct d3d12_batch *batch)
|
||||
{
|
||||
d3d12_reset_batch(ctx, batch, PIPE_TIMEOUT_INFINITE);
|
||||
d3d12_reset_batch(ctx, batch, OS_TIMEOUT_INFINITE);
|
||||
batch->cmdalloc->Release();
|
||||
d3d12_descriptor_heap_free(batch->sampler_heap);
|
||||
d3d12_descriptor_heap_free(batch->view_heap);
|
||||
@@ -201,7 +201,7 @@ d3d12_start_batch(struct d3d12_context *ctx, struct d3d12_batch *batch)
|
||||
ID3D12DescriptorHeap* heaps[2] = { d3d12_descriptor_heap_get(batch->view_heap),
|
||||
d3d12_descriptor_heap_get(batch->sampler_heap) };
|
||||
|
||||
d3d12_reset_batch(ctx, batch, PIPE_TIMEOUT_INFINITE);
|
||||
d3d12_reset_batch(ctx, batch, OS_TIMEOUT_INFINITE);
|
||||
|
||||
/* Create or reset global command list */
|
||||
if (ctx->cmdlist) {
|
||||
|
@@ -2002,9 +2002,9 @@ d3d12_flush_cmdlist_and_wait(struct d3d12_context *ctx)
|
||||
struct d3d12_batch *batch = d3d12_current_batch(ctx);
|
||||
|
||||
d3d12_foreach_submitted_batch(ctx, old_batch)
|
||||
d3d12_reset_batch(ctx, old_batch, PIPE_TIMEOUT_INFINITE);
|
||||
d3d12_reset_batch(ctx, old_batch, OS_TIMEOUT_INFINITE);
|
||||
d3d12_flush_cmdlist(ctx);
|
||||
d3d12_reset_batch(ctx, batch, PIPE_TIMEOUT_INFINITE);
|
||||
d3d12_reset_batch(ctx, batch, OS_TIMEOUT_INFINITE);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@@ -50,7 +50,7 @@ d3d12_fence_create_event(int *fd)
|
||||
inline bool
|
||||
d3d12_fence_wait_event(HANDLE event, int event_fd, uint64_t timeout_ns)
|
||||
{
|
||||
DWORD timeout_ms = (timeout_ns == PIPE_TIMEOUT_INFINITE || timeout_ns > MaxTimeoutInNs) ? INFINITE : timeout_ns / NsPerMs;
|
||||
DWORD timeout_ms = (timeout_ns == OS_TIMEOUT_INFINITE || timeout_ns > MaxTimeoutInNs) ? INFINITE : timeout_ns / NsPerMs;
|
||||
return WaitForSingleObject(event, timeout_ms) == WAIT_OBJECT_0;
|
||||
}
|
||||
#else
|
||||
@@ -75,7 +75,7 @@ d3d12_fence_create_event(int *fd)
|
||||
inline bool
|
||||
d3d12_fence_wait_event(HANDLE event, int event_fd, uint64_t timeout_ns)
|
||||
{
|
||||
int timeout_ms = (timeout_ns == PIPE_TIMEOUT_INFINITE || timeout_ns > MaxTimeoutInNs) ? -1 : timeout_ns / NsPerMs;
|
||||
int timeout_ms = (timeout_ns == OS_TIMEOUT_INFINITE || timeout_ns > MaxTimeoutInNs) ? -1 : timeout_ns / NsPerMs;
|
||||
return sync_wait(event_fd, timeout_ms) == 0;
|
||||
}
|
||||
#endif
|
||||
|
@@ -363,7 +363,7 @@ begin_subquery(struct d3d12_context *ctx, struct d3d12_query *q_parent, unsigned
|
||||
query_ensure_ready(d3d12_screen(ctx->base.screen), ctx, q_parent, false);
|
||||
d3d12_foreach_submitted_batch(ctx, old_batch) {
|
||||
if (old_batch->fence && old_batch->fence->value <= q_parent->fence_value)
|
||||
d3d12_reset_batch(ctx, old_batch, PIPE_TIMEOUT_INFINITE);
|
||||
d3d12_reset_batch(ctx, old_batch, OS_TIMEOUT_INFINITE);
|
||||
}
|
||||
|
||||
/* Accumulate current results and store in first slot */
|
||||
@@ -410,7 +410,7 @@ begin_timer_query(struct d3d12_context *ctx, struct d3d12_query *q_parent, bool
|
||||
query_ensure_ready(d3d12_screen(ctx->base.screen), ctx, q_parent, false);
|
||||
d3d12_foreach_submitted_batch(ctx, old_batch) {
|
||||
if (old_batch->fence && old_batch->fence->value <= q_parent->fence_value)
|
||||
d3d12_reset_batch(ctx, old_batch, PIPE_TIMEOUT_INFINITE);
|
||||
d3d12_reset_batch(ctx, old_batch, OS_TIMEOUT_INFINITE);
|
||||
}
|
||||
|
||||
accumulate_subresult(ctx, q_parent, 0, &result, true);
|
||||
@@ -594,7 +594,7 @@ d3d12_render_condition(struct pipe_context *pctx,
|
||||
query_ensure_ready(d3d12_screen(ctx->base.screen), ctx, query, false);
|
||||
d3d12_foreach_submitted_batch(ctx, old_batch) {
|
||||
if (old_batch->fence && old_batch->fence->value <= query->fence_value)
|
||||
d3d12_reset_batch(ctx, old_batch, PIPE_TIMEOUT_INFINITE);
|
||||
d3d12_reset_batch(ctx, old_batch, OS_TIMEOUT_INFINITE);
|
||||
}
|
||||
|
||||
union pipe_query_result result;
|
||||
|
@@ -120,7 +120,7 @@ d3d12_resource_wait_idle(struct d3d12_context *ctx,
|
||||
} else {
|
||||
d3d12_foreach_submitted_batch(ctx, batch) {
|
||||
if (d3d12_batch_has_references(batch, res->bo, want_to_write))
|
||||
d3d12_reset_batch(ctx, batch, PIPE_TIMEOUT_INFINITE);
|
||||
d3d12_reset_batch(ctx, batch, OS_TIMEOUT_INFINITE);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -420,7 +420,7 @@ d3d12_video_decoder_end_frame(struct pipe_video_codec *codec,
|
||||
assert(pUploadGPUCompletionFence);
|
||||
debug_printf("[d3d12_video_decoder] d3d12_video_decoder_end_frame - Waiting on GPU completion fence for "
|
||||
"buffer_subdata to upload compressed bitstream.\n");
|
||||
pD3D12Screen->base.fence_finish(&pD3D12Screen->base, NULL, pUploadGPUCompletionFence, PIPE_TIMEOUT_INFINITE);
|
||||
pD3D12Screen->base.fence_finish(&pD3D12Screen->base, NULL, pUploadGPUCompletionFence, OS_TIMEOUT_INFINITE);
|
||||
pD3D12Screen->base.fence_reference(&pD3D12Screen->base, &pUploadGPUCompletionFence, NULL);
|
||||
pipe_resource_reference(&pPipeCompressedBufferObj, NULL);
|
||||
|
||||
@@ -661,7 +661,7 @@ d3d12_video_decoder_end_frame(struct pipe_video_codec *codec,
|
||||
assert(completion_fence);
|
||||
debug_printf("[d3d12_video_decoder] d3d12_video_decoder_end_frame - Waiting on GPU completion fence for "
|
||||
"resource_copy_region on decoded frame.\n");
|
||||
pD3D12Screen->base.fence_finish(&pD3D12Screen->base, NULL, completion_fence, PIPE_TIMEOUT_INFINITE);
|
||||
pD3D12Screen->base.fence_finish(&pD3D12Screen->base, NULL, completion_fence, OS_TIMEOUT_INFINITE);
|
||||
pD3D12Screen->base.fence_reference(&pD3D12Screen->base, &completion_fence, NULL);
|
||||
pipe_resource_reference(&pPipeSrc, NULL);
|
||||
}
|
||||
|
@@ -232,7 +232,7 @@ d3d12_video_encoder_destroy(struct pipe_video_codec *codec)
|
||||
if(pD3D12Enc->m_bPendingWorkNotFlushed){
|
||||
uint64_t curBatchFence = pD3D12Enc->m_fenceValue;
|
||||
d3d12_video_encoder_flush(codec);
|
||||
d3d12_video_encoder_sync_completion(codec, curBatchFence, PIPE_TIMEOUT_INFINITE);
|
||||
d3d12_video_encoder_sync_completion(codec, curBatchFence, OS_TIMEOUT_INFINITE);
|
||||
}
|
||||
|
||||
// Call d3d12_video_encoder dtor to make ComPtr and other member's destructors work
|
||||
@@ -1263,7 +1263,7 @@ d3d12_video_encoder_begin_frame(struct pipe_video_codec * codec,
|
||||
debug_printf("[d3d12_video_encoder] d3d12_video_encoder_begin_frame Waiting for completion of in flight resource sets with previous work with fenceValue: %" PRIu64 "\n",
|
||||
fenceValueToWaitOn);
|
||||
|
||||
d3d12_video_encoder_ensure_fence_finished(codec, fenceValueToWaitOn, PIPE_TIMEOUT_INFINITE);
|
||||
d3d12_video_encoder_ensure_fence_finished(codec, fenceValueToWaitOn, OS_TIMEOUT_INFINITE);
|
||||
|
||||
if (!d3d12_video_encoder_reconfigure_session(pD3D12Enc, target, picture)) {
|
||||
debug_printf("[d3d12_video_encoder] d3d12_video_encoder_begin_frame - Failure on "
|
||||
@@ -1686,7 +1686,7 @@ d3d12_video_encoder_get_feedback(struct pipe_video_codec *codec, void *feedback,
|
||||
assert(pD3D12Enc);
|
||||
|
||||
uint64_t requested_metadata_fence = ((uint64_t) feedback);
|
||||
d3d12_video_encoder_sync_completion(codec, requested_metadata_fence, PIPE_TIMEOUT_INFINITE);
|
||||
d3d12_video_encoder_sync_completion(codec, requested_metadata_fence, OS_TIMEOUT_INFINITE);
|
||||
|
||||
uint64_t current_metadata_slot = (requested_metadata_fence % D3D12_VIDEO_ENC_METADATA_BUFFERS_COUNT);
|
||||
|
||||
|
@@ -55,7 +55,7 @@ fence_flush(struct pipe_context *pctx, struct pipe_fence_handle *fence,
|
||||
if (!timeout)
|
||||
return false;
|
||||
|
||||
if (timeout == PIPE_TIMEOUT_INFINITE) {
|
||||
if (timeout == OS_TIMEOUT_INFINITE) {
|
||||
util_queue_fence_wait(&fence->ready);
|
||||
} else {
|
||||
int64_t abs_timeout = os_time_get_absolute_timeout(timeout);
|
||||
@@ -267,7 +267,7 @@ fd_pipe_fence_get_fd(struct pipe_screen *pscreen, struct pipe_fence_handle *fenc
|
||||
* but if TC is not used, this will be null. Which is fine, we won't call
|
||||
* threaded_context_flush() in that case
|
||||
*/
|
||||
fence_flush(&fence->ctx->tc->base, fence, PIPE_TIMEOUT_INFINITE);
|
||||
fence_flush(&fence->ctx->tc->base, fence, OS_TIMEOUT_INFINITE);
|
||||
assert(fence->fence);
|
||||
return os_dupfd_cloexec(fence->fence->fence_fd);
|
||||
}
|
||||
|
@@ -643,7 +643,7 @@ iris_get_query_result(struct pipe_context *ctx,
|
||||
struct pipe_screen *screen = ctx->screen;
|
||||
|
||||
result->b = screen->fence_finish(screen, ctx, q->fence,
|
||||
wait ? PIPE_TIMEOUT_INFINITE : 0);
|
||||
wait ? OS_TIMEOUT_INFINITE : 0);
|
||||
return result->b;
|
||||
}
|
||||
|
||||
|
@@ -908,7 +908,7 @@ lima_do_job(struct lima_job *job)
|
||||
fprintf(stderr, "gp job error\n");
|
||||
|
||||
if (job->dump) {
|
||||
if (lima_job_wait(job, LIMA_PIPE_GP, PIPE_TIMEOUT_INFINITE)) {
|
||||
if (lima_job_wait(job, LIMA_PIPE_GP, OS_TIMEOUT_INFINITE)) {
|
||||
if (ctx->gp_output) {
|
||||
float *pos = lima_bo_map(ctx->gp_output);
|
||||
lima_dump_command_stream_print(
|
||||
@@ -989,7 +989,7 @@ lima_do_job(struct lima_job *job)
|
||||
}
|
||||
|
||||
if (job->dump) {
|
||||
if (!lima_job_wait(job, LIMA_PIPE_PP, PIPE_TIMEOUT_INFINITE)) {
|
||||
if (!lima_job_wait(job, LIMA_PIPE_PP, OS_TIMEOUT_INFINITE)) {
|
||||
fprintf(stderr, "pp wait error\n");
|
||||
exit(1);
|
||||
}
|
||||
|
@@ -652,7 +652,7 @@ lima_transfer_map(struct pipe_context *pctx,
|
||||
|
||||
unsigned op = usage & PIPE_MAP_WRITE ?
|
||||
LIMA_GEM_WAIT_WRITE : LIMA_GEM_WAIT_READ;
|
||||
lima_bo_wait(bo, op, PIPE_TIMEOUT_INFINITE);
|
||||
lima_bo_wait(bo, op, OS_TIMEOUT_INFINITE);
|
||||
}
|
||||
|
||||
if (!lima_bo_map(bo))
|
||||
@@ -917,7 +917,7 @@ lima_texture_subdata(struct pipe_context *pctx,
|
||||
};
|
||||
|
||||
lima_flush_job_accessing_bo(ctx, res->bo, true);
|
||||
lima_bo_wait(res->bo, LIMA_GEM_WAIT_WRITE, PIPE_TIMEOUT_INFINITE);
|
||||
lima_bo_wait(res->bo, LIMA_GEM_WAIT_WRITE, OS_TIMEOUT_INFINITE);
|
||||
if (!lima_bo_map(res->bo))
|
||||
return;
|
||||
|
||||
|
@@ -45,7 +45,7 @@ bool lima_get_absolute_timeout(uint64_t *timeout)
|
||||
struct timespec current;
|
||||
uint64_t current_ns;
|
||||
|
||||
if (*timeout == PIPE_TIMEOUT_INFINITE)
|
||||
if (*timeout == OS_TIMEOUT_INFINITE)
|
||||
return true;
|
||||
|
||||
if (clock_gettime(CLOCK_MONOTONIC, ¤t))
|
||||
|
@@ -95,7 +95,7 @@ llvmpipe_finish(struct pipe_context *pipe,
|
||||
llvmpipe_flush(pipe, &fence, reason);
|
||||
if (fence) {
|
||||
pipe->screen->fence_finish(pipe->screen, NULL, fence,
|
||||
PIPE_TIMEOUT_INFINITE);
|
||||
OS_TIMEOUT_INFINITE);
|
||||
pipe->screen->fence_reference(pipe->screen, &fence, NULL);
|
||||
}
|
||||
}
|
||||
|
@@ -909,7 +909,7 @@ llvmpipe_fence_finish(struct pipe_screen *screen,
|
||||
return lp_fence_signalled(f);
|
||||
|
||||
if (!lp_fence_signalled(f)) {
|
||||
if (timeout != PIPE_TIMEOUT_INFINITE)
|
||||
if (timeout != OS_TIMEOUT_INFINITE)
|
||||
return lp_fence_timedwait(f, timeout);
|
||||
|
||||
lp_fence_wait(f);
|
||||
|
@@ -149,7 +149,7 @@ static bool r300_get_query_result(struct pipe_context* pipe,
|
||||
|
||||
if (q->type == PIPE_QUERY_GPU_FINISHED) {
|
||||
if (wait) {
|
||||
r300->rws->buffer_wait(r300->rws, q->buf, PIPE_TIMEOUT_INFINITE,
|
||||
r300->rws->buffer_wait(r300->rws, q->buf, OS_TIMEOUT_INFINITE,
|
||||
RADEON_USAGE_READWRITE);
|
||||
vresult->b = TRUE;
|
||||
} else {
|
||||
|
@@ -1094,7 +1094,7 @@ static bool r600_fence_finish(struct pipe_screen *screen,
|
||||
return false;
|
||||
|
||||
/* Recompute the timeout after waiting. */
|
||||
if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
|
||||
if (timeout && timeout != OS_TIMEOUT_INFINITE) {
|
||||
int64_t time = os_time_get_nano();
|
||||
timeout = abs_timeout > time ? abs_timeout - time : 0;
|
||||
}
|
||||
@@ -1114,7 +1114,7 @@ static bool r600_fence_finish(struct pipe_screen *screen,
|
||||
return false;
|
||||
|
||||
/* Recompute the timeout after all that. */
|
||||
if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
|
||||
if (timeout && timeout != OS_TIMEOUT_INFINITE) {
|
||||
int64_t time = os_time_get_nano();
|
||||
timeout = abs_timeout > time ? abs_timeout - time : 0;
|
||||
}
|
||||
|
@@ -410,7 +410,7 @@ static bool r600_query_sw_get_result(struct r600_common_context *rctx,
|
||||
struct pipe_context *ctx = rquery->b.flushed ? NULL : &rctx->b;
|
||||
|
||||
result->b = screen->fence_finish(screen, ctx, query->fence,
|
||||
wait ? PIPE_TIMEOUT_INFINITE : 0);
|
||||
wait ? OS_TIMEOUT_INFINITE : 0);
|
||||
return result->b;
|
||||
}
|
||||
|
||||
|
@@ -287,14 +287,14 @@ static bool si_fence_finish(struct pipe_screen *screen, struct pipe_context *ctx
|
||||
if (!timeout)
|
||||
return false;
|
||||
|
||||
if (timeout == PIPE_TIMEOUT_INFINITE) {
|
||||
if (timeout == OS_TIMEOUT_INFINITE) {
|
||||
util_queue_fence_wait(&sfence->ready);
|
||||
} else {
|
||||
if (!util_queue_fence_wait_timeout(&sfence->ready, abs_timeout))
|
||||
return false;
|
||||
}
|
||||
|
||||
if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
|
||||
if (timeout && timeout != OS_TIMEOUT_INFINITE) {
|
||||
int64_t time = os_time_get_nano();
|
||||
timeout = abs_timeout > time ? abs_timeout - time : 0;
|
||||
}
|
||||
@@ -342,7 +342,7 @@ static bool si_fence_finish(struct pipe_screen *screen, struct pipe_context *ctx
|
||||
return false;
|
||||
|
||||
/* Recompute the timeout after all that. */
|
||||
if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
|
||||
if (timeout && timeout != OS_TIMEOUT_INFINITE) {
|
||||
int64_t time = os_time_get_nano();
|
||||
timeout = abs_timeout > time ? abs_timeout - time : 0;
|
||||
}
|
||||
|
@@ -430,7 +430,7 @@ static bool si_query_sw_get_result(struct si_context *sctx, struct si_query *squ
|
||||
struct pipe_screen *screen = sctx->b.screen;
|
||||
struct pipe_context *ctx = squery->b.flushed ? NULL : &sctx->b;
|
||||
|
||||
result->b = screen->fence_finish(screen, ctx, query->fence, wait ? PIPE_TIMEOUT_INFINITE : 0);
|
||||
result->b = screen->fence_finish(screen, ctx, query->fence, wait ? OS_TIMEOUT_INFINITE : 0);
|
||||
return result->b;
|
||||
}
|
||||
|
||||
|
@@ -729,7 +729,7 @@ void si_handle_sqtt(struct si_context *sctx, struct radeon_cmdbuf *rcs) {
|
||||
if (frame_trigger || file_trigger) {
|
||||
/* Wait for last submission */
|
||||
sctx->ws->fence_wait(sctx->ws, sctx->last_gfx_fence,
|
||||
PIPE_TIMEOUT_INFINITE);
|
||||
OS_TIMEOUT_INFINITE);
|
||||
|
||||
/* Start SQTT */
|
||||
si_begin_sqtt(sctx, rcs);
|
||||
@@ -753,7 +753,7 @@ void si_handle_sqtt(struct si_context *sctx, struct radeon_cmdbuf *rcs) {
|
||||
|
||||
/* Wait for SQTT to finish and read back the bo */
|
||||
if (sctx->ws->fence_wait(sctx->ws, sctx->last_sqtt_fence,
|
||||
PIPE_TIMEOUT_INFINITE) &&
|
||||
OS_TIMEOUT_INFINITE) &&
|
||||
si_get_sqtt_trace(sctx, &sqtt_trace)) {
|
||||
struct ac_spm_trace spm_trace;
|
||||
|
||||
|
@@ -154,7 +154,7 @@ softpipe_flush_resource(struct pipe_context *pipe,
|
||||
* have fences.
|
||||
*/
|
||||
pipe->screen->fence_finish(pipe->screen, NULL, fence,
|
||||
PIPE_TIMEOUT_INFINITE);
|
||||
OS_TIMEOUT_INFINITE);
|
||||
pipe->screen->fence_reference(pipe->screen, &fence, NULL);
|
||||
}
|
||||
} else {
|
||||
|
@@ -476,7 +476,7 @@ svga_context_flush(struct svga_context *svga,
|
||||
if (SVGA_DEBUG & DEBUG_SYNC) {
|
||||
if (fence)
|
||||
svga->pipe.screen->fence_finish(svga->pipe.screen, NULL, fence,
|
||||
PIPE_TIMEOUT_INFINITE);
|
||||
OS_TIMEOUT_INFINITE);
|
||||
}
|
||||
|
||||
if (pfence)
|
||||
@@ -500,7 +500,7 @@ svga_context_finish(struct svga_context *svga)
|
||||
SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_CONTEXTFINISH);
|
||||
|
||||
svga_context_flush(svga, &fence);
|
||||
screen->fence_finish(screen, NULL, fence, PIPE_TIMEOUT_INFINITE);
|
||||
screen->fence_finish(screen, NULL, fence, OS_TIMEOUT_INFINITE);
|
||||
screen->fence_reference(screen, &fence, NULL);
|
||||
|
||||
SVGA_STATS_TIME_POP(svga_sws(svga));
|
||||
|
@@ -174,7 +174,7 @@ get_query_result_vgpu9(struct svga_context *svga, struct svga_query *sq,
|
||||
if (state == SVGA3D_QUERYSTATE_PENDING) {
|
||||
if (!wait)
|
||||
return false;
|
||||
sws->fence_finish(sws, sq->fence, PIPE_TIMEOUT_INFINITE,
|
||||
sws->fence_finish(sws, sq->fence, OS_TIMEOUT_INFINITE,
|
||||
SVGA_FENCE_FLAG_QUERY);
|
||||
state = sq->queryResult->state;
|
||||
}
|
||||
@@ -607,7 +607,7 @@ get_query_result_vgpu10(struct svga_context *svga, struct svga_query *sq,
|
||||
queryState == SVGA3D_QUERYSTATE_NEW) {
|
||||
if (!wait)
|
||||
return false;
|
||||
sws->fence_finish(sws, sq->fence, PIPE_TIMEOUT_INFINITE,
|
||||
sws->fence_finish(sws, sq->fence, OS_TIMEOUT_INFINITE,
|
||||
SVGA_FENCE_FLAG_QUERY);
|
||||
sws->query_get_result(sws, sq->gb_query, sq->offset, &queryState, result, resultLen);
|
||||
}
|
||||
@@ -1257,7 +1257,7 @@ svga_render_condition(struct pipe_context *pipe, struct pipe_query *q,
|
||||
|
||||
if ((mode == PIPE_RENDER_COND_WAIT ||
|
||||
mode == PIPE_RENDER_COND_BY_REGION_WAIT) && sq->fence) {
|
||||
sws->fence_finish(sws, sq->fence, PIPE_TIMEOUT_INFINITE,
|
||||
sws->fence_finish(sws, sq->fence, OS_TIMEOUT_INFINITE,
|
||||
SVGA_FENCE_FLAG_QUERY);
|
||||
}
|
||||
}
|
||||
|
@@ -122,7 +122,7 @@ svga_transfer_dma(struct svga_context *svga,
|
||||
|
||||
if (transfer == SVGA3D_READ_HOST_VRAM) {
|
||||
svga_context_flush(svga, &fence);
|
||||
sws->fence_finish(sws, fence, PIPE_TIMEOUT_INFINITE, 0);
|
||||
sws->fence_finish(sws, fence, OS_TIMEOUT_INFINITE, 0);
|
||||
sws->fence_reference(sws, &fence, NULL);
|
||||
}
|
||||
}
|
||||
@@ -181,7 +181,7 @@ svga_transfer_dma(struct svga_context *svga,
|
||||
|
||||
if (transfer == SVGA3D_READ_HOST_VRAM) {
|
||||
svga_context_flush(svga, &fence);
|
||||
sws->fence_finish(sws, fence, PIPE_TIMEOUT_INFINITE, 0);
|
||||
sws->fence_finish(sws, fence, OS_TIMEOUT_INFINITE, 0);
|
||||
|
||||
hw = sws->buffer_map(sws, st->hwbuf, PIPE_MAP_READ);
|
||||
assert(hw);
|
||||
|
@@ -678,7 +678,7 @@ struct svga_winsys_screen
|
||||
|
||||
/**
|
||||
* Wait for the fence to finish.
|
||||
* \param timeout in nanoseconds (may be PIPE_TIMEOUT_INFINITE).
|
||||
* \param timeout in nanoseconds (may be OS_TIMEOUT_INFINITE).
|
||||
* 0 to return immediately, if the API suports it.
|
||||
* \param flags driver-specific meaning
|
||||
* \return zero on success.
|
||||
|
@@ -537,7 +537,7 @@ v3d_bo_map(struct v3d_bo *bo)
|
||||
{
|
||||
void *map = v3d_bo_map_unsynchronized(bo);
|
||||
|
||||
bool ok = v3d_bo_wait(bo, PIPE_TIMEOUT_INFINITE, "bo map");
|
||||
bool ok = v3d_bo_wait(bo, OS_TIMEOUT_INFINITE, "bo map");
|
||||
if (!ok) {
|
||||
fprintf(stderr, "BO wait for map failed\n");
|
||||
abort();
|
||||
|
@@ -464,7 +464,7 @@ v3d_read_and_accumulate_primitive_counters(struct v3d_context *v3d)
|
||||
|
||||
perf_debug("stalling on TF counts readback\n");
|
||||
struct v3d_resource *rsc = v3d_resource(v3d->prim_counts);
|
||||
if (v3d_bo_wait(rsc->bo, PIPE_TIMEOUT_INFINITE, "prim-counts")) {
|
||||
if (v3d_bo_wait(rsc->bo, OS_TIMEOUT_INFINITE, "prim-counts")) {
|
||||
uint32_t *map = v3d_bo_map(rsc->bo) + v3d->prim_counts_offset;
|
||||
v3d->tf_prims_generated += map[V3D_PRIM_COUNTS_TF_WRITTEN];
|
||||
/* When we only have a vertex shader with no primitive
|
||||
|
@@ -196,7 +196,7 @@ v3d_get_query_result_perfcnt(struct v3d_context *v3d, struct v3d_query *query,
|
||||
if (pquery->perfmon->job_submitted) {
|
||||
if (!v3d_fence_wait(v3d->screen,
|
||||
pquery->perfmon->last_job_fence,
|
||||
wait ? PIPE_TIMEOUT_INFINITE : 0))
|
||||
wait ? OS_TIMEOUT_INFINITE : 0))
|
||||
return false;
|
||||
|
||||
req.id = pquery->perfmon->kperfmon_id;
|
||||
|
@@ -657,7 +657,7 @@ vc4_bo_map(struct vc4_bo *bo)
|
||||
{
|
||||
void *map = vc4_bo_map_unsynchronized(bo);
|
||||
|
||||
bool ok = vc4_bo_wait(bo, PIPE_TIMEOUT_INFINITE, "bo map");
|
||||
bool ok = vc4_bo_wait(bo, OS_TIMEOUT_INFINITE, "bo map");
|
||||
if (!ok) {
|
||||
fprintf(stderr, "BO wait for map failed\n");
|
||||
abort();
|
||||
|
@@ -505,7 +505,7 @@ vc4_job_submit(struct vc4_context *vc4, struct vc4_job *job)
|
||||
if (vc4->last_emit_seqno - vc4->screen->finished_seqno > 5) {
|
||||
if (!vc4_wait_seqno(vc4->screen,
|
||||
vc4->last_emit_seqno - 5,
|
||||
PIPE_TIMEOUT_INFINITE,
|
||||
OS_TIMEOUT_INFINITE,
|
||||
"job throttling")) {
|
||||
fprintf(stderr, "Job throttling failed\n");
|
||||
}
|
||||
@@ -513,7 +513,7 @@ vc4_job_submit(struct vc4_context *vc4, struct vc4_job *job)
|
||||
|
||||
if (VC4_DBG(ALWAYS_SYNC)) {
|
||||
if (!vc4_wait_seqno(vc4->screen, vc4->last_emit_seqno,
|
||||
PIPE_TIMEOUT_INFINITE, "sync")) {
|
||||
OS_TIMEOUT_INFINITE, "sync")) {
|
||||
fprintf(stderr, "Wait failed.\n");
|
||||
abort();
|
||||
}
|
||||
|
@@ -262,7 +262,7 @@ vc4_get_query_result(struct pipe_context *pctx, struct pipe_query *pquery,
|
||||
}
|
||||
|
||||
if (!vc4_wait_seqno(ctx->screen, query->hwperfmon->last_seqno,
|
||||
wait ? PIPE_TIMEOUT_INFINITE : 0, "perfmon"))
|
||||
wait ? OS_TIMEOUT_INFINITE : 0, "perfmon"))
|
||||
return false;
|
||||
|
||||
req.id = query->hwperfmon->id;
|
||||
|
@@ -1010,7 +1010,7 @@ static void virgl_submit_cmd(struct virgl_winsys *vws,
|
||||
|
||||
vws->submit_cmd(vws, cbuf, &sync_fence);
|
||||
|
||||
vws->fence_wait(vws, sync_fence, PIPE_TIMEOUT_INFINITE);
|
||||
vws->fence_wait(vws, sync_fence, OS_TIMEOUT_INFINITE);
|
||||
vws->fence_reference(vws, &sync_fence, NULL);
|
||||
} else {
|
||||
vws->submit_cmd(vws, cbuf, fence);
|
||||
@@ -1631,7 +1631,7 @@ static void virgl_link_shader(struct pipe_context *ctx, void **handles)
|
||||
struct virgl_winsys *vws = rs->vws;
|
||||
struct pipe_fence_handle *sync_fence;
|
||||
virgl_flush_eq(vctx, vctx, &sync_fence);
|
||||
vws->fence_wait(vws, sync_fence, PIPE_TIMEOUT_INFINITE);
|
||||
vws->fence_wait(vws, sync_fence, OS_TIMEOUT_INFINITE);
|
||||
vws->fence_reference(vws, &sync_fence, NULL);
|
||||
}
|
||||
}
|
||||
|
@@ -747,7 +747,7 @@ static void virgl_video_flush(struct pipe_video_codec *codec)
|
||||
|
||||
ctx->flush(ctx, &fence, 0);
|
||||
if (fence) {
|
||||
ctx->screen->fence_finish(ctx->screen, NULL, fence, PIPE_TIMEOUT_INFINITE);
|
||||
ctx->screen->fence_finish(ctx->screen, NULL, fence, OS_TIMEOUT_INFINITE);
|
||||
ctx->screen->fence_reference(ctx->screen, &fence, NULL);
|
||||
}
|
||||
}
|
||||
|
@@ -514,7 +514,7 @@ post_submit(void *data, void *gdata, int thread_index)
|
||||
screen->device_lost = true;
|
||||
} else if (bs->ctx->batch_states_count > 5000) {
|
||||
/* throttle in case something crazy is happening */
|
||||
zink_screen_timeline_wait(screen, bs->fence.batch_id - 2500, PIPE_TIMEOUT_INFINITE);
|
||||
zink_screen_timeline_wait(screen, bs->fence.batch_id - 2500, OS_TIMEOUT_INFINITE);
|
||||
}
|
||||
/* this resets the buffer hashlist for the state's next use */
|
||||
memset(&bs->buffer_indices_hashlist, -1, sizeof(bs->buffer_indices_hashlist));
|
||||
|
@@ -3152,7 +3152,7 @@ stall(struct zink_context *ctx)
|
||||
{
|
||||
struct zink_screen *screen = zink_screen(ctx->base.screen);
|
||||
sync_flush(ctx, zink_batch_state(ctx->last_fence));
|
||||
zink_screen_timeline_wait(screen, ctx->last_fence->batch_id, PIPE_TIMEOUT_INFINITE);
|
||||
zink_screen_timeline_wait(screen, ctx->last_fence->batch_id, OS_TIMEOUT_INFINITE);
|
||||
zink_batch_reset_all(ctx);
|
||||
}
|
||||
|
||||
|
@@ -111,13 +111,13 @@ tc_fence_finish(struct zink_context *ctx, struct zink_tc_fence *mfence, uint64_t
|
||||
/* this is a tc mfence, so we're just waiting on the queue mfence to complete
|
||||
* after being signaled by the real mfence
|
||||
*/
|
||||
if (*timeout_ns == PIPE_TIMEOUT_INFINITE) {
|
||||
if (*timeout_ns == OS_TIMEOUT_INFINITE) {
|
||||
util_queue_fence_wait(&mfence->ready);
|
||||
} else {
|
||||
if (!util_queue_fence_wait_timeout(&mfence->ready, abs_timeout))
|
||||
return false;
|
||||
}
|
||||
if (*timeout_ns && *timeout_ns != PIPE_TIMEOUT_INFINITE) {
|
||||
if (*timeout_ns && *timeout_ns != OS_TIMEOUT_INFINITE) {
|
||||
int64_t time_ns = os_time_get_nano();
|
||||
*timeout_ns = abs_timeout > time_ns ? abs_timeout - time_ns : 0;
|
||||
}
|
||||
|
@@ -1102,7 +1102,7 @@ zink_get_query_result(struct pipe_context *pctx,
|
||||
struct pipe_screen *screen = pctx->screen;
|
||||
|
||||
result->b = screen->fence_finish(screen, query->base.flushed ? NULL : pctx,
|
||||
query->fence, wait ? PIPE_TIMEOUT_INFINITE : 0);
|
||||
query->fence, wait ? OS_TIMEOUT_INFINITE : 0);
|
||||
return result->b;
|
||||
}
|
||||
|
||||
|
@@ -180,7 +180,7 @@ hard_event::wait() const {
|
||||
queue()->flush();
|
||||
|
||||
if (!_fence ||
|
||||
!screen->fence_finish(screen, NULL, _fence, PIPE_TIMEOUT_INFINITE))
|
||||
!screen->fence_finish(screen, NULL, _fence, OS_TIMEOUT_INFINITE))
|
||||
throw error(CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST);
|
||||
}
|
||||
|
||||
|
@@ -1826,7 +1826,7 @@ dri2_blit_image(__DRIcontext *context, __DRIimage *dst, __DRIimage *src,
|
||||
screen = ctx->screen->base.screen;
|
||||
pipe->flush_resource(pipe, dst->texture);
|
||||
st_context_flush(ctx->st, 0, &fence, NULL, NULL);
|
||||
(void) screen->fence_finish(screen, NULL, fence, PIPE_TIMEOUT_INFINITE);
|
||||
(void) screen->fence_finish(screen, NULL, fence, OS_TIMEOUT_INFINITE);
|
||||
screen->fence_reference(screen, &fence, NULL);
|
||||
}
|
||||
}
|
||||
|
@@ -538,7 +538,7 @@ dri_flush(__DRIcontext *cPriv,
|
||||
|
||||
/* throttle on the previous fence */
|
||||
if (drawable->throttle_fence) {
|
||||
screen->fence_finish(screen, NULL, drawable->throttle_fence, PIPE_TIMEOUT_INFINITE);
|
||||
screen->fence_finish(screen, NULL, drawable->throttle_fence, OS_TIMEOUT_INFINITE);
|
||||
screen->fence_reference(screen, &drawable->throttle_fence, NULL);
|
||||
}
|
||||
drawable->throttle_fence = new_fence;
|
||||
|
@@ -253,7 +253,7 @@ drisw_swap_buffers(struct dri_drawable *drawable)
|
||||
}
|
||||
|
||||
screen->base.screen->fence_finish(screen->base.screen, ctx->st->pipe,
|
||||
fence, PIPE_TIMEOUT_INFINITE);
|
||||
fence, OS_TIMEOUT_INFINITE);
|
||||
screen->base.screen->fence_reference(screen->base.screen, &fence, NULL);
|
||||
drisw_copy_to_front(ctx->st->pipe, drawable, ptex);
|
||||
|
||||
@@ -288,7 +288,7 @@ drisw_copy_sub_buffer(struct dri_drawable *drawable, int x, int y,
|
||||
st_context_flush(ctx->st, ST_FLUSH_FRONT, &fence, NULL, NULL);
|
||||
|
||||
screen->base.screen->fence_finish(screen->base.screen, ctx->st->pipe,
|
||||
fence, PIPE_TIMEOUT_INFINITE);
|
||||
fence, OS_TIMEOUT_INFINITE);
|
||||
screen->base.screen->fence_reference(screen->base.screen, &fence, NULL);
|
||||
|
||||
if (drawable->stvis.samples > 1) {
|
||||
|
@@ -713,7 +713,7 @@ kopper_flush_frontbuffer(struct dri_context *ctx,
|
||||
}
|
||||
/* throttle on the previous fence */
|
||||
if (drawable->throttle_fence) {
|
||||
screen->fence_finish(screen, NULL, drawable->throttle_fence, PIPE_TIMEOUT_INFINITE);
|
||||
screen->fence_finish(screen, NULL, drawable->throttle_fence, OS_TIMEOUT_INFINITE);
|
||||
screen->fence_reference(screen, &drawable->throttle_fence, NULL);
|
||||
}
|
||||
drawable->throttle_fence = new_fence;
|
||||
|
@@ -1351,7 +1351,7 @@ void XMesaSwapBuffers( XMesaBuffer b )
|
||||
XMesaDisplay xmdpy = xmesa_init_display(b->xm_visual->display);
|
||||
struct pipe_screen *screen = xmdpy->screen;
|
||||
xmdpy->screen->fence_finish(screen, NULL, fence,
|
||||
PIPE_TIMEOUT_INFINITE);
|
||||
OS_TIMEOUT_INFINITE);
|
||||
xmdpy->screen->fence_reference(screen, &fence, NULL);
|
||||
}
|
||||
}
|
||||
@@ -1389,7 +1389,7 @@ void XMesaFlush( XMesaContext c )
|
||||
st_context_flush(c->st, ST_FLUSH_FRONT, &fence, NULL, NULL);
|
||||
if (fence) {
|
||||
xmdpy->screen->fence_finish(xmdpy->screen, NULL, fence,
|
||||
PIPE_TIMEOUT_INFINITE);
|
||||
OS_TIMEOUT_INFINITE);
|
||||
xmdpy->screen->fence_reference(xmdpy->screen, &fence, NULL);
|
||||
}
|
||||
XFlush( c->xm_visual->display );
|
||||
|
@@ -224,7 +224,7 @@ static void finish_fence(struct rendering_state *state)
|
||||
|
||||
state->pctx->screen->fence_finish(state->pctx->screen,
|
||||
NULL,
|
||||
handle, PIPE_TIMEOUT_INFINITE);
|
||||
handle, OS_TIMEOUT_INFINITE);
|
||||
state->pctx->screen->fence_reference(state->pctx->screen,
|
||||
&handle, NULL);
|
||||
}
|
||||
|
@@ -449,7 +449,7 @@ NineBuffer9_Lock( struct NineBuffer9 *This,
|
||||
|
||||
pipe = NineDevice9_GetPipe(device);
|
||||
pipe->flush(pipe, &fence, 0);
|
||||
(void) screen->fence_finish(screen, NULL, fence, PIPE_TIMEOUT_INFINITE);
|
||||
(void) screen->fence_finish(screen, NULL, fence, OS_TIMEOUT_INFINITE);
|
||||
screen->fence_reference(screen, &fence, NULL);
|
||||
}
|
||||
This->need_sync_if_nooverwrite = !(Flags & (D3DLOCK_DISCARD | D3DLOCK_NOOVERWRITE));
|
||||
|
@@ -687,7 +687,7 @@ static void work_present(void *data)
|
||||
{
|
||||
struct end_present_struct *work = data;
|
||||
if (work->fence_to_wait) {
|
||||
(void) work->screen->fence_finish(work->screen, NULL, work->fence_to_wait, PIPE_TIMEOUT_INFINITE);
|
||||
(void) work->screen->fence_finish(work->screen, NULL, work->fence_to_wait, OS_TIMEOUT_INFINITE);
|
||||
work->screen->fence_reference(work->screen, &(work->fence_to_wait), NULL);
|
||||
}
|
||||
ID3DPresent_PresentBuffer(work->present, work->present_handle, work->hDestWindowOverride, NULL, NULL, NULL, 0);
|
||||
@@ -921,7 +921,7 @@ bypass_rendering:
|
||||
/* Throttle rendering if needed */
|
||||
fence = swap_fences_pop_front(This);
|
||||
if (fence) {
|
||||
(void) This->screen->fence_finish(This->screen, NULL, fence, PIPE_TIMEOUT_INFINITE);
|
||||
(void) This->screen->fence_finish(This->screen, NULL, fence, OS_TIMEOUT_INFINITE);
|
||||
This->screen->fence_reference(This->screen, &fence, NULL);
|
||||
}
|
||||
|
||||
|
@@ -350,7 +350,7 @@ impl PipeScreen {
|
||||
pub(super) fn fence_finish(&self, fence: *mut pipe_fence_handle) {
|
||||
unsafe {
|
||||
let s = &mut *self.screen;
|
||||
s.fence_finish.unwrap()(s, ptr::null_mut(), fence, PIPE_TIMEOUT_INFINITE as u64);
|
||||
s.fence_finish.unwrap()(s, ptr::null_mut(), fence, OS_TIMEOUT_INFINITE as u64);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -249,6 +249,7 @@ rusticl_mesa_bindings_rs = rust.bindgen(
|
||||
'--bitfield-enum', 'nir_opt_if_options',
|
||||
'--bitfield-enum', 'nir_variable_mode',
|
||||
'--allowlist-type', 'float_controls',
|
||||
'--allowlist-var', 'OS_.*',
|
||||
'--allowlist-var', 'PIPE_.*',
|
||||
'--bitfield-enum', 'pipe_map_flags',
|
||||
'--allowlist-function', 'std(err|out)_ptr',
|
||||
|
@@ -16,6 +16,7 @@
|
||||
|
||||
#include "util/blob.h"
|
||||
#include "util/disk_cache.h"
|
||||
#include "util/os_time.h"
|
||||
#include "util/u_printf.h"
|
||||
#include "util/u_sampler.h"
|
||||
|
||||
|
@@ -324,7 +324,7 @@ vlVdpPresentationQueueBlockUntilSurfaceIdle(VdpPresentationQueue presentation_qu
|
||||
mtx_lock(&pq->device->mutex);
|
||||
if (surf->fence) {
|
||||
screen = pq->device->vscreen->pscreen;
|
||||
screen->fence_finish(screen, NULL, surf->fence, PIPE_TIMEOUT_INFINITE);
|
||||
screen->fence_finish(screen, NULL, surf->fence, OS_TIMEOUT_INFINITE);
|
||||
screen->fence_reference(screen, &surf->fence, NULL);
|
||||
}
|
||||
mtx_unlock(&pq->device->mutex);
|
||||
|
@@ -31,6 +31,7 @@
|
||||
#include "p_compiler.h"
|
||||
|
||||
#include "compiler/shader_enums.h"
|
||||
#include "util/os_time.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
@@ -655,9 +656,6 @@ enum pipe_viewport_swizzle {
|
||||
PIPE_VIEWPORT_SWIZZLE_NEGATIVE_W,
|
||||
};
|
||||
|
||||
#define PIPE_TIMEOUT_INFINITE 0xffffffffffffffffull
|
||||
|
||||
|
||||
/**
|
||||
* Device reset status.
|
||||
*/
|
||||
|
@@ -408,7 +408,7 @@ struct pipe_screen {
|
||||
*
|
||||
* In all other cases, the ctx parameter has no effect.
|
||||
*
|
||||
* \param timeout in nanoseconds (may be PIPE_TIMEOUT_INFINITE).
|
||||
* \param timeout in nanoseconds (may be OS_TIMEOUT_INFINITE).
|
||||
*/
|
||||
bool (*fence_finish)(struct pipe_screen *screen,
|
||||
struct pipe_context *ctx,
|
||||
|
@@ -367,7 +367,7 @@ struct radeon_winsys {
|
||||
* by the device.
|
||||
*
|
||||
* The timeout of 0 will only return the status.
|
||||
* The timeout of PIPE_TIMEOUT_INFINITE will always wait until the buffer
|
||||
* The timeout of OS_TIMEOUT_INFINITE will always wait until the buffer
|
||||
* is idle.
|
||||
*/
|
||||
bool (*buffer_wait)(struct radeon_winsys *ws, struct pb_buffer *buf,
|
||||
@@ -677,7 +677,7 @@ struct radeon_winsys {
|
||||
/**
|
||||
* Wait for the fence and return true if the fence has been signalled.
|
||||
* The timeout of 0 will only return the status.
|
||||
* The timeout of PIPE_TIMEOUT_INFINITE will always wait until the fence
|
||||
* The timeout of OS_TIMEOUT_INFINITE will always wait until the fence
|
||||
* is signalled.
|
||||
*/
|
||||
bool (*fence_wait)(struct radeon_winsys *ws, struct pipe_fence_handle *fence, uint64_t timeout);
|
||||
|
@@ -324,7 +324,7 @@ void *amdgpu_bo_map(struct radeon_winsys *rws,
|
||||
}
|
||||
}
|
||||
|
||||
amdgpu_bo_wait(rws, (struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
|
||||
amdgpu_bo_wait(rws, (struct pb_buffer*)bo, OS_TIMEOUT_INFINITE,
|
||||
RADEON_USAGE_WRITE);
|
||||
} else {
|
||||
/* Mapping for write. */
|
||||
@@ -339,7 +339,7 @@ void *amdgpu_bo_map(struct radeon_winsys *rws,
|
||||
}
|
||||
}
|
||||
|
||||
amdgpu_bo_wait(rws, (struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
|
||||
amdgpu_bo_wait(rws, (struct pb_buffer*)bo, OS_TIMEOUT_INFINITE,
|
||||
RADEON_USAGE_READWRITE);
|
||||
}
|
||||
|
||||
|
@@ -74,7 +74,7 @@ d3d12_wgl_framebuffer_destroy(struct stw_winsys_framebuffer *fb,
|
||||
/* Ensure all resources are flushed */
|
||||
ctx->flush(ctx, &fence, PIPE_FLUSH_HINT_FINISH);
|
||||
if (fence) {
|
||||
ctx->screen->fence_finish(ctx->screen, ctx, fence, PIPE_TIMEOUT_INFINITE);
|
||||
ctx->screen->fence_finish(ctx->screen, ctx, fence, OS_TIMEOUT_INFINITE);
|
||||
ctx->screen->fence_reference(ctx->screen, &fence, NULL);
|
||||
}
|
||||
}
|
||||
@@ -133,7 +133,7 @@ d3d12_wgl_framebuffer_resize(stw_winsys_framebuffer *fb,
|
||||
/* Ensure all resources are flushed */
|
||||
ctx->flush(ctx, &fence, PIPE_FLUSH_HINT_FINISH);
|
||||
if (fence) {
|
||||
ctx->screen->fence_finish(ctx->screen, ctx, fence, PIPE_TIMEOUT_INFINITE);
|
||||
ctx->screen->fence_finish(ctx->screen, ctx, fence, OS_TIMEOUT_INFINITE);
|
||||
ctx->screen->fence_reference(ctx->screen, &fence, NULL);
|
||||
}
|
||||
|
||||
|
@@ -74,7 +74,7 @@ d3d12_wgl_framebuffer_destroy(struct stw_winsys_framebuffer *fb,
|
||||
/* Ensure all resources are flushed */
|
||||
ctx->flush(ctx, &fence, PIPE_FLUSH_HINT_FINISH);
|
||||
if (fence) {
|
||||
ctx->screen->fence_finish(ctx->screen, ctx, fence, PIPE_TIMEOUT_INFINITE);
|
||||
ctx->screen->fence_finish(ctx->screen, ctx, fence, OS_TIMEOUT_INFINITE);
|
||||
ctx->screen->fence_reference(ctx->screen, &fence, NULL);
|
||||
}
|
||||
}
|
||||
|
@@ -126,7 +126,7 @@ static bool radeon_bo_wait(struct radeon_winsys *rws,
|
||||
return false;
|
||||
|
||||
/* Infinite timeout. */
|
||||
if (abs_timeout == PIPE_TIMEOUT_INFINITE) {
|
||||
if (abs_timeout == OS_TIMEOUT_INFINITE) {
|
||||
radeon_bo_wait_idle(bo);
|
||||
return true;
|
||||
}
|
||||
@@ -530,7 +530,7 @@ static void *radeon_bo_map(struct radeon_winsys *rws,
|
||||
cs->flush_cs(cs->flush_data,
|
||||
RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
|
||||
}
|
||||
radeon_bo_wait(rws, (struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
|
||||
radeon_bo_wait(rws, (struct pb_buffer*)bo, OS_TIMEOUT_INFINITE,
|
||||
RADEON_USAGE_WRITE);
|
||||
} else {
|
||||
/* Mapping for write. */
|
||||
@@ -545,7 +545,7 @@ static void *radeon_bo_map(struct radeon_winsys *rws,
|
||||
}
|
||||
}
|
||||
|
||||
radeon_bo_wait(rws, (struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
|
||||
radeon_bo_wait(rws, (struct pb_buffer*)bo, OS_TIMEOUT_INFINITE,
|
||||
RADEON_USAGE_READWRITE);
|
||||
}
|
||||
|
||||
@@ -920,7 +920,7 @@ static void radeon_bo_set_metadata(struct radeon_winsys *rws,
|
||||
|
||||
memset(&args, 0, sizeof(args));
|
||||
|
||||
os_wait_until_zero(&bo->num_active_ioctls, PIPE_TIMEOUT_INFINITE);
|
||||
os_wait_until_zero(&bo->num_active_ioctls, OS_TIMEOUT_INFINITE);
|
||||
|
||||
if (surf) {
|
||||
if (surf->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D)
|
||||
|
@@ -447,7 +447,7 @@ vmw_fence_ops_fence_finish(struct pb_fence_ops *ops,
|
||||
{
|
||||
struct vmw_winsys_screen *vws = vmw_fence_ops(ops)->vws;
|
||||
|
||||
return vmw_fence_finish(vws, fence, PIPE_TIMEOUT_INFINITE, flag);
|
||||
return vmw_fence_finish(vws, fence, OS_TIMEOUT_INFINITE, flag);
|
||||
}
|
||||
|
||||
|
||||
|
@@ -1062,7 +1062,7 @@ static bool virgl_fence_wait(struct virgl_winsys *vws,
|
||||
if (timeout == 0)
|
||||
return !virgl_drm_resource_is_busy(vws, fence->hw_res);
|
||||
|
||||
if (timeout != PIPE_TIMEOUT_INFINITE) {
|
||||
if (timeout != OS_TIMEOUT_INFINITE) {
|
||||
int64_t start_time = os_time_get();
|
||||
timeout /= 1000;
|
||||
while (virgl_drm_resource_is_busy(vws, fence->hw_res)) {
|
||||
|
@@ -606,7 +606,7 @@ static bool virgl_fence_wait(struct virgl_winsys *vws,
|
||||
if (timeout == 0)
|
||||
return !virgl_vtest_resource_is_busy(vws, res);
|
||||
|
||||
if (timeout != PIPE_TIMEOUT_INFINITE) {
|
||||
if (timeout != OS_TIMEOUT_INFINITE) {
|
||||
int64_t start_time = os_time_get();
|
||||
timeout /= 1000;
|
||||
while (virgl_vtest_resource_is_busy(vws, res)) {
|
||||
|
@@ -78,7 +78,7 @@ st_finish(struct st_context *st)
|
||||
|
||||
if (fence) {
|
||||
st->screen->fence_finish(st->screen, NULL, fence,
|
||||
PIPE_TIMEOUT_INFINITE);
|
||||
OS_TIMEOUT_INFINITE);
|
||||
st->screen->fence_reference(st->screen, &fence, NULL);
|
||||
}
|
||||
|
||||
|
@@ -822,7 +822,7 @@ st_context_flush(struct st_context *st, unsigned flags,
|
||||
|
||||
if ((flags & ST_FLUSH_WAIT) && fence && *fence) {
|
||||
st->screen->fence_finish(st->screen, NULL, *fence,
|
||||
PIPE_TIMEOUT_INFINITE);
|
||||
OS_TIMEOUT_INFINITE);
|
||||
st->screen->fence_reference(st->screen, fence, NULL);
|
||||
}
|
||||
|
||||
|
@@ -45,7 +45,6 @@ extern "C" {
|
||||
|
||||
#define ONE_SECOND_IN_NS INT64_C(1000000000)
|
||||
|
||||
/* must be equal to PIPE_TIMEOUT_INFINITE */
|
||||
#define OS_TIMEOUT_INFINITE 0xffffffffffffffffull
|
||||
|
||||
/*
|
||||
|
Reference in New Issue
Block a user