r300,r600,radeonsi: replace RADEON_FLUSH_* with PIPE_FLUSH_*

and handle PIPE_FLUSH_HINT_FINISH in r300.

Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
commit 2c5f2936af
parent 950221f923
Author: Marek Olšák
Date:   2017-11-28 17:54:55 +01:00

29 changed files with 55 additions and 57 deletions
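
The gist of the change: the winsys flush entry points now take gallium's
PIPE_FLUSH_* bits directly instead of the winsys-private RADEON_FLUSH_*
namespace, so the drivers no longer translate between two flag sets. A minimal
before/after sketch of the flush-from-state-tracker path, distilled from the
r600_flush_from_st and si_flush_from_st hunks below (PIPE_FLUSH_ASYNC and
PIPE_FLUSH_END_OF_FRAME are assumed to be defined in gallium's p_defines.h):

    /* Before: gallium flags had to be remapped onto winsys-private flags. */
    unsigned rflags = RADEON_FLUSH_ASYNC;
    if (flags & PIPE_FLUSH_END_OF_FRAME)
        rflags |= RADEON_FLUSH_END_OF_FRAME;

    /* After: the same PIPE_FLUSH_* bits flow through to the winsys untouched. */
    unsigned rflags = PIPE_FLUSH_ASYNC;
    if (flags & PIPE_FLUSH_END_OF_FRAME)
        rflags |= PIPE_FLUSH_END_OF_FRAME;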


@@ -383,7 +383,7 @@ static void r300_clear(struct pipe_context* pipe,
     /* Reserve CS space. */
     if (!r300->rws->cs_check_space(r300->cs, dwords)) {
-        r300_flush(&r300->context, RADEON_FLUSH_ASYNC, NULL);
+        r300_flush(&r300->context, PIPE_FLUSH_ASYNC, NULL);
     }
 
     /* Emit clear packets. */


@@ -129,9 +129,10 @@ static void r300_flush_wrapped(struct pipe_context *pipe,
                                struct pipe_fence_handle **fence,
                                unsigned flags)
 {
-    r300_flush(pipe,
-               flags & PIPE_FLUSH_END_OF_FRAME ? RADEON_FLUSH_END_OF_FRAME : 0,
-               fence);
+    if (flags & PIPE_FLUSH_HINT_FINISH)
+        flags &= ~PIPE_FLUSH_ASYNC;
+
+    r300_flush(pipe, flags, fence);
 }
 
 void r300_init_flush_functions(struct r300_context* r300)
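
The r300_flush_wrapped hunk above is where the second line of the commit
message lands: PIPE_FLUSH_HINT_FINISH tells the driver the caller is about to
wait on the result, so r300 answers it by dropping PIPE_FLUSH_ASYNC and
flushing synchronously instead of deferring work the caller would immediately
block on. A hypothetical caller-side sketch through the gallium interface (the
pipe/screen setup is assumed and not part of this commit):

    struct pipe_fence_handle *fence = NULL;

    /* Hint that a wait follows, so a synchronous flush is the cheaper choice. */
    pipe->flush(pipe, &fence, PIPE_FLUSH_HINT_FINISH);
    screen->fence_finish(screen, NULL, fence, PIPE_TIMEOUT_INFINITE);
    screen->fence_reference(screen, &fence, NULL);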


@@ -121,7 +121,7 @@ static bool r300_end_query(struct pipe_context* pipe,
     if (q->type == PIPE_QUERY_GPU_FINISHED) {
         pb_reference(&q->buf, NULL);
-        r300_flush(pipe, RADEON_FLUSH_ASYNC,
+        r300_flush(pipe, PIPE_FLUSH_ASYNC,
                    (struct pipe_fence_handle**)&q->buf);
         return true;
     }


@@ -216,7 +216,7 @@ static boolean r300_reserve_cs_dwords(struct r300_context *r300,
     /* Reserve requested CS space. */
     if (!r300->rws->cs_check_space(r300->cs, cs_dwords)) {
-        r300_flush(&r300->context, RADEON_FLUSH_ASYNC, NULL);
+        r300_flush(&r300->context, PIPE_FLUSH_ASYNC, NULL);
         flushed = TRUE;
     }


@@ -623,7 +623,7 @@ static void compute_emit_cs(struct r600_context *rctx,
     /* make sure that the gfx ring is only one active */
     if (radeon_emitted(rctx->b.dma.cs, 0)) {
-        rctx->b.dma.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+        rctx->b.dma.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
     }
 
     /* Initialize all the compute-related registers.


@@ -66,7 +66,7 @@ void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
         ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
                                          resource->buf, rusage)) {
         if (usage & PIPE_TRANSFER_DONTBLOCK) {
-            ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+            ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
             return NULL;
         } else {
             ctx->gfx.flush(ctx, 0, NULL);
@@ -77,7 +77,7 @@ void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
         ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
                                          resource->buf, rusage)) {
         if (usage & PIPE_TRANSFER_DONTBLOCK) {
-            ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+            ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
             return NULL;
         } else {
             ctx->dma.flush(ctx, 0, NULL);


@@ -108,7 +108,7 @@ radeon_add_to_buffer_list_check_mem(struct r600_common_context *rctx,
         !radeon_cs_memory_below_limit(rctx->screen, ring->cs,
                                       rctx->vram + rbo->vram_usage,
                                       rctx->gtt + rbo->gart_usage))
-        ring->flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+        ring->flush(rctx, PIPE_FLUSH_ASYNC, NULL);
 
     return radeon_add_to_buffer_list(rctx, ring, rbo, usage, priority);
 }


@@ -35,13 +35,13 @@ void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
 {
     /* Flush the DMA IB if it's not empty. */
     if (radeon_emitted(ctx->b.dma.cs, 0))
-        ctx->b.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+        ctx->b.dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
 
     if (!radeon_cs_memory_below_limit(ctx->b.screen, ctx->b.gfx.cs,
                                       ctx->b.vram, ctx->b.gtt)) {
         ctx->b.gtt = 0;
         ctx->b.vram = 0;
-        ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+        ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
         return;
     }
 
     /* all will be accounted once relocation are emited */
@@ -82,7 +82,7 @@ void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
     /* Flush if there's not enough space. */
     if (!ctx->b.ws->cs_check_space(ctx->b.gfx.cs, num_dw)) {
-        ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+        ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
     }
 }
@@ -439,7 +439,7 @@ void r600_emit_pfp_sync_me(struct r600_context *rctx)
                       &offset, (struct pipe_resource**)&buf);
     if (!buf) {
         /* This is too heavyweight, but will work. */
-        rctx->b.gfx.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+        rctx->b.gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
         return;
     }


@@ -270,7 +270,7 @@ void r600_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
          (src &&
           ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, src->buf,
                                            RADEON_USAGE_WRITE))))
-        ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+        ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
 
     /* Flush if there's not enough space, or if the memory usage per IB
      * is too large.
@@ -288,7 +288,7 @@ void r600_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
     if (!ctx->ws->cs_check_space(ctx->dma.cs, num_dw) ||
         ctx->dma.cs->used_vram + ctx->dma.cs->used_gart > 64 * 1024 * 1024 ||
         !radeon_cs_memory_below_limit(ctx->screen, ctx->dma.cs, vram, gtt)) {
-        ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+        ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
         assert((num_dw + ctx->dma.cs->current.cdw) <= ctx->dma.cs->current.max_dw);
     }
@@ -400,10 +400,10 @@ static void r600_flush_from_st(struct pipe_context *ctx,
     struct pipe_fence_handle *gfx_fence = NULL;
     struct pipe_fence_handle *sdma_fence = NULL;
     bool deferred_fence = false;
-    unsigned rflags = RADEON_FLUSH_ASYNC;
+    unsigned rflags = PIPE_FLUSH_ASYNC;
 
     if (flags & PIPE_FLUSH_END_OF_FRAME)
-        rflags |= RADEON_FLUSH_END_OF_FRAME;
+        rflags |= PIPE_FLUSH_END_OF_FRAME;
 
     /* DMA IBs are preambles to gfx IBs, therefore must be flushed first. */
     if (rctx->dma.cs)
@@ -626,12 +626,12 @@ static bool r600_resource_commit(struct pipe_context *pctx,
     if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
         ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
                                          res->buf, RADEON_USAGE_READWRITE)) {
-        ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+        ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
     }
     if (radeon_emitted(ctx->dma.cs, 0) &&
         ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
                                          res->buf, RADEON_USAGE_READWRITE)) {
-        ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+        ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
     }
 
     ctx->ws->cs_sync_flush(ctx->dma.cs);
@@ -1194,7 +1194,7 @@ static boolean r600_fence_finish(struct pipe_screen *screen,
     if (rctx &&
         rfence->gfx_unflushed.ctx == rctx &&
         rfence->gfx_unflushed.ib_index == rctx->num_gfx_cs_flushes) {
-        rctx->gfx.flush(rctx, timeout ? 0 : RADEON_FLUSH_ASYNC, NULL);
+        rctx->gfx.flush(rctx, timeout ? 0 : PIPE_FLUSH_ASYNC, NULL);
         rfence->gfx_unflushed.ctx = NULL;
 
         if (!timeout)

@@ -1835,7 +1835,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
     /* make sure that the gfx ring is only one active */
     if (radeon_emitted(rctx->b.dma.cs, 0)) {
-        rctx->b.dma.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+        rctx->b.dma.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
     }
 
     /* Re-emit the framebuffer state if needed. */


@@ -1500,7 +1500,7 @@ static void r600_texture_transfer_unmap(struct pipe_context *ctx,
      * The result is that the kernel memory manager is never a bottleneck.
      */
     if (rctx->num_alloc_tex_transfer_bytes > rctx->screen->info.gart_size / 4) {
-        rctx->gfx.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+        rctx->gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
         rctx->num_alloc_tex_transfer_bytes = 0;
     }


@@ -1259,7 +1259,7 @@ static void ruvd_end_frame(struct pipe_video_codec *decoder,
              FB_BUFFER_OFFSET + dec->fb_size, RADEON_USAGE_READ, RADEON_DOMAIN_GTT);
     set_reg(dec, dec->reg.cntl, 1);
 
-    flush(dec, RADEON_FLUSH_ASYNC);
+    flush(dec, PIPE_FLUSH_ASYNC);
     next_buffer(dec);
 }


@@ -59,7 +59,7 @@
  */
 static void flush(struct rvce_encoder *enc)
 {
-    enc->ws->cs_flush(enc->cs, RADEON_FLUSH_ASYNC, NULL);
+    enc->ws->cs_flush(enc->cs, PIPE_FLUSH_ASYNC, NULL);
     enc->task_info_idx = 0;
     enc->bs_idx = 0;
 }


@@ -64,7 +64,7 @@ void *si_buffer_map_sync_with_rings(struct r600_common_context *ctx,
         ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
                                          resource->buf, rusage)) {
         if (usage & PIPE_TRANSFER_DONTBLOCK) {
-            ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+            ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
             return NULL;
         } else {
             ctx->gfx.flush(ctx, 0, NULL);
@@ -75,7 +75,7 @@ void *si_buffer_map_sync_with_rings(struct r600_common_context *ctx,
         ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
                                          resource->buf, rusage)) {
         if (usage & PIPE_TRANSFER_DONTBLOCK) {
-            ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+            ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
             return NULL;
         } else {
             ctx->dma.flush(ctx, 0, NULL);


@@ -106,7 +106,7 @@ radeon_add_to_buffer_list_check_mem(struct r600_common_context *rctx,
         !radeon_cs_memory_below_limit(rctx->screen, ring->cs,
                                       rctx->vram + rbo->vram_usage,
                                       rctx->gtt + rbo->gart_usage))
-        ring->flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+        ring->flush(rctx, PIPE_FLUSH_ASYNC, NULL);
 
     return radeon_add_to_buffer_list(rctx, ring, rbo, usage, priority);
 }


@@ -189,7 +189,7 @@ void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
          (src &&
           ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, src->buf,
                                            RADEON_USAGE_WRITE))))
-        ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+        ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
 
     /* Flush if there's not enough space, or if the memory usage per IB
      * is too large.
@@ -207,7 +207,7 @@ void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
     if (!ctx->ws->cs_check_space(ctx->dma.cs, num_dw) ||
         ctx->dma.cs->used_vram + ctx->dma.cs->used_gart > 64 * 1024 * 1024 ||
         !radeon_cs_memory_below_limit(ctx->screen, ctx->dma.cs, vram, gtt)) {
-        ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+        ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
         assert((num_dw + ctx->dma.cs->current.cdw) <= ctx->dma.cs->current.max_dw);
     }
@@ -386,12 +386,12 @@ static bool r600_resource_commit(struct pipe_context *pctx,
     if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
         ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
                                          res->buf, RADEON_USAGE_READWRITE)) {
-        ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+        ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
     }
     if (radeon_emitted(ctx->dma.cs, 0) &&
         ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
                                          res->buf, RADEON_USAGE_READWRITE)) {
-        ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+        ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
     }
 
     ctx->ws->cs_sync_flush(ctx->dma.cs);


@@ -1834,7 +1834,7 @@ static void r600_texture_transfer_unmap(struct pipe_context *ctx,
      * The result is that the kernel memory manager is never a bottleneck.
      */
     if (rctx->num_alloc_tex_transfer_bytes > rctx->screen->info.gart_size / 4) {
-        rctx->gfx.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+        rctx->gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
         rctx->num_alloc_tex_transfer_bytes = 0;
     }


@@ -1321,7 +1321,7 @@ static void ruvd_end_frame(struct pipe_video_codec *decoder,
              FB_BUFFER_OFFSET + dec->fb_size, RADEON_USAGE_READ, RADEON_DOMAIN_GTT);
     set_reg(dec, dec->reg.cntl, 1);
 
-    flush(dec, RADEON_FLUSH_ASYNC);
+    flush(dec, PIPE_FLUSH_ASYNC);
     next_buffer(dec);
 }


@@ -53,7 +53,7 @@
  */
 static void flush(struct rvce_encoder *enc)
 {
-    enc->ws->cs_flush(enc->cs, RADEON_FLUSH_ASYNC, NULL);
+    enc->ws->cs_flush(enc->cs, PIPE_FLUSH_ASYNC, NULL);
     enc->task_info_idx = 0;
     enc->bs_idx = 0;
 }


@@ -1158,7 +1158,7 @@ static void radeon_dec_end_frame(struct pipe_video_codec *decoder,
              FB_BUFFER_OFFSET + FB_BUFFER_SIZE, RADEON_USAGE_READ, RADEON_DOMAIN_GTT);
     set_reg(dec, RDECODE_ENGINE_CNTL, 1);
 
-    flush(dec, RADEON_FLUSH_ASYNC);
+    flush(dec, PIPE_FLUSH_ASYNC);
     next_buffer(dec);
 }


@@ -56,7 +56,7 @@ static void radeon_vcn_enc_get_param(struct radeon_encoder *enc, struct pipe_h26
 static void flush(struct radeon_encoder *enc)
 {
-    enc->ws->cs_flush(enc->cs, RADEON_FLUSH_ASYNC, NULL);
+    enc->ws->cs_flush(enc->cs, PIPE_FLUSH_ASYNC, NULL);
 }
 
 static void radeon_enc_flush(struct pipe_video_codec *encoder)


@@ -31,9 +31,6 @@
#include "amd/common/ac_gpu_info.h"
#include "amd/common/ac_surface.h"
#define RADEON_FLUSH_ASYNC (1 << 0)
#define RADEON_FLUSH_END_OF_FRAME (1 << 1)
/* Tiling flags. */
enum radeon_bo_layout {
RADEON_LAYOUT_LINEAR = 0,
@@ -531,7 +528,7 @@ struct radeon_winsys {
      * Flush a command stream.
      *
      * \param cs          A command stream to flush.
-     * \param flags,      RADEON_FLUSH_ASYNC or 0.
+     * \param flags,      PIPE_FLUSH_* flags.
      * \param fence       Pointer to a fence. If non-NULL, a fence is inserted
      *                    after the CS and is returned through this parameter.
      * \return Negative POSIX error code or 0 for success.
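
With the RADEON_FLUSH_* defines gone, radeon_winsys::cs_flush consumes the
gallium flags directly. A sketch of a winsys-level call matching the doc
comment above (the ws, cs and fence variables are assumed to exist in the
surrounding driver code):

    struct pipe_fence_handle *fence = NULL;
    int error = ws->cs_flush(cs, PIPE_FLUSH_ASYNC, &fence);

    if (error != 0) {
        /* Negative POSIX error code, per the doc comment above. */
        fprintf(stderr, "CS flush failed: %d\n", error);
    }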


@@ -271,7 +271,7 @@ static boolean si_fence_finish(struct pipe_screen *screen,
          * not going to wait.
          */
         threaded_context_unwrap_sync(ctx);
-        sctx->b.gfx.flush(&sctx->b, timeout ? 0 : RADEON_FLUSH_ASYNC, NULL);
+        sctx->b.gfx.flush(&sctx->b, timeout ? 0 : PIPE_FLUSH_ASYNC, NULL);
         rfence->gfx_unflushed.ctx = NULL;
 
         if (!timeout)
@@ -378,10 +378,10 @@ static void si_flush_from_st(struct pipe_context *ctx,
     struct pipe_fence_handle *sdma_fence = NULL;
     bool deferred_fence = false;
     struct si_fine_fence fine = {};
-    unsigned rflags = RADEON_FLUSH_ASYNC;
+    unsigned rflags = PIPE_FLUSH_ASYNC;
 
     if (flags & PIPE_FLUSH_END_OF_FRAME)
-        rflags |= RADEON_FLUSH_END_OF_FRAME;
+        rflags |= PIPE_FLUSH_END_OF_FRAME;
 
     if (flags & (PIPE_FLUSH_TOP_OF_PIPE | PIPE_FLUSH_BOTTOM_OF_PIPE)) {
         assert(flags & PIPE_FLUSH_DEFERRED);

@@ -53,7 +53,7 @@ void si_need_cs_space(struct si_context *ctx)
                                        ctx->b.vram, ctx->b.gtt))) {
         ctx->b.gtt = 0;
         ctx->b.vram = 0;
-        ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+        ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
         return;
     }
     ctx->b.gtt = 0;
@@ -63,7 +63,7 @@ void si_need_cs_space(struct si_context *ctx)
      * and just flush if there is not enough space left.
      */
     if (!ctx->b.ws->cs_check_space(cs, 2048))
-        ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+        ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
 }
 
 void si_context_gfx_flush(void *context, unsigned flags,
@@ -83,7 +83,7 @@ void si_context_gfx_flush(void *context, unsigned flags,
         return;
 
     if (ctx->screen->debug_flags & DBG(CHECK_VM))
-        flags &= ~RADEON_FLUSH_ASYNC;
+        flags &= ~PIPE_FLUSH_ASYNC;
 
     /* If the state tracker is flushing the GFX IB, r600_flush_from_st is
      * responsible for flushing the DMA IB and merging the fences from both.


@@ -2822,7 +2822,7 @@ static bool si_update_gs_ring_buffers(struct si_context *sctx)
     /* Flush the context to re-emit both init_config states. */
     sctx->b.initial_gfx_cs_size = 0; /* force flush */
-    si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);
+    si_context_gfx_flush(sctx, PIPE_FLUSH_ASYNC, NULL);
 
     /* Set ring bindings. */
     if (sctx->esgs_ring) {
@@ -3161,7 +3161,7 @@ static void si_init_tess_factor_ring(struct si_context *sctx)
      */
     si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
     sctx->b.initial_gfx_cs_size = 0; /* force flush */
-    si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);
+    si_context_gfx_flush(sctx, PIPE_FLUSH_ASYNC, NULL);
 }
 
 /**


@@ -235,7 +235,7 @@ static void *amdgpu_bo_map(struct pb_buffer *buf,
              * Only check whether the buffer is being used for write. */
             if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                                RADEON_USAGE_WRITE)) {
-                cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
+                cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
                 return NULL;
             }
@@ -245,7 +245,7 @@ static void *amdgpu_bo_map(struct pb_buffer *buf,
             }
         } else {
             if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
-                cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
+                cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
                 return NULL;
             }


@@ -1493,7 +1493,7 @@ static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
     /* The submission has been queued, unlock the fence now. */
     simple_mtx_unlock(&ws->bo_fence_lock);
 
-    if (!(flags & RADEON_FLUSH_ASYNC)) {
+    if (!(flags & PIPE_FLUSH_ASYNC)) {
         amdgpu_cs_sync_flush(rcs);
         error_code = cur->error_code;
     }


@@ -490,7 +490,7 @@ static void *radeon_bo_map(struct pb_buffer *buf,
              *
              * Only check whether the buffer is being used for write. */
             if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
-                cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
+                cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
                 return NULL;
             }
@@ -500,7 +500,7 @@ static void *radeon_bo_map(struct pb_buffer *buf,
             }
         } else {
             if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
-                cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
+                cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
                 return NULL;
             }


@@ -399,7 +399,7 @@ static bool radeon_drm_cs_validate(struct radeon_winsys_cs *rcs)
     /* Flush if there are any relocs. Clean up otherwise. */
     if (cs->csc->num_relocs) {
-        cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
+        cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
     } else {
         radeon_cs_context_cleanup(cs->csc);
         cs->base.used_vram = 0;
@@ -655,7 +655,7 @@ static int radeon_drm_cs_flush(struct radeon_winsys_cs *rcs,
         cs->cst->flags[0] |= RADEON_CS_USE_VM;
         cs->cst->cs.num_chunks = 3;
     }
-    if (flags & RADEON_FLUSH_END_OF_FRAME) {
+    if (flags & PIPE_FLUSH_END_OF_FRAME) {
         cs->cst->flags[0] |= RADEON_CS_END_OF_FRAME;
         cs->cst->cs.num_chunks = 3;
     }
@@ -669,7 +669,7 @@ static int radeon_drm_cs_flush(struct radeon_winsys_cs *rcs,
     if (util_queue_is_initialized(&cs->ws->cs_queue)) {
         util_queue_add_job(&cs->ws->cs_queue, cs, &cs->flush_completed,
                            radeon_drm_cs_emit_ioctl_oneshot, NULL);
-        if (!(flags & RADEON_FLUSH_ASYNC))
+        if (!(flags & PIPE_FLUSH_ASYNC))
             radeon_drm_cs_sync_flush(rcs);
     } else {
         radeon_drm_cs_emit_ioctl_oneshot(cs, 0);