panfrost: Do fine-grained flushing when preparing BO for CPU accesses
We don't have to flush all batches when we're only interested in reading/writing a specific BO. Thanks to the panfrost_flush_batches_accessing_bo() and panfrost_bo_wait() helpers, we can now flush only the batches touching the BO we want to access from the CPU.

This fixes the dEQP-GLES2.functional.fbo.render.texsubimage.* tests.

Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
This commit is contained in:
@@ -51,10 +51,6 @@ dEQP-GLES2.functional.fbo.render.shared_colorbuffer.tex2d_rgb_depth_component16
 dEQP-GLES2.functional.fbo.render.shared_depthbuffer.rbo_rgb565_depth_component16 Fail
 dEQP-GLES2.functional.fbo.render.shared_depthbuffer.tex2d_rgba_depth_component16 Fail
 dEQP-GLES2.functional.fbo.render.shared_depthbuffer.tex2d_rgb_depth_component16 Fail
-dEQP-GLES2.functional.fbo.render.texsubimage.after_render_tex2d_rgba Fail
-dEQP-GLES2.functional.fbo.render.texsubimage.after_render_tex2d_rgb Fail
-dEQP-GLES2.functional.fbo.render.texsubimage.between_render_tex2d_rgba Fail
-dEQP-GLES2.functional.fbo.render.texsubimage.between_render_tex2d_rgb Fail
 dEQP-GLES2.functional.fragment_ops.depth_stencil.random.0 Fail
 dEQP-GLES2.functional.fragment_ops.depth_stencil.random.10 Fail
 dEQP-GLES2.functional.fragment_ops.depth_stencil.random.11 Fail
@@ -578,10 +578,8 @@ panfrost_transfer_map(struct pipe_context *pctx,
|
||||
is_bound |= fb->cbufs[c]->texture == resource;
|
||||
}
|
||||
|
||||
-        if (is_bound && (usage & PIPE_TRANSFER_READ)) {
-                assert(level == 0);
-                panfrost_flush_all_batches(ctx, true);
-        }
+        if (is_bound && (usage & PIPE_TRANSFER_READ))
+                assert(level == 0);
|
||||
|
||||
/* TODO: Respect usage flags */
|
||||
|
||||
@@ -594,11 +592,11 @@ panfrost_transfer_map(struct pipe_context *pctx,
|
||||
/* No flush for writes to uninitialized */
|
||||
} else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
|
||||
if (usage & PIPE_TRANSFER_WRITE) {
|
||||
/* STUB: flush reading */
|
||||
//printf("debug: missed reading flush %d\n", resource->target);
|
||||
panfrost_flush_batches_accessing_bo(ctx, bo, PAN_BO_ACCESS_RW);
|
||||
panfrost_bo_wait(bo, INT64_MAX, PAN_BO_ACCESS_RW);
|
||||
} else if (usage & PIPE_TRANSFER_READ) {
|
||||
/* STUB: flush writing */
|
||||
//printf("debug: missed writing flush %d (%d-%d)\n", resource->target, box->x, box->x + box->width);
|
||||
panfrost_flush_batches_accessing_bo(ctx, bo, PAN_BO_ACCESS_WRITE);
|
||||
panfrost_bo_wait(bo, INT64_MAX, PAN_BO_ACCESS_WRITE);
|
||||
} else {
|
||||
/* Why are you even mapping?! */
|
||||
}
|
||||
@@ -748,11 +746,8 @@ panfrost_generate_mipmap(
|
||||
* reorder-type optimizations in place. But for now prioritize
|
||||
* correctness. */
|
||||
|
||||
-        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
-        bool has_draws = batch->last_job.gpu;
-
-        if (has_draws)
-                panfrost_flush_all_batches(ctx, true);
+        panfrost_flush_batches_accessing_bo(ctx, rsrc->bo, PAN_BO_ACCESS_RW);
+        panfrost_bo_wait(rsrc->bo, INT64_MAX, PAN_BO_ACCESS_RW);
|
||||
|
||||
/* We've flushed the original buffer if needed, now trigger a blit */
|
||||
|
||||
@@ -765,8 +760,10 @@ panfrost_generate_mipmap(
|
||||
/* If the blit was successful, flush once more. If it wasn't, well, let
|
||||
* the state tracker deal with it. */
|
||||
|
||||
-        if (blit_res)
-                panfrost_flush_all_batches(ctx, true);
+        if (blit_res) {
+                panfrost_flush_batches_accessing_bo(ctx, rsrc->bo, PAN_BO_ACCESS_WRITE);
+                panfrost_bo_wait(rsrc->bo, INT64_MAX, PAN_BO_ACCESS_WRITE);
+        }
|
||||
|
||||
return blit_res;
|
||||
}
|
||||
|
Reference in New Issue
Block a user