zink: more explicitly track/check rp optimizing per-context

if tc creation fails for whatever reason, rp optimizing must be
marked as disabled for that context to avoid erroneous assumptions
about rp operation

fixes #8787

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/22319>
Author:       Mike Blumenkrantz
Date:         2023-04-05 11:35:53 -04:00
Committed by: Marge Bot
Parent:       2c78cbbfe1
Commit:       215beee16d
6 changed files with 15 additions and 13 deletions
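
For context, a minimal standalone sketch of the gating pattern the commit applies, using simplified, hypothetical names (struct context, create_threaded_context()) rather than the real zink types: the per-context flag is set only when threaded-context creation succeeds, and renderpass-optimizing paths test that flag instead of the screen-wide capability.

/* Sketch only: simplified stand-ins for the zink/tc types, not the actual API. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct screen {
   bool track_renderpasses;   /* screen-wide capability/workaround */
};

struct context {
   struct screen *screen;
   void *tc;                  /* threaded context; NULL if creation failed */
   bool track_renderpasses;   /* per-context: true only if tc exists */
};

/* stand-in for threaded-context creation; may fail and return NULL */
static void *
create_threaded_context(bool simulate_failure)
{
   return simulate_failure ? NULL : malloc(1);
}

static struct context *
context_create(struct screen *screen, bool simulate_tc_failure)
{
   struct context *ctx = calloc(1, sizeof(*ctx));
   if (!ctx)
      return NULL;
   ctx->screen = screen;
   ctx->tc = create_threaded_context(simulate_tc_failure);
   /* rp optimizing is only enabled when tc creation succeeded;
    * otherwise the flag stays false and optimized paths are skipped */
   if (ctx->tc)
      ctx->track_renderpasses = screen->track_renderpasses;
   return ctx;
}

static void
start_renderpass(struct context *ctx)
{
   /* check the per-context flag, not the screen capability, so a context
    * without a threaded context never assumes rp info is available */
   if (ctx->track_renderpasses)
      puts("using tc renderpass info");
   else
      puts("non-optimized renderpass handling");
}

int
main(void)
{
   struct screen scr = { .track_renderpasses = true };
   struct context *ok = context_create(&scr, false);
   struct context *no_tc = context_create(&scr, true);
   if (!ok || !no_tc)
      return 1;
   start_renderpass(ok);      /* prints "using tc renderpass info" */
   start_renderpass(no_tc);   /* prints "non-optimized renderpass handling" */
   free(ok->tc);
   free(ok);
   free(no_tc);
   return 0;
}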

@@ -630,7 +630,7 @@ zink_end_batch(struct zink_context *ctx, struct zink_batch *batch)
 struct zink_screen *screen = zink_screen(ctx->base.screen);
-if (!screen->driver_workarounds.track_renderpasses)
+if (ctx->tc && !ctx->track_renderpasses)
 tc_driver_internal_flush_notify(ctx->tc);
 struct zink_batch_state *bs;

@@ -232,7 +232,7 @@ zink_clear(struct pipe_context *pctx,
 if (batch->in_rp) {
 if (buffers & PIPE_CLEAR_DEPTHSTENCIL && (ctx->zsbuf_unused || ctx->zsbuf_readonly)) {
 /* this will need a layout change */
-assert(!zink_screen(ctx->base.screen)->driver_workarounds.track_renderpasses);
+assert(!ctx->track_renderpasses);
 zink_batch_no_rp(ctx);
 } else {
 clear_in_rp(pctx, buffers, scissor_state, pcolor, depth, stencil);
@@ -311,11 +311,11 @@ zink_clear(struct pipe_context *pctx,
 clear->zs.bits |= (buffers & PIPE_CLEAR_DEPTHSTENCIL);
 if (zink_fb_clear_first_needs_explicit(fb_clear)) {
 ctx->rp_clears_enabled &= ~PIPE_CLEAR_DEPTHSTENCIL;
-if (!zink_screen(ctx->base.screen)->driver_workarounds.track_renderpasses)
+if (!ctx->track_renderpasses)
 ctx->dynamic_fb.tc_info.zsbuf_clear_partial = true;
 } else {
 ctx->rp_clears_enabled |= (buffers & PIPE_CLEAR_DEPTHSTENCIL);
-if (!zink_screen(ctx->base.screen)->driver_workarounds.track_renderpasses)
+if (!ctx->track_renderpasses)
 ctx->dynamic_fb.tc_info.zsbuf_clear = true;
 }
 }

@@ -63,7 +63,7 @@
 static void
 update_tc_info(struct zink_context *ctx)
 {
-if (ctx->tc && zink_screen(ctx->base.screen)->driver_workarounds.track_renderpasses) {
+if (ctx->track_renderpasses) {
 const struct tc_renderpass_info *info = threaded_context_get_renderpass_info(ctx->tc);
 ctx->rp_changed |= ctx->dynamic_fb.tc_info.data != info->data;
 ctx->dynamic_fb.tc_info.data = info->data;
@@ -2599,7 +2599,7 @@ begin_rendering(struct zink_context *ctx)
 bool changed_layout = false;
 bool changed_size = false;
 bool zsbuf_used = zink_is_zsbuf_used(ctx);
-bool use_tc_info = !ctx->blitting && ctx->tc && zink_screen(ctx->base.screen)->driver_workarounds.track_renderpasses;
+bool use_tc_info = !ctx->blitting && ctx->track_renderpasses;
 if (ctx->rp_changed || ctx->rp_layout_changed || (!ctx->batch.in_rp && ctx->rp_loadop_changed)) {
 /* init imageviews, base loadOp, formats */
 for (int i = 0; i < ctx->fb_state.nr_cbufs; i++) {
@@ -2810,7 +2810,7 @@ zink_batch_rp(struct zink_context *ctx)
 update_tc_info(ctx);
 ctx->rp_tc_info_updated = false;
 }
-bool maybe_has_query_ends = !ctx->tc || !zink_screen(ctx->base.screen)->driver_workarounds.track_renderpasses || ctx->dynamic_fb.tc_info.has_query_ends;
+bool maybe_has_query_ends = !ctx->track_renderpasses || ctx->dynamic_fb.tc_info.has_query_ends;
 ctx->queries_in_rp = maybe_has_query_ends;
 /* if possible, out-of-renderpass resume any queries that were stopped when previous rp ended */
 if (!ctx->queries_disabled && !maybe_has_query_ends) {
@@ -2878,7 +2878,7 @@ zink_batch_no_rp(struct zink_context *ctx)
 {
 if (!ctx->batch.in_rp)
 return;
-if (zink_screen(ctx->base.screen)->driver_workarounds.track_renderpasses && !ctx->blitting)
+if (ctx->track_renderpasses && !ctx->blitting)
 tc_renderpass_info_reset(&ctx->dynamic_fb.tc_info);
 zink_batch_no_rp_safe(ctx);
 }
@@ -2922,7 +2922,7 @@ zink_prep_fb_attachment(struct zink_context *ctx, struct zink_surface *surf, uns
 if (ctx->blitting)
 return surf->image_view;
 VkImageLayout layout;
-if (ctx->tc && zink_screen(ctx->base.screen)->driver_workarounds.track_renderpasses && !ctx->blitting) {
+if (ctx->track_renderpasses && !ctx->blitting) {
 layout = zink_tc_renderpass_info_parse(ctx, &ctx->dynamic_fb.tc_info, i < ctx->fb_state.nr_cbufs ? i : PIPE_MAX_COLOR_BUFS, &pipeline, &access);
 assert(i < ctx->fb_state.nr_cbufs || layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL || !zink_fb_clear_enabled(ctx, PIPE_MAX_COLOR_BUFS));
 if (i == ctx->fb_state.nr_cbufs && zink_fb_clear_enabled(ctx, PIPE_MAX_COLOR_BUFS))
@@ -3272,7 +3272,7 @@ unbind_fb_surface(struct zink_context *ctx, struct pipe_surface *surf, unsigned
 res->fb_binds &= ~BITFIELD_BIT(idx);
 /* this is called just before the resource loses a reference, so a refcount==1 means the resource will be destroyed */
 if (!res->fb_bind_count && res->base.b.reference.count > 1) {
-if (ctx->tc && zink_screen(ctx->base.screen)->driver_workarounds.track_renderpasses && !ctx->blitting) {
+if (ctx->track_renderpasses && !ctx->blitting) {
 if (!(res->base.b.bind & PIPE_BIND_DISPLAY_TARGET) && util_format_is_depth_or_stencil(surf->format))
 /* assume that all depth buffers which are not swapchain images will be used for sampling to avoid splitting renderpasses */
 zink_screen(ctx->base.screen)->image_barrier(ctx, res, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
@@ -3631,7 +3631,7 @@ zink_flush(struct pipe_context *pctx,
 check_device_lost(ctx);
 }
 }
-if (!screen->driver_workarounds.track_renderpasses)
+if (ctx->tc && !screen->driver_workarounds.track_renderpasses)
 tc_driver_internal_flush_notify(ctx->tc);
 } else {
 fence = &batch->state->fence;
@@ -5085,6 +5085,7 @@ zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
 &ctx->tc);
 if (tc && (struct zink_context*)tc != ctx) {
+ctx->track_renderpasses = screen->driver_workarounds.track_renderpasses;
 threaded_context_init_bytes_mapped_limit(tc, 4);
 ctx->base.set_context_param = zink_set_context_param;
 }

@@ -1785,7 +1785,7 @@ zink_bind_fs_state(struct pipe_context *pctx,
 if (shadow_mask != ctx->gfx_stages[MESA_SHADER_FRAGMENT]->fs.legacy_shadow_mask &&
 !zink_screen(pctx->screen)->driver_workarounds.needs_zs_shader_swizzle)
 zink_update_shadow_samplerviews(ctx, shadow_mask | ctx->gfx_stages[MESA_SHADER_FRAGMENT]->fs.legacy_shadow_mask);
-if (!zink_screen(ctx->base.screen)->driver_workarounds.track_renderpasses && !ctx->blitting)
+if (!ctx->track_renderpasses && !ctx->blitting)
 zink_parse_tc_info(ctx);
 }
 zink_update_fbfetch(ctx);

@@ -454,7 +454,7 @@ get_render_pass(struct zink_context *ctx)
 struct zink_render_pass_state state = {0};
 uint32_t clears = 0;
 bool have_zsbuf = fb->zsbuf && zink_is_zsbuf_used(ctx);
-bool use_tc_info = !ctx->blitting && ctx->tc && zink_screen(ctx->base.screen)->driver_workarounds.track_renderpasses;
+bool use_tc_info = !ctx->blitting && ctx->track_renderpasses;
 state.samples = fb->samples > 0;
 for (int i = 0; i < fb->nr_cbufs; i++) {

@@ -1680,6 +1680,7 @@ struct zink_context {
 struct zink_batch_state *last_free_batch_state; //for appending
 bool oom_flush;
 bool oom_stall;
+bool track_renderpasses;
 struct zink_batch batch;
 unsigned shader_has_inlinable_uniforms_mask;