zink: move renderpass handling to zink_render_pass.c
no functional changes

Reviewed-by: Dave Airlie <airlied@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/16476>
committed by Marge Bot
parent 4457a9388a
commit b181080d9f
@@ -21,12 +21,17 @@
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "zink_context.h"
#include "zink_framebuffer.h"
#include "zink_kopper.h"
#include "zink_query.h"
#include "zink_render_pass.h"
#include "zink_resource.h"
#include "zink_screen.h"

#include "util/u_memory.h"
#include "util/u_string.h"
#include "util/u_blitter.h"


static VkRenderPass
@@ -264,3 +269,418 @@ zink_render_pass_attachment_get_barrier_info(const struct zink_render_pass *rp,
   *access |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
   return VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
}


static size_t
rp_state_size(const struct zink_render_pass_pipeline_state *pstate)
{
   return offsetof(struct zink_render_pass_pipeline_state, attachments) +
          sizeof(pstate->attachments[0]) * pstate->num_attachments;
}

static uint32_t
hash_rp_state(const void *key)
{
   const struct zink_render_pass_pipeline_state *s = key;
   return _mesa_hash_data(key, rp_state_size(s));
}

static bool
equals_rp_state(const void *a, const void *b)
{
   return !memcmp(a, b, rp_state_size(a));
}

static uint32_t
hash_render_pass_state(const void *key)
{
   struct zink_render_pass_state* s = (struct zink_render_pass_state*)key;
   return _mesa_hash_data(key, offsetof(struct zink_render_pass_state, rts) + sizeof(s->rts[0]) * s->num_rts);
}

static bool
equals_render_pass_state(const void *a, const void *b)
{
   const struct zink_render_pass_state *s_a = a, *s_b = b;
   if (s_a->num_rts != s_b->num_rts)
      return false;
   return memcmp(a, b, offsetof(struct zink_render_pass_state, rts) + sizeof(s_a->rts[0]) * s_a->num_rts) == 0;
}

static struct zink_render_pass *
get_render_pass(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   const struct pipe_framebuffer_state *fb = &ctx->fb_state;
   struct zink_render_pass_state state = {0};
   uint32_t clears = 0;
   state.swapchain_init = ctx->new_swapchain;
   state.samples = fb->samples > 0;

   u_foreach_bit(i, ctx->fbfetch_outputs)
      state.rts[i].fbfetch = true;

   for (int i = 0; i < fb->nr_cbufs; i++) {
      struct pipe_surface *surf = fb->cbufs[i];
      if (surf && !zink_use_dummy_attachments(ctx)) {
         struct zink_surface *transient = zink_transient_surface(surf);
         state.rts[i].format = zink_get_format(screen, surf->format);
         state.rts[i].samples = MAX3(transient ? transient->base.nr_samples : 0, surf->texture->nr_samples, 1);
         state.rts[i].clear_color = zink_fb_clear_enabled(ctx, i) && !zink_fb_clear_first_needs_explicit(&ctx->fb_clears[i]);
         clears |= !!state.rts[i].clear_color ? PIPE_CLEAR_COLOR0 << i : 0;
         state.rts[i].swapchain = surf->texture->bind & PIPE_BIND_DISPLAY_TARGET;
         if (transient) {
            state.num_cresolves++;
            state.rts[i].resolve = true;
            if (!state.rts[i].clear_color)
               state.msaa_expand_mask |= BITFIELD_BIT(i);
         }
      } else {
         state.rts[i].format = VK_FORMAT_R8G8B8A8_UNORM;
         state.rts[i].samples = fb->samples;
      }
      state.num_rts++;
   }
   state.num_cbufs = fb->nr_cbufs;
   assert(!state.num_cresolves || state.num_cbufs == state.num_cresolves);

   if (fb->zsbuf) {
      struct zink_resource *zsbuf = zink_resource(fb->zsbuf->texture);
      struct zink_framebuffer_clear *fb_clear = &ctx->fb_clears[PIPE_MAX_COLOR_BUFS];
      struct zink_surface *transient = zink_transient_surface(fb->zsbuf);
      state.rts[fb->nr_cbufs].format = zsbuf->format;
      state.rts[fb->nr_cbufs].samples = MAX3(transient ? transient->base.nr_samples : 0, fb->zsbuf->texture->nr_samples, 1);
      if (transient) {
         state.num_zsresolves = 1;
         state.rts[fb->nr_cbufs].resolve = true;
      }
      state.rts[fb->nr_cbufs].clear_color = zink_fb_clear_enabled(ctx, PIPE_MAX_COLOR_BUFS) &&
                                            !zink_fb_clear_first_needs_explicit(fb_clear) &&
                                            (zink_fb_clear_element(fb_clear, 0)->zs.bits & PIPE_CLEAR_DEPTH);
      state.rts[fb->nr_cbufs].clear_stencil = zink_fb_clear_enabled(ctx, PIPE_MAX_COLOR_BUFS) &&
                                              !zink_fb_clear_first_needs_explicit(fb_clear) &&
                                              (zink_fb_clear_element(fb_clear, 0)->zs.bits & PIPE_CLEAR_STENCIL);
      if (state.rts[fb->nr_cbufs].clear_color)
         clears |= PIPE_CLEAR_DEPTH;
      if (state.rts[fb->nr_cbufs].clear_stencil)
         clears |= PIPE_CLEAR_STENCIL;
      const uint64_t outputs_written = ctx->gfx_stages[PIPE_SHADER_FRAGMENT] ?
                                       ctx->gfx_stages[PIPE_SHADER_FRAGMENT]->nir->info.outputs_written : 0;
      bool needs_write_z = (ctx->dsa_state && ctx->dsa_state->hw_state.depth_write) ||
                           outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH);
      needs_write_z |= state.num_zsresolves || state.rts[fb->nr_cbufs].clear_color;

      bool needs_write_s = state.rts[fb->nr_cbufs].clear_stencil || outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL);
      if (!needs_write_z && (!ctx->dsa_state || !ctx->dsa_state->base.depth_enabled))
         /* depth sample, stencil write */
         state.rts[fb->nr_cbufs].mixed_zs = needs_write_s && zsbuf->bind_count[0];
      else
         /* depth write + sample */
         state.rts[fb->nr_cbufs].mixed_zs = needs_write_z && zsbuf->bind_count[0];
      state.rts[fb->nr_cbufs].needs_write = needs_write_z | needs_write_s;
      state.num_rts++;
   }
   state.have_zsbuf = fb->zsbuf != NULL;
   assert(clears == ctx->rp_clears_enabled);
   state.clears = clears;
   uint32_t hash = hash_render_pass_state(&state);
   struct hash_entry *entry = _mesa_hash_table_search_pre_hashed(ctx->render_pass_cache, hash,
                                                                 &state);
   struct zink_render_pass *rp;
   if (entry) {
      rp = entry->data;
      assert(rp->state.clears == clears);
   } else {
      struct zink_render_pass_pipeline_state pstate;
      pstate.samples = state.samples;
      rp = zink_create_render_pass(screen, &state, &pstate);
      if (!_mesa_hash_table_insert_pre_hashed(ctx->render_pass_cache, hash, &rp->state, rp))
         return NULL;
      bool found = false;
      struct set_entry *entry = _mesa_set_search_or_add(&ctx->render_pass_state_cache, &pstate, &found);
      struct zink_render_pass_pipeline_state *ppstate;
      if (!found) {
         entry->key = ralloc(ctx, struct zink_render_pass_pipeline_state);
         ppstate = (void*)entry->key;
         memcpy(ppstate, &pstate, rp_state_size(&pstate));
         ppstate->id = ctx->render_pass_state_cache.entries;
      }
      ppstate = (void*)entry->key;
      rp->pipeline_state = ppstate->id;
   }
   return rp;
}

static void
setup_framebuffer(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   struct zink_render_pass *rp = ctx->gfx_pipeline_state.render_pass;

   if (ctx->gfx_pipeline_state.sample_locations_enabled && ctx->sample_locations_changed) {
      unsigned samples = ctx->gfx_pipeline_state.rast_samples + 1;
      unsigned idx = util_logbase2_ceil(MAX2(samples, 1));
      VkExtent2D grid_size = screen->maxSampleLocationGridSize[idx];

      for (unsigned pixel = 0; pixel < grid_size.width * grid_size.height; pixel++) {
         for (unsigned sample = 0; sample < samples; sample++) {
            unsigned pixel_x = pixel % grid_size.width;
            unsigned pixel_y = pixel / grid_size.width;
            unsigned wi = pixel * samples + sample;
            unsigned ri = (pixel_y * grid_size.width + pixel_x % grid_size.width);
            ri = ri * samples + sample;
            ctx->vk_sample_locations[wi].x = (ctx->sample_locations[ri] & 0xf) / 16.0f;
            ctx->vk_sample_locations[wi].y = (16 - (ctx->sample_locations[ri] >> 4)) / 16.0f;
         }
      }
   }

   if (rp)
      ctx->rp_changed |= ctx->rp_clears_enabled != rp->state.clears;
   if (ctx->rp_changed)
      rp = get_render_pass(ctx);

   ctx->fb_changed |= rp != ctx->gfx_pipeline_state.render_pass;
   if (rp->pipeline_state != ctx->gfx_pipeline_state.rp_state) {
      ctx->gfx_pipeline_state.rp_state = rp->pipeline_state;
      ctx->gfx_pipeline_state.dirty = true;
   }

   ctx->rp_changed = false;
   bool has_swapchain = false;
   for (unsigned i = 0; i < ctx->fb_state.nr_cbufs; i++) {
      if (!ctx->fb_state.cbufs[i])
         continue;
      struct zink_resource *res = zink_resource(ctx->fb_state.cbufs[i]->texture);
      if (zink_is_swapchain(res)) {
         has_swapchain = true;
         if (zink_kopper_acquire(ctx, res, UINT64_MAX))
            zink_surface_swapchain_update(ctx, zink_csurface(ctx->fb_state.cbufs[i]));
      }
   }
   if (has_swapchain && (ctx->swapchain_size.width || ctx->swapchain_size.height)) {
      unsigned old_w = ctx->fb_state.width;
      unsigned old_h = ctx->fb_state.height;
      ctx->fb_state.width = ctx->swapchain_size.width;
      ctx->fb_state.height = ctx->swapchain_size.height;
      zink_kopper_fixup_depth_buffer(ctx);
      zink_update_framebuffer_state(ctx, old_w, old_h);
      ctx->swapchain_size.width = ctx->swapchain_size.height = 0;
   }

   if (!ctx->fb_changed)
      return;

   zink_init_framebuffer(screen, ctx->framebuffer, rp);
   ctx->fb_changed = false;
   ctx->gfx_pipeline_state.render_pass = rp;
}

static bool
prep_fb_attachments(struct zink_context *ctx, VkImageView *att)
{
   const unsigned cresolve_offset = ctx->fb_state.nr_cbufs + !!ctx->fb_state.zsbuf;
   unsigned num_resolves = 0;
   for (int i = 0; i < ctx->fb_state.nr_cbufs; i++) {
      struct zink_surface *surf = zink_csurface(ctx->fb_state.cbufs[i]);
      struct zink_surface *transient = zink_transient_surface(ctx->fb_state.cbufs[i]);
      if (surf && zink_resource(surf->base.texture)->obj != surf->obj) {
         zink_resource_rebind(ctx, zink_resource(surf->base.texture));
         surf = zink_csurface(ctx->fb_state.cbufs[i]);
      }
      if (transient) {
         att[i] = zink_prep_fb_attachment(ctx, transient, i);
         att[i + cresolve_offset] = zink_prep_fb_attachment(ctx, surf, i);
         num_resolves++;
      } else {
         att[i] = zink_prep_fb_attachment(ctx, surf, i);
         if (!att[i])
            /* dead swapchain */
            return false;
      }
   }
   if (ctx->fb_state.zsbuf) {
      struct zink_surface *surf = zink_csurface(ctx->fb_state.zsbuf);
      struct zink_surface *transient = zink_transient_surface(ctx->fb_state.zsbuf);
      if (transient) {
         att[ctx->fb_state.nr_cbufs] = zink_prep_fb_attachment(ctx, transient, ctx->fb_state.nr_cbufs);
         att[cresolve_offset + num_resolves] = zink_prep_fb_attachment(ctx, surf, ctx->fb_state.nr_cbufs);
      } else {
         att[ctx->fb_state.nr_cbufs] = zink_prep_fb_attachment(ctx, surf, ctx->fb_state.nr_cbufs);
      }
   }
   return true;
}

static unsigned
begin_render_pass(struct zink_context *ctx)
{
   struct zink_batch *batch = &ctx->batch;
   struct pipe_framebuffer_state *fb_state = &ctx->fb_state;

   VkRenderPassBeginInfo rpbi = {0};
   rpbi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
   rpbi.renderPass = ctx->gfx_pipeline_state.render_pass->render_pass;
   rpbi.renderArea.offset.x = 0;
   rpbi.renderArea.offset.y = 0;
   rpbi.renderArea.extent.width = fb_state->width;
   rpbi.renderArea.extent.height = fb_state->height;

   VkClearValue clears[PIPE_MAX_COLOR_BUFS + 1] = {0};
   unsigned clear_buffers = 0;
   uint32_t clear_validate = 0;
   for (int i = 0; i < fb_state->nr_cbufs; i++) {
      /* these are no-ops */
      if (!fb_state->cbufs[i] || !zink_fb_clear_enabled(ctx, i))
         continue;
      /* these need actual clear calls inside the rp */
      struct zink_framebuffer_clear_data *clear = zink_fb_clear_element(&ctx->fb_clears[i], 0);
      if (zink_fb_clear_needs_explicit(&ctx->fb_clears[i])) {
         clear_buffers |= (PIPE_CLEAR_COLOR0 << i);
         if (zink_fb_clear_count(&ctx->fb_clears[i]) < 2 ||
             zink_fb_clear_element_needs_explicit(clear))
            continue;
      }
      /* we now know there's one clear that can be done here */
      zink_fb_clear_util_unpack_clear_color(clear, fb_state->cbufs[i]->format, (void*)&clears[i].color);
      rpbi.clearValueCount = i + 1;
      clear_validate |= PIPE_CLEAR_COLOR0 << i;
      assert(ctx->framebuffer->rp->state.clears);
   }
   if (fb_state->zsbuf && zink_fb_clear_enabled(ctx, PIPE_MAX_COLOR_BUFS)) {
      struct zink_framebuffer_clear *fb_clear = &ctx->fb_clears[PIPE_MAX_COLOR_BUFS];
      struct zink_framebuffer_clear_data *clear = zink_fb_clear_element(fb_clear, 0);
      if (!zink_fb_clear_element_needs_explicit(clear)) {
         clears[fb_state->nr_cbufs].depthStencil.depth = clear->zs.depth;
         clears[fb_state->nr_cbufs].depthStencil.stencil = clear->zs.stencil;
         rpbi.clearValueCount = fb_state->nr_cbufs + 1;
         clear_validate |= clear->zs.bits;
         assert(ctx->framebuffer->rp->state.clears);
      }
      if (zink_fb_clear_needs_explicit(fb_clear)) {
         for (int j = !zink_fb_clear_element_needs_explicit(clear);
              (clear_buffers & PIPE_CLEAR_DEPTHSTENCIL) != PIPE_CLEAR_DEPTHSTENCIL && j < zink_fb_clear_count(fb_clear);
              j++)
            clear_buffers |= zink_fb_clear_element(fb_clear, j)->zs.bits;
      }
   }
   assert(clear_validate == ctx->framebuffer->rp->state.clears);
   rpbi.pClearValues = &clears[0];
   rpbi.framebuffer = ctx->framebuffer->fb;

   assert(ctx->gfx_pipeline_state.render_pass && ctx->framebuffer);

   VkRenderPassAttachmentBeginInfo infos;
   VkImageView att[2 * (PIPE_MAX_COLOR_BUFS + 1)];
   infos.sType = VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO;
   infos.pNext = NULL;
   infos.attachmentCount = ctx->framebuffer->state.num_attachments;
   infos.pAttachments = att;
   if (!prep_fb_attachments(ctx, att))
      return 0;
#ifndef NDEBUG
   const unsigned cresolve_offset = ctx->fb_state.nr_cbufs + !!ctx->fb_state.zsbuf;
   for (int i = 0; i < ctx->fb_state.nr_cbufs; i++) {
      if (ctx->fb_state.cbufs[i]) {
         struct zink_surface *surf = zink_csurface(ctx->fb_state.cbufs[i]);
         struct zink_surface *transient = zink_transient_surface(ctx->fb_state.cbufs[i]);
         if (surf->base.format == ctx->fb_state.cbufs[i]->format) {
            if (transient) {
               assert(zink_resource(transient->base.texture)->obj->vkusage == ctx->framebuffer->state.infos[i].usage);
               assert(zink_resource(surf->base.texture)->obj->vkusage == ctx->framebuffer->state.infos[cresolve_offset].usage);
            } else {
               assert(zink_resource(surf->base.texture)->obj->vkusage == ctx->framebuffer->state.infos[i].usage);
            }
         }
      }
   }
   if (ctx->fb_state.zsbuf) {
      struct zink_surface *surf = zink_csurface(ctx->fb_state.zsbuf);
      struct zink_surface *transient = zink_transient_surface(ctx->fb_state.zsbuf);
      if (transient) {
         assert(zink_resource(transient->base.texture)->obj->vkusage == ctx->framebuffer->state.infos[ctx->fb_state.nr_cbufs].usage);
         assert(zink_resource(surf->base.texture)->obj->vkusage == ctx->framebuffer->state.infos[cresolve_offset].usage);
      } else {
         assert(zink_resource(surf->base.texture)->obj->vkusage == ctx->framebuffer->state.infos[ctx->fb_state.nr_cbufs].usage);
      }
   }
#endif
   rpbi.pNext = &infos;

   VKCTX(CmdBeginRenderPass)(batch->state->cmdbuf, &rpbi, VK_SUBPASS_CONTENTS_INLINE);
   batch->in_rp = true;
   ctx->new_swapchain = false;
   return clear_buffers;
}

void
zink_begin_render_pass(struct zink_context *ctx)
{
   setup_framebuffer(ctx);
   /* TODO: need replicate EXT */
   if (ctx->framebuffer->rp->state.msaa_expand_mask) {
      uint32_t rp_state = ctx->gfx_pipeline_state.rp_state;
      struct zink_render_pass *rp = ctx->gfx_pipeline_state.render_pass;

      u_foreach_bit(i, ctx->framebuffer->rp->state.msaa_expand_mask) {
         struct zink_ctx_surface *csurf = (struct zink_ctx_surface*)ctx->fb_state.cbufs[i];
         if (csurf->transient_init)
            continue;
         struct pipe_surface *dst_view = (struct pipe_surface*)csurf->transient;
         assert(dst_view);
         struct pipe_sampler_view src_templ, *src_view;
         struct pipe_resource *src = ctx->fb_state.cbufs[i]->texture;
         struct pipe_box dstbox;

         u_box_3d(0, 0, 0, ctx->fb_state.width, ctx->fb_state.height,
                  1 + dst_view->u.tex.last_layer - dst_view->u.tex.first_layer, &dstbox);

         util_blitter_default_src_texture(ctx->blitter, &src_templ, src, ctx->fb_state.cbufs[i]->u.tex.level);
         src_view = ctx->base.create_sampler_view(&ctx->base, src, &src_templ);

         zink_blit_begin(ctx, ZINK_BLIT_SAVE_FB | ZINK_BLIT_SAVE_FS | ZINK_BLIT_SAVE_TEXTURES);
         util_blitter_blit_generic(ctx->blitter, dst_view, &dstbox,
                                   src_view, &dstbox, ctx->fb_state.width, ctx->fb_state.height,
                                   PIPE_MASK_RGBAZS, PIPE_TEX_FILTER_NEAREST, NULL,
                                   false, false);

         pipe_sampler_view_reference(&src_view, NULL);
         csurf->transient_init = true;
      }
      ctx->fb_changed = ctx->rp_changed = false;
      ctx->gfx_pipeline_state.rp_state = rp_state;
      ctx->gfx_pipeline_state.render_pass = rp;
   }
   assert(ctx->gfx_pipeline_state.render_pass);
   unsigned clear_buffers = begin_render_pass(ctx);

   if (ctx->render_condition.query)
      zink_start_conditional_render(ctx);
   zink_clear_framebuffer(ctx, clear_buffers);
}

void
zink_end_render_pass(struct zink_context *ctx)
{
   if (ctx->batch.in_rp) {
      if (ctx->render_condition.query)
         zink_stop_conditional_render(ctx);
      VKCTX(CmdEndRenderPass)(ctx->batch.state->cmdbuf);
      for (unsigned i = 0; i < ctx->fb_state.nr_cbufs; i++) {
         struct zink_ctx_surface *csurf = (struct zink_ctx_surface*)ctx->fb_state.cbufs[i];
         if (csurf)
            csurf->transient_init = true;
      }
   }
   ctx->batch.in_rp = false;
}

bool
zink_init_render_pass(struct zink_context *ctx)
{
   _mesa_set_init(&ctx->render_pass_state_cache, ctx, hash_rp_state, equals_rp_state);
   ctx->render_pass_cache = _mesa_hash_table_create(NULL,
                                                    hash_render_pass_state,
                                                    equals_render_pass_state);
   return !!ctx->render_pass_cache;
}
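For context only, not part of the diff above: the entry points moved here (zink_begin_render_pass, zink_end_render_pass, zink_init_render_pass) are driven by zink's draw and flush paths. A minimal calling sketch follows; the helper ensure_rp() and the surrounding function are hypothetical illustrations, not code from this commit, and they only rely on fields visible in the diff (ctx->batch.in_rp).

/* Illustrative only: how a caller is expected to drive the moved entry points.
 * ensure_rp() and example_draw_then_copy() are hypothetical, not part of this commit. */
static void
ensure_rp(struct zink_context *ctx)
{
   /* start a render pass before recording draw commands */
   if (!ctx->batch.in_rp)
      zink_begin_render_pass(ctx);
}

static void
example_draw_then_copy(struct zink_context *ctx)
{
   ensure_rp(ctx);
   /* ... record draws on ctx->batch.state->cmdbuf ... */

   /* commands that cannot execute inside a render pass (copies, barriers)
    * require ending it first; zink_end_render_pass() checks ctx->batch.in_rp,
    * so calling it when no render pass is active is harmless */
   zink_end_render_pass(ctx);
   /* ... record transfer/barrier commands ... */
}

zink_init_render_pass() sets up the render-pass and pipeline-state caches used by get_render_pass(); presumably it runs once at context creation, before any of the above.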