etnaviv: split PIPE_BUFFER resources from other types of resources
Buffer resources are quite special: they are one-dimensional, always linear, have no miplevels or array slices, never have a texture- or render-compatible sibling, and never use TS. The gallium context interface acknowledges this by providing separate entry points for buffer maps/unmaps/flushes.

Provide a specialized etna_buffer_resource as a much more lightweight alternative to the full-blown etna_resource, and implement buffer maps/unmaps in a straightforward, direct-map manner, without all the tiling, TS and resource sibling handling buried in etna_transfer_map/unmap. Further map optimizations should be much easier to add on top of this simple implementation than to the merged buffer/texture transfer code.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Christian Gmeiner <cgmeiner@igalia.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/34061>
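As a rough, standalone illustration of the pattern the diff below introduces (illustrative names only, not the Mesa code; the real helpers are etna_resource()/etna_buffer_resource() in the etnaviv_resource.h hunk), the split amounts to two structs sharing a common base plus cast helpers that assert on the resource target, so buffer-only paths never touch texture state:

/* Simplified sketch, not Mesa code: a lightweight buffer variant and a
 * full texture variant share a common base, and cast helpers assert on
 * the target so misrouted resources are caught early. */
#include <assert.h>
#include <stdio.h>

enum target { TARGET_BUFFER, TARGET_TEXTURE };

struct base_resource {
   enum target target;
   unsigned width0;
};

struct buffer_resource {            /* only what a linear 1D buffer needs */
   struct base_resource base;
   void *bo;
};

struct texture_resource {           /* carries miplevel/sibling state too */
   struct base_resource base;
   void *bo;
   unsigned num_levels;
};

struct buffer_resource *buffer_resource(struct base_resource *p)
{
   assert(p->target == TARGET_BUFFER);
   return (struct buffer_resource *)p;
}

struct texture_resource *texture_resource(struct base_resource *p)
{
   assert(p->target != TARGET_BUFFER);
   return (struct texture_resource *)p;
}

int main(void)
{
   struct buffer_resource buf = { .base = { TARGET_BUFFER, 4096 } };

   /* Buffer-only entry points (index/vertex streams, query BOs, ...)
    * use the cheap cast and never touch texture state. */
   printf("buffer width: %u\n", buffer_resource(&buf.base)->base.width0);
   return 0;
}

Buffer-only callers in the driver, such as the index- and vertex-stream setup changed below, can then use the buffer cast directly.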
@@ -279,7 +279,7 @@ etna_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info,
       /* Add start to index offset, when rendering indexed */
       index_offset += draws[0].start * info->index_size;
 
-      ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.bo = etna_resource(indexbuf)->bo;
+      ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.bo = etna_buffer_resource(indexbuf)->bo;
       ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.offset = index_offset;
       ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.flags = ETNA_RELOC_READ;
       ctx->index_buffer.FE_INDEX_STREAM_CONTROL = translate_index_size(info->index_size);
@@ -59,7 +59,7 @@ etna_acc_destroy_query(struct etna_context *ctx, struct etna_query *q)
 static void
 realloc_query_bo(struct etna_context *ctx, struct etna_acc_query *aq)
 {
-   struct etna_resource *rsc;
+   struct etna_buffer_resource *rsc;
    void *map;
 
    pipe_resource_reference(&aq->prsc, NULL);
@@ -68,7 +68,7 @@ realloc_query_bo(struct etna_context *ctx, struct etna_acc_query *aq)
                                 0, 0x1000);
 
    /* don't assume the buffer is zero-initialized */
-   rsc = etna_resource(aq->prsc);
+   rsc = etna_buffer_resource(aq->prsc);
 
    etna_bo_cpu_prep(rsc->bo, DRM_ETNA_PREP_WRITE);
 
@@ -111,7 +111,7 @@ etna_acc_get_query_result(struct etna_context *ctx, struct etna_query *q,
                           bool wait, union pipe_query_result *result)
 {
    struct etna_acc_query *aq = etna_acc_query(q);
-   struct etna_resource *rsc = etna_resource(aq->prsc);
+   struct etna_buffer_resource *rsc = etna_buffer_resource(aq->prsc);
    const struct etna_acc_sample_provider *p = aq->provider;
    uint32_t prep_op = DRM_ETNA_PREP_READ;
 
@@ -69,7 +69,7 @@ occlusion_allocate(struct etna_context *ctx, ASSERTED unsigned query_type)
 static void
 occlusion_resume(struct etna_acc_query *aq, struct etna_context *ctx)
 {
-   struct etna_resource *rsc = etna_resource(aq->prsc);
+   struct etna_buffer_resource *rsc = etna_buffer_resource(aq->prsc);
    struct etna_reloc r = {
       .bo = rsc->bo,
       .flags = ETNA_RELOC_WRITE
@@ -83,7 +83,7 @@ pm_query(struct etna_context *ctx, struct etna_acc_query *aq, unsigned flags)
    struct etna_perf p = {
       .flags = flags,
       .sequence = pq->sequence,
-      .bo = etna_resource(aq->prsc)->bo,
+      .bo = etna_buffer_resource(aq->prsc)->bo,
       .signal = pq->signal,
       .offset = offset
    };
@@ -285,7 +285,7 @@ etna_layout_multiple(const struct etna_screen *screen,
    switch (layout) {
    case ETNA_LAYOUT_LINEAR:
       *paddingX = rs_align ? 16 : 4;
-      *paddingY = !specs->use_blt && templat->target != PIPE_BUFFER ? 4 : 1;
+      *paddingY = !specs->use_blt ? 4 : 1;
       *halign = rs_align ? TEXTURE_HALIGN_SIXTEEN : TEXTURE_HALIGN_FOUR;
       break;
    case ETNA_LAYOUT_TILED:
@@ -313,6 +313,55 @@ etna_layout_multiple(const struct etna_screen *screen,
    }
 }
 
+static struct pipe_resource *
+etna_buffer_resource_alloc(struct pipe_screen *pscreen,
+                           const struct pipe_resource *templat)
+{
+   struct etna_screen *screen = etna_screen(pscreen);
+   uint32_t size = pipe_buffer_size(templat);
+   uint32_t flags = DRM_ETNA_GEM_CACHE_WC;
+   struct etna_buffer_resource *rsc;
+
+   DBG_F(ETNA_DBG_RESOURCE_MSGS,
+         "target=%d, format=%s, width=%u, usage=%u, bind=%x, flags=%x",
+         templat->target, util_format_name(templat->format), templat->width0,
+         templat->usage, templat->bind, templat->flags);
+
+   assert(!(templat->bind & PIPE_BIND_SHARED));
+
+   rsc = CALLOC_STRUCT(etna_buffer_resource);
+   if (!rsc)
+      return NULL;
+
+   rsc->base = *templat;
+   rsc->base.screen = pscreen;
+
+   pipe_reference_init(&rsc->base.reference, 1);
+   util_range_init(&rsc->valid_buffer_range);
+
+   if (templat->bind & PIPE_BIND_VERTEX_BUFFER)
+      flags |= DRM_ETNA_GEM_FORCE_MMU;
+
+   rsc->bo = etna_bo_new(screen->dev, size, flags);
+   if (unlikely(!rsc->bo)) {
+      BUG("Problem allocating video memory for resource");
+      goto free_rsc;
+   }
+
+   if (DBG_ENABLED(ETNA_DBG_ZERO)) {
+      void *map = etna_bo_map(rsc->bo);
+      etna_bo_cpu_prep(rsc->bo, DRM_ETNA_PREP_WRITE);
+      memset(map, 0, size);
+      etna_bo_cpu_fini(rsc->bo);
+   }
+
+   return &rsc->base;
+
+free_rsc:
+   FREE(rsc);
+   return NULL;
+}
+
 /* Create a new resource object, using the given template info */
 struct pipe_resource *
 etna_resource_alloc(struct pipe_screen *pscreen, unsigned layout,
@@ -354,7 +403,6 @@ etna_resource_alloc(struct pipe_screen *pscreen, unsigned layout,
    rsc->explicit_flush = true;
 
    pipe_reference_init(&rsc->base.reference, 1);
-   util_range_init(&rsc->valid_buffer_range);
 
    size = setup_miptree(rsc, paddingX, paddingY, msaa_xscale, msaa_yscale);
 
@@ -379,12 +427,7 @@ etna_resource_alloc(struct pipe_screen *pscreen, unsigned layout,
       if (unlikely(!rsc->bo))
          goto free_rsc;
    } else {
-      uint32_t flags = DRM_ETNA_GEM_CACHE_WC;
-
-      if (templat->bind & PIPE_BIND_VERTEX_BUFFER)
-         flags |= DRM_ETNA_GEM_FORCE_MMU;
-
-      rsc->bo = etna_bo_new(screen->dev, size, flags);
+      rsc->bo = etna_bo_new(screen->dev, size, DRM_ETNA_GEM_CACHE_WC);
       if (unlikely(!rsc->bo)) {
         BUG("Problem allocating video memory for resource");
         goto free_rsc;
@@ -418,6 +461,9 @@ etna_resource_create(struct pipe_screen *pscreen,
    struct etna_screen *screen = etna_screen(pscreen);
    unsigned layout = ETNA_LAYOUT_TILED;
 
+   if (templat->target == PIPE_BUFFER)
+      return etna_buffer_resource_alloc(pscreen, templat);
+
    /* At this point we don't know if the resource will be used as a texture,
     * render target, or both, because gallium sets the bits whenever possible
     * This matters because on some GPUs (GC2000) there is no tiling that is
@@ -451,7 +497,6 @@ etna_resource_create(struct pipe_screen *pscreen,
 
    if (/* linear base or scanout without modifier requested */
        (templat->bind & (PIPE_BIND_LINEAR | PIPE_BIND_SCANOUT)) ||
-       templat->target == PIPE_BUFFER || /* buffer always linear */
        /* compressed textures don't use tiling, they have their own "tiles" */
        util_format_is_compressed(templat->format)) {
       layout = ETNA_LAYOUT_LINEAR;
@@ -576,10 +621,27 @@ etna_resource_changed(struct pipe_screen *pscreen, struct pipe_resource *prsc)
    }
 }
 
+static void
+etna_buffer_resource_destroy(struct pipe_screen *pscreen,
+                             struct pipe_resource *prsc)
+{
+   struct etna_buffer_resource *rsc = etna_buffer_resource(prsc);
+
+   etna_bo_del(rsc->bo);
+
+   util_range_destroy(&rsc->valid_buffer_range);
+   FREE(rsc);
+}
+
 static void
 etna_resource_destroy(struct pipe_screen *pscreen, struct pipe_resource *prsc)
 {
-   struct etna_resource *rsc = etna_resource(prsc);
+   struct etna_resource *rsc;
+
+   if (prsc->target == PIPE_BUFFER)
+      return etna_buffer_resource_destroy(pscreen, prsc);
+
+   rsc = etna_resource(prsc);
 
    if (rsc->bo)
       etna_bo_del(rsc->bo);
@@ -593,8 +655,6 @@ etna_resource_destroy(struct pipe_screen *pscreen, struct pipe_resource *prsc)
    if (rsc->ts_scanout)
       renderonly_scanout_destroy(rsc->ts_scanout, etna_screen(pscreen)->ro);
 
-   util_range_destroy(&rsc->valid_buffer_range);
-
    pipe_resource_reference(&rsc->texture, NULL);
    pipe_resource_reference(&rsc->render, NULL);
 
@@ -668,7 +728,6 @@ etna_resource_from_handle(struct pipe_screen *pscreen,
    *prsc = *tmpl;
 
    pipe_reference_init(&prsc->reference, 1);
-   util_range_init(&rsc->valid_buffer_range);
    prsc->screen = pscreen;
 
    rsc->bo = etna_screen_bo_from_handle(pscreen, handle);
@@ -193,6 +193,16 @@ enum etna_resource_status {
    ETNA_PENDING_READ = 0x02,
 };
 
+struct etna_buffer_resource {
+   struct pipe_resource base;
+
+   /* buffer range that has been initialized */
+   struct util_range valid_buffer_range;
+
+   /* backing storage */
+   struct etna_bo *bo;
+};
+
 struct etna_resource {
    struct pipe_resource base;
    struct renderonly_scanout *scanout;
@@ -209,9 +219,6 @@ struct etna_resource {
 
    struct etna_resource_level levels[ETNA_NUM_LOD];
 
-   /* buffer range that has been initialized */
-   struct util_range valid_buffer_range;
-
    /* for when TE doesn't support the base layout */
    struct pipe_resource *texture;
    /* for when PE doesn't support the base layout */
@@ -284,9 +291,17 @@ etna_resource_ext_ts(const struct etna_resource *res)
 static inline struct etna_resource *
 etna_resource(struct pipe_resource *p)
 {
+   assert(p->target != PIPE_BUFFER);
    return (struct etna_resource *)p;
 }
 
+static inline struct etna_buffer_resource *
+etna_buffer_resource(struct pipe_resource *p)
+{
+   assert(p->target == PIPE_BUFFER);
+   return (struct etna_buffer_resource *)p;
+}
+
 void
 etna_resource_used(struct etna_context *ctx, struct pipe_resource *prsc,
                    enum etna_resource_status status);
@@ -562,7 +562,7 @@ etna_set_vertex_buffers(struct pipe_context *pctx, unsigned num_buffers,
          etna_usermem_map */
 
       if (vbi->buffer.resource) { /* GPU buffer */
-         cs->FE_VERTEX_STREAM_BASE_ADDR.bo = etna_resource(vbi->buffer.resource)->bo;
+         cs->FE_VERTEX_STREAM_BASE_ADDR.bo = etna_buffer_resource(vbi->buffer.resource)->bo;
          cs->FE_VERTEX_STREAM_BASE_ADDR.offset = vbi->buffer_offset;
          cs->FE_VERTEX_STREAM_BASE_ADDR.flags = ETNA_RELOC_READ;
       } else {
@@ -166,7 +166,7 @@ etna_create_sampler_view_desc(struct pipe_context *pctx, struct pipe_resource *p
    if (!sv->res)
       goto error;
 
-   uint32_t *buf = etna_bo_map(etna_resource(sv->res)->bo) + suballoc_offset;
+   uint32_t *buf = etna_bo_map(etna_buffer_resource(sv->res)->bo) + suballoc_offset;
 
    /** GC7000 needs the size of the BASELOD level */
    uint32_t base_width = u_minify(res->base.width0, sv->base.u.tex.first_level);
@@ -223,7 +223,7 @@ etna_create_sampler_view_desc(struct pipe_context *pctx, struct pipe_resource *p
       DESC_SET(LOD_ADDR(lod), etna_bo_gpu_va(res->bo) + res->levels[lod].offset);
 #undef DESC_SET
 
-   sv->DESC_ADDR.bo = etna_resource(sv->res)->bo;
+   sv->DESC_ADDR.bo = etna_buffer_resource(sv->res)->bo;
    sv->DESC_ADDR.offset = suballoc_offset;
    sv->DESC_ADDR.flags = ETNA_RELOC_READ;
 
@@ -48,6 +48,99 @@
 
 #define ETNA_PIPE_MAP_DISCARD_LEVEL (PIPE_MAP_DRV_PRV << 0)
 
+static void *
+etna_buffer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
+                unsigned level, unsigned usage, const struct pipe_box *box,
+                struct pipe_transfer **out_transfer)
+{
+   struct etna_buffer_resource *rsc = etna_buffer_resource(prsc);
+   struct etna_context *ctx = etna_context(pctx);
+   struct etna_transfer *trans;
+
+   trans = slab_zalloc(&ctx->transfer_pool);
+   if (!trans)
+      return NULL;
+
+   /* Upgrade to UNSYNCHRONIZED if range is uninitialized. */
+   if ((usage & PIPE_MAP_WRITE) &&
+       !util_ranges_intersect(&rsc->valid_buffer_range,
+                              box->x, box->x + box->width))
+      usage |= PIPE_MAP_UNSYNCHRONIZED;
+
+   pipe_resource_reference(&trans->base.resource, prsc);
+   trans->base.level = level;
+   trans->base.usage = usage;
+   trans->base.box = *box;
+
+   /* map buffer object */
+   trans->mapped = etna_bo_map(rsc->bo);
+   if (!trans->mapped)
+      goto free_trans;
+
+   if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
+      enum etna_resource_status status = etna_resource_status(ctx, &rsc->base);
+      uint32_t prep_flags = 0;
+
+      /*
+       * Infer flush requirement from resource access and current GPU usage
+       * (reads must wait for GPU writes, writes must have exclusive access
+       * to the buffer).
+       */
+      if (((usage & PIPE_MAP_READ) && (status & ETNA_PENDING_WRITE)) ||
+          ((usage & PIPE_MAP_WRITE) && status)) {
+         etna_flush(pctx, NULL, 0, true);
+      }
+
+      if (usage & PIPE_MAP_READ)
+         prep_flags |= DRM_ETNA_PREP_READ;
+      if (usage & PIPE_MAP_WRITE)
+         prep_flags |= DRM_ETNA_PREP_WRITE;
+
+      if (etna_bo_cpu_prep(rsc->bo, prep_flags))
+         goto free_trans;
+   }
+
+   *out_transfer = &trans->base;
+   return trans->mapped + box->x;
+
+free_trans:
+   slab_free(&ctx->transfer_pool, trans);
+   return NULL;
+}
+
+static void
+etna_buffer_unmap(struct pipe_context *pctx, struct pipe_transfer *ptrans)
+{
+   struct etna_buffer_resource *rsc = etna_buffer_resource(ptrans->resource);
+   struct etna_transfer *trans = etna_transfer(ptrans);
+   struct etna_context *ctx = etna_context(pctx);
+
+   if (!(ptrans->usage & PIPE_MAP_UNSYNCHRONIZED))
+      etna_bo_cpu_fini(rsc->bo);
+
+   if ((ptrans->usage & PIPE_MAP_WRITE) &&
+       !(ptrans->usage & PIPE_MAP_FLUSH_EXPLICIT))
+      util_range_add(&rsc->base, &rsc->valid_buffer_range,
+                     ptrans->box.x, ptrans->box.x + ptrans->box.width);
+
+   pipe_resource_reference(&ptrans->resource, NULL);
+   slab_free(&ctx->transfer_pool, trans);
+}
+
+static void
+etna_transfer_flush_region(struct pipe_context *pctx,
+                           struct pipe_transfer *ptrans,
+                           const struct pipe_box *box)
+{
+   struct etna_buffer_resource *rsc = etna_buffer_resource(ptrans->resource);
+
+   assert(ptrans->resource->target == PIPE_BUFFER);
+
+   util_range_add(&rsc->base, &rsc->valid_buffer_range,
+                  ptrans->box.x + box->x,
+                  ptrans->box.x + box->x + box->width);
+}
+
 /* Compute offset into a 1D/2D/3D buffer of a certain box.
  * This box must be aligned to the block width and height of the
  * underlying format. */
@@ -102,7 +195,7 @@ static void etna_unpatch_data(void *buffer, const struct pipe_transfer *ptrans)
 }
 
 static void
-etna_transfer_unmap(struct pipe_context *pctx, struct pipe_transfer *ptrans)
+etna_texture_unmap(struct pipe_context *pctx, struct pipe_transfer *ptrans)
 {
    struct etna_context *ctx = etna_context(pctx);
    struct etna_transfer *trans = etna_transfer(ptrans);
@@ -156,10 +249,6 @@ etna_transfer_unmap(struct pipe_context *pctx, struct pipe_transfer *ptrans)
       }
    }
 
-   if (ptrans->resource->target == PIPE_BUFFER)
-      util_range_add(&rsc->base, &rsc->valid_buffer_range,
-                     ptrans->box.x, ptrans->box.x + ptrans->box.width);
-
    etna_resource_level_ts_mark_invalid(res_level);
    etna_resource_level_mark_changed(res_level);
 
@@ -187,11 +276,9 @@ etna_transfer_unmap(struct pipe_context *pctx, struct pipe_transfer *ptrans)
 }
 
 static void *
-etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
-                  unsigned level,
-                  unsigned usage,
-                  const struct pipe_box *box,
-                  struct pipe_transfer **out_transfer)
+etna_texture_map(struct pipe_context *pctx, struct pipe_resource *prsc,
+                 unsigned level, unsigned usage, const struct pipe_box *box,
+                 struct pipe_transfer **out_transfer)
 {
    struct etna_context *ctx = etna_context(pctx);
    struct etna_screen *screen = ctx->screen;
@@ -207,17 +294,6 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
 
    assert(level <= prsc->last_level);
 
-   /*
-    * Upgrade to UNSYNCHRONIZED if target is PIPE_BUFFER and range is uninitialized.
-    */
-   if ((usage & PIPE_MAP_WRITE) &&
-       (prsc->target == PIPE_BUFFER) &&
-       !util_ranges_intersect(&rsc->valid_buffer_range,
-                              box->x,
-                              box->x + box->width)) {
-      usage |= PIPE_MAP_UNSYNCHRONIZED;
-   }
-
    /* Upgrade DISCARD_RANGE to WHOLE_RESOURCE if the whole resource is
     * being mapped. If we add buffer reallocation to avoid CPU/GPU sync this
     * check needs to be extended to coherent mappings and shared resources.
@@ -309,14 +385,6 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
       res_level = &rsc->levels[0];
    }
 
-   /* XXX we don't handle PIPE_MAP_FLUSH_EXPLICIT; this flag can be ignored
-    * when mapping in-place,
-    * but when not in place we need to fire off the copy operation in
-    * transfer_flush_region (currently
-    * a no-op) instead of unmap. Need to handle this to support
-    * ARB_map_buffer_range extension at least.
-    */
-
    /*
    * Pull resources into the CPU domain. Only skipped for unsynchronized
    * transfers without a temporary resource.
@@ -422,32 +490,18 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
 fail:
    etna_bo_cpu_fini(rsc->bo);
 fail_prep:
-   etna_transfer_unmap(pctx, ptrans);
+   etna_texture_unmap(pctx, ptrans);
    return NULL;
 }
 
-static void
-etna_transfer_flush_region(struct pipe_context *pctx,
-                           struct pipe_transfer *ptrans,
-                           const struct pipe_box *box)
-{
-   struct etna_resource *rsc = etna_resource(ptrans->resource);
-
-   if (ptrans->resource->target == PIPE_BUFFER)
-      util_range_add(&rsc->base,
-                     &rsc->valid_buffer_range,
-                     ptrans->box.x + box->x,
-                     ptrans->box.x + box->x + box->width);
-}
-
 void
 etna_transfer_init(struct pipe_context *pctx)
 {
-   pctx->buffer_map = etna_transfer_map;
-   pctx->texture_map = etna_transfer_map;
+   pctx->buffer_map = etna_buffer_map;
+   pctx->texture_map = etna_texture_map;
    pctx->transfer_flush_region = etna_transfer_flush_region;
-   pctx->buffer_unmap = etna_transfer_unmap;
-   pctx->texture_unmap = etna_transfer_unmap;
+   pctx->buffer_unmap = etna_buffer_unmap;
+   pctx->texture_unmap = etna_texture_unmap;
    pctx->buffer_subdata = u_default_buffer_subdata;
    pctx->texture_subdata = u_default_texture_subdata;
 }
@@ -155,7 +155,7 @@ etna_uniforms_write(const struct etna_context *ctx,
 
       case ETNA_UNIFORM_UBO_ADDR:
          etna_cmd_stream_reloc(stream, &(struct etna_reloc) {
-            .bo = etna_resource(cb[val].buffer)->bo,
+            .bo = etna_buffer_resource(cb[val].buffer)->bo,
             .flags = ETNA_RELOC_READ,
             .offset = cb[val].buffer_offset,
          });