asahi: Use a dynarray for writers
We don't want a writer hash table holding persistent pointers to resources, because a resource could be freed without the hash table being updated (even though the underlying BO will not be freed until it is no longer busy). To avoid reference-counting headaches, do away with the pointer hash table and instead use a flat dynarray mapping BO handles to writers (batch indices).

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21620>
This commit is contained in:

committed by
Marge Bot

parent
0cde42a506
commit
c4aa7828b3
@@ -94,14 +94,14 @@ agx_batch_cleanup(struct agx_context *ctx, struct agx_batch *batch)
|
||||
batch->occlusion_buffer.cpu = NULL;
|
||||
batch->occlusion_buffer.gpu = 0;
|
||||
|
||||
/* There is no more writer for anything we wrote recorded on this context */
|
||||
hash_table_foreach(ctx->writer, ent) {
|
||||
if (ent->data == batch)
|
||||
_mesa_hash_table_remove(ctx->writer, ent);
|
||||
}
|
||||
|
||||
int handle;
|
||||
AGX_BATCH_FOREACH_BO_HANDLE(batch, handle) {
|
||||
/* There is no more writer on this context for anything we wrote */
|
||||
struct agx_batch *writer = agx_writer_get(ctx, handle);
|
||||
assert((writer == NULL || writer == batch) &&
|
||||
"cannot read while another batch writes");
|
||||
|
||||
agx_writer_remove(ctx, handle);
|
||||
agx_bo_unreference(agx_lookup_bo(dev, handle));
|
||||
}
|
||||
|
||||
@@ -229,11 +229,11 @@ static void
|
||||
agx_flush_writer_except(struct agx_context *ctx, struct agx_resource *rsrc,
|
||||
struct agx_batch *except, const char *reason)
|
||||
{
|
||||
struct hash_entry *ent = _mesa_hash_table_search(ctx->writer, rsrc);
|
||||
struct agx_batch *writer = agx_writer_get(ctx, rsrc->bo->handle);
|
||||
|
||||
if (ent && ent->data != except) {
|
||||
if (writer && writer != except) {
|
||||
perf_debug_ctx(ctx, "Flush writer due to: %s\n", reason);
|
||||
agx_flush_batch(ctx, ent->data);
|
||||
agx_flush_batch(ctx, writer);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -281,24 +281,23 @@ void
|
||||
agx_batch_writes(struct agx_batch *batch, struct agx_resource *rsrc)
|
||||
{
|
||||
struct agx_context *ctx = batch->ctx;
|
||||
struct hash_entry *ent = _mesa_hash_table_search(ctx->writer, rsrc);
|
||||
struct agx_batch *writer = agx_writer_get(ctx, rsrc->bo->handle);
|
||||
|
||||
agx_flush_readers_except(ctx, rsrc, batch, "Write from other batch");
|
||||
|
||||
/* Nothing to do if we're already writing */
|
||||
if (ent && ent->data == batch)
|
||||
if (writer == batch)
|
||||
return;
|
||||
|
||||
/* Hazard: writer-after-write, write-after-read */
|
||||
if (ent)
|
||||
if (writer)
|
||||
agx_flush_writer(ctx, rsrc, "Multiple writers");
|
||||
|
||||
/* Write is strictly stronger than a read */
|
||||
agx_batch_reads(batch, rsrc);
|
||||
|
||||
/* We are now the new writer */
|
||||
assert(!_mesa_hash_table_search(ctx->writer, rsrc));
|
||||
_mesa_hash_table_insert(ctx->writer, rsrc, batch);
|
||||
agx_writer_add(ctx, agx_batch_idx(batch), rsrc->bo->handle);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -1119,7 +1119,7 @@ agx_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
|
||||
pctx->screen = screen;
|
||||
pctx->priv = priv;
|
||||
|
||||
ctx->writer = _mesa_pointer_hash_table_create(ctx);
|
||||
util_dynarray_init(&ctx->writer, ctx);
|
||||
|
||||
pctx->stream_uploader = u_upload_create_default(pctx);
|
||||
if (!pctx->stream_uploader) {
|
||||
|
@@ -340,12 +340,60 @@ struct agx_context {
|
||||
|
||||
struct blitter_context *blitter;
|
||||
|
||||
/* Map of agx_resource to agx_batch that writes that resource */
|
||||
struct hash_table *writer;
|
||||
/* Map of GEM handle to (batch index + 1) that (conservatively) writes that
|
||||
* BO, or 0 if no writer.
|
||||
*/
|
||||
struct util_dynarray writer;
|
||||
|
||||
struct agx_meta_cache meta;
|
||||
};
|
||||
|
||||
static void
|
||||
agx_writer_add(struct agx_context *ctx, uint8_t batch_index, unsigned handle)
|
||||
{
|
||||
assert(batch_index < AGX_MAX_BATCHES && "invariant");
|
||||
static_assert(AGX_MAX_BATCHES < 0xFF, "no overflow on addition");
|
||||
|
||||
/* If we need to grow, double the capacity so insertion is amortized O(1). */
|
||||
if (unlikely(handle >= ctx->writer.size)) {
|
||||
unsigned new_size =
|
||||
MAX2(ctx->writer.capacity * 2, util_next_power_of_two(handle + 1));
|
||||
unsigned grow = new_size - ctx->writer.size;
|
||||
|
||||
memset(util_dynarray_grow(&ctx->writer, uint8_t, grow), 0,
|
||||
grow * sizeof(uint8_t));
|
||||
}
|
||||
|
||||
/* There is now room */
|
||||
uint8_t *value = util_dynarray_element(&ctx->writer, uint8_t, handle);
|
||||
assert((*value) == 0 && "there should be no existing writer");
|
||||
*value = batch_index + 1;
|
||||
}
|
||||
|
||||
static struct agx_batch *
|
||||
agx_writer_get(struct agx_context *ctx, unsigned handle)
|
||||
{
|
||||
if (handle >= ctx->writer.size)
|
||||
return NULL;
|
||||
|
||||
uint8_t value = *util_dynarray_element(&ctx->writer, uint8_t, handle);
|
||||
|
||||
if (value > 0)
|
||||
return &ctx->batches.slots[value - 1];
|
||||
else
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void
|
||||
agx_writer_remove(struct agx_context *ctx, unsigned handle)
|
||||
{
|
||||
if (handle >= ctx->writer.size)
|
||||
return;
|
||||
|
||||
uint8_t *value = util_dynarray_element(&ctx->writer, uint8_t, handle);
|
||||
*value = 0;
|
||||
}
|
||||
|
||||
static inline struct agx_context *
|
||||
agx_context(struct pipe_context *pctx)
|
||||
{
|
||||
|
Reference in New Issue
Block a user