radeon_winsys.h: add a winsys parameter to most winsys buffer functions

This will allow removing the winsys pointer from buffers.

The amdgpu winsys adds dummy_ws to get radeon_winsys because there can be
no radeon_winsys around (e.g. while amdgpu_winsys is being destroyed), but
we still need some way to call buffer functions.

Reviewed-by: Zoltán Böszörményi <zboszor@gmail.com>
Reviewed-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/9809>
commit 65495e6caa
parent aed8af5456
Author: Marek Olšák
Date: 2021-03-23 18:33:41 -04:00
Committed by: Marge Bot

42 changed files with 169 additions and 150 deletions
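For orientation, a minimal, self-contained C model of the refactor (illustrative only; winsys, buffer, and unmap_impl are hypothetical names, not Mesa code): the vtable function now receives the winsys explicitly instead of reaching it through a pointer stored in the buffer.

/* Hypothetical model of the API change (toy names, not Mesa code). */
#include <stdio.h>

struct buffer { int id; };               /* stands in for struct pb_buffer */

struct winsys {                          /* stands in for struct radeon_winsys */
   /* after this commit: the winsys itself is the first parameter */
   void (*buffer_unmap)(struct winsys *ws, struct buffer *buf);
};

static void unmap_impl(struct winsys *ws, struct buffer *buf)
{
   (void)ws;                             /* a real implementation would use ws */
   printf("unmapped buffer %d\n", buf->id);
}

int main(void)
{
   struct winsys ws = { .buffer_unmap = unmap_impl };
   struct buffer buf = { .id = 1 };
   ws.buffer_unmap(&ws, &buf);           /* the winsys is now passed explicitly */
   return 0;
}

Every hunk below applies exactly this mechanical change: ws->buffer_map(buf, ...) becomes ws->buffer_map(ws, buf, ...), and likewise for buffer_unmap, buffer_wait, buffer_commit, and the metadata functions.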


@@ -150,16 +150,16 @@ static bool r300_get_query_result(struct pipe_context* pipe,
if (q->type == PIPE_QUERY_GPU_FINISHED) {
if (wait) {
-r300->rws->buffer_wait(q->buf, PIPE_TIMEOUT_INFINITE,
+r300->rws->buffer_wait(r300->rws, q->buf, PIPE_TIMEOUT_INFINITE,
RADEON_USAGE_READWRITE);
vresult->b = TRUE;
} else {
-vresult->b = r300->rws->buffer_wait(q->buf, 0, RADEON_USAGE_READWRITE);
+vresult->b = r300->rws->buffer_wait(r300->rws, q->buf, 0, RADEON_USAGE_READWRITE);
}
return vresult->b;
}
-map = r300->rws->buffer_map(q->buf, &r300->cs,
+map = r300->rws->buffer_map(r300->rws, q->buf, &r300->cs,
PIPE_MAP_READ |
(!wait ? PIPE_MAP_DONTBLOCK : 0));
if (!map)


@@ -374,7 +374,7 @@ static void r300_draw_arrays_immediate(struct r300_context *r300,
/* Map the buffer. */
if (!map[vbi]) {
-map[vbi] = (uint32_t*)r300->rws->buffer_map(
+map[vbi] = (uint32_t*)r300->rws->buffer_map(r300->rws,
r300_resource(vbuf->buffer.resource)->buf,
&r300->cs, PIPE_MAP_READ | PIPE_MAP_UNSYNCHRONIZED);
map[vbi] += (vbuf->buffer_offset / 4) + stride[i] * draw->start;
@@ -611,7 +611,7 @@ static void r300_draw_elements(struct r300_context *r300,
/* Fallback for misaligned ushort indices. */
if (indexSize == 2 && (start & 1) && indexBuffer) {
/* If we got here, then orgIndexBuffer == indexBuffer. */
-uint16_t *ptr = r300->rws->buffer_map(r300_resource(orgIndexBuffer)->buf,
+uint16_t *ptr = r300->rws->buffer_map(r300->rws, r300_resource(orgIndexBuffer)->buf,
&r300->cs,
PIPE_MAP_READ |
PIPE_MAP_UNSYNCHRONIZED);
@@ -946,7 +946,7 @@ static boolean r300_render_allocate_vertices(struct vbuf_render* render,
return FALSE;
}
r300->draw_vbo_offset = 0;
-r300render->vbo_ptr = rws->buffer_map(r300->vbo, &r300->cs,
+r300render->vbo_ptr = rws->buffer_map(rws, r300->vbo, &r300->cs,
PIPE_MAP_WRITE);
}


@@ -96,7 +96,7 @@ r300_buffer_transfer_map( struct pipe_context *context,
/* Check if mapping this buffer would cause waiting for the GPU. */
if (r300->rws->cs_is_buffer_referenced(&r300->cs, rbuf->buf, RADEON_USAGE_READWRITE) ||
-!r300->rws->buffer_wait(rbuf->buf, 0, RADEON_USAGE_READWRITE)) {
+!r300->rws->buffer_wait(r300->rws, rbuf->buf, 0, RADEON_USAGE_READWRITE)) {
unsigned i;
struct pb_buffer *new_buf;
@@ -127,7 +127,7 @@ r300_buffer_transfer_map( struct pipe_context *context,
usage |= PIPE_MAP_UNSYNCHRONIZED;
}
-map = rws->buffer_map(rbuf->buf, &r300->cs, usage);
+map = rws->buffer_map(rws, rbuf->buf, &r300->cs, usage);
if (!map) {
slab_free(&r300->pool_transfers, transfer);


@@ -1143,7 +1143,7 @@ r300_texture_create_object(struct r300_screen *rscreen,
tiling.u.legacy.microtile = tex->tex.microtile;
tiling.u.legacy.macrotile = tex->tex.macrotile[0];
tiling.u.legacy.stride = tex->tex.stride_in_bytes[0];
-rws->buffer_set_metadata(tex->buf, &tiling, NULL);
+rws->buffer_set_metadata(rws, tex->buf, &tiling, NULL);
return tex;
@@ -1198,7 +1198,7 @@ struct pipe_resource *r300_texture_from_handle(struct pipe_screen *screen,
if (!buffer)
return NULL;
-rws->buffer_get_metadata(buffer, &tiling, NULL);
+rws->buffer_get_metadata(rws, buffer, &tiling, NULL);
/* Enforce a microtiled zbuffer. */
if (util_format_is_depth_or_stencil(base->format) &&


@@ -120,7 +120,7 @@ r300_texture_transfer_map(struct pipe_context *ctx,
referenced_hw = TRUE;
} else {
referenced_hw =
-!r300->rws->buffer_wait(tex->buf, 0, RADEON_USAGE_READWRITE);
+!r300->rws->buffer_wait(r300->rws, tex->buf, 0, RADEON_USAGE_READWRITE);
}
trans = CALLOC_STRUCT(r300_transfer);
@@ -218,7 +218,7 @@ r300_texture_transfer_map(struct pipe_context *ctx,
if (trans->linear_texture) {
/* The detiled texture is of the same size as the region being mapped
* (no offset needed). */
-map = r300->rws->buffer_map(trans->linear_texture->buf,
+map = r300->rws->buffer_map(r300->rws, trans->linear_texture->buf,
&r300->cs, usage);
if (!map) {
pipe_resource_reference(
@@ -230,7 +230,7 @@ r300_texture_transfer_map(struct pipe_context *ctx,
return map;
} else {
/* Tiling is disabled. */
-map = r300->rws->buffer_map(tex->buf, &r300->cs, usage);
+map = r300->rws->buffer_map(r300->rws, tex->buf, &r300->cs, usage);
if (!map) {
FREE(trans);
return NULL;


@@ -332,7 +332,7 @@ static void eg_dump_last_ib(struct r600_context *rctx, FILE *f)
* waited for the context, so this buffer should be idle.
* If the GPU is hung, there is no point in waiting for it.
*/
-uint32_t *map = rctx->b.ws->buffer_map(rctx->last_trace_buf->buf,
+uint32_t *map = rctx->b.ws->buffer_map(rctx->b.ws, rctx->last_trace_buf->buf,
NULL,
PIPE_MAP_UNSYNCHRONIZED |
PIPE_MAP_READ);


@@ -461,7 +461,7 @@ static void *evergreen_create_compute_state(struct pipe_context *ctx,
PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
//TODO: use util_memcpy_cpu_to_le32 ?
memcpy(p, shader->bc.bytecode, shader->bc.ndw * 4);
-rctx->b.ws->buffer_unmap(shader->code_bo->buf);
+rctx->b.ws->buffer_unmap(rctx->b.ws, shader->code_bo->buf);
#endif
return shader;


@@ -2778,7 +2778,7 @@ void *r600_create_vertex_fetch_shader(struct pipe_context *ctx,
} else {
memcpy(bytecode, bc.bytecode, fs_size);
}
-rctx->b.ws->buffer_unmap(shader->buffer->buf);
+rctx->b.ws->buffer_unmap(rctx->b.ws, shader->buffer->buf);
r600_bytecode_clear(&bc);
return shader;


@@ -54,7 +54,7 @@ void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
assert(!(resource->flags & RADEON_FLAG_SPARSE));
if (usage & PIPE_MAP_UNSYNCHRONIZED) {
-return ctx->ws->buffer_map(resource->buf, NULL, usage);
+return ctx->ws->buffer_map(ctx->ws, resource->buf, NULL, usage);
}
if (!(usage & PIPE_MAP_WRITE)) {
@@ -85,7 +85,7 @@ void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
}
}
-if (busy || !ctx->ws->buffer_wait(resource->buf, 0, rusage)) {
+if (busy || !ctx->ws->buffer_wait(ctx->ws, resource->buf, 0, rusage)) {
if (usage & PIPE_MAP_DONTBLOCK) {
return NULL;
} else {
@@ -98,7 +98,7 @@ void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
}
/* Setting the CS to NULL will prevent doing checks we have done already. */
-return ctx->ws->buffer_map(resource->buf, NULL, usage);
+return ctx->ws->buffer_map(ctx->ws, resource->buf, NULL, usage);
}
void r600_init_resource_fields(struct r600_common_screen *rscreen,
@@ -254,7 +254,7 @@ r600_invalidate_buffer(struct r600_common_context *rctx,
/* Check if mapping this buffer would cause waiting for the GPU. */
if (r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
-!rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
+!rctx->ws->buffer_wait(rctx->ws, rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
rctx->invalidate_buffer(&rctx->b, &rbuffer->b.b);
} else {
util_range_set_empty(&rbuffer->valid_buffer_range);
@@ -409,7 +409,7 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx,
*/
if (rbuffer->flags & RADEON_FLAG_SPARSE ||
r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
-!rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
+!rctx->ws->buffer_wait(rctx->ws, rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
/* Do a wait-free write-only transfer using a temporary buffer. */
unsigned offset;
struct r600_resource *staging = NULL;


@@ -572,7 +572,7 @@ static bool r600_resource_commit(struct pipe_context *pctx,
assert(resource->target == PIPE_BUFFER);
-return ctx->ws->buffer_commit(res->buf, box->x, box->width, commit);
+return ctx->ws->buffer_commit(ctx->ws, res->buf, box->x, box->width, commit);
}
bool r600_common_context_init(struct r600_common_context *rctx,


@@ -526,7 +526,7 @@ static bool r600_query_hw_prepare_buffer(struct r600_common_screen *rscreen,
struct r600_resource *buffer)
{
/* Callers ensure that the buffer is currently unused by the GPU. */
-uint32_t *results = rscreen->ws->buffer_map(buffer->buf, NULL,
+uint32_t *results = rscreen->ws->buffer_map(rscreen->ws, buffer->buf, NULL,
PIPE_MAP_WRITE |
PIPE_MAP_UNSYNCHRONIZED);
if (!results)
@@ -1021,7 +1021,7 @@ void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
/* Obtain a new buffer if the current one can't be mapped without a stall. */
if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
-!rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
+!rctx->ws->buffer_wait(rctx->ws, query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
r600_resource_reference(&query->buffer.buf, NULL);
query->buffer.buf = r600_new_query_buffer(rctx->screen, query);
} else {
@@ -1343,7 +1343,7 @@ bool r600_query_hw_get_result(struct r600_common_context *rctx,
void *map;
if (rquery->b.flushed)
-map = rctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
+map = rctx->ws->buffer_map(rctx->ws, qbuf->buf->buf, NULL, usage);
else
map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf, usage);


@@ -157,7 +157,7 @@ static int store_shader(struct pipe_context *ctx,
} else {
memcpy(ptr, shader->shader.bc.bytecode, shader->shader.bc.ndw * sizeof(*ptr));
}
-rctx->b.ws->buffer_unmap(shader->bo->buf);
+rctx->b.ws->buffer_unmap(rctx->b.ws, shader->bo->buf);
}
return 0;


@@ -519,7 +519,7 @@ static bool r600_texture_get_handle(struct pipe_screen* screen,
if (!res->b.is_shared || update_metadata) {
r600_texture_init_metadata(rscreen, rtex, &metadata);
-rscreen->ws->buffer_set_metadata(res->buf, &metadata, NULL);
+rscreen->ws->buffer_set_metadata(rscreen->ws, res->buf, &metadata, NULL);
}
slice_size = (uint64_t)rtex->surface.u.legacy.level[0].slice_size_dw * 4;
@@ -1132,7 +1132,7 @@ static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen
if (!buf)
return NULL;
-rscreen->ws->buffer_get_metadata(buf, &metadata, NULL);
+rscreen->ws->buffer_get_metadata(rscreen->ws, buf, &metadata, NULL);
r600_surface_import_metadata(rscreen, &surface, &metadata,
&array_mode, &is_scanout);
@@ -1343,7 +1343,7 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
/* Write & linear only: */
else if (r600_rings_is_buffer_referenced(rctx, rtex->resource.buf,
RADEON_USAGE_READWRITE) ||
-!rctx->ws->buffer_wait(rtex->resource.buf, 0,
+!rctx->ws->buffer_wait(rctx->ws, rtex->resource.buf, 0,
RADEON_USAGE_READWRITE)) {
/* It's busy. */
if (r600_can_invalidate_texture(rctx->screen, rtex,
@@ -1899,7 +1899,7 @@ r600_texture_from_memobj(struct pipe_screen *screen,
struct pb_buffer *buf = NULL;
if (memobj->b.dedicated) {
-rscreen->ws->buffer_get_metadata(memobj->buf, &metadata, NULL);
+rscreen->ws->buffer_get_metadata(rscreen->ws, memobj->buf, &metadata, NULL);
r600_surface_import_metadata(rscreen, &surface, &metadata,
&array_mode, &is_scanout);
} else {


@@ -152,7 +152,7 @@ static void map_msg_fb_it_buf(struct ruvd_decoder *dec)
buf = &dec->msg_fb_it_buffers[dec->cur_buffer];
/* and map it for CPU access */
-ptr = dec->ws->buffer_map(buf->res->buf, &dec->cs,
+ptr = dec->ws->buffer_map(dec->ws, buf->res->buf, &dec->cs,
PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
/* calc buffer offsets */
@@ -177,7 +177,7 @@ static void send_msg_buf(struct ruvd_decoder *dec)
buf = &dec->msg_fb_it_buffers[dec->cur_buffer];
/* unmap the buffer */
-dec->ws->buffer_unmap(buf->res->buf);
+dec->ws->buffer_unmap(dec->ws, buf->res->buf);
dec->bs_ptr = NULL;
dec->msg = NULL;
dec->fb = NULL;
@@ -840,7 +840,7 @@ static void ruvd_begin_frame(struct pipe_video_codec *decoder,
&ruvd_destroy_associated_data);
dec->bs_size = 0;
-dec->bs_ptr = dec->ws->buffer_map(
+dec->bs_ptr = dec->ws->buffer_map(dec->ws,
dec->bs_buffers[dec->cur_buffer].res->buf,
&dec->cs, PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
}
@@ -888,14 +888,14 @@ static void ruvd_decode_bitstream(struct pipe_video_codec *decoder,
new_size += 2; /* save for EOI */
if (new_size > buf->res->buf->size) {
-dec->ws->buffer_unmap(buf->res->buf);
+dec->ws->buffer_unmap(dec->ws, buf->res->buf);
dec->bs_ptr = NULL;
if (!rvid_resize_buffer(dec->screen, &dec->cs, buf, new_size)) {
RVID_ERR("Can't resize bitstream buffer!");
return;
}
-dec->bs_ptr = dec->ws->buffer_map(buf->res->buf, &dec->cs,
+dec->bs_ptr = dec->ws->buffer_map(dec->ws, buf->res->buf, &dec->cs,
PIPE_MAP_WRITE |
RADEON_MAP_TEMPORARY);
if (!dec->bs_ptr)
@@ -939,7 +939,7 @@ static void ruvd_end_frame(struct pipe_video_codec *decoder,
bs_size = align(dec->bs_size, 128);
memset(dec->bs_ptr, 0, bs_size - dec->bs_size);
-dec->ws->buffer_unmap(bs_buf->res->buf);
+dec->ws->buffer_unmap(dec->ws, bs_buf->res->buf);
dec->bs_ptr = NULL;
map_msg_fb_it_buf(dec);


@@ -357,7 +357,7 @@ static void rvce_get_feedback(struct pipe_video_codec *encoder,
struct rvid_buffer *fb = feedback;
if (size) {
-uint32_t *ptr = enc->ws->buffer_map(
+uint32_t *ptr = enc->ws->buffer_map(enc->ws,
fb->res->buf, &enc->cs,
PIPE_MAP_READ_WRITE | RADEON_MAP_TEMPORARY);
@@ -367,7 +367,7 @@ static void rvce_get_feedback(struct pipe_video_codec *encoder,
*size = 0;
}
-enc->ws->buffer_unmap(fb->res->buf);
+enc->ws->buffer_unmap(enc->ws, fb->res->buf);
}
//dump_feedback(enc, fb);
rvid_destroy_buffer(fb);


@@ -97,12 +97,12 @@ bool rvid_resize_buffer(struct pipe_screen *screen, struct radeon_cmdbuf *cs,
if (!rvid_create_buffer(screen, new_buf, new_size, new_buf->usage))
goto error;
-src = ws->buffer_map(old_buf.res->buf, cs,
+src = ws->buffer_map(ws, old_buf.res->buf, cs,
PIPE_MAP_READ | RADEON_MAP_TEMPORARY);
if (!src)
goto error;
-dst = ws->buffer_map(new_buf->res->buf, cs,
+dst = ws->buffer_map(ws, new_buf->res->buf, cs,
PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
if (!dst)
goto error;
@@ -113,14 +113,14 @@ bool rvid_resize_buffer(struct pipe_screen *screen, struct radeon_cmdbuf *cs,
dst += bytes;
memset(dst, 0, new_size);
}
-ws->buffer_unmap(new_buf->res->buf);
-ws->buffer_unmap(old_buf.res->buf);
+ws->buffer_unmap(ws, new_buf->res->buf);
+ws->buffer_unmap(ws, old_buf.res->buf);
rvid_destroy_buffer(&old_buf);
return true;
error:
if (src)
-ws->buffer_unmap(old_buf.res->buf);
+ws->buffer_unmap(ws, old_buf.res->buf);
rvid_destroy_buffer(new_buf);
*new_buf = old_buf;
return false;


@@ -144,7 +144,7 @@ static void map_msg_fb_it_buf(struct ruvd_decoder *dec)
/* and map it for CPU access */
ptr =
-dec->ws->buffer_map(buf->res->buf, &dec->cs, PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
+dec->ws->buffer_map(dec->ws, buf->res->buf, &dec->cs, PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
/* calc buffer offsets */
dec->msg = (struct ruvd_msg *)ptr;
@@ -168,7 +168,7 @@ static void send_msg_buf(struct ruvd_decoder *dec)
buf = &dec->msg_fb_it_buffers[dec->cur_buffer];
/* unmap the buffer */
-dec->ws->buffer_unmap(buf->res->buf);
+dec->ws->buffer_unmap(dec->ws, buf->res->buf);
dec->msg = NULL;
dec->fb = NULL;
dec->it = NULL;
@@ -1013,7 +1013,7 @@ static void ruvd_begin_frame(struct pipe_video_codec *decoder, struct pipe_video
&ruvd_destroy_associated_data);
dec->bs_size = 0;
-dec->bs_ptr = dec->ws->buffer_map(dec->bs_buffers[dec->cur_buffer].res->buf, &dec->cs,
+dec->bs_ptr = dec->ws->buffer_map(dec->ws, dec->bs_buffers[dec->cur_buffer].res->buf, &dec->cs,
PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
}
@@ -1051,13 +1051,13 @@ static void ruvd_decode_bitstream(struct pipe_video_codec *decoder,
unsigned new_size = dec->bs_size + sizes[i];
if (new_size > buf->res->buf->size) {
-dec->ws->buffer_unmap(buf->res->buf);
+dec->ws->buffer_unmap(dec->ws, buf->res->buf);
if (!si_vid_resize_buffer(dec->screen, &dec->cs, buf, new_size)) {
RVID_ERR("Can't resize bitstream buffer!");
return;
}
-dec->bs_ptr = dec->ws->buffer_map(buf->res->buf, &dec->cs,
+dec->bs_ptr = dec->ws->buffer_map(dec->ws, buf->res->buf, &dec->cs,
PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
if (!dec->bs_ptr)
return;
@@ -1092,7 +1092,7 @@ static void ruvd_end_frame(struct pipe_video_codec *decoder, struct pipe_video_b
bs_size = align(dec->bs_size, 128);
memset(dec->bs_ptr, 0, bs_size - dec->bs_size);
-dec->ws->buffer_unmap(bs_buf->res->buf);
+dec->ws->buffer_unmap(dec->ws, bs_buf->res->buf);
map_msg_fb_it_buf(dec);
dec->msg->size = sizeof(*dec->msg);


@@ -247,13 +247,13 @@ static void radeon_uvd_enc_get_feedback(struct pipe_video_codec *encoder, void *
if (NULL != size) {
radeon_uvd_enc_feedback_t *fb_data = (radeon_uvd_enc_feedback_t *)enc->ws->buffer_map(
-fb->res->buf, &enc->cs, PIPE_MAP_READ_WRITE | RADEON_MAP_TEMPORARY);
+enc->ws, fb->res->buf, &enc->cs, PIPE_MAP_READ_WRITE | RADEON_MAP_TEMPORARY);
if (!fb_data->status)
*size = fb_data->bitstream_size;
else
*size = 0;
-enc->ws->buffer_unmap(fb->res->buf);
+enc->ws->buffer_unmap(enc->ws, fb->res->buf);
}
si_vid_destroy_buffer(fb);


@@ -347,7 +347,7 @@ static void rvce_get_feedback(struct pipe_video_codec *encoder, void *feedback,
struct rvid_buffer *fb = feedback;
if (size) {
-uint32_t *ptr = enc->ws->buffer_map(fb->res->buf, &enc->cs,
+uint32_t *ptr = enc->ws->buffer_map(enc->ws, fb->res->buf, &enc->cs,
PIPE_MAP_READ_WRITE | RADEON_MAP_TEMPORARY);
if (ptr[1]) {
@@ -356,7 +356,7 @@ static void rvce_get_feedback(struct pipe_video_codec *encoder, void *feedback,
*size = 0;
}
-enc->ws->buffer_unmap(fb->res->buf);
+enc->ws->buffer_unmap(enc->ws, fb->res->buf);
}
// dump_feedback(enc, fb);
si_vid_destroy_buffer(fb);


@@ -1602,10 +1602,10 @@ static struct pb_buffer *rvcn_dec_message_decode(struct radeon_decoder *dec,
si_vid_clear_buffer(dec->base.context, &dec->ctx);
/* ctx needs probs table */
-ptr = dec->ws->buffer_map(dec->ctx.res->buf, &dec->cs,
+ptr = dec->ws->buffer_map(dec->ws, dec->ctx.res->buf, &dec->cs,
PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
fill_probs_table(ptr);
-dec->ws->buffer_unmap(dec->ctx.res->buf);
+dec->ws->buffer_unmap(dec->ws, dec->ctx.res->buf);
dec->bs_ptr = NULL;
} else if (fmt == PIPE_VIDEO_FORMAT_HEVC) {
unsigned ctx_size;
@@ -1781,14 +1781,14 @@ static struct pb_buffer *rvcn_dec_message_decode(struct radeon_decoder *dec,
RVID_ERR("Can't allocated context buffer.\n");
si_vid_clear_buffer(dec->base.context, &dec->ctx);
-ptr = dec->ws->buffer_map(dec->ctx.res->buf, &dec->cs, PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
+ptr = dec->ws->buffer_map(dec->ws, dec->ctx.res->buf, &dec->cs, PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
for (i = 0; i < 4; ++i) {
rvcn_init_mode_probs((void*)(ptr + i * align(sizeof(rvcn_av1_frame_context_t), 2048)));
rvcn_av1_init_mv_probs((void*)(ptr + i * align(sizeof(rvcn_av1_frame_context_t), 2048)));
rvcn_av1_default_coef_probs((void*)(ptr + i * align(sizeof(rvcn_av1_frame_context_t), 2048)), i);
}
-dec->ws->buffer_unmap(dec->ctx.res->buf);
+dec->ws->buffer_unmap(dec->ws, dec->ctx.res->buf);
}
break;
@@ -1881,7 +1881,7 @@ static void map_msg_fb_it_probs_buf(struct radeon_decoder *dec)
/* and map it for CPU access */
ptr =
-dec->ws->buffer_map(buf->res->buf, &dec->cs, PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
+dec->ws->buffer_map(dec->ws, buf->res->buf, &dec->cs, PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
/* calc buffer offsets */
dec->msg = ptr;
@@ -1906,7 +1906,7 @@ static void send_msg_buf(struct radeon_decoder *dec)
buf = &dec->msg_fb_it_probs_buffers[dec->cur_buffer];
/* unmap the buffer */
-dec->ws->buffer_unmap(buf->res->buf);
+dec->ws->buffer_unmap(dec->ws, buf->res->buf);
dec->bs_ptr = NULL;
dec->msg = NULL;
dec->fb = NULL;
@@ -2176,7 +2176,7 @@ static void radeon_dec_begin_frame(struct pipe_video_codec *decoder,
&radeon_dec_destroy_associated_data);
dec->bs_size = 0;
-dec->bs_ptr = dec->ws->buffer_map(dec->bs_buffers[dec->cur_buffer].res->buf, &dec->cs,
+dec->bs_ptr = dec->ws->buffer_map(dec->ws, dec->bs_buffers[dec->cur_buffer].res->buf, &dec->cs,
PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
}
@@ -2214,14 +2214,14 @@ static void radeon_dec_decode_bitstream(struct pipe_video_codec *decoder,
unsigned new_size = dec->bs_size + sizes[i];
if (new_size > buf->res->buf->size) {
-dec->ws->buffer_unmap(buf->res->buf);
+dec->ws->buffer_unmap(dec->ws, buf->res->buf);
dec->bs_ptr = NULL;
if (!si_vid_resize_buffer(dec->screen, &dec->cs, buf, new_size)) {
RVID_ERR("Can't resize bitstream buffer!");
return;
}
-dec->bs_ptr = dec->ws->buffer_map(buf->res->buf, &dec->cs,
+dec->bs_ptr = dec->ws->buffer_map(dec->ws, buf->res->buf, &dec->cs,
PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
if (!dec->bs_ptr)
return;
@@ -2248,7 +2248,7 @@ void send_cmd_dec(struct radeon_decoder *dec, struct pipe_video_buffer *target,
bs_buf = &dec->bs_buffers[dec->cur_buffer];
memset(dec->bs_ptr, 0, align(dec->bs_size, 128) - dec->bs_size);
-dec->ws->buffer_unmap(bs_buf->res->buf);
+dec->ws->buffer_unmap(dec->ws, bs_buf->res->buf);
dec->bs_ptr = NULL;
map_msg_fb_it_probs_buf(dec);
@@ -2410,11 +2410,11 @@ struct pipe_video_codec *radeon_create_decoder(struct pipe_context *context,
void *ptr;
buf = &dec->msg_fb_it_probs_buffers[i];
-ptr = dec->ws->buffer_map(buf->res->buf, &dec->cs,
+ptr = dec->ws->buffer_map(dec->ws, buf->res->buf, &dec->cs,
PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
ptr += FB_BUFFER_OFFSET + FB_BUFFER_SIZE;
fill_probs_table(ptr);
-dec->ws->buffer_unmap(buf->res->buf);
+dec->ws->buffer_unmap(dec->ws, buf->res->buf);
dec->bs_ptr = NULL;
}
}


@@ -287,7 +287,7 @@ void send_cmd_jpeg(struct radeon_decoder *dec, struct pipe_video_buffer *target,
bs_buf = &dec->bs_buffers[dec->cur_buffer];
memset(dec->bs_ptr, 0, align(dec->bs_size, 128) - dec->bs_size);
-dec->ws->buffer_unmap(bs_buf->res->buf);
+dec->ws->buffer_unmap(dec->ws, bs_buf->res->buf);
dec->bs_ptr = NULL;
dt = radeon_jpeg_get_decode_param(dec, target, picture);


@@ -369,13 +369,13 @@ static void radeon_enc_get_feedback(struct pipe_video_codec *encoder, void *feed
struct rvid_buffer *fb = feedback;
if (size) {
-uint32_t *ptr = enc->ws->buffer_map(fb->res->buf, &enc->cs,
+uint32_t *ptr = enc->ws->buffer_map(enc->ws, fb->res->buf, &enc->cs,
PIPE_MAP_READ_WRITE | RADEON_MAP_TEMPORARY);
if (ptr[1])
*size = ptr[6];
else
*size = 0;
-enc->ws->buffer_unmap(fb->res->buf);
+enc->ws->buffer_unmap(enc->ws, fb->res->buf);
}
si_vid_destroy_buffer(fb);


@@ -98,11 +98,11 @@ bool si_vid_resize_buffer(struct pipe_screen *screen, struct radeon_cmdbuf *cs,
if (!si_vid_create_buffer(screen, new_buf, new_size, new_buf->usage))
goto error;
-src = ws->buffer_map(old_buf.res->buf, cs, PIPE_MAP_READ | RADEON_MAP_TEMPORARY);
+src = ws->buffer_map(ws, old_buf.res->buf, cs, PIPE_MAP_READ | RADEON_MAP_TEMPORARY);
if (!src)
goto error;
-dst = ws->buffer_map(new_buf->res->buf, cs, PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
+dst = ws->buffer_map(ws, new_buf->res->buf, cs, PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
if (!dst)
goto error;
@@ -112,14 +112,14 @@ bool si_vid_resize_buffer(struct pipe_screen *screen, struct radeon_cmdbuf *cs,
dst += bytes;
memset(dst, 0, new_size);
}
-ws->buffer_unmap(new_buf->res->buf);
-ws->buffer_unmap(old_buf.res->buf);
+ws->buffer_unmap(ws, new_buf->res->buf);
+ws->buffer_unmap(ws, old_buf.res->buf);
si_vid_destroy_buffer(&old_buf);
return true;
error:
if (src)
-ws->buffer_unmap(old_buf.res->buf);
+ws->buffer_unmap(ws, old_buf.res->buf);
si_vid_destroy_buffer(new_buf);
*new_buf = old_buf;
return false;


@@ -336,15 +336,15 @@ struct radeon_winsys {
* \param usage A bitmask of the PIPE_MAP_* and RADEON_MAP_* flags.
* \return The pointer at the beginning of the buffer.
*/
-void *(*buffer_map)(struct pb_buffer *buf, struct radeon_cmdbuf *cs,
-enum pipe_map_flags usage);
+void *(*buffer_map)(struct radeon_winsys *ws, struct pb_buffer *buf,
+struct radeon_cmdbuf *cs, enum pipe_map_flags usage);
/**
* Unmap a buffer object from the client's address space.
*
* \param buf A winsys buffer object to unmap.
*/
-void (*buffer_unmap)(struct pb_buffer *buf);
+void (*buffer_unmap)(struct radeon_winsys *ws, struct pb_buffer *buf);
/**
* Wait for the buffer and return true if the buffer is not used
@@ -354,7 +354,8 @@ struct radeon_winsys {
* The timeout of PIPE_TIMEOUT_INFINITE will always wait until the buffer
* is idle.
*/
-bool (*buffer_wait)(struct pb_buffer *buf, uint64_t timeout, enum radeon_bo_usage usage);
+bool (*buffer_wait)(struct radeon_winsys *ws, struct pb_buffer *buf,
+uint64_t timeout, enum radeon_bo_usage usage);
/**
* Return buffer metadata.
@@ -363,8 +364,8 @@ struct radeon_winsys {
* \param buf A winsys buffer object to get the flags from.
* \param md Metadata
*/
-void (*buffer_get_metadata)(struct pb_buffer *buf, struct radeon_bo_metadata *md,
-struct radeon_surf *surf);
+void (*buffer_get_metadata)(struct radeon_winsys *ws, struct pb_buffer *buf,
+struct radeon_bo_metadata *md, struct radeon_surf *surf);
/**
* Set buffer metadata.
@@ -373,8 +374,8 @@ struct radeon_winsys {
* \param buf A winsys buffer object to set the flags for.
* \param md Metadata
*/
-void (*buffer_set_metadata)(struct pb_buffer *buf, struct radeon_bo_metadata *md,
-struct radeon_surf *surf);
+void (*buffer_set_metadata)(struct radeon_winsys *ws, struct pb_buffer *buf,
+struct radeon_bo_metadata *md, struct radeon_surf *surf);
/**
* Get a winsys buffer from a winsys handle. The internal structure
@@ -430,7 +431,8 @@ struct radeon_winsys {
*
* \return false on out of memory or other failure, true on success.
*/
-bool (*buffer_commit)(struct pb_buffer *buf, uint64_t offset, uint64_t size, bool commit);
+bool (*buffer_commit)(struct radeon_winsys *ws, struct pb_buffer *buf,
+uint64_t offset, uint64_t size, bool commit);
/**
* Return the virtual address of a buffer.


@@ -80,7 +80,7 @@ static bool gfx10_alloc_query_buffer(struct si_context *sctx)
qbuf = list_first_entry(&sctx->shader_query_buffers, struct gfx10_sh_query_buffer, list);
if (!qbuf->refcount &&
!si_cs_is_buffer_referenced(sctx, qbuf->buf->buf, RADEON_USAGE_READWRITE) &&
-sctx->ws->buffer_wait(qbuf->buf->buf, 0, RADEON_USAGE_READWRITE)) {
+sctx->ws->buffer_wait(sctx->ws, qbuf->buf->buf, 0, RADEON_USAGE_READWRITE)) {
/* Can immediately re-use the oldest buffer */
list_del(&qbuf->list);
} else {
@@ -108,7 +108,7 @@ static bool gfx10_alloc_query_buffer(struct si_context *sctx)
* We need to set the high bit of all the primitive counters for
* compatibility with the SET_PREDICATION packet.
*/
-uint64_t *results = sctx->ws->buffer_map(qbuf->buf->buf, NULL,
+uint64_t *results = sctx->ws->buffer_map(sctx->ws, qbuf->buf->buf, NULL,
PIPE_MAP_WRITE | PIPE_MAP_UNSYNCHRONIZED);
assert(results);
@@ -247,7 +247,7 @@ static bool gfx10_sh_query_get_result(struct si_context *sctx, struct si_query *
void *map;
if (rquery->b.flushed)
-map = sctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
+map = sctx->ws->buffer_map(sctx->ws, qbuf->buf->buf, NULL, usage);
else
map = si_buffer_map(sctx, qbuf->buf, usage);


@@ -39,7 +39,7 @@ bool si_cs_is_buffer_referenced(struct si_context *sctx, struct pb_buffer *buf,
void *si_buffer_map(struct si_context *sctx, struct si_resource *resource,
unsigned usage)
{
-return sctx->ws->buffer_map(resource->buf, &sctx->gfx_cs, usage);
+return sctx->ws->buffer_map(sctx->ws, resource->buf, &sctx->gfx_cs, usage);
}
void si_init_resource_fields(struct si_screen *sscreen, struct si_resource *res, uint64_t size,
@@ -252,7 +252,7 @@ static bool si_invalidate_buffer(struct si_context *sctx, struct si_resource *bu
/* Check if mapping this buffer would cause waiting for the GPU. */
if (si_cs_is_buffer_referenced(sctx, buf->buf, RADEON_USAGE_READWRITE) ||
-!sctx->ws->buffer_wait(buf->buf, 0, RADEON_USAGE_READWRITE)) {
+!sctx->ws->buffer_wait(sctx->ws, buf->buf, 0, RADEON_USAGE_READWRITE)) {
/* Reallocate the buffer in the same pipe_resource. */
si_alloc_resource(sctx->screen, buf);
si_rebind_buffer(sctx, &buf->b.b);
@@ -402,7 +402,7 @@ static void *si_buffer_transfer_map(struct pipe_context *ctx, struct pipe_resour
*/
if (buf->flags & RADEON_FLAG_SPARSE || force_discard_range ||
si_cs_is_buffer_referenced(sctx, buf->buf, RADEON_USAGE_READWRITE) ||
-!sctx->ws->buffer_wait(buf->buf, 0, RADEON_USAGE_READWRITE)) {
+!sctx->ws->buffer_wait(sctx->ws, buf->buf, 0, RADEON_USAGE_READWRITE)) {
/* Do a wait-free write-only transfer using a temporary buffer. */
struct u_upload_mgr *uploader;
struct si_resource *staging = NULL;
@@ -513,7 +513,7 @@ static void si_buffer_transfer_unmap(struct pipe_context *ctx, struct pipe_trans
if (transfer->usage & (PIPE_MAP_ONCE | RADEON_MAP_TEMPORARY) &&
!stransfer->staging)
-sctx->ws->buffer_unmap(si_resource(stransfer->b.b.resource)->buf);
+sctx->ws->buffer_unmap(sctx->ws, si_resource(stransfer->b.b.resource)->buf);
si_resource_reference(&stransfer->staging, NULL);
assert(stransfer->b.staging == NULL); /* for threaded context only */
@@ -717,7 +717,7 @@ static bool si_resource_commit(struct pipe_context *pctx, struct pipe_resource *
assert(resource->target == PIPE_BUFFER);
-return ctx->ws->buffer_commit(res->buf, box->x, box->width, commit);
+return ctx->ws->buffer_commit(ctx->ws, res->buf, box->x, box->width, commit);
}
void si_init_screen_buffer_functions(struct si_screen *sscreen)


@@ -107,7 +107,7 @@ static void si_dump_shader(struct si_screen *sscreen, struct si_shader *shader,
unsigned size = shader->bo->b.b.width0;
fprintf(f, "BO: VA=%" PRIx64 " Size=%u\n", shader->bo->gpu_address, size);
-const char *mapped = sscreen->ws->buffer_map(
+const char *mapped = sscreen->ws->buffer_map(sscreen->ws,
shader->bo->buf, NULL,
PIPE_MAP_UNSYNCHRONIZED | PIPE_MAP_READ | RADEON_MAP_TEMPORARY);
@@ -115,7 +115,7 @@ static void si_dump_shader(struct si_screen *sscreen, struct si_shader *shader,
fprintf(f, " %4x: %08x\n", i, *(uint32_t *)(mapped + i));
}
-sscreen->ws->buffer_unmap(shader->bo->buf);
+sscreen->ws->buffer_unmap(sscreen->ws, shader->bo->buf);
fprintf(f, "\n");
}
@@ -402,7 +402,7 @@ static void si_log_chunk_type_cs_print(void *data, FILE *f)
* waited for the context, so this buffer should be idle.
* If the GPU is hung, there is no point in waiting for it.
*/
-uint32_t *map = ctx->ws->buffer_map(scs->trace_buf->buf, NULL,
+uint32_t *map = ctx->ws->buffer_map(ctx->ws, scs->trace_buf->buf, NULL,
PIPE_MAP_UNSYNCHRONIZED | PIPE_MAP_READ);
if (map) {
last_trace_id = map[0];


@@ -225,7 +225,7 @@ struct pipe_fence_handle *si_create_fence(struct pipe_context *ctx,
static bool si_fine_fence_signaled(struct radeon_winsys *rws, const struct si_fine_fence *fine)
{
char *map =
-rws->buffer_map(fine->buf->buf, NULL, PIPE_MAP_READ | PIPE_MAP_UNSYNCHRONIZED);
+rws->buffer_map(rws, fine->buf->buf, NULL, PIPE_MAP_READ | PIPE_MAP_UNSYNCHRONIZED);
if (!map)
return false;


@@ -1075,7 +1075,7 @@ static bool si_pc_query_get_result(struct si_context *sctx, struct si_query *squ
void *map;
if (squery->b.flushed)
-map = sctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
+map = sctx->ws->buffer_map(sctx->ws, qbuf->buf->buf, NULL, usage);
else
map = si_buffer_map(sctx, qbuf->buf, usage);


@@ -543,7 +543,7 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen, unsign
goto fail;
sctx->border_color_map =
-ws->buffer_map(sctx->border_color_buffer->buf, NULL, PIPE_MAP_WRITE);
+ws->buffer_map(ws, sctx->border_color_buffer->buf, NULL, PIPE_MAP_WRITE);
if (!sctx->border_color_map)
goto fail;
}


@@ -570,7 +570,7 @@ void si_query_buffer_reset(struct si_context *sctx, struct si_query_buffer *buff
/* Discard even the oldest buffer if it can't be mapped without a stall. */
if (si_cs_is_buffer_referenced(sctx, buffer->buf->buf, RADEON_USAGE_READWRITE) ||
-!sctx->ws->buffer_wait(buffer->buf->buf, 0, RADEON_USAGE_READWRITE)) {
+!sctx->ws->buffer_wait(sctx->ws, buffer->buf->buf, 0, RADEON_USAGE_READWRITE)) {
si_resource_reference(&buffer->buf, NULL);
} else {
buffer->unprepared = true;
@@ -629,7 +629,7 @@ static bool si_query_hw_prepare_buffer(struct si_context *sctx, struct si_query_
struct si_screen *screen = sctx->screen;
/* The caller ensures that the buffer is currently unused by the GPU. */
-uint32_t *results = screen->ws->buffer_map(qbuf->buf->buf, NULL,
+uint32_t *results = screen->ws->buffer_map(sctx->ws, qbuf->buf->buf, NULL,
PIPE_MAP_WRITE | PIPE_MAP_UNSYNCHRONIZED);
if (!results)
return false;
@@ -1424,7 +1424,7 @@ bool si_query_hw_get_result(struct si_context *sctx, struct si_query *squery, bo
void *map;
if (squery->b.flushed)
-map = sctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
+map = sctx->ws->buffer_map(sctx->ws, qbuf->buf->buf, NULL, usage);
else
map = si_buffer_map(sctx, qbuf->buf, usage);


@@ -871,7 +871,7 @@ bool si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader
u.get_external_symbol = si_get_external_symbol;
u.cb_data = &scratch_va;
u.rx_va = shader->bo->gpu_address;
-u.rx_ptr = sscreen->ws->buffer_map(
+u.rx_ptr = sscreen->ws->buffer_map(sscreen->ws,
shader->bo->buf, NULL,
PIPE_MAP_READ_WRITE | PIPE_MAP_UNSYNCHRONIZED | RADEON_MAP_TEMPORARY);
if (!u.rx_ptr)
@@ -886,7 +886,7 @@ bool si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader
memcpy(shader->binary.uploaded_code, u.rx_ptr, size);
}
-sscreen->ws->buffer_unmap(shader->bo->buf);
+sscreen->ws->buffer_unmap(sscreen->ws, shader->bo->buf);
ac_rtld_close(&binary);
return size >= 0;


@@ -491,7 +491,7 @@ si_get_thread_trace(struct si_context *sctx,
memset(thread_trace, 0, sizeof(*thread_trace));
thread_trace->num_traces = max_se;
-sctx->thread_trace->ptr = sctx->ws->buffer_map(sctx->thread_trace->bo,
+sctx->thread_trace->ptr = sctx->ws->buffer_map(sctx->ws, sctx->thread_trace->bo,
NULL,
PIPE_MAP_READ);


@@ -4818,7 +4818,7 @@ static void *si_create_vertex_elements(struct pipe_context *ctx, unsigned count,
return NULL;
}
void *map =
-sscreen->ws->buffer_map(v->instance_divisor_factor_buffer->buf, NULL, PIPE_MAP_WRITE);
+sscreen->ws->buffer_map(sscreen->ws, v->instance_divisor_factor_buffer->buf, NULL, PIPE_MAP_WRITE);
memcpy(map, divisor_factors, num_divisors * sizeof(divisor_factors[0]));
}
return v;


@@ -521,7 +521,7 @@ static void si_set_tex_bo_metadata(struct si_screen *sscreen, struct si_texture
ac_surface_get_umd_metadata(&sscreen->info, &tex->surface,
tex->buffer.b.b.last_level + 1,
desc, &md.size_metadata, md.metadata);
-sscreen->ws->buffer_set_metadata(tex->buffer.buf, &md, &tex->surface);
+sscreen->ws->buffer_set_metadata(sscreen->ws, tex->buffer.buf, &md, &tex->surface);
}
static bool si_displayable_dcc_needs_explicit_flush(struct si_texture *tex)
@@ -1118,7 +1118,7 @@ static struct si_texture *si_texture_create_object(struct pipe_screen *screen,
SI_RESOURCE_FLAG_DRIVER_INTERNAL, PIPE_USAGE_STREAM,
dcc_retile_map_size,
sscreen->info.tcc_cache_line_size);
-void *map = sscreen->ws->buffer_map(buf->buf, NULL, PIPE_MAP_WRITE);
+void *map = sscreen->ws->buffer_map(sscreen->ws, buf->buf, NULL, PIPE_MAP_WRITE);
/* Upload the retile map into the staging buffer. */
memcpy(map, tex->surface.u.gfx9.dcc_retile_map, dcc_retile_map_size);
@@ -1541,7 +1541,7 @@ static struct pipe_resource *si_texture_from_winsys_buffer(struct si_screen *ssc
dedicated = false;
if (dedicated) {
-sscreen->ws->buffer_get_metadata(buf, &metadata, &surface);
+sscreen->ws->buffer_get_metadata(sscreen->ws, buf, &metadata, &surface);
} else {
/**
* The bo metadata is unset for un-dedicated images. So we fall
@@ -1864,7 +1864,7 @@ static void *si_texture_transfer_map(struct pipe_context *ctx, struct pipe_resou
tex->buffer.domains & RADEON_DOMAIN_VRAM || tex->buffer.flags & RADEON_FLAG_GTT_WC;
/* Write & linear only: */
else if (si_cs_is_buffer_referenced(sctx, tex->buffer.buf, RADEON_USAGE_READWRITE) ||
-!sctx->ws->buffer_wait(tex->buffer.buf, 0, RADEON_USAGE_READWRITE)) {
+!sctx->ws->buffer_wait(sctx->ws, tex->buffer.buf, 0, RADEON_USAGE_READWRITE)) {
/* It's busy. */
if (si_can_invalidate_texture(sctx->screen, tex, usage, box))
si_texture_invalidate_storage(sctx, tex);
@@ -1967,7 +1967,7 @@ static void si_texture_transfer_unmap(struct pipe_context *ctx, struct pipe_tran
if (sizeof(void *) == 4) {
struct si_resource *buf = stransfer->staging ? stransfer->staging : &tex->buffer;
-sctx->ws->buffer_unmap(buf->buf);
+sctx->ws->buffer_unmap(sctx->ws, buf->buf);
}
if ((transfer->usage & PIPE_MAP_WRITE) && stransfer->staging)


@@ -47,7 +47,8 @@ struct amdgpu_sparse_backing_chunk {
uint32_t begin, end;
};
-static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
+static bool amdgpu_bo_wait(struct radeon_winsys *rws,
+struct pb_buffer *_buf, uint64_t timeout,
enum radeon_bo_usage usage)
{
struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
@@ -173,13 +174,13 @@ void amdgpu_bo_destroy(void *winsys, struct pb_buffer *_buf)
{
struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
struct amdgpu_screen_winsys *sws_iter;
-struct amdgpu_winsys *ws = bo->ws;
+struct amdgpu_winsys *ws = winsys;
assert(bo->bo && "must not be called for slab entries");
if (!bo->u.real.is_user_ptr && bo->u.real.cpu_ptr) {
bo->u.real.cpu_ptr = NULL;
-amdgpu_bo_unmap(&bo->base);
+amdgpu_bo_unmap(&ws->dummy_ws.base, &bo->base);
}
assert(bo->u.real.is_user_ptr || bo->u.real.map_count == 0);
@@ -277,7 +278,8 @@ static bool amdgpu_bo_do_map(struct amdgpu_winsys_bo *bo, void **cpu)
return true;
}
-void *amdgpu_bo_map(struct pb_buffer *buf,
+void *amdgpu_bo_map(struct radeon_winsys *rws,
+struct pb_buffer *buf,
struct radeon_cmdbuf *rcs,
enum pipe_map_flags usage)
{
@@ -306,7 +308,7 @@ void *amdgpu_bo_map(struct pb_buffer *buf,
return NULL;
}
-if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
+if (!amdgpu_bo_wait(rws, (struct pb_buffer*)bo, 0,
RADEON_USAGE_WRITE)) {
return NULL;
}
@@ -317,7 +319,7 @@ void *amdgpu_bo_map(struct pb_buffer *buf,
return NULL;
}
-if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
+if (!amdgpu_bo_wait(rws, (struct pb_buffer*)bo, 0,
RADEON_USAGE_READWRITE)) {
return NULL;
}
@@ -345,7 +347,7 @@ void *amdgpu_bo_map(struct pb_buffer *buf,
}
}
-amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
+amdgpu_bo_wait(rws, (struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
RADEON_USAGE_WRITE);
} else {
/* Mapping for write. */
@@ -360,7 +362,7 @@ void *amdgpu_bo_map(struct pb_buffer *buf,
}
}
-amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
+amdgpu_bo_wait(rws, (struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
RADEON_USAGE_READWRITE);
}
@@ -407,7 +409,7 @@ void *amdgpu_bo_map(struct pb_buffer *buf,
return (uint8_t*)cpu + offset;
}
-void amdgpu_bo_unmap(struct pb_buffer *buf)
+void amdgpu_bo_unmap(struct radeon_winsys *rws, struct pb_buffer *buf)
{
struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
struct amdgpu_winsys_bo *real;
@@ -622,7 +624,9 @@ error_bo_alloc:
bool amdgpu_bo_can_reclaim(void *winsys, struct pb_buffer *_buf)
{
-return amdgpu_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
+struct amdgpu_winsys *ws = winsys;
+return amdgpu_bo_wait(&ws->dummy_ws.base, _buf, 0, RADEON_USAGE_READWRITE);
}
bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
@@ -1188,8 +1192,8 @@ error_alloc_commitments:
}
static bool
-amdgpu_bo_sparse_commit(struct pb_buffer *buf, uint64_t offset, uint64_t size,
-bool commit)
+amdgpu_bo_sparse_commit(struct radeon_winsys *rws, struct pb_buffer *buf,
+uint64_t offset, uint64_t size, bool commit)
{
struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buf);
struct amdgpu_sparse_commitment *comm;
@@ -1316,7 +1320,8 @@ out:
return ok;
}
-static void amdgpu_buffer_get_metadata(struct pb_buffer *_buf,
+static void amdgpu_buffer_get_metadata(struct radeon_winsys *rws,
+struct pb_buffer *_buf,
struct radeon_bo_metadata *md,
struct radeon_surf *surf)
{
@@ -1337,7 +1342,8 @@ static void amdgpu_buffer_get_metadata(struct pb_buffer *_buf,
memcpy(md->metadata, info.metadata.umd_metadata, sizeof(md->metadata));
}
-static void amdgpu_buffer_set_metadata(struct pb_buffer *_buf,
+static void amdgpu_buffer_set_metadata(struct radeon_winsys *rws,
+struct pb_buffer *_buf,
struct radeon_bo_metadata *md,
struct radeon_surf *surf)
{


@@ -126,10 +126,11 @@ struct pb_buffer *amdgpu_bo_create(struct amdgpu_winsys *ws,
enum radeon_bo_domain domain,
enum radeon_bo_flag flags);
void amdgpu_bo_destroy(void *winsys, struct pb_buffer *_buf);
-void *amdgpu_bo_map(struct pb_buffer *buf,
+void *amdgpu_bo_map(struct radeon_winsys *rws,
+struct pb_buffer *buf,
struct radeon_cmdbuf *rcs,
enum pipe_map_flags usage);
-void amdgpu_bo_unmap(struct pb_buffer *buf);
+void amdgpu_bo_unmap(struct radeon_winsys *rws, struct pb_buffer *buf);
void amdgpu_bo_init_functions(struct amdgpu_screen_winsys *ws);
bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry);


@@ -729,7 +729,7 @@ static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws,
if (!pb)
return false;
-mapped = amdgpu_bo_map(pb, NULL, PIPE_MAP_WRITE);
+mapped = amdgpu_bo_map(&ws->dummy_ws.base, pb, NULL, PIPE_MAP_WRITE);
if (!mapped) {
pb_reference(&pb, NULL);
return false;
@@ -1063,7 +1063,7 @@ amdgpu_cs_setup_preemption(struct radeon_cmdbuf *rcs, const uint32_t *preamble_i
if (!preamble_bo)
return false;
-map = (uint32_t*)amdgpu_bo_map(preamble_bo, NULL,
+map = (uint32_t*)amdgpu_bo_map(&ws->dummy_ws.base, preamble_bo, NULL,
PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
if (!map) {
pb_reference(&preamble_bo, NULL);
@@ -1077,7 +1077,7 @@ amdgpu_cs_setup_preemption(struct radeon_cmdbuf *rcs, const uint32_t *preamble_i
uint32_t ib_pad_dw_mask = ws->info.ib_pad_dw_mask[cs->ring_type];
while (preamble_num_dw & ib_pad_dw_mask)
map[preamble_num_dw++] = PKT3_NOP_PAD;
-amdgpu_bo_unmap(preamble_bo);
+amdgpu_bo_unmap(&ws->dummy_ws.base, preamble_bo);
for (unsigned i = 0; i < 2; i++) {
csc[i]->ib[IB_PREAMBLE] = csc[i]->ib[IB_MAIN];


@@ -435,6 +435,7 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
aws->fd = ws->fd;
aws->info.drm_major = drm_major;
aws->info.drm_minor = drm_minor;
+aws->dummy_ws.aws = aws; /* only the pointer is used */
if (!do_winsys_init(aws, config, fd))
goto fail_alloc;
@@ -442,7 +443,7 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
/* Create managers. */
pb_cache_init(&aws->bo_cache, RADEON_MAX_CACHED_HEAPS,
500000, aws->check_vm ? 1.0f : 2.0f, 0,
-(aws->info.vram_size + aws->info.gart_size) / 8, NULL,
+(aws->info.vram_size + aws->info.gart_size) / 8, ws,
amdgpu_bo_destroy, amdgpu_bo_can_reclaim);
unsigned min_slab_order = 8; /* 256 bytes */


@@ -39,6 +39,19 @@ struct amdgpu_cs;
#define NUM_SLAB_ALLOCATORS 3
+struct amdgpu_screen_winsys {
+struct radeon_winsys base;
+struct amdgpu_winsys *aws;
+int fd;
+struct pipe_reference reference;
+struct amdgpu_screen_winsys *next;
+/* Maps a BO to its KMS handle valid for this DRM file descriptor
+* Protected by amdgpu_winsys::sws_list_lock
+*/
+struct hash_table *kms_handles;
+};
struct amdgpu_winsys {
struct pipe_reference reference;
@@ -106,19 +119,11 @@ struct amdgpu_winsys {
* and re-imported buffers. */
struct hash_table *bo_export_table;
simple_mtx_t bo_export_table_lock;
-};
-struct amdgpu_screen_winsys {
-struct radeon_winsys base;
-struct amdgpu_winsys *aws;
-int fd;
-struct pipe_reference reference;
-struct amdgpu_screen_winsys *next;
-/* Maps a BO to its KMS handle valid for this DRM file descriptor
-* Protected by amdgpu_winsys::sws_list_lock
+/* Since most winsys functions require struct radeon_winsys *, dummy_ws.base is used
+* for invoking them because sws_list can be NULL.
*/
-struct hash_table *kms_handles;
+struct amdgpu_screen_winsys dummy_ws;
};
static inline struct amdgpu_screen_winsys *
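A comparable toy model (again hypothetical names, not Mesa code) of the dummy_ws idea from the hunk above: the driver-private winsys embeds one screen-winsys instance, so internal paths always have a usable base pointer to pass, even when no real screen winsys is alive.

/* Toy model of dummy_ws (hypothetical names, not Mesa code). */
#include <stdio.h>

struct screen_winsys {                   /* stands in for amdgpu_screen_winsys */
   int base;                             /* stands in for struct radeon_winsys base */
};

struct driver_winsys {                   /* stands in for amdgpu_winsys */
   struct screen_winsys dummy_ws;        /* always present, even with no live screens */
};

/* An internal path that needs a screen-winsys pointer, e.g. during teardown. */
static void internal_teardown(struct driver_winsys *aws)
{
   printf("using dummy ws base at %p\n", (void *)&aws->dummy_ws.base);
}

int main(void)
{
   struct driver_winsys aws = {0};
   internal_teardown(&aws);              /* works with zero live screen winsyses */
   return 0;
}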


@@ -129,7 +129,8 @@ static void radeon_bo_wait_idle(struct radeon_bo *bo)
}
}
-static bool radeon_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
+static bool radeon_bo_wait(struct radeon_winsys *rws,
+struct pb_buffer *_buf, uint64_t timeout,
enum radeon_bo_usage usage)
{
struct radeon_bo *bo = radeon_bo(_buf);
@@ -496,7 +497,8 @@ void *radeon_bo_do_map(struct radeon_bo *bo)
return (uint8_t*)bo->u.real.ptr + offset;
}
-static void *radeon_bo_map(struct pb_buffer *buf,
+static void *radeon_bo_map(struct radeon_winsys *rws,
+struct pb_buffer *buf,
struct radeon_cmdbuf *rcs,
enum pipe_map_flags usage)
{
@@ -521,7 +523,7 @@ static void *radeon_bo_map(struct pb_buffer *buf,
return NULL;
}
-if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
+if (!radeon_bo_wait(rws, (struct pb_buffer*)bo, 0,
RADEON_USAGE_WRITE)) {
return NULL;
}
@@ -532,7 +534,7 @@ static void *radeon_bo_map(struct pb_buffer *buf,
return NULL;
}
-if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
+if (!radeon_bo_wait(rws, (struct pb_buffer*)bo, 0,
RADEON_USAGE_READWRITE)) {
return NULL;
}
@@ -552,7 +554,7 @@ static void *radeon_bo_map(struct pb_buffer *buf,
cs->flush_cs(cs->flush_data,
RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
}
-radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
+radeon_bo_wait(rws, (struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
RADEON_USAGE_WRITE);
} else {
/* Mapping for write. */
@@ -567,7 +569,7 @@ static void *radeon_bo_map(struct pb_buffer *buf,
}
}
-radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
+radeon_bo_wait(rws, (struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
RADEON_USAGE_READWRITE);
}
@@ -578,7 +580,7 @@ static void *radeon_bo_map(struct pb_buffer *buf,
return radeon_bo_do_map(bo);
}
-static void radeon_bo_unmap(struct pb_buffer *_buf)
+static void radeon_bo_unmap(struct radeon_winsys *rws, struct pb_buffer *_buf)
{
struct radeon_bo *bo = (struct radeon_bo*)_buf;
@@ -744,7 +746,7 @@ bool radeon_bo_can_reclaim(void *winsys, struct pb_buffer *_buf)
if (radeon_bo_is_referenced_by_any_cs(bo))
return false;
-return radeon_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
+return radeon_bo_wait(winsys, _buf, 0, RADEON_USAGE_READWRITE);
}
bool radeon_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
@@ -872,7 +874,8 @@ static unsigned eg_tile_split_rev(unsigned eg_tile_split)
}
}
-static void radeon_bo_get_metadata(struct pb_buffer *_buf,
+static void radeon_bo_get_metadata(struct radeon_winsys *rws,
+struct pb_buffer *_buf,
struct radeon_bo_metadata *md,
struct radeon_surf *surf)
{
@@ -929,7 +932,8 @@ static void radeon_bo_get_metadata(struct pb_buffer *_buf,
md->u.legacy.scanout = bo->rws->gen >= DRV_SI && !(args.tiling_flags & RADEON_TILING_R600_NO_SCANOUT);
}
-static void radeon_bo_set_metadata(struct pb_buffer *_buf,
+static void radeon_bo_set_metadata(struct radeon_winsys *rws,
+struct pb_buffer *_buf,
struct radeon_bo_metadata *md,
struct radeon_surf *surf)
{


@@ -792,7 +792,7 @@ static bool radeon_fence_wait(struct radeon_winsys *ws,
struct pipe_fence_handle *fence,
uint64_t timeout)
{
-return ws->buffer_wait((struct pb_buffer*)fence, timeout,
+return ws->buffer_wait(ws, (struct pb_buffer*)fence, timeout,
RADEON_USAGE_READWRITE);
}