gallium/u_threaded: Prepare for buffer sharedness tracking

tc_add_to_buffer_list and tc_bind_buffer will need access to the threaded_context instance as well, so pass it to them explicitly and update all call sites.

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17338>

Author: Jonathan
Committed by: Marge Bot
Date: 2022-07-13 21:00:28 +02:00
Commit: aa87803069
Parent: 6718bff75b
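
The change is mechanical: the struct threaded_context pointer is threaded through the two buffer-list helpers, and every call site is updated to pass it. The hunks below do not show the new parameter being used yet; it only prepares for the sharedness tracking named in the title. As a rough sketch of the new calling convention, here is a minimal standalone C program; the types, helper bodies and main() harness are simplified stand-ins invented for illustration, not the real gallium definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the real gallium structures; only the fields
 * needed to show the calling convention are modeled here. */
struct tc_buffer_list { uint32_t buffer_list; };
struct pipe_resource { uint32_t buffer_id_unique; };
struct threaded_context {
   struct tc_buffer_list buffer_lists[2];
   unsigned next_buf_list;
};

/* New-style helper: it now receives the threaded_context so a later change
 * can consult or update per-context state when a buffer use is recorded. */
static void
tc_add_to_buffer_list(struct threaded_context *tc, struct tc_buffer_list *next,
                      struct pipe_resource *buf)
{
   (void)tc; /* unused here; reserved for the follow-up sharedness tracking */
   next->buffer_list |= 1u << (buf->buffer_id_unique & 31);
}

/* Set a buffer binding and record the buffer use, mirroring the second
 * helper the diff touches (its full body is not shown in the hunks). */
static void
tc_bind_buffer(struct threaded_context *tc, uint32_t *binding,
               struct tc_buffer_list *next, struct pipe_resource *buf)
{
   *binding = buf->buffer_id_unique;
   tc_add_to_buffer_list(tc, next, buf);
}

int main(void)
{
   struct threaded_context tc = { .next_buf_list = 0 };
   struct pipe_resource buf = { .buffer_id_unique = 5 };
   uint32_t binding = 0;

   /* Call sites change from tc_bind_buffer(&binding, next, &buf)
    * to tc_bind_buffer(tc, &binding, next, &buf). */
   tc_bind_buffer(&tc, &binding, &tc.buffer_lists[tc.next_buf_list], &buf);
   printf("binding=%u list=0x%x\n", (unsigned)binding,
          (unsigned)tc.buffer_lists[tc.next_buf_list].buffer_list);
   return 0;
}

Every hunk in the diff below applies exactly this signature change, either at a helper definition or at one of its call sites.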

@@ -459,7 +459,7 @@ threaded_context_flush(struct pipe_context *_pipe,
 }

 static void
-tc_add_to_buffer_list(struct tc_buffer_list *next, struct pipe_resource *buf)
+tc_add_to_buffer_list(struct threaded_context *tc, struct tc_buffer_list *next, struct pipe_resource *buf)
 {
    uint32_t id = threaded_resource(buf)->buffer_id_unique;
    BITSET_SET(next->buffer_list, id & TC_BUFFER_ID_MASK);
@@ -467,7 +467,7 @@ tc_add_to_buffer_list(struct tc_buffer_list *next, struct pipe_resource *buf)

 /* Set a buffer binding and add it to the buffer list. */
 static void
-tc_bind_buffer(uint32_t *binding, struct tc_buffer_list *next, struct pipe_resource *buf)
+tc_bind_buffer(struct threaded_context *tc, uint32_t *binding, struct tc_buffer_list *next, struct pipe_resource *buf)
 {
    uint32_t id = threaded_resource(buf)->buffer_id_unique;
    *binding = id;
@@ -985,7 +985,7 @@ tc_get_query_result_resource(struct pipe_context *_pipe,
    p->result_type = result_type;
    p->index = index;
    tc_set_resource_reference(&p->resource, resource);
-   tc_add_to_buffer_list(&tc->buffer_lists[tc->next_buf_list], resource);
+   tc_add_to_buffer_list(tc, &tc->buffer_lists[tc->next_buf_list], resource);
    p->offset = offset;
 }

@@ -1277,7 +1277,7 @@ tc_set_constant_buffer(struct pipe_context *_pipe,

    tc_set_resource_reference(&p->cb.buffer, buffer);
    if (buffer) {
-      tc_bind_buffer(&tc->const_buffers[shader][index],
+      tc_bind_buffer(tc, &tc->const_buffers[shader][index],
                      &tc->buffer_lists[tc->next_buf_list], buffer);
    } else {
       tc_unbind_buffer(&tc->const_buffers[shader][index]);
@@ -1477,7 +1477,7 @@ tc_set_sampler_views(struct pipe_context *_pipe,

       for (unsigned i = 0; i < count; i++) {
          if (views[i] && views[i]->target == PIPE_BUFFER) {
-            tc_bind_buffer(&tc->sampler_buffers[shader][start + i], next,
+            tc_bind_buffer(tc, &tc->sampler_buffers[shader][start + i], next,
                            views[i]->texture);
          } else {
             tc_unbind_buffer(&tc->sampler_buffers[shader][start + i]);
@@ -1489,7 +1489,7 @@ tc_set_sampler_views(struct pipe_context *_pipe,
          pipe_sampler_view_reference(&p->slot[i], views[i]);

          if (views[i] && views[i]->target == PIPE_BUFFER) {
-            tc_bind_buffer(&tc->sampler_buffers[shader][start + i], next,
+            tc_bind_buffer(tc, &tc->sampler_buffers[shader][start + i], next,
                            views[i]->texture);
          } else {
             tc_unbind_buffer(&tc->sampler_buffers[shader][start + i]);
@@ -1568,7 +1568,7 @@ tc_set_shader_images(struct pipe_context *_pipe,
       tc_set_resource_reference(&p->slot[i].resource, resource);

       if (resource && resource->target == PIPE_BUFFER) {
-         tc_bind_buffer(&tc->image_buffers[shader][start + i], next, resource);
+         tc_bind_buffer(tc, &tc->image_buffers[shader][start + i], next, resource);

          if (images[i].access & PIPE_IMAGE_ACCESS_WRITE) {
             struct threaded_resource *tres = threaded_resource(resource);
@@ -1663,7 +1663,7 @@ tc_set_shader_buffers(struct pipe_context *_pipe,
       if (src->buffer) {
          struct threaded_resource *tres = threaded_resource(src->buffer);

-         tc_bind_buffer(&tc->shader_buffers[shader][start + i], next, &tres->b);
+         tc_bind_buffer(tc, &tc->shader_buffers[shader][start + i], next, &tres->b);

          if (writable_bitmask & BITFIELD_BIT(i)) {
             tc_buffer_disable_cpu_storage(src->buffer);
@@ -1739,7 +1739,7 @@ tc_set_vertex_buffers(struct pipe_context *_pipe,
          struct pipe_resource *buf = buffers[i].buffer.resource;

          if (buf) {
-            tc_bind_buffer(&tc->vertex_buffers[start + i], next, buf);
+            tc_bind_buffer(tc, &tc->vertex_buffers[start + i], next, buf);
          } else {
             tc_unbind_buffer(&tc->vertex_buffers[start + i]);
          }
@@ -1757,7 +1757,7 @@ tc_set_vertex_buffers(struct pipe_context *_pipe,
          dst->buffer_offset = src->buffer_offset;

          if (buf) {
-            tc_bind_buffer(&tc->vertex_buffers[start + i], next, buf);
+            tc_bind_buffer(tc, &tc->vertex_buffers[start + i], next, buf);
          } else {
             tc_unbind_buffer(&tc->vertex_buffers[start + i]);
          }
@@ -1814,7 +1814,7 @@ tc_set_stream_output_targets(struct pipe_context *_pipe,
       pipe_so_target_reference(&p->targets[i], tgs[i]);
       if (tgs[i]) {
          tc_buffer_disable_cpu_storage(tgs[i]->buffer);
-         tc_bind_buffer(&tc->streamout_buffers[i], next, tgs[i]->buffer);
+         tc_bind_buffer(tc, &tc->streamout_buffers[i], next, tgs[i]->buffer);
       } else {
          tc_unbind_buffer(&tc->streamout_buffers[i]);
       }
@@ -2701,7 +2701,7 @@ tc_buffer_subdata(struct pipe_context *_pipe,
    /* This is will always be busy because if it wasn't, tc_improve_map_buffer-
     * _flags would set UNSYNCHRONIZED and we wouldn't get here.
     */
-   tc_add_to_buffer_list(&tc->buffer_lists[tc->next_buf_list], resource);
+   tc_add_to_buffer_list(tc, &tc->buffer_lists[tc->next_buf_list], resource);
    p->usage = usage;
    p->offset = offset;
    p->size = size;
@@ -3333,7 +3333,7 @@ tc_draw_vbo(struct pipe_context *_pipe, const struct pipe_draw_info *info,
             tc_set_resource_reference(&p->info.index.resource,
                                       info->index.resource);
          }
-         tc_add_to_buffer_list(next, info->index.resource);
+         tc_add_to_buffer_list(tc, next, info->index.resource);
       }

       memcpy(&p->info, info, DRAW_INFO_SIZE_WITHOUT_MIN_MAX_INDEX);
@@ -3345,11 +3345,11 @@ tc_draw_vbo(struct pipe_context *_pipe, const struct pipe_draw_info *info,
                                   indirect->count_from_stream_output);

       if (indirect->buffer)
-         tc_add_to_buffer_list(next, indirect->buffer);
+         tc_add_to_buffer_list(tc, next, indirect->buffer);
       if (indirect->indirect_draw_count)
-         tc_add_to_buffer_list(next, indirect->indirect_draw_count);
+         tc_add_to_buffer_list(tc, next, indirect->indirect_draw_count);
       if (indirect->count_from_stream_output)
-         tc_add_to_buffer_list(next, indirect->count_from_stream_output->buffer);
+         tc_add_to_buffer_list(tc, next, indirect->count_from_stream_output->buffer);

       memcpy(&p->indirect, indirect, sizeof(*indirect));
       p->draw.start = draws[0].start;
@@ -3401,7 +3401,7 @@ tc_draw_vbo(struct pipe_context *_pipe, const struct pipe_draw_info *info,
             tc_set_resource_reference(&p->info.index.resource,
                                       info->index.resource);
          }
-         tc_add_to_buffer_list(&tc->buffer_lists[tc->next_buf_list], info->index.resource);
+         tc_add_to_buffer_list(tc, &tc->buffer_lists[tc->next_buf_list], info->index.resource);
       }
       if (drawid_offset > 0)
          ((struct tc_draw_single_drawid*)p)->drawid_offset = drawid_offset;
@@ -3516,7 +3516,7 @@ tc_draw_vbo(struct pipe_context *_pipe, const struct pipe_draw_info *info,
             tc_set_resource_reference(&p->info.index.resource,
                                       info->index.resource);
          }
-         tc_add_to_buffer_list(&tc->buffer_lists[tc->next_buf_list], info->index.resource);
+         tc_add_to_buffer_list(tc, &tc->buffer_lists[tc->next_buf_list], info->index.resource);
       }
       take_index_buffer_ownership = false;
       memcpy(&p->info, info, DRAW_INFO_SIZE_WITHOUT_MIN_MAX_INDEX);
@@ -3728,7 +3728,7 @@ tc_launch_grid(struct pipe_context *_pipe,
    memcpy(&p->info, info, sizeof(*info));

    if (info->indirect)
-      tc_add_to_buffer_list(&tc->buffer_lists[tc->next_buf_list], info->indirect);
+      tc_add_to_buffer_list(tc, &tc->buffer_lists[tc->next_buf_list], info->indirect);

    /* This must be after tc_add_*call, which can flush the batch. */
    if (unlikely(tc->add_all_compute_bindings_to_buffer_list))
@@ -3775,8 +3775,8 @@ tc_resource_copy_region(struct pipe_context *_pipe,
    if (dst->target == PIPE_BUFFER) {
       struct tc_buffer_list *next = &tc->buffer_lists[tc->next_buf_list];

-      tc_add_to_buffer_list(next, src);
-      tc_add_to_buffer_list(next, dst);
+      tc_add_to_buffer_list(tc, next, src);
+      tc_add_to_buffer_list(tc, next, dst);

       util_range_add(&tdst->b, &tdst->valid_buffer_range,
                      dstx, dstx + src_box->width);
@@ -4080,7 +4080,7 @@ tc_clear_buffer(struct pipe_context *_pipe, struct pipe_resource *res,
    tc_buffer_disable_cpu_storage(res);

    tc_set_resource_reference(&p->res, res);
-   tc_add_to_buffer_list(&tc->buffer_lists[tc->next_buf_list], res);
+   tc_add_to_buffer_list(tc, &tc->buffer_lists[tc->next_buf_list], res);
    p->offset = offset;
    p->size = size;
    memcpy(p->clear_value, clear_value, clear_value_size);