radeonsi: stop using u_vbuf and adapt to gallium interface changes
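Note: the bulk of this diff is mechanical. struct r600_resource previously embedded u_vbuf's resource wrapper, so the underlying pipe_resource sat one level deeper (b.b.b); with the wrapper gone it is reached as b.b, and the user-buffer pointer moves from the wrapper into pipe_resource itself, which carried a user_ptr field at this point in gallium history. A rough sketch of the embedding, paraphrased from the util/u_transfer.h and util/u_vbuf.h of this era (field details here are recalled, not part of this diff):

	struct u_resource {
		struct pipe_resource b;              /* the gallium resource itself */
		const struct u_resource_vtbl *vtbl;  /* per-driver transfer hooks */
	};

	struct u_vbuf_resource {                     /* the layer being removed */
		struct u_resource b;
		uint8_t *user_ptr;                   /* user-buffer data lived here */
	};

Before, r600_resource wrapped u_vbuf_resource: the pipe_resource was rbuffer->b.b.b and the user pointer rbuffer->b.user_ptr. After, it wraps u_resource directly: the pipe_resource is rbuffer->b.b and the user pointer is rbuffer->b.b.user_ptr. The context-level u_vbuf manager is likewise replaced by driver-owned vertex/index buffer state plus a plain u_upload_mgr (see the context-creation and r600_context hunks below).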
@@ -1546,7 +1546,7 @@ static void evergreen_cb(struct r600_context *rctx, struct r600_pipe_state *rsta
 
 	format = si_translate_colorformat(surf->base.format);
 	swap = si_translate_colorswap(surf->base.format);
-	if (rtex->resource.b.b.b.usage == PIPE_USAGE_STAGING) {
+	if (rtex->resource.b.b.usage == PIPE_USAGE_STAGING) {
 		endian = V_028C70_ENDIAN_NONE;
 	} else {
 		endian = si_colorformat_endian_swap(format);
@@ -28,7 +28,7 @@
 
 #include "../../winsys/radeon/drm/radeon_winsys.h"
 #include "util/u_double_list.h"
-#include "util/u_vbuf.h"
+#include "util/u_transfer.h"
 
 #define R600_ERR(fmt, args...) \
 	fprintf(stderr, "EE %s:%d %s - "fmt, __FILE__, __LINE__, __func__, ##args)
@@ -56,7 +56,7 @@ struct r600_tiling_info {
 };
 
 struct r600_resource {
-	struct u_vbuf_resource b;
+	struct u_resource b;
 
 	/* Winsys objects. */
 	struct pb_buffer *buf;
@@ -60,8 +60,8 @@ static void r600_blitter_begin(struct pipe_context *ctx, enum r600_blitter_op op
 		util_blitter_save_viewport(rctx->blitter, &rctx->viewport);
 	}
 	util_blitter_save_vertex_buffers(rctx->blitter,
-					 rctx->vbuf_mgr->nr_vertex_buffers,
-					 rctx->vbuf_mgr->vertex_buffer);
+					 rctx->nr_vertex_buffers,
+					 rctx->vertex_buffer);
 	util_blitter_save_so_targets(rctx->blitter, rctx->num_so_targets,
 				     (struct pipe_stream_output_target**)rctx->so_targets);
 
@@ -123,8 +123,8 @@ void r600_blit_uncompress_depth(struct pipe_context *ctx, struct r600_resource_t
 	if (!texture->dirty_db)
 		return;
 
-	for (level = 0; level <= texture->resource.b.b.b.last_level; level++) {
-		unsigned num_layers = u_num_layers(&texture->resource.b.b.b, level);
+	for (level = 0; level <= texture->resource.b.b.last_level; level++) {
+		unsigned num_layers = u_num_layers(&texture->resource.b.b, level);
 
 		for (layer = 0; layer < num_layers; layer++) {
 			struct pipe_surface *zsurf, *cbsurf, surf_tmpl;
@@ -135,7 +135,7 @@ void r600_blit_uncompress_depth(struct pipe_context *ctx, struct r600_resource_t
 		surf_tmpl.u.tex.last_layer = layer;
 		surf_tmpl.usage = PIPE_BIND_DEPTH_STENCIL;
 
-		zsurf = ctx->create_surface(ctx, &texture->resource.b.b.b, &surf_tmpl);
+		zsurf = ctx->create_surface(ctx, &texture->resource.b.b, &surf_tmpl);
 
 		surf_tmpl.format = texture->flushed_depth_texture->real_format;
 		surf_tmpl.usage = PIPE_BIND_RENDER_TARGET;
@@ -367,8 +367,8 @@ void r600_blit_push_depth(struct pipe_context *ctx, struct r600_resource_texture
 	struct pipe_box sbox;
 
 	sbox.x = sbox.y = sbox.z = 0;
-	sbox.width = texture->resource.b.b.b.width0;
-	sbox.height = texture->resource.b.b.b.height0;
+	sbox.width = texture->resource.b.b.width0;
+	sbox.height = texture->resource.b.b.height0;
 	/* XXX that might be wrong */
 	sbox.depth = 1;
 
@@ -76,8 +76,8 @@ static void *r600_buffer_transfer_map(struct pipe_context *pipe,
 	struct r600_context *rctx = (struct r600_context*)pipe;
 	uint8_t *data;
 
-	if (rbuffer->b.user_ptr)
-		return (uint8_t*)rbuffer->b.user_ptr + transfer->box.x;
+	if (rbuffer->b.b.user_ptr)
+		return (uint8_t*)rbuffer->b.b.user_ptr + transfer->box.x;
 
 	data = rctx->ws->buffer_map(rbuffer->buf, rctx->cs, transfer->usage);
 	if (!data)
@@ -92,7 +92,7 @@ static void r600_buffer_transfer_unmap(struct pipe_context *pipe,
 	struct r600_resource *rbuffer = r600_resource(transfer->resource);
 	struct r600_context *rctx = (struct r600_context*)pipe;
 
-	if (rbuffer->b.user_ptr)
+	if (rbuffer->b.b.user_ptr)
 		return;
 
 	rctx->ws->buffer_unmap(rbuffer->buf);
@@ -124,7 +124,7 @@ static void r600_buffer_transfer_inline_write(struct pipe_context *pipe,
 	struct r600_resource *rbuffer = r600_resource(resource);
 	uint8_t *map = NULL;
 
-	assert(rbuffer->b.user_ptr == NULL);
+	assert(rbuffer->b.b.user_ptr == NULL);
 
 	map = rctx->ws->buffer_map(rbuffer->buf, rctx->cs,
 				   PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE | usage);
@@ -198,17 +198,17 @@ struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
 
 	rbuffer = util_slab_alloc(&rscreen->pool_buffers);
 
-	rbuffer->b.b.b = *templ;
-	pipe_reference_init(&rbuffer->b.b.b.reference, 1);
-	rbuffer->b.b.b.screen = screen;
-	rbuffer->b.b.vtbl = &r600_buffer_vtbl;
-	rbuffer->b.user_ptr = NULL;
+	rbuffer->b.b = *templ;
+	pipe_reference_init(&rbuffer->b.b.reference, 1);
+	rbuffer->b.b.screen = screen;
+	rbuffer->b.vtbl = &r600_buffer_vtbl;
+	rbuffer->b.b.user_ptr = NULL;
 
 	if (!r600_init_resource(rscreen, rbuffer, templ->width0, alignment, templ->bind, templ->usage)) {
 		util_slab_free(&rscreen->pool_buffers, rbuffer);
 		return NULL;
 	}
-	return &rbuffer->b.b.b;
+	return &rbuffer->b.b;
 }
 
 struct pipe_resource *r600_user_buffer_create(struct pipe_screen *screen,
@@ -220,22 +220,21 @@ struct pipe_resource *r600_user_buffer_create(struct pipe_screen *screen,
 
 	rbuffer = util_slab_alloc(&rscreen->pool_buffers);
 
-	pipe_reference_init(&rbuffer->b.b.b.reference, 1);
-	rbuffer->b.b.vtbl = &r600_buffer_vtbl;
-	rbuffer->b.b.b.screen = screen;
-	rbuffer->b.b.b.target = PIPE_BUFFER;
-	rbuffer->b.b.b.format = PIPE_FORMAT_R8_UNORM;
-	rbuffer->b.b.b.usage = PIPE_USAGE_IMMUTABLE;
-	rbuffer->b.b.b.bind = bind;
-	rbuffer->b.b.b.width0 = bytes;
-	rbuffer->b.b.b.height0 = 1;
-	rbuffer->b.b.b.depth0 = 1;
-	rbuffer->b.b.b.array_size = 1;
-	rbuffer->b.b.b.flags = 0;
-	rbuffer->b.b.b.user_ptr = ptr;
-	rbuffer->b.user_ptr = ptr;
+	pipe_reference_init(&rbuffer->b.b.reference, 1);
+	rbuffer->b.vtbl = &r600_buffer_vtbl;
+	rbuffer->b.b.screen = screen;
+	rbuffer->b.b.target = PIPE_BUFFER;
+	rbuffer->b.b.format = PIPE_FORMAT_R8_UNORM;
+	rbuffer->b.b.usage = PIPE_USAGE_IMMUTABLE;
+	rbuffer->b.b.bind = bind;
+	rbuffer->b.b.width0 = bytes;
+	rbuffer->b.b.height0 = 1;
+	rbuffer->b.b.depth0 = 1;
+	rbuffer->b.b.array_size = 1;
+	rbuffer->b.b.flags = 0;
+	rbuffer->b.b.user_ptr = ptr;
 	rbuffer->buf = NULL;
-	return &rbuffer->b.b.b;
+	return &rbuffer->b.b;
 }
 
 void r600_upload_index_buffer(struct r600_context *rctx,
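Note: with u_vbuf no longer intercepting user buffers, every special case now tests pipe_resource::user_ptr directly, as in the transfer-map hunk above. A minimal sketch of the recurring pattern (the helper name is hypothetical; the two branches mirror r600_buffer_transfer_map):

	/* Hypothetical helper: a resource made by r600_user_buffer_create()
	 * has no winsys buffer behind it, only a CPU pointer. */
	static void *map_buffer(struct r600_context *rctx,
				struct r600_resource *rbuffer,
				struct pipe_transfer *transfer)
	{
		if (rbuffer->b.b.user_ptr)	/* user buffer: no GPU storage */
			return (uint8_t*)rbuffer->b.b.user_ptr + transfer->box.x;

		/* real buffer: map it through the winsys */
		return rctx->ws->buffer_map(rbuffer->buf, rctx->cs, transfer->usage);
	}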
@@ -243,16 +242,16 @@ void r600_upload_index_buffer(struct r600_context *rctx,
 {
 	struct r600_resource *rbuffer = r600_resource(ib->buffer);
 
-	u_upload_data(rctx->vbuf_mgr->uploader, 0, count * ib->index_size,
-		      rbuffer->b.user_ptr, &ib->offset, &ib->buffer);
+	u_upload_data(rctx->uploader, 0, count * ib->index_size,
+		      rbuffer->b.b.user_ptr, &ib->offset, &ib->buffer);
 }
 
 void r600_upload_const_buffer(struct r600_context *rctx, struct r600_resource **rbuffer,
 			      uint32_t *const_offset)
 {
-	if ((*rbuffer)->b.user_ptr) {
-		uint8_t *ptr = (*rbuffer)->b.user_ptr;
-		unsigned size = (*rbuffer)->b.b.b.width0;
+	if ((*rbuffer)->b.b.user_ptr) {
+		uint8_t *ptr = (*rbuffer)->b.b.user_ptr;
+		unsigned size = (*rbuffer)->b.b.width0;
 
 		*rbuffer = NULL;
 
@@ -269,12 +268,12 @@ void r600_upload_const_buffer(struct r600_context *rctx, struct r600_resource **
 				tmpPtr[i] = bswap_32(((uint32_t *)ptr)[i]);
 			}
 
-			u_upload_data(rctx->vbuf_mgr->uploader, 0, size, tmpPtr, const_offset,
+			u_upload_data(rctx->uploader, 0, size, tmpPtr, const_offset,
 				      (struct pipe_resource**)rbuffer);
 
 			free(tmpPtr);
 		} else {
-			u_upload_data(rctx->vbuf_mgr->uploader, 0, size, ptr, const_offset,
+			u_upload_data(rctx->uploader, 0, size, ptr, const_offset,
 				      (struct pipe_resource**)rbuffer);
 		}
 	} else {
@@ -387,7 +387,7 @@ void r600_context_pipe_state_set(struct r600_context *ctx, struct r600_pipe_stat
 		if (block->pm4_bo_index[id]) {
 			/* find relocation */
 			reloc_id = block->pm4_bo_index[id];
-			pipe_resource_reference((struct pipe_resource**)&block->reloc[reloc_id].bo, &reg->bo->b.b.b);
+			pipe_resource_reference((struct pipe_resource**)&block->reloc[reloc_id].bo, &reg->bo->b.b);
 			block->reloc[reloc_id].bo_usage = reg->bo_usage;
 			/* always force dirty for relocs for now */
 			dirty |= R600_BLOCK_STATUS_DIRTY;
@@ -620,21 +620,21 @@ static boolean r600_query_result(struct r600_context *ctx, struct r600_query *qu
 		while (results_base != query->results_end) {
 			query->result.u64 +=
 				r600_query_read_result(map + results_base, 0, 2, true);
-			results_base = (results_base + 16) % query->buffer->b.b.b.width0;
+			results_base = (results_base + 16) % query->buffer->b.b.width0;
 		}
 		break;
 	case PIPE_QUERY_OCCLUSION_PREDICATE:
 		while (results_base != query->results_end) {
 			query->result.b = query->result.b ||
 				r600_query_read_result(map + results_base, 0, 2, true) != 0;
-			results_base = (results_base + 16) % query->buffer->b.b.b.width0;
+			results_base = (results_base + 16) % query->buffer->b.b.width0;
 		}
 		break;
 	case PIPE_QUERY_TIME_ELAPSED:
 		while (results_base != query->results_end) {
 			query->result.u64 +=
 				r600_query_read_result(map + results_base, 0, 2, false);
-			results_base = (results_base + query->result_size) % query->buffer->b.b.b.width0;
+			results_base = (results_base + query->result_size) % query->buffer->b.b.width0;
 		}
 		break;
 	case PIPE_QUERY_PRIMITIVES_EMITTED:
@@ -647,7 +647,7 @@ static boolean r600_query_result(struct r600_context *ctx, struct r600_query *qu
 		while (results_base != query->results_end) {
 			query->result.u64 +=
 				r600_query_read_result(map + results_base, 2, 6, true);
-			results_base = (results_base + query->result_size) % query->buffer->b.b.b.width0;
+			results_base = (results_base + query->result_size) % query->buffer->b.b.width0;
 		}
 		break;
 	case PIPE_QUERY_PRIMITIVES_GENERATED:
@@ -655,7 +655,7 @@ static boolean r600_query_result(struct r600_context *ctx, struct r600_query *qu
 		while (results_base != query->results_end) {
 			query->result.u64 +=
 				r600_query_read_result(map + results_base, 0, 4, true);
-			results_base = (results_base + query->result_size) % query->buffer->b.b.b.width0;
+			results_base = (results_base + query->result_size) % query->buffer->b.b.width0;
 		}
 		break;
 	case PIPE_QUERY_SO_STATISTICS:
@@ -664,7 +664,7 @@ static boolean r600_query_result(struct r600_context *ctx, struct r600_query *qu
 				r600_query_read_result(map + results_base, 2, 6, true);
 			query->result.so.primitives_storage_needed +=
 				r600_query_read_result(map + results_base, 0, 4, true);
-			results_base = (results_base + query->result_size) % query->buffer->b.b.b.width0;
+			results_base = (results_base + query->result_size) % query->buffer->b.b.width0;
 		}
 		break;
 	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
@@ -672,7 +672,7 @@ static boolean r600_query_result(struct r600_context *ctx, struct r600_query *qu
 			query->result.b = query->result.b ||
 				r600_query_read_result(map + results_base, 2, 6, true) !=
 				r600_query_read_result(map + results_base, 0, 4, true);
-			results_base = (results_base + query->result_size) % query->buffer->b.b.b.width0;
+			results_base = (results_base + query->result_size) % query->buffer->b.b.width0;
 		}
 		break;
 	default:
@@ -693,7 +693,7 @@ void r600_query_begin(struct r600_context *ctx, struct r600_query *query)
 
 	r600_need_cs_space(ctx, query->num_cs_dw * 2, TRUE);
 
-	new_results_end = (query->results_end + query->result_size) % query->buffer->b.b.b.width0;
+	new_results_end = (query->results_end + query->result_size) % query->buffer->b.b.width0;
 
 	/* collect current results if query buffer is full */
 	if (new_results_end == query->results_start) {
@@ -811,7 +811,7 @@ void r600_query_end(struct r600_context *ctx, struct r600_query *query)
 	cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
 	cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, query->buffer, RADEON_USAGE_WRITE);
 
-	query->results_end = (query->results_end + query->result_size) % query->buffer->b.b.b.width0;
+	query->results_end = (query->results_end + query->result_size) % query->buffer->b.b.width0;
 	ctx->num_cs_dw_queries_suspend -= query->num_cs_dw;
 }
 
@@ -833,7 +833,7 @@ void r600_query_predication(struct r600_context *ctx, struct r600_query *query,
 	uint32_t op;
 
 	/* find count of the query data blocks */
-	count = (query->buffer->b.b.b.width0 + query->results_end - query->results_start) % query->buffer->b.b.b.width0;
+	count = (query->buffer->b.b.width0 + query->results_end - query->results_start) % query->buffer->b.b.width0;
 	count /= query->result_size;
 
 	r600_need_cs_space(ctx, 5 * count, TRUE);
@@ -850,7 +850,7 @@ void r600_query_predication(struct r600_context *ctx, struct r600_query *query,
 			cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
 			cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, query->buffer,
 								   RADEON_USAGE_READ);
-			results_base = (results_base + query->result_size) % query->buffer->b.b.b.width0;
+			results_base = (results_base + query->result_size) % query->buffer->b.b.width0;
 
 			/* set CONTINUE bit for all packets except the first */
 			op |= PREDICATION_CONTINUE;
@@ -24,7 +24,6 @@
 #define R600_RESOURCE_H
 
 #include "util/u_transfer.h"
-#include "util/u_vbuf.h"
 
 /* flag to indicate a resource is to be used as a transfer so should not be tiled */
 #define R600_RESOURCE_FLAG_TRANSFER PIPE_RESOURCE_FLAG_DRV_PRIV
@@ -279,8 +279,6 @@ void r600_bind_vertex_elements(struct pipe_context *ctx, void *state)
 	rctx->vertex_elements = v;
 	if (v) {
 		r600_inval_shader_cache(rctx);
-		u_vbuf_bind_vertex_elements(rctx->vbuf_mgr, state,
-					    v->vmgr_elements);
 
 		rctx->states[v->rstate.id] = &v->rstate;
 		r600_context_pipe_state_set(rctx, &v->rstate);
@@ -297,8 +295,6 @@ void r600_delete_vertex_element(struct pipe_context *ctx, void *state)
 	}
 	if (rctx->vertex_elements == state)
 		rctx->vertex_elements = NULL;
-
-	u_vbuf_destroy_vertex_elements(rctx->vbuf_mgr, v->vmgr_elements);
 	FREE(state);
 }
 
@@ -308,7 +304,12 @@ void r600_set_index_buffer(struct pipe_context *ctx,
 {
 	struct r600_context *rctx = (struct r600_context *)ctx;
 
-	u_vbuf_set_index_buffer(rctx->vbuf_mgr, ib);
+	if (ib) {
+		pipe_resource_reference(&rctx->index_buffer.buffer, ib->buffer);
+		memcpy(&rctx->index_buffer, ib, sizeof(*ib));
+	} else {
+		pipe_resource_reference(&rctx->index_buffer.buffer, NULL);
+	}
 }
 
 void r600_set_vertex_buffers(struct pipe_context *ctx, unsigned count,
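Note: the context now tracks the bound index buffer itself, and the ordering in the hunk above matters: pipe_resource_reference() fixes the refcounts first (releasing the old binding, referencing the new buffer), then memcpy() copies index_size and offset while rewriting .buffer with the same pointer value the reference call already accounted for. A commented restatement:

	if (ib) {
		/* swap references: old binding released, new one held */
		pipe_resource_reference(&rctx->index_buffer.buffer, ib->buffer);
		/* copy the remaining fields; .buffer is overwritten with the
		 * identical pointer, so the reference count stays correct */
		memcpy(&rctx->index_buffer, ib, sizeof(*ib));
	} else {
		pipe_resource_reference(&rctx->index_buffer.buffer, NULL);
	}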
@@ -316,7 +317,7 @@ void r600_set_vertex_buffers(struct pipe_context *ctx, unsigned count,
 {
 	struct r600_context *rctx = (struct r600_context *)ctx;
 
-	u_vbuf_set_vertex_buffers(rctx->vbuf_mgr, count, buffers);
+	util_copy_vertex_buffers(rctx->vertex_buffer, &rctx->nr_vertex_buffers, buffers, count);
 }
 
 void *si_create_vertex_elements(struct pipe_context *ctx,
@@ -331,9 +332,7 @@ void *si_create_vertex_elements(struct pipe_context *ctx,
 		return NULL;
 
 	v->count = count;
-	v->vmgr_elements =
-		u_vbuf_create_vertex_elements(rctx->vbuf_mgr, count,
-					      elements, v->elements);
+	memcpy(v->elements, elements, sizeof(struct pipe_vertex_element) * count);
 
 	return v;
 }
@@ -475,7 +474,7 @@ void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
 
 	r600_context_pipe_state_set(rctx, rstate);
 
-	if (buffer != &rbuffer->b.b.b)
+	if (buffer != &rbuffer->b.b)
 		pipe_resource_reference((struct pipe_resource**)&rbuffer, NULL);
 }
 
@@ -561,7 +560,7 @@ static void r600_vertex_buffer_update(struct r600_context *rctx)
 		count = rctx->vertex_elements->count;
 	} else {
 		/* bind vertex buffer once */
-		count = rctx->vbuf_mgr->nr_real_vertex_buffers;
+		count = rctx->nr_vertex_buffers;
 	}
 	assert(count <= 256 / 4);
 
@@ -585,12 +584,12 @@ static void r600_vertex_buffer_update(struct r600_context *rctx)
 			/* one resource per vertex elements */
 			unsigned vbuffer_index;
 			vbuffer_index = rctx->vertex_elements->elements[i].vertex_buffer_index;
-			vertex_buffer = &rctx->vbuf_mgr->real_vertex_buffer[vbuffer_index];
+			vertex_buffer = &rctx->vertex_buffer[vbuffer_index];
 			rbuffer = (struct r600_resource*)vertex_buffer->buffer;
 			offset = rctx->vertex_elements->vbuffer_offset[i];
 		} else {
 			/* bind vertex buffer once */
-			vertex_buffer = &rctx->vbuf_mgr->real_vertex_buffer[i];
+			vertex_buffer = &rctx->vertex_buffer[i];
 			rbuffer = (struct r600_resource*)vertex_buffer->buffer;
 			offset = 0;
 		}
@@ -708,7 +707,7 @@ void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo)
 	int i;
 
 	if ((!info.count && (info.indexed || !info.count_from_stream_output)) ||
-	    (info.indexed && !rctx->vbuf_mgr->index_buffer.buffer) ||
+	    (info.indexed && !rctx->index_buffer.buffer) ||
 	    !r600_conv_pipe_prim(info.mode, &prim)) {
 		return;
 	}
@@ -718,7 +717,6 @@ void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo)
 
 	si_update_derived_state(rctx);
 
-	u_vbuf_draw_begin(rctx->vbuf_mgr, &info);
 	r600_vertex_buffer_update(rctx);
 
 	rdraw.vgt_num_indices = info.count;
@@ -726,14 +724,14 @@ void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo)
 
 	if (info.indexed) {
 		/* Initialize the index buffer struct. */
-		pipe_resource_reference(&ib.buffer, rctx->vbuf_mgr->index_buffer.buffer);
-		ib.index_size = rctx->vbuf_mgr->index_buffer.index_size;
-		ib.offset = rctx->vbuf_mgr->index_buffer.offset + info.start * ib.index_size;
+		pipe_resource_reference(&ib.buffer, rctx->index_buffer.buffer);
+		ib.index_size = rctx->index_buffer.index_size;
+		ib.offset = rctx->index_buffer.offset + info.start * ib.index_size;
 
 		/* Translate or upload, if needed. */
 		r600_translate_index_buffer(rctx, &ib, info.count);
 
-		if (u_vbuf_resource(ib.buffer)->user_ptr) {
+		if (ib.buffer->user_ptr) {
 			r600_upload_index_buffer(rctx, &ib, info.count);
 		}
 
@@ -857,7 +855,6 @@ void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo)
 	}
 
 	pipe_resource_reference(&ib.buffer, NULL);
-	u_vbuf_draw_end(rctx->vbuf_mgr);
 }
 
 void _r600_pipe_state_add_reg(struct r600_context *ctx,
@@ -71,7 +71,7 @@ unsigned r600_texture_get_offset(struct r600_resource_texture *rtex,
 {
 	unsigned offset = rtex->offset[level];
 
-	switch (rtex->resource.b.b.b.target) {
+	switch (rtex->resource.b.b.target) {
 	case PIPE_TEXTURE_3D:
 	case PIPE_TEXTURE_CUBE:
 	default:
@@ -174,7 +174,7 @@ static unsigned r600_texture_get_nblocksx(struct pipe_screen *screen,
 					  struct r600_resource_texture *rtex,
 					  unsigned level)
 {
-	struct pipe_resource *ptex = &rtex->resource.b.b.b;
+	struct pipe_resource *ptex = &rtex->resource.b.b;
 	unsigned nblocksx, block_align, width;
 	unsigned blocksize = util_format_get_blocksize(rtex->real_format);
 
@@ -194,7 +194,7 @@ static unsigned r600_texture_get_nblocksy(struct pipe_screen *screen,
 					  struct r600_resource_texture *rtex,
 					  unsigned level)
 {
-	struct pipe_resource *ptex = &rtex->resource.b.b.b;
+	struct pipe_resource *ptex = &rtex->resource.b.b;
 	unsigned height, tile_height;
 
 	height = mip_minify(ptex->height0, level);
@@ -219,7 +219,7 @@ static void r600_texture_set_array_mode(struct pipe_screen *screen,
 					struct r600_resource_texture *rtex,
 					unsigned level, unsigned array_mode)
 {
-	struct pipe_resource *ptex = &rtex->resource.b.b.b;
+	struct pipe_resource *ptex = &rtex->resource.b.b;
 
 	switch (array_mode) {
 #if 0
@@ -254,7 +254,7 @@ static void r600_setup_miptree(struct pipe_screen *screen,
 			       struct r600_resource_texture *rtex,
 			       unsigned array_mode)
 {
-	struct pipe_resource *ptex = &rtex->resource.b.b.b;
+	struct pipe_resource *ptex = &rtex->resource.b.b;
 	enum chip_class chipc = ((struct r600_screen*)screen)->chip_class;
 	unsigned size, layer_size, i, offset;
 	unsigned nblocksx, nblocksy;
@@ -394,10 +394,10 @@ r600_texture_create_object(struct pipe_screen *screen,
 		return NULL;
 
 	resource = &rtex->resource;
-	resource->b.b.b = *base;
-	resource->b.b.vtbl = &r600_texture_vtbl;
-	pipe_reference_init(&resource->b.b.b.reference, 1);
-	resource->b.b.b.screen = screen;
+	resource->b.b = *base;
+	resource->b.vtbl = &r600_texture_vtbl;
+	pipe_reference_init(&resource->b.b.reference, 1);
+	resource->b.b.screen = screen;
 	rtex->pitch_override = pitch_in_bytes_override;
 	rtex->real_format = base->format;
 
@@ -457,7 +457,7 @@ r600_texture_create_object(struct pipe_screen *screen,
 		stencil_align = r600_get_base_alignment(screen, rtex->stencil->real_format, array_mode);
 		stencil_offset = align(rtex->size, stencil_align);
 
-		for (unsigned i = 0; i <= rtex->stencil->resource.b.b.b.last_level; i++)
+		for (unsigned i = 0; i <= rtex->stencil->resource.b.b.last_level; i++)
 			rtex->stencil->offset[i] += stencil_offset;
 
 		rtex->size = stencil_offset + rtex->stencil->size;
@@ -465,7 +465,7 @@ r600_texture_create_object(struct pipe_screen *screen,
 
 	/* Now create the backing buffer. */
 	if (!buf && alloc_bo) {
-		struct pipe_resource *ptex = &rtex->resource.b.b.b;
+		struct pipe_resource *ptex = &rtex->resource.b.b;
 		unsigned base_align = r600_get_base_alignment(screen, ptex->format, array_mode);
 
 		if (!r600_init_resource(rscreen, resource, rtex->size, base_align, base->bind, base->usage)) {
@@ -39,7 +39,7 @@ void r600_translate_index_buffer(struct r600_context *r600,
 
 	switch (ib->index_size) {
 	case 1:
-		u_upload_alloc(r600->vbuf_mgr->uploader, 0, count * 2,
+		u_upload_alloc(r600->uploader, 0, count * 2,
 			       &out_offset, &out_buffer, &ptr);
 
 		util_shorten_ubyte_elts_to_userptr(
@@ -201,7 +201,9 @@ static void r600_destroy_context(struct pipe_context *context)
 		free(rctx->states[i]);
 	}
 
-	u_vbuf_destroy(rctx->vbuf_mgr);
+	if (rctx->uploader) {
+		u_upload_destroy(rctx->uploader);
+	}
 	util_slab_destroy(&rctx->pool_transfers);
 
 	r600_update_num_contexts(rctx->screen, -1);
@@ -263,16 +265,13 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void
 			sizeof(struct pipe_transfer), 64,
 			UTIL_SLAB_SINGLETHREADED);
 
-	rctx->vbuf_mgr = u_vbuf_create(&rctx->context, 1024 * 1024, 256,
-				       PIPE_BIND_VERTEX_BUFFER |
-				       PIPE_BIND_INDEX_BUFFER |
-				       PIPE_BIND_CONSTANT_BUFFER,
-				       U_VERTEX_FETCH_DWORD_ALIGNED);
-	if (!rctx->vbuf_mgr) {
+	rctx->uploader = u_upload_create(&rctx->context, 1024 * 1024, 256,
+					 PIPE_BIND_INDEX_BUFFER |
+					 PIPE_BIND_CONSTANT_BUFFER);
+	if (!rctx->uploader) {
 		r600_destroy_context(&rctx->context);
 		return NULL;
 	}
-	rctx->vbuf_mgr->caps.format_fixed32 = 0;
 
 	rctx->blitter = util_blitter_create(&rctx->context);
 	if (rctx->blitter == NULL) {
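Note: u_vbuf_create() bundled an upload manager with vertex-buffer translation; only the uploader half is still needed, and only for index and constant buffers (vertex data is now the state tracker's problem, per the caps below). A sketch of how the driver uses it, following the u_upload_data() call shape in the buffer hunks above (a void return is assumed, matching the calls that ignore the result):

	unsigned out_offset;
	struct pipe_resource *gpu_buf = NULL;

	/* copy 'size' bytes from a user pointer into an uploader-managed
	 * GPU buffer; the data lands at 'out_offset' within 'gpu_buf' */
	u_upload_data(rctx->uploader, 0, size, user_ptr,
		      &out_offset, &gpu_buf);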
@@ -343,6 +342,9 @@ static int r600_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
 	case PIPE_CAP_INDEP_BLEND_FUNC:
 	case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:
 	case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
+	case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY:
+	case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
+	case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY:
 		return 1;
 
 	case PIPE_CAP_GLSL_FEATURE_LEVEL:
@@ -358,6 +360,7 @@ static int r600_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
 	case PIPE_CAP_FRAGMENT_COLOR_CLAMPED:
 	case PIPE_CAP_VERTEX_COLOR_CLAMPED:
 	case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
+	case PIPE_CAP_USER_VERTEX_BUFFERS:
 		return 0;
 
 	/* Stream output. */
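Note: these caps are the other half of dropping u_vbuf: the driver now declares that it only fetches from 4-byte-aligned buffers and that it cannot consume user vertex buffers, so core Gallium performs the alignment fixups and user-buffer uploads that u_vbuf used to do inside the driver. A hedged sketch of the consumer side (the call site is illustrative; the cap is the one added above):

	/* somewhere in the state tracker, not in this diff */
	if (!screen->get_param(screen, PIPE_CAP_USER_VERTEX_BUFFERS)) {
		/* user vertex arrays must be copied into real
		 * pipe_resources before being bound to the driver */
	}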
@@ -34,7 +34,6 @@
 #include "util/u_format.h"
 #include "util/u_math.h"
 #include "util/u_slab.h"
-#include "util/u_vbuf.h"
 #include "r600.h"
 #include "radeonsi_public.h"
 #include "r600_resource.h"
@@ -161,7 +160,6 @@ struct r600_vertex_element
 {
 	unsigned count;
 	struct pipe_vertex_element elements[PIPE_MAX_ATTRIBS];
-	struct u_vbuf_elements *vmgr_elements;
 	unsigned fs_size;
 	struct r600_pipe_state rstate;
 	/* if offset is to big for fetch instructio we need to alterate
@@ -286,7 +284,7 @@ struct r600_context {
 	struct r600_textures_info ps_samplers;
 	boolean shader_dirty;
 
-	struct u_vbuf *vbuf_mgr;
+	struct u_upload_mgr *uploader;
 	struct util_slab_mempool pool_transfers;
 	boolean have_depth_texture, have_depth_fb;
 
@@ -326,6 +324,12 @@ struct r600_context {
 	unsigned streamout_append_bitmask;
 	unsigned *vs_so_stride_in_dw;
 	unsigned *vs_shader_so_strides;
+
+	/* Vertex and index buffers. */
+	bool vertex_buffers_dirty;
+	struct pipe_index_buffer index_buffer;
+	struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
+	unsigned nr_vertex_buffers;
 };
 
 static INLINE void r600_emit_atom(struct r600_context *rctx, struct r600_atom *atom)