mesa/st: merge st buffer object into GL
Reviewed-by: Marek Olšák <marek.olsak@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/14133>
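In short: struct st_buffer_object used to wrap gl_buffer_object and carry the gallium state (the pipe_resource, the per-map pipe_transfers, and the private refcount); those fields now live on gl_buffer_object itself, so the st_buffer_object() cast disappears everywhere. A minimal before/after sketch of the access pattern this diff changes, where obj is any struct gl_buffer_object *:

   /* Before: reach the pipe_resource through the state-tracker subclass. */
   struct pipe_resource *res = st_buffer_object(obj)->buffer;

   /* After: the pipe_resource pointer lives directly on gl_buffer_object. */
   struct pipe_resource *res = obj->buffer;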
@@ -1921,7 +1921,7 @@ dri2_interop_export_object(__DRIcontext *_ctx,
       return MESA_GLINTEROP_INVALID_OBJECT;
    }

-   res = st_buffer_object(buf)->buffer;
+   res = buf->buffer;
    if (!res) {
       /* this shouldn't happen */
       simple_mtx_unlock(&ctx->Shared->Mutex);
@@ -1999,8 +1999,8 @@ dri2_interop_export_object(__DRIcontext *_ctx,
    }

    if (target == GL_TEXTURE_BUFFER) {
-      struct st_buffer_object *stBuf =
-         st_buffer_object(obj->BufferObject);
+      struct gl_buffer_object *stBuf =
+         obj->BufferObject;

       if (!stBuf || !stBuf->buffer) {
          /* this shouldn't happen */
@@ -55,6 +55,7 @@
 #include "util/mesa-sha1.h"
 #include "vbo/vbo.h"

+#include "pipe/p_state.h"

 #ifdef __cplusplus
 extern "C" {
@@ -1397,6 +1398,26 @@ struct gl_buffer_object
    bool MinMaxCacheDirty;

    bool HandleAllocated; /**< GL_ARB_bindless_texture */
+
+   struct pipe_resource *buffer;
+
+   struct gl_context *private_refcount_ctx;
+   /* This mechanism allows passing buffer references to the driver without
+    * using atomics to increase the reference count.
+    *
+    * This private refcount can be decremented without atomics but only one
+    * context (ctx above) can use this counter to be thread-safe.
+    *
+    * This number is atomically added to buffer->reference.count at
+    * initialization. If it's never used, the same number is atomically
+    * subtracted from buffer->reference.count before destruction. If this
+    * number is decremented, we can pass that reference to the driver without
+    * touching reference.count. At buffer destruction we only subtract
+    * the number of references we did not return. This can possibly turn
+    * a million atomic increments into 1 add and 1 subtract atomic op.
+    */
+   int private_refcount;
+
+   struct pipe_transfer *transfer[MAP_COUNT];
 };
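The comment above describes the batched-reference trick; the sketch below condenses how st_get_buffer_reference() and release_buffer() (both further down in this diff) use these fields. All names here are simplified stand-ins rather than Mesa API, and C11 atomics stand in for p_atomic_*:

   #include <stdatomic.h>

   struct resource { atomic_int refcount; };

   struct buffer_object {
      struct resource *buffer;
      void *private_refcount_ctx; /* the one context allowed on the fast path */
      int private_refcount;       /* references pre-acquired in bulk */
   };

   static struct resource *
   get_reference(void *ctx, struct buffer_object *obj)
   {
      if (obj->private_refcount_ctx != ctx) {
         /* Slow path: any other context takes a real atomic reference. */
         atomic_fetch_add(&obj->buffer->refcount, 1);
         return obj->buffer;
      }

      if (obj->private_refcount <= 0) {
         /* One atomic add pre-acquires a large batch of references. */
         obj->private_refcount = 100000000;
         atomic_fetch_add(&obj->buffer->refcount, obj->private_refcount);
      }

      /* Hand out one pre-acquired reference without touching the atomic. */
      obj->private_refcount--;
      return obj->buffer;
   }

   static void
   release_remaining(struct buffer_object *obj)
   {
      /* At destruction, return only the references never handed out. */
      if (obj->private_refcount) {
         atomic_fetch_add(&obj->buffer->refcount, -obj->private_refcount);
         obj->private_refcount = 0;
      }
   }

Passing a reference to the driver thus costs a plain decrement on the owning context; the two real atomic operations are amortized over up to 100000000 handouts.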
@@ -414,7 +414,7 @@ st_create_gallium_vertex_state(struct gl_context *ctx,
       screen->create_vertex_state(screen, &vbuffer[0], velements.velems,
                                   velements.count,
                                   indexbuf ?
-                                  st_buffer_object(indexbuf)->buffer : NULL,
+                                  indexbuf->buffer : NULL,
                                   enabled_attribs);

    for (unsigned i = 0; i < num_vbuffers; i++)
@@ -44,8 +44,7 @@ static void
 st_binding_to_sb(struct gl_buffer_binding *binding,
                  struct pipe_shader_buffer *sb)
 {
-   struct st_buffer_object *st_obj =
-      st_buffer_object(binding->BufferObject);
+   struct gl_buffer_object *st_obj = binding->BufferObject;

    if (st_obj && st_obj->buffer) {
       sb->buffer = st_obj->buffer;
@@ -88,8 +88,7 @@ st_convert_image(const struct st_context *st, const struct gl_image_unit *u,
    }

    if (stObj->base.Target == GL_TEXTURE_BUFFER) {
-      struct st_buffer_object *stbuf =
-         st_buffer_object(stObj->base.BufferObject);
+      struct gl_buffer_object *stbuf = stObj->base.BufferObject;
       unsigned base, size;

       if (!stbuf || !stbuf->buffer) {
@@ -51,12 +51,12 @@ st_bind_ssbos(struct st_context *st, struct gl_program *prog,

    for (i = 0; i < prog->info.num_ssbos; i++) {
       struct gl_buffer_binding *binding;
-      struct st_buffer_object *st_obj;
+      struct gl_buffer_object *st_obj;
       struct pipe_shader_buffer *sb = &buffers[i];

       binding = &st->ctx->ShaderStorageBufferBindings[
          prog->sh.ShaderStorageBlocks[i]->Binding];
-      st_obj = st_buffer_object(binding->BufferObject);
+      st_obj = binding->BufferObject;

       sb->buffer = st_obj ? st_obj->buffer : NULL;
@@ -59,37 +59,35 @@
 struct gl_buffer_object *
 st_bufferobj_alloc(struct gl_context *ctx, GLuint name)
 {
-   struct st_buffer_object *st_obj = ST_CALLOC_STRUCT(st_buffer_object);
+   struct gl_buffer_object *obj = ST_CALLOC_STRUCT(gl_buffer_object);

-   if (!st_obj)
+   if (!obj)
       return NULL;

-   _mesa_initialize_buffer_object(ctx, &st_obj->Base, name);
+   _mesa_initialize_buffer_object(ctx, obj, name);

-   return &st_obj->Base;
+   return obj;
 }


 static void
 release_buffer(struct gl_buffer_object *obj)
 {
-   struct st_buffer_object *st_obj = st_buffer_object(obj);
-
-   if (!st_obj->buffer)
+   if (!obj->buffer)
       return;

    /* Subtract the remaining private references before unreferencing
     * the buffer. See the header file for explanation.
     */
-   if (st_obj->private_refcount) {
-      assert(st_obj->private_refcount > 0);
-      p_atomic_add(&st_obj->buffer->reference.count,
-                   -st_obj->private_refcount);
-      st_obj->private_refcount = 0;
+   if (obj->private_refcount) {
+      assert(obj->private_refcount > 0);
+      p_atomic_add(&obj->buffer->reference.count,
+                   -obj->private_refcount);
+      obj->private_refcount = 0;
    }
-   st_obj->ctx = NULL;
+   obj->private_refcount_ctx = NULL;

-   pipe_resource_reference(&st_obj->buffer, NULL);
+   pipe_resource_reference(&obj->buffer, NULL);
 }
@@ -119,8 +117,6 @@ st_bufferobj_subdata(struct gl_context *ctx,
                      GLsizeiptrARB size,
                      const void * data, struct gl_buffer_object *obj)
 {
-   struct st_buffer_object *st_obj = st_buffer_object(obj);
-
    /* we may be called from VBO code, so double-check params here */
    assert(offset >= 0);
    assert(size >= 0);
@@ -137,7 +133,7 @@ st_bufferobj_subdata(struct gl_context *ctx,
    if (!data)
       return;

-   if (!st_obj->buffer) {
+   if (!obj->buffer) {
       /* we probably ran out of memory during buffer allocation */
       return;
    }
@@ -153,7 +149,7 @@ st_bufferobj_subdata(struct gl_context *ctx,
     */
    struct pipe_context *pipe = st_context(ctx)->pipe;

-   pipe->buffer_subdata(pipe, st_obj->buffer,
+   pipe->buffer_subdata(pipe, obj->buffer,
                         _mesa_bufferobj_mapped(obj, MAP_USER) ?
                         PIPE_MAP_DIRECTLY : 0,
                         offset, size, data);
@@ -169,8 +165,6 @@ st_bufferobj_get_subdata(struct gl_context *ctx,
                          GLsizeiptrARB size,
                          void * data, struct gl_buffer_object *obj)
 {
-   struct st_buffer_object *st_obj = st_buffer_object(obj);
-
    /* we may be called from VBO code, so double-check params here */
    assert(offset >= 0);
    assert(size >= 0);
@@ -179,12 +173,12 @@ st_bufferobj_get_subdata(struct gl_context *ctx,
    if (!size)
       return;

-   if (!st_obj->buffer) {
+   if (!obj->buffer) {
       /* we probably ran out of memory during buffer allocation */
       return;
    }

-   pipe_buffer_read(st_context(ctx)->pipe, st_obj->buffer,
+   pipe_buffer_read(st_context(ctx)->pipe, obj->buffer,
                     offset, size, data);
 }
@@ -305,7 +299,6 @@ bufferobj_data(struct gl_context *ctx,
    struct st_context *st = st_context(ctx);
    struct pipe_context *pipe = st->pipe;
    struct pipe_screen *screen = st->screen;
-   struct st_buffer_object *st_obj = st_buffer_object(obj);
    struct st_memory_object *st_mem_obj = st_memory_object(memObj);
    bool is_mapped = _mesa_bufferobj_mapped(obj, MAP_USER);
@@ -314,15 +307,15 @@ bufferobj_data(struct gl_context *ctx,
        * to 64 bits doesn't make much sense since hw support
        * for > 4GB resources is limited.
        */
-      st_obj->Base.Size = 0;
+      obj->Size = 0;
       return GL_FALSE;
    }

    if (target != GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD &&
-       size && st_obj->buffer &&
-       st_obj->Base.Size == size &&
-       st_obj->Base.Usage == usage &&
-       st_obj->Base.StorageFlags == storageFlags) {
+       size && obj->buffer &&
+       obj->Size == size &&
+       obj->Usage == usage &&
+       obj->StorageFlags == storageFlags) {
       if (data) {
          /* Just discard the old contents and write new data.
           * This should be the same as creating a new buffer, but we avoid
@@ -333,7 +326,7 @@ bufferobj_data(struct gl_context *ctx,
           * PIPE_MAP_DIRECTLY supresses implicit buffer range
           * invalidation.
           */
-         pipe->buffer_subdata(pipe, st_obj->buffer,
+         pipe->buffer_subdata(pipe, obj->buffer,
                               is_mapped ? PIPE_MAP_DIRECTLY :
                                           PIPE_MAP_DISCARD_WHOLE_RESOURCE,
                               0, size, data);
@@ -341,14 +334,14 @@ bufferobj_data(struct gl_context *ctx,
       } else if (is_mapped) {
          return GL_TRUE; /* can't reallocate, nothing to do */
       } else if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER)) {
-         pipe->invalidate_resource(pipe, st_obj->buffer);
+         pipe->invalidate_resource(pipe, obj->buffer);
          return GL_TRUE;
       }
    }

-   st_obj->Base.Size = size;
-   st_obj->Base.Usage = usage;
-   st_obj->Base.StorageFlags = storageFlags;
+   obj->Size = size;
+   obj->Usage = usage;
+   obj->StorageFlags = storageFlags;

    release_buffer(obj);
@@ -370,7 +363,7 @@ bufferobj_data(struct gl_context *ctx,
       buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
       buffer.bind = bindings;
       buffer.usage =
-         buffer_usage(target, st_obj->Base.Immutable, storageFlags, usage);
+         buffer_usage(target, obj->Immutable, storageFlags, usage);
       buffer.flags = storage_flags_to_buffer_flags(storageFlags);
       buffer.width0 = size;
       buffer.height0 = 1;
@@ -378,42 +371,42 @@ bufferobj_data(struct gl_context *ctx,
       buffer.array_size = 1;

       if (st_mem_obj) {
-         st_obj->buffer = screen->resource_from_memobj(screen, &buffer,
+         obj->buffer = screen->resource_from_memobj(screen, &buffer,
                                                        st_mem_obj->memory,
                                                        offset);
       }
       else if (target == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD) {
-         st_obj->buffer =
+         obj->buffer =
            screen->resource_from_user_memory(screen, &buffer, (void*)data);
       }
       else {
-         st_obj->buffer = screen->resource_create(screen, &buffer);
+         obj->buffer = screen->resource_create(screen, &buffer);

-         if (st_obj->buffer && data)
-            pipe_buffer_write(pipe, st_obj->buffer, 0, size, data);
+         if (obj->buffer && data)
+            pipe_buffer_write(pipe, obj->buffer, 0, size, data);
       }

-      if (!st_obj->buffer) {
+      if (!obj->buffer) {
         /* out of memory */
-         st_obj->Base.Size = 0;
+         obj->Size = 0;
         return GL_FALSE;
      }

-      st_obj->ctx = ctx;
+      obj->private_refcount_ctx = ctx;
    }

    /* The current buffer may be bound, so we have to revalidate all atoms that
     * might be using it.
     */
-   if (st_obj->Base.UsageHistory & USAGE_ARRAY_BUFFER)
+   if (obj->UsageHistory & USAGE_ARRAY_BUFFER)
       ctx->NewDriverState |= ST_NEW_VERTEX_ARRAYS;
-   if (st_obj->Base.UsageHistory & USAGE_UNIFORM_BUFFER)
+   if (obj->UsageHistory & USAGE_UNIFORM_BUFFER)
       ctx->NewDriverState |= ST_NEW_UNIFORM_BUFFER;
-   if (st_obj->Base.UsageHistory & USAGE_SHADER_STORAGE_BUFFER)
+   if (obj->UsageHistory & USAGE_SHADER_STORAGE_BUFFER)
       ctx->NewDriverState |= ST_NEW_STORAGE_BUFFER;
-   if (st_obj->Base.UsageHistory & USAGE_TEXTURE_BUFFER)
+   if (obj->UsageHistory & USAGE_TEXTURE_BUFFER)
       ctx->NewDriverState |= ST_NEW_SAMPLER_VIEWS | ST_NEW_IMAGE_UNITS;
-   if (st_obj->Base.UsageHistory & USAGE_ATOMIC_COUNTER_BUFFER)
+   if (obj->UsageHistory & USAGE_ATOMIC_COUNTER_BUFFER)
       ctx->NewDriverState |= ctx->DriverFlags.NewAtomicBuffer;

    return GL_TRUE;
@@ -461,17 +454,16 @@ st_bufferobj_invalidate(struct gl_context *ctx,
 {
    struct st_context *st = st_context(ctx);
    struct pipe_context *pipe = st->pipe;
-   struct st_buffer_object *st_obj = st_buffer_object(obj);

    /* We ignore partial invalidates. */
    if (offset != 0 || size != obj->Size)
       return;

    /* If the buffer is mapped, we can't invalidate it. */
-   if (!st_obj->buffer || _mesa_bufferobj_mapped(obj, MAP_USER))
+   if (!obj->buffer || _mesa_bufferobj_mapped(obj, MAP_USER))
       return;

-   pipe->invalidate_resource(pipe, st_obj->buffer);
+   pipe->invalidate_resource(pipe, obj->buffer);
 }
@@ -536,7 +528,6 @@ st_bufferobj_map_range(struct gl_context *ctx,
                        gl_map_buffer_index index)
 {
    struct pipe_context *pipe = st_context(ctx)->pipe;
-   struct st_buffer_object *st_obj = st_buffer_object(obj);

    assert(offset >= 0);
    assert(length >= 0);
@@ -558,17 +549,17 @@ st_bufferobj_map_range(struct gl_context *ctx,
    }

    obj->Mappings[index].Pointer = pipe_buffer_map_range(pipe,
-                                                        st_obj->buffer,
+                                                        obj->buffer,
                                                         offset, length,
                                                         transfer_flags,
-                                                        &st_obj->transfer[index]);
+                                                        &obj->transfer[index]);
    if (obj->Mappings[index].Pointer) {
       obj->Mappings[index].Offset = offset;
       obj->Mappings[index].Length = length;
       obj->Mappings[index].AccessFlags = access;
    }
    else {
-      st_obj->transfer[index] = NULL;
+      obj->transfer[index] = NULL;
    }

    return obj->Mappings[index].Pointer;
@@ -582,7 +573,6 @@ st_bufferobj_flush_mapped_range(struct gl_context *ctx,
                                 gl_map_buffer_index index)
 {
    struct pipe_context *pipe = st_context(ctx)->pipe;
-   struct st_buffer_object *st_obj = st_buffer_object(obj);

    /* Subrange is relative to mapped range */
    assert(offset >= 0);
@@ -593,7 +583,7 @@ st_bufferobj_flush_mapped_range(struct gl_context *ctx,
    if (!length)
       return;

-   pipe_buffer_flush_mapped_range(pipe, st_obj->transfer[index],
+   pipe_buffer_flush_mapped_range(pipe, obj->transfer[index],
                                   obj->Mappings[index].Offset + offset,
                                   length);
 }
@@ -607,12 +597,11 @@ st_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj,
                    gl_map_buffer_index index)
 {
    struct pipe_context *pipe = st_context(ctx)->pipe;
-   struct st_buffer_object *st_obj = st_buffer_object(obj);

    if (obj->Mappings[index].Length)
-      pipe_buffer_unmap(pipe, st_obj->transfer[index]);
+      pipe_buffer_unmap(pipe, obj->transfer[index]);

-   st_obj->transfer[index] = NULL;
+   obj->transfer[index] = NULL;
    obj->Mappings[index].Pointer = NULL;
    obj->Mappings[index].Offset = 0;
    obj->Mappings[index].Length = 0;
@@ -631,8 +620,6 @@ st_copy_buffer_subdata(struct gl_context *ctx,
                        GLsizeiptr size)
 {
    struct pipe_context *pipe = st_context(ctx)->pipe;
-   struct st_buffer_object *srcObj = st_buffer_object(src);
-   struct st_buffer_object *dstObj = st_buffer_object(dst);
    struct pipe_box box;

    if (!size)
@@ -644,8 +631,8 @@ st_copy_buffer_subdata(struct gl_context *ctx,

    u_box_1d(readOffset, size, &box);

-   pipe->resource_copy_region(pipe, dstObj->buffer, 0, writeOffset, 0, 0,
-                              srcObj->buffer, 0, &box);
+   pipe->resource_copy_region(pipe, dst->buffer, 0, writeOffset, 0, 0,
+                              src->buffer, 0, &box);
 }

 /**
@@ -659,7 +646,6 @@ st_clear_buffer_subdata(struct gl_context *ctx,
                         struct gl_buffer_object *bufObj)
 {
    struct pipe_context *pipe = st_context(ctx)->pipe;
-   struct st_buffer_object *buf = st_buffer_object(bufObj);
    static const char zeros[16] = {0};

    if (!pipe->clear_buffer) {
@@ -671,7 +657,7 @@ st_clear_buffer_subdata(struct gl_context *ctx,
    if (!clearValue)
       clearValue = zeros;

-   pipe->clear_buffer(pipe, buf->buffer, offset, size,
+   pipe->clear_buffer(pipe, bufObj->buffer, offset, size,
                       clearValue, clearValueSize);
 }
@@ -682,12 +668,11 @@ st_bufferobj_page_commitment(struct gl_context *ctx,
                              GLboolean commit)
 {
    struct pipe_context *pipe = st_context(ctx)->pipe;
-   struct st_buffer_object *buf = st_buffer_object(bufferObj);
    struct pipe_box box;

    u_box_1d(offset, size, &box);

-   if (!pipe->resource_commit(pipe, buf->buffer, 0, &box, commit)) {
+   if (!pipe->resource_commit(pipe, bufferObj->buffer, 0, &box, commit)) {
       _mesa_error(ctx, GL_OUT_OF_MEMORY, "glBufferPageCommitmentARB(out of memory)");
       return;
    }
@@ -35,45 +35,6 @@ struct pipe_resource;
 struct pipe_screen;
 struct st_context;

-/**
- * State_tracker vertex/pixel buffer object, derived from Mesa's
- * gl_buffer_object.
- */
-struct st_buffer_object
-{
-   struct gl_buffer_object Base;
-   struct pipe_resource *buffer; /* GPU storage */
-
-   struct gl_context *ctx; /* the context that owns private_refcount */
-
-   /* This mechanism allows passing buffer references to the driver without
-    * using atomics to increase the reference count.
-    *
-    * This private refcount can be decremented without atomics but only one
-    * context (ctx above) can use this counter to be thread-safe.
-    *
-    * This number is atomically added to buffer->reference.count at
-    * initialization. If it's never used, the same number is atomically
-    * subtracted from buffer->reference.count before destruction. If this
-    * number is decremented, we can pass that reference to the driver without
-    * touching reference.count. At buffer destruction we only subtract
-    * the number of references we did not return. This can possibly turn
-    * a million atomic increments into 1 add and 1 subtract atomic op.
-    */
-   int private_refcount;
-
-   struct pipe_transfer *transfer[MAP_COUNT];
-};
-
-
-/** cast wrapper */
-static inline struct st_buffer_object *
-st_buffer_object(struct gl_buffer_object *obj)
-{
-   return (struct st_buffer_object *) obj;
-}
-

 enum pipe_map_flags
 st_access_flags_to_transfer_flags(GLbitfield access, bool wholeBuffer);
@@ -88,8 +49,7 @@ st_get_buffer_reference(struct gl_context *ctx, struct gl_buffer_object *obj)
    if (unlikely(!obj))
       return NULL;

-   struct st_buffer_object *stobj = st_buffer_object(obj);
-   struct pipe_resource *buffer = stobj->buffer;
+   struct pipe_resource *buffer = obj->buffer;

    if (unlikely(!buffer))
       return NULL;
@@ -97,21 +57,21 @@ st_get_buffer_reference(struct gl_context *ctx, struct gl_buffer_object *obj)
    /* Only one context is using the fast path. All other contexts must use
     * the slow path.
     */
-   if (unlikely(stobj->ctx != ctx)) {
+   if (unlikely(obj->private_refcount_ctx != ctx)) {
      p_atomic_inc(&buffer->reference.count);
      return buffer;
   }

-   if (unlikely(stobj->private_refcount <= 0)) {
-      assert(stobj->private_refcount == 0);
+   if (unlikely(obj->private_refcount <= 0)) {
+      assert(obj->private_refcount == 0);

       /* This is the number of atomic increments we will skip. */
-      stobj->private_refcount = 100000000;
-      p_atomic_add(&buffer->reference.count, stobj->private_refcount);
+      obj->private_refcount = 100000000;
+      p_atomic_add(&buffer->reference.count, obj->private_refcount);
    }

    /* Return a buffer reference while decrementing the private refcount. */
-   stobj->private_refcount--;
+   obj->private_refcount--;
    return buffer;
 }
@@ -81,7 +81,7 @@ void st_dispatch_compute_indirect(struct gl_context *ctx,
                                   GLintptr indirect_offset)
 {
    struct gl_buffer_object *indirect_buffer = ctx->DispatchIndirectBuffer;
-   struct pipe_resource *indirect = st_buffer_object(indirect_buffer)->buffer;
+   struct pipe_resource *indirect = indirect_buffer->buffer;

    st_dispatch_compute_common(ctx, NULL, NULL, indirect, indirect_offset);
 }
@@ -393,7 +393,6 @@ st_StoreQueryResult(struct gl_context *ctx, struct gl_query_object *q,
 {
    struct pipe_context *pipe = st_context(ctx)->pipe;
    struct st_query_object *stq = st_query_object(q);
-   struct st_buffer_object *stObj = st_buffer_object(buf);
    boolean wait = pname == GL_QUERY_RESULT;
    enum pipe_query_value_type result_type;
    int index;
@@ -407,7 +406,7 @@ st_StoreQueryResult(struct gl_context *ctx, struct gl_query_object *q,
        * LE. When a BE one comes along, this needs some form of resolution.
        */
       unsigned data[2] = { CPU_TO_LE32(q->Target), 0 };
-      pipe_buffer_write(pipe, stObj->buffer, offset,
+      pipe_buffer_write(pipe, buf->buffer, offset,
                         (ptype == GL_INT64_ARB ||
                          ptype == GL_UNSIGNED_INT64_ARB) ? 8 : 4,
                         data);
@@ -476,5 +475,5 @@ st_StoreQueryResult(struct gl_context *ctx, struct gl_query_object *q,
    }

    pipe->get_query_result_resource(pipe, stq->pq, wait, result_type, index,
-                                   stObj->buffer, offset);
+                                   buf->buffer, offset);
 }
@@ -86,7 +86,7 @@ st_server_wait_semaphore(struct gl_context *ctx,
    struct st_semaphore_object *st_obj = st_semaphore_object(semObj);
    struct st_context *st = st_context(ctx);
    struct pipe_context *pipe = st->pipe;
-   struct st_buffer_object *bufObj;
+   struct gl_buffer_object *bufObj;
    struct st_texture_object *texObj;

    /* The driver is allowed to flush during fence_server_sync, be prepared */
@@ -108,7 +108,7 @@ st_server_wait_semaphore(struct gl_context *ctx,
       if (!bufObjs[i])
          continue;

-      bufObj = st_buffer_object(bufObjs[i]);
+      bufObj = bufObjs[i];
       if (bufObj->buffer)
          pipe->flush_resource(pipe, bufObj->buffer);
    }
@@ -135,14 +135,14 @@ st_server_signal_semaphore(struct gl_context *ctx,
    struct st_semaphore_object *st_obj = st_semaphore_object(semObj);
    struct st_context *st = st_context(ctx);
    struct pipe_context *pipe = st->pipe;
-   struct st_buffer_object *bufObj;
+   struct gl_buffer_object *bufObj;
    struct st_texture_object *texObj;

    for (unsigned i = 0; i < numBufferBarriers; i++) {
       if (!bufObjs[i])
          continue;

-      bufObj = st_buffer_object(bufObjs[i]);
+      bufObj = bufObjs[i];
       if (bufObj->buffer)
          pipe->flush_resource(pipe, bufObj->buffer);
    }
@@ -2373,7 +2373,7 @@ st_CompressedTexSubImage(struct gl_context *ctx, GLuint dims,

    buf_offset = buf_offset / addr.bytes_per_pixel;

-   buf = st_buffer_object(ctx->Unpack.BufferObj)->buffer;
+   buf = ctx->Unpack.BufferObj->buffer;

    addr.xoffset = x / bw;
    addr.yoffset = y / bh;
@@ -118,7 +118,7 @@ st_begin_transform_feedback(struct gl_context *ctx, GLenum mode,

    /* Convert the transform feedback state into the gallium representation. */
    for (i = 0; i < max_num_targets; i++) {
-      struct st_buffer_object *bo = st_buffer_object(sobj->base.Buffers[i]);
+      struct gl_buffer_object *bo = sobj->base.Buffers[i];

       if (bo && bo->buffer) {
          unsigned stream = obj->program->sh.LinkedTransformFeedback->
@@ -157,7 +157,7 @@ prepare_indexed_draw(/* pass both st and ctx to reduce dereferences */
          st_get_buffer_reference(ctx, info->index.gl_bo);
       info->take_index_buffer_ownership = true;
    } else {
-      info->index.resource = st_buffer_object(info->index.gl_bo)->buffer;
+      info->index.resource = info->index.gl_bo->buffer;
    }

    /* Return if the bound element array buffer doesn't have any backing
@@ -251,7 +251,7 @@ st_indirect_draw_vbo(struct gl_context *ctx,
       assert(bufobj);

       info.index_size = 1 << ib->index_size_shift;
-      info.index.resource = st_buffer_object(bufobj)->buffer;
+      info.index.resource = bufobj->buffer;
       draw.start = pointer_to_offset(ib->ptr) >> ib->index_size_shift;

       info.restart_index = restart_index;
@@ -259,7 +259,7 @@ st_indirect_draw_vbo(struct gl_context *ctx,
    }

    info.mode = translate_prim(ctx, mode);
-   indirect.buffer = st_buffer_object(indirect_data)->buffer;
+   indirect.buffer = indirect_data->buffer;
    indirect.offset = indirect_offset;

    /* Viewperf2020/Maya draws with a buffer that has no storage. */
@@ -280,7 +280,7 @@ st_indirect_draw_vbo(struct gl_context *ctx,
    indirect.stride = stride;
    if (indirect_draw_count) {
       indirect.indirect_draw_count =
-         st_buffer_object(indirect_draw_count)->buffer;
+         indirect_draw_count->buffer;
       indirect.indirect_draw_count_offset = indirect_draw_count_offset;
    }
    cso_draw_vbo(st->cso_context, &info, 0, &indirect, draw);
@@ -196,10 +196,8 @@ st_feedback_draw_vbo(struct gl_context *ctx,
          goto out_unref_vertex;

       if (bufobj && bufobj->Name) {
-         struct st_buffer_object *stobj = st_buffer_object(bufobj);
-
          start = pointer_to_offset(ib->ptr) >> ib->index_size_shift;
-         mapped_indices = pipe_buffer_map(pipe, stobj->buffer,
+         mapped_indices = pipe_buffer_map(pipe, bufobj->buffer,
                                           PIPE_MAP_READ, &ib_transfer);
       }
       else {
@@ -249,7 +247,7 @@ st_feedback_draw_vbo(struct gl_context *ctx,
    for (unsigned i = 0; i < prog->sh.NumUniformBlocks; i++) {
       struct gl_buffer_binding *binding =
          &st->ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
-      struct st_buffer_object *st_obj = st_buffer_object(binding->BufferObject);
+      struct gl_buffer_object *st_obj = binding->BufferObject;
       struct pipe_resource *buf = st_obj->buffer;

       if (!buf)
@@ -279,7 +277,7 @@ st_feedback_draw_vbo(struct gl_context *ctx,
       struct gl_buffer_binding *binding =
          &st->ctx->ShaderStorageBufferBindings[
            prog->sh.ShaderStorageBlocks[i]->Binding];
-      struct st_buffer_object *st_obj = st_buffer_object(binding->BufferObject);
+      struct gl_buffer_object *st_obj = binding->BufferObject;
       struct pipe_resource *buf = st_obj->buffer;

       if (!buf)
@@ -107,7 +107,7 @@ st_pbo_addresses_pixelstore(struct st_context *st,
                             const void *pixels,
                             struct st_pbo_addresses *addr)
 {
-   struct pipe_resource *buf = st_buffer_object(store->BufferObj)->buffer;
+   struct pipe_resource *buf = store->BufferObj->buffer;
    intptr_t buf_offset = (intptr_t) pixels;

    if (buf_offset % addr->bytes_per_pixel)
@@ -682,8 +682,8 @@ st_get_buffer_sampler_view_from_stobj(struct st_context *st,
                                       bool get_reference)
 {
    struct st_sampler_view *sv;
-   struct st_buffer_object *stBuf =
-      st_buffer_object(stObj->base.BufferObject);
+   struct gl_buffer_object *stBuf =
+      stObj->base.BufferObject;

    if (!stBuf || !stBuf->buffer)
       return NULL;