mesa: allow buffers to be mapped multiple times
OpenGL allows a buffer to be mapped only once, but we also map buffers
internally, e.g. in the software primitive restart fallback, for PBOs,
vbo_get_minmax_index, etc. This has always been a problem, but it will
be a bigger problem with persistent buffer mappings, which will prevent
all Mesa functions from mapping buffers for internal purposes.

This adds a driver interface to core Mesa which supports multiple
buffer mappings and allows 2 mappings: one for the GL user and one for
Mesa.

Note that Gallium supports an unlimited number of buffer and texture
mappings, so it's not really an issue for Gallium.

v2: fix unmapping in xm_dd.c, remove the GL errors there
v3: fix the intel driver (by Fredrik)

Reviewed-by: Fredrik Höglund <fredrik@kde.org>
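For reference, the driver-facing interface introduced by this patch boils down to a per-slot mapping record on gl_buffer_object plus an extra index argument on the map/flush/unmap hooks. The condensed sketch below is assembled from the mtypes.h and dd.h hunks in this diff; the small helper at the end is purely illustrative (it is not part of the patch) and only shows how internal Mesa code is expected to use the MAP_INTERNAL slot without disturbing a user mapping:

    /* New in mtypes.h: which mapping slot a map/unmap call refers to. */
    typedef enum {
       MAP_USER,     /* mapping requested through the GL API (glMapBuffer*) */
       MAP_INTERNAL, /* mapping made by Mesa itself (PBOs, vbo, ...) */
       MAP_COUNT
    } gl_map_buffer_index;

    /* New in mtypes.h: per-slot mapping state. gl_buffer_object now holds
     * "struct gl_buffer_mapping Mappings[MAP_COUNT]" instead of a single
     * Pointer/Offset/Length/AccessFlags set.
     */
    struct gl_buffer_mapping {
       GLbitfield AccessFlags;
       GLvoid *Pointer;
       GLintptr Offset;
       GLsizeiptr Length;
    };

    /* Illustrative helper (not in this patch): map a whole buffer for
     * internal use, e.g. to read back index data for vbo_get_minmax_index.
     * The driver hooks now take the mapping slot as their last argument.
     */
    static void *
    map_buffer_for_mesa(struct gl_context *ctx, struct gl_buffer_object *obj)
    {
       return ctx->Driver.MapBufferRange(ctx, 0, obj->Size, GL_MAP_READ_BIT,
                                         obj, MAP_INTERNAL);
    }

A caller pairs this with ctx->Driver.UnmapBuffer(ctx, obj, MAP_INTERNAL) when done, exactly as the PBO and vbo call sites in the diff below do.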
@@ -40,7 +40,8 @@
|
||||
#include "intel_regions.h"
|
||||
|
||||
static GLboolean
|
||||
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj);
|
||||
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
|
||||
gl_map_buffer_index index);
|
||||
|
||||
/** Allocates a new drm_intel_bo to store the data for the buffer object. */
|
||||
static void
|
||||
@@ -93,8 +94,7 @@ intel_bufferobj_free(struct gl_context * ctx, struct gl_buffer_object *obj)
|
||||
* to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
|
||||
* (though it does if you call glDeleteBuffers)
|
||||
*/
|
||||
if (obj->Pointer)
|
||||
intel_bufferobj_unmap(ctx, obj);
|
||||
_mesa_buffer_unmap_all_mappings(ctx, obj);
|
||||
|
||||
free(intel_obj->sys_buffer);
|
||||
|
||||
@@ -127,7 +127,8 @@ intel_bufferobj_data(struct gl_context * ctx,
|
||||
intel_obj->Base.Usage = usage;
|
||||
intel_obj->Base.StorageFlags = storageFlags;
|
||||
|
||||
assert(!obj->Pointer); /* Mesa should have unmapped it */
|
||||
assert(!obj->Mappings[MAP_USER].Pointer); /* Mesa should have unmapped it */
|
||||
assert(!obj->Mappings[MAP_INTERNAL].Pointer);
|
||||
|
||||
if (intel_obj->buffer != NULL)
|
||||
release_buffer(intel_obj);
|
||||
@@ -272,7 +273,8 @@ intel_bufferobj_get_subdata(struct gl_context * ctx,
|
||||
static void *
|
||||
intel_bufferobj_map_range(struct gl_context * ctx,
|
||||
GLintptr offset, GLsizeiptr length,
|
||||
GLbitfield access, struct gl_buffer_object *obj)
|
||||
GLbitfield access, struct gl_buffer_object *obj,
|
||||
gl_map_buffer_index index)
|
||||
{
|
||||
struct intel_context *intel = intel_context(ctx);
|
||||
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
|
||||
@@ -282,9 +284,9 @@ intel_bufferobj_map_range(struct gl_context * ctx,
|
||||
/* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
|
||||
* internally uses our functions directly.
|
||||
*/
|
||||
obj->Offset = offset;
|
||||
obj->Length = length;
|
||||
obj->AccessFlags = access;
|
||||
obj->Mappings[index].Offset = offset;
|
||||
obj->Mappings[index].Length = length;
|
||||
obj->Mappings[index].AccessFlags = access;
|
||||
|
||||
if (intel_obj->sys_buffer) {
|
||||
const bool read_only =
|
||||
@@ -294,8 +296,8 @@ intel_bufferobj_map_range(struct gl_context * ctx,
|
||||
release_buffer(intel_obj);
|
||||
|
||||
if (!intel_obj->buffer || intel_obj->source) {
|
||||
obj->Pointer = intel_obj->sys_buffer + offset;
|
||||
return obj->Pointer;
|
||||
obj->Mappings[index].Pointer = intel_obj->sys_buffer + offset;
|
||||
return obj->Mappings[index].Pointer;
|
||||
}
|
||||
|
||||
free(intel_obj->sys_buffer);
|
||||
@@ -303,7 +305,7 @@ intel_bufferobj_map_range(struct gl_context * ctx,
|
||||
}
|
||||
|
||||
if (intel_obj->buffer == NULL) {
|
||||
obj->Pointer = NULL;
|
||||
obj->Mappings[index].Pointer = NULL;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@@ -346,23 +348,25 @@ intel_bufferobj_map_range(struct gl_context * ctx,
|
||||
const unsigned extra = (uintptr_t) offset % alignment;
|
||||
|
||||
if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
|
||||
intel_obj->range_map_buffer = _mesa_align_malloc(length + extra,
|
||||
alignment);
|
||||
obj->Pointer = intel_obj->range_map_buffer + extra;
|
||||
intel_obj->range_map_buffer[index] =
|
||||
_mesa_align_malloc(length + extra, alignment);
|
||||
obj->Mappings[index].Pointer =
|
||||
intel_obj->range_map_buffer[index] + extra;
|
||||
} else {
|
||||
intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
|
||||
intel_obj->range_map_bo[index] = drm_intel_bo_alloc(intel->bufmgr,
|
||||
"range map",
|
||||
length + extra,
|
||||
alignment);
|
||||
if (!(access & GL_MAP_READ_BIT)) {
|
||||
drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
|
||||
drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo[index]);
|
||||
} else {
|
||||
drm_intel_bo_map(intel_obj->range_map_bo,
|
||||
drm_intel_bo_map(intel_obj->range_map_bo[index],
|
||||
(access & GL_MAP_WRITE_BIT) != 0);
|
||||
}
|
||||
obj->Pointer = intel_obj->range_map_bo->virtual + extra;
|
||||
obj->Mappings[index].Pointer =
|
||||
intel_obj->range_map_bo[index]->virtual + extra;
|
||||
}
|
||||
return obj->Pointer;
|
||||
return obj->Mappings[index].Pointer;
|
||||
}
|
||||
|
||||
if (access & GL_MAP_UNSYNCHRONIZED_BIT)
|
||||
@@ -373,8 +377,8 @@ intel_bufferobj_map_range(struct gl_context * ctx,
|
||||
drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
|
||||
}
|
||||
|
||||
obj->Pointer = intel_obj->buffer->virtual + offset;
|
||||
return obj->Pointer;
|
||||
obj->Mappings[index].Pointer = intel_obj->buffer->virtual + offset;
|
||||
return obj->Mappings[index].Pointer;
|
||||
}
|
||||
|
||||
/* Ideally we'd use a BO to avoid taking up cache space for the temporary
|
||||
@@ -385,7 +389,8 @@ intel_bufferobj_map_range(struct gl_context * ctx,
|
||||
static void
|
||||
intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
|
||||
GLintptr offset, GLsizeiptr length,
|
||||
struct gl_buffer_object *obj)
|
||||
struct gl_buffer_object *obj,
|
||||
gl_map_buffer_index index)
|
||||
{
|
||||
struct intel_context *intel = intel_context(ctx);
|
||||
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
|
||||
@@ -394,7 +399,7 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
|
||||
/* Unless we're in the range map using a temporary system buffer,
|
||||
* there's no work to do.
|
||||
*/
|
||||
if (intel_obj->range_map_buffer == NULL)
|
||||
if (intel_obj->range_map_buffer[index] == NULL)
|
||||
return;
|
||||
|
||||
if (length == 0)
|
||||
@@ -406,10 +411,11 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
|
||||
* former points to the actual mapping while the latter may be offset to
|
||||
* meet alignment guarantees.
|
||||
*/
|
||||
drm_intel_bo_subdata(temp_bo, 0, length, obj->Pointer);
|
||||
drm_intel_bo_subdata(temp_bo, 0, length, obj->Mappings[index].Pointer);
|
||||
|
||||
intel_emit_linear_blit(intel,
|
||||
intel_obj->buffer, obj->Offset + offset,
|
||||
intel_obj->buffer,
|
||||
obj->Mappings[index].Offset + offset,
|
||||
temp_bo, 0,
|
||||
length);
|
||||
|
||||
@@ -421,33 +427,35 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
|
||||
* Called via glUnmapBuffer().
|
||||
*/
|
||||
static GLboolean
|
||||
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
|
||||
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
|
||||
gl_map_buffer_index index)
|
||||
{
|
||||
struct intel_context *intel = intel_context(ctx);
|
||||
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
|
||||
|
||||
assert(intel_obj);
|
||||
assert(obj->Pointer);
|
||||
assert(obj->Mappings[index].Pointer);
|
||||
if (intel_obj->sys_buffer != NULL) {
|
||||
/* always keep the mapping around. */
|
||||
} else if (intel_obj->range_map_buffer != NULL) {
|
||||
} else if (intel_obj->range_map_buffer[index] != NULL) {
|
||||
/* Since we've emitted some blits to buffers that will (likely) be used
|
||||
* in rendering operations in other cache domains in this batch, emit a
|
||||
* flush. Once again, we wish for a domain tracker in libdrm to cover
|
||||
* usage inside of a batchbuffer.
|
||||
*/
|
||||
intel_batchbuffer_emit_mi_flush(intel);
|
||||
_mesa_align_free(intel_obj->range_map_buffer);
|
||||
intel_obj->range_map_buffer = NULL;
|
||||
} else if (intel_obj->range_map_bo != NULL) {
|
||||
const unsigned extra = obj->Pointer - intel_obj->range_map_bo->virtual;
|
||||
_mesa_align_free(intel_obj->range_map_buffer[index]);
|
||||
intel_obj->range_map_buffer[index] = NULL;
|
||||
} else if (intel_obj->range_map_bo[index] != NULL) {
|
||||
const unsigned extra = obj->Mappings[index].Pointer -
|
||||
intel_obj->range_map_bo[index]->virtual;
|
||||
|
||||
drm_intel_bo_unmap(intel_obj->range_map_bo);
|
||||
drm_intel_bo_unmap(intel_obj->range_map_bo[index]);
|
||||
|
||||
intel_emit_linear_blit(intel,
|
||||
intel_obj->buffer, obj->Offset,
|
||||
intel_obj->range_map_bo, extra,
|
||||
obj->Length);
|
||||
intel_obj->buffer, obj->Mappings[index].Offset,
|
||||
intel_obj->range_map_bo[index], extra,
|
||||
obj->Mappings[index].Length);
|
||||
|
||||
/* Since we've emitted some blits to buffers that will (likely) be used
|
||||
* in rendering operations in other cache domains in this batch, emit a
|
||||
@@ -456,14 +464,14 @@ intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
|
||||
*/
|
||||
intel_batchbuffer_emit_mi_flush(intel);
|
||||
|
||||
drm_intel_bo_unreference(intel_obj->range_map_bo);
|
||||
intel_obj->range_map_bo = NULL;
|
||||
drm_intel_bo_unreference(intel_obj->range_map_bo[index]);
|
||||
intel_obj->range_map_bo[index] = NULL;
|
||||
} else if (intel_obj->buffer != NULL) {
|
||||
drm_intel_bo_unmap(intel_obj->buffer);
|
||||
}
|
||||
obj->Pointer = NULL;
|
||||
obj->Offset = 0;
|
||||
obj->Length = 0;
|
||||
obj->Mappings[index].Pointer = NULL;
|
||||
obj->Mappings[index].Offset = 0;
|
||||
obj->Mappings[index].Length = 0;
|
||||
|
||||
return true;
|
||||
}
|
||||
@@ -607,22 +615,24 @@ intel_bufferobj_copy_subdata(struct gl_context *ctx,
|
||||
char *ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
|
||||
GL_MAP_READ_BIT |
|
||||
GL_MAP_WRITE_BIT,
|
||||
dst);
|
||||
dst, MAP_INTERNAL);
|
||||
memmove(ptr + write_offset, ptr + read_offset, size);
|
||||
intel_bufferobj_unmap(ctx, dst);
|
||||
intel_bufferobj_unmap(ctx, dst, MAP_INTERNAL);
|
||||
} else {
|
||||
const char *src_ptr;
|
||||
char *dst_ptr;
|
||||
|
||||
src_ptr = intel_bufferobj_map_range(ctx, 0, src->Size,
|
||||
GL_MAP_READ_BIT, src);
|
||||
GL_MAP_READ_BIT, src,
|
||||
MAP_INTERNAL);
|
||||
dst_ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
|
||||
GL_MAP_WRITE_BIT, dst);
|
||||
GL_MAP_WRITE_BIT, dst,
|
||||
MAP_INTERNAL);
|
||||
|
||||
memcpy(dst_ptr + write_offset, src_ptr + read_offset, size);
|
||||
|
||||
intel_bufferobj_unmap(ctx, src);
|
||||
intel_bufferobj_unmap(ctx, dst);
|
||||
intel_bufferobj_unmap(ctx, src, MAP_INTERNAL);
|
||||
intel_bufferobj_unmap(ctx, dst, MAP_INTERNAL);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
@@ -46,10 +46,8 @@ struct intel_buffer_object
|
||||
/** System memory buffer data, if not using a BO to store the data. */
|
||||
void *sys_buffer;
|
||||
|
||||
drm_intel_bo *range_map_bo;
|
||||
void *range_map_buffer;
|
||||
unsigned int range_map_offset;
|
||||
GLsizei range_map_size;
|
||||
drm_intel_bo *range_map_bo[MAP_COUNT];
|
||||
void *range_map_buffer[MAP_COUNT];
|
||||
|
||||
bool source;
|
||||
};
|
||||
|
@@ -78,7 +78,8 @@ static const GLubyte *map_pbo( struct gl_context *ctx,
|
||||
|
||||
buf = (GLubyte *) ctx->Driver.MapBufferRange(ctx, 0, unpack->BufferObj->Size,
|
||||
GL_MAP_READ_BIT,
|
||||
unpack->BufferObj);
|
||||
unpack->BufferObj,
|
||||
MAP_INTERNAL);
|
||||
if (!buf) {
|
||||
_mesa_error(ctx, GL_INVALID_OPERATION, "glBitmap(PBO is mapped)");
|
||||
return NULL;
|
||||
@@ -311,7 +312,7 @@ out:
|
||||
|
||||
if (_mesa_is_bufferobj(unpack->BufferObj)) {
|
||||
/* done with PBO so unmap it now */
|
||||
ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj);
|
||||
ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj, MAP_INTERNAL);
|
||||
}
|
||||
|
||||
intel_check_front_buffer_rendering(intel);
|
||||
|
@@ -861,12 +861,13 @@ static void brw_upload_indices(struct brw_context *brw)
|
||||
offset,
|
||||
ib_size,
|
||||
GL_MAP_READ_BIT,
|
||||
bufferobj);
|
||||
bufferobj,
|
||||
MAP_INTERNAL);
|
||||
|
||||
intel_upload_data(brw, map, ib_size, ib_type_size, &bo, &offset);
|
||||
brw->ib.start_vertex_offset = offset / ib_type_size;
|
||||
|
||||
ctx->Driver.UnmapBuffer(ctx, bufferobj);
|
||||
ctx->Driver.UnmapBuffer(ctx, bufferobj, MAP_INTERNAL);
|
||||
} else {
|
||||
/* Use CMD_3D_PRIM's start_vertex_offset to avoid re-uploading
|
||||
* the index buffer state when we're just moving the start index
|
||||
|
@@ -82,7 +82,8 @@ brw_bo_map_gtt(struct brw_context *brw, drm_intel_bo *bo, const char *bo_name)
|
||||
}
|
||||
|
||||
static GLboolean
|
||||
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj);
|
||||
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
|
||||
gl_map_buffer_index index);
|
||||
|
||||
static void
|
||||
intel_bufferobj_mark_gpu_usage(struct intel_buffer_object *intel_obj,
|
||||
@@ -159,8 +160,7 @@ intel_bufferobj_free(struct gl_context * ctx, struct gl_buffer_object *obj)
|
||||
* to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
|
||||
* (though it does if you call glDeleteBuffers)
|
||||
*/
|
||||
if (obj->Pointer)
|
||||
intel_bufferobj_unmap(ctx, obj);
|
||||
_mesa_buffer_unmap_all_mappings(ctx, obj);
|
||||
|
||||
drm_intel_bo_unreference(intel_obj->buffer);
|
||||
free(intel_obj);
|
||||
@@ -197,7 +197,8 @@ intel_bufferobj_data(struct gl_context * ctx,
|
||||
intel_obj->Base.Usage = usage;
|
||||
intel_obj->Base.StorageFlags = storageFlags;
|
||||
|
||||
assert(!obj->Pointer); /* Mesa should have unmapped it */
|
||||
assert(!obj->Mappings[MAP_USER].Pointer); /* Mesa should have unmapped it */
|
||||
assert(!obj->Mappings[MAP_INTERNAL].Pointer);
|
||||
|
||||
if (intel_obj->buffer != NULL)
|
||||
release_buffer(intel_obj);
|
||||
@@ -351,7 +352,8 @@ intel_bufferobj_get_subdata(struct gl_context * ctx,
|
||||
static void *
|
||||
intel_bufferobj_map_range(struct gl_context * ctx,
|
||||
GLintptr offset, GLsizeiptr length,
|
||||
GLbitfield access, struct gl_buffer_object *obj)
|
||||
GLbitfield access, struct gl_buffer_object *obj,
|
||||
gl_map_buffer_index index)
|
||||
{
|
||||
struct brw_context *brw = brw_context(ctx);
|
||||
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
|
||||
@@ -361,12 +363,12 @@ intel_bufferobj_map_range(struct gl_context * ctx,
|
||||
/* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
|
||||
* internally uses our functions directly.
|
||||
*/
|
||||
obj->Offset = offset;
|
||||
obj->Length = length;
|
||||
obj->AccessFlags = access;
|
||||
obj->Mappings[index].Offset = offset;
|
||||
obj->Mappings[index].Length = length;
|
||||
obj->Mappings[index].AccessFlags = access;
|
||||
|
||||
if (intel_obj->buffer == NULL) {
|
||||
obj->Pointer = NULL;
|
||||
obj->Mappings[index].Pointer = NULL;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@@ -410,23 +412,25 @@ intel_bufferobj_map_range(struct gl_context * ctx,
|
||||
const unsigned extra = (uintptr_t) offset % alignment;
|
||||
|
||||
if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
|
||||
intel_obj->range_map_buffer = _mesa_align_malloc(length + extra,
|
||||
intel_obj->range_map_buffer[index] = _mesa_align_malloc(length + extra,
|
||||
alignment);
|
||||
obj->Pointer = intel_obj->range_map_buffer + extra;
|
||||
obj->Mappings[index].Pointer =
|
||||
intel_obj->range_map_buffer[index] + extra;
|
||||
} else {
|
||||
intel_obj->range_map_bo = drm_intel_bo_alloc(brw->bufmgr,
|
||||
intel_obj->range_map_bo[index] = drm_intel_bo_alloc(brw->bufmgr,
|
||||
"range map",
|
||||
length + extra,
|
||||
alignment);
|
||||
if (!(access & GL_MAP_READ_BIT)) {
|
||||
drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
|
||||
drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo[index]);
|
||||
} else {
|
||||
drm_intel_bo_map(intel_obj->range_map_bo,
|
||||
drm_intel_bo_map(intel_obj->range_map_bo[index],
|
||||
(access & GL_MAP_WRITE_BIT) != 0);
|
||||
}
|
||||
obj->Pointer = intel_obj->range_map_bo->virtual + extra;
|
||||
obj->Mappings[index].Pointer =
|
||||
intel_obj->range_map_bo[index]->virtual + extra;
|
||||
}
|
||||
return obj->Pointer;
|
||||
return obj->Mappings[index].Pointer;
|
||||
}
|
||||
|
||||
if (access & GL_MAP_UNSYNCHRONIZED_BIT)
|
||||
@@ -439,8 +443,8 @@ intel_bufferobj_map_range(struct gl_context * ctx,
|
||||
intel_bufferobj_mark_inactive(intel_obj);
|
||||
}
|
||||
|
||||
obj->Pointer = intel_obj->buffer->virtual + offset;
|
||||
return obj->Pointer;
|
||||
obj->Mappings[index].Pointer = intel_obj->buffer->virtual + offset;
|
||||
return obj->Mappings[index].Pointer;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -459,7 +463,8 @@ intel_bufferobj_map_range(struct gl_context * ctx,
|
||||
static void
|
||||
intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
|
||||
GLintptr offset, GLsizeiptr length,
|
||||
struct gl_buffer_object *obj)
|
||||
struct gl_buffer_object *obj,
|
||||
gl_map_buffer_index index)
|
||||
{
|
||||
struct brw_context *brw = brw_context(ctx);
|
||||
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
|
||||
@@ -468,7 +473,7 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
|
||||
/* Unless we're in the range map using a temporary system buffer,
|
||||
* there's no work to do.
|
||||
*/
|
||||
if (intel_obj->range_map_buffer == NULL)
|
||||
if (intel_obj->range_map_buffer[index] == NULL)
|
||||
return;
|
||||
|
||||
if (length == 0)
|
||||
@@ -480,13 +485,16 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
|
||||
* former points to the actual mapping while the latter may be offset to
|
||||
* meet alignment guarantees.
|
||||
*/
|
||||
drm_intel_bo_subdata(temp_bo, 0, length, obj->Pointer);
|
||||
drm_intel_bo_subdata(temp_bo, 0, length, obj->Mappings[index].Pointer);
|
||||
|
||||
intel_emit_linear_blit(brw,
|
||||
intel_obj->buffer, obj->Offset + offset,
|
||||
intel_obj->buffer,
|
||||
obj->Mappings[index].Offset + offset,
|
||||
temp_bo, 0,
|
||||
length);
|
||||
intel_bufferobj_mark_gpu_usage(intel_obj, obj->Offset + offset, length);
|
||||
intel_bufferobj_mark_gpu_usage(intel_obj,
|
||||
obj->Mappings[index].Offset + offset,
|
||||
length);
|
||||
|
||||
drm_intel_bo_unreference(temp_bo);
|
||||
}
|
||||
@@ -498,32 +506,35 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
|
||||
* Implements glUnmapBuffer().
|
||||
*/
|
||||
static GLboolean
|
||||
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
|
||||
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj,
|
||||
gl_map_buffer_index index)
|
||||
{
|
||||
struct brw_context *brw = brw_context(ctx);
|
||||
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
|
||||
|
||||
assert(intel_obj);
|
||||
assert(obj->Pointer);
|
||||
if (intel_obj->range_map_buffer != NULL) {
|
||||
assert(obj->Mappings[index].Pointer);
|
||||
if (intel_obj->range_map_buffer[index] != NULL) {
|
||||
/* Since we've emitted some blits to buffers that will (likely) be used
|
||||
* in rendering operations in other cache domains in this batch, emit a
|
||||
* flush. Once again, we wish for a domain tracker in libdrm to cover
|
||||
* usage inside of a batchbuffer.
|
||||
*/
|
||||
intel_batchbuffer_emit_mi_flush(brw);
|
||||
_mesa_align_free(intel_obj->range_map_buffer);
|
||||
intel_obj->range_map_buffer = NULL;
|
||||
_mesa_align_free(intel_obj->range_map_buffer[index]);
|
||||
intel_obj->range_map_buffer[index] = NULL;
|
||||
} else if (intel_obj->range_map_bo != NULL) {
|
||||
const unsigned extra = obj->Pointer - intel_obj->range_map_bo->virtual;
|
||||
const unsigned extra = obj->Mappings[index].Pointer -
|
||||
intel_obj->range_map_bo[index]->virtual;
|
||||
|
||||
drm_intel_bo_unmap(intel_obj->range_map_bo);
|
||||
drm_intel_bo_unmap(intel_obj->range_map_bo[index]);
|
||||
|
||||
intel_emit_linear_blit(brw,
|
||||
intel_obj->buffer, obj->Offset,
|
||||
intel_obj->range_map_bo, extra,
|
||||
obj->Length);
|
||||
intel_bufferobj_mark_gpu_usage(intel_obj, obj->Offset, obj->Length);
|
||||
intel_obj->buffer, obj->Mappings[index].Offset,
|
||||
intel_obj->range_map_bo[index], extra,
|
||||
obj->Mappings[index].Length);
|
||||
intel_bufferobj_mark_gpu_usage(intel_obj, obj->Mappings[index].Offset,
|
||||
obj->Mappings[index].Length);
|
||||
|
||||
/* Since we've emitted some blits to buffers that will (likely) be used
|
||||
* in rendering operations in other cache domains in this batch, emit a
|
||||
@@ -532,14 +543,14 @@ intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
|
||||
*/
|
||||
intel_batchbuffer_emit_mi_flush(brw);
|
||||
|
||||
drm_intel_bo_unreference(intel_obj->range_map_bo);
|
||||
intel_obj->range_map_bo = NULL;
|
||||
drm_intel_bo_unreference(intel_obj->range_map_bo[index]);
|
||||
intel_obj->range_map_bo[index] = NULL;
|
||||
} else if (intel_obj->buffer != NULL) {
|
||||
drm_intel_bo_unmap(intel_obj->buffer);
|
||||
}
|
||||
obj->Pointer = NULL;
|
||||
obj->Offset = 0;
|
||||
obj->Length = 0;
|
||||
obj->Mappings[index].Pointer = NULL;
|
||||
obj->Mappings[index].Offset = 0;
|
||||
obj->Mappings[index].Length = 0;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
@@ -42,9 +42,8 @@ struct intel_buffer_object
|
||||
struct gl_buffer_object Base;
|
||||
drm_intel_bo *buffer; /* the low-level buffer manager's buffer handle */
|
||||
|
||||
drm_intel_bo *range_map_bo;
|
||||
void *range_map_buffer;
|
||||
unsigned int range_map_offset;
|
||||
drm_intel_bo *range_map_bo[MAP_COUNT];
|
||||
void *range_map_buffer[MAP_COUNT];
|
||||
|
||||
/** @{
|
||||
* Tracking for what range of the BO may currently be in use by the GPU.
|
||||
|
@@ -79,7 +79,8 @@ static const GLubyte *map_pbo( struct gl_context *ctx,
|
||||
|
||||
buf = (GLubyte *) ctx->Driver.MapBufferRange(ctx, 0, unpack->BufferObj->Size,
|
||||
GL_MAP_READ_BIT,
|
||||
unpack->BufferObj);
|
||||
unpack->BufferObj,
|
||||
MAP_INTERNAL);
|
||||
if (!buf) {
|
||||
_mesa_error(ctx, GL_INVALID_OPERATION, "glBitmap(PBO is mapped)");
|
||||
return NULL;
|
||||
@@ -317,7 +318,7 @@ out:
|
||||
|
||||
if (_mesa_is_bufferobj(unpack->BufferObj)) {
|
||||
/* done with PBO so unmap it now */
|
||||
ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj);
|
||||
ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj, MAP_INTERNAL);
|
||||
}
|
||||
|
||||
intel_check_front_buffer_rendering(brw);
|
||||
|
@@ -127,12 +127,13 @@ nouveau_bufferobj_get_subdata(struct gl_context *ctx, GLintptrARB offset,
|
||||
static void *
|
||||
nouveau_bufferobj_map_range(struct gl_context *ctx, GLintptr offset,
|
||||
GLsizeiptr length, GLbitfield access,
|
||||
struct gl_buffer_object *obj)
|
||||
struct gl_buffer_object *obj,
|
||||
gl_map_buffer_index index)
|
||||
{
|
||||
unsigned flags = 0;
|
||||
char *map;
|
||||
|
||||
assert(!obj->Pointer);
|
||||
assert(!obj->Mappings[index].Pointer);
|
||||
|
||||
if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
|
||||
if (access & GL_MAP_READ_BIT)
|
||||
@@ -145,23 +146,24 @@ nouveau_bufferobj_map_range(struct gl_context *ctx, GLintptr offset,
|
||||
if (!map)
|
||||
return NULL;
|
||||
|
||||
obj->Pointer = map + offset;
|
||||
obj->Offset = offset;
|
||||
obj->Length = length;
|
||||
obj->AccessFlags = access;
|
||||
obj->Mappings[index].Pointer = map + offset;
|
||||
obj->Mappings[index].Offset = offset;
|
||||
obj->Mappings[index].Length = length;
|
||||
obj->Mappings[index].AccessFlags = access;
|
||||
|
||||
return obj->Pointer;
|
||||
return obj->Mappings[index].Pointer;
|
||||
}
|
||||
|
||||
static GLboolean
|
||||
nouveau_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj)
|
||||
nouveau_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj,
|
||||
gl_map_buffer_index index)
|
||||
{
|
||||
assert(obj->Pointer);
|
||||
assert(obj->Mappings[index].Pointer);
|
||||
|
||||
obj->Pointer = NULL;
|
||||
obj->Offset = 0;
|
||||
obj->Length = 0;
|
||||
obj->AccessFlags = 0;
|
||||
obj->Mappings[index].Pointer = NULL;
|
||||
obj->Mappings[index].Offset = 0;
|
||||
obj->Mappings[index].Length = 0;
|
||||
obj->Mappings[index].AccessFlags = 0;
|
||||
|
||||
return GL_TRUE;
|
||||
}
|
||||
|
@@ -60,10 +60,13 @@ radeonDeleteBufferObject(struct gl_context * ctx,
|
||||
struct gl_buffer_object *obj)
|
||||
{
|
||||
struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);
|
||||
int i;
|
||||
|
||||
if (obj->Pointer) {
|
||||
for (i = 0; i < MAP_COUNT; i++) {
|
||||
if (obj->Mappings[i].Pointer) {
|
||||
radeon_bo_unmap(radeon_obj->bo);
|
||||
}
|
||||
}
|
||||
|
||||
if (radeon_obj->bo) {
|
||||
radeon_bo_unref(radeon_obj->bo);
|
||||
@@ -175,7 +178,8 @@ radeonGetBufferSubData(struct gl_context * ctx,
|
||||
static void *
|
||||
radeonMapBufferRange(struct gl_context * ctx,
|
||||
GLintptr offset, GLsizeiptr length,
|
||||
GLbitfield access, struct gl_buffer_object *obj)
|
||||
GLbitfield access, struct gl_buffer_object *obj,
|
||||
gl_map_buffer_index index)
|
||||
{
|
||||
struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);
|
||||
const GLboolean write_only =
|
||||
@@ -186,18 +190,18 @@ radeonMapBufferRange(struct gl_context * ctx,
|
||||
}
|
||||
|
||||
if (radeon_obj->bo == NULL) {
|
||||
obj->Pointer = NULL;
|
||||
obj->Mappings[index].Pointer = NULL;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
obj->Offset = offset;
|
||||
obj->Length = length;
|
||||
obj->AccessFlags = access;
|
||||
obj->Mappings[index].Offset = offset;
|
||||
obj->Mappings[index].Length = length;
|
||||
obj->Mappings[index].AccessFlags = access;
|
||||
|
||||
radeon_bo_map(radeon_obj->bo, write_only);
|
||||
|
||||
obj->Pointer = radeon_obj->bo->ptr + offset;
|
||||
return obj->Pointer;
|
||||
obj->Mappings[index].Pointer = radeon_obj->bo->ptr + offset;
|
||||
return obj->Mappings[index].Pointer;
|
||||
}
|
||||
|
||||
|
||||
@@ -206,7 +210,8 @@ radeonMapBufferRange(struct gl_context * ctx,
|
||||
*/
|
||||
static GLboolean
|
||||
radeonUnmapBuffer(struct gl_context * ctx,
|
||||
struct gl_buffer_object *obj)
|
||||
struct gl_buffer_object *obj,
|
||||
gl_map_buffer_index index)
|
||||
{
|
||||
struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);
|
||||
|
||||
@@ -214,9 +219,9 @@ radeonUnmapBuffer(struct gl_context * ctx,
|
||||
radeon_bo_unmap(radeon_obj->bo);
|
||||
}
|
||||
|
||||
obj->Pointer = NULL;
|
||||
obj->Offset = 0;
|
||||
obj->Length = 0;
|
||||
obj->Mappings[index].Pointer = NULL;
|
||||
obj->Mappings[index].Offset = 0;
|
||||
obj->Mappings[index].Length = 0;
|
||||
|
||||
return GL_TRUE;
|
||||
}
|
||||
|
@@ -366,12 +366,10 @@ xmesa_DrawPixels_8R8G8B( struct gl_context *ctx,
|
||||
buf = (GLubyte *) ctx->Driver.MapBufferRange(ctx, 0,
|
||||
unpack->BufferObj->Size,
|
||||
GL_MAP_READ_BIT,
|
||||
unpack->BufferObj);
|
||||
unpack->BufferObj,
|
||||
MAP_INTERNAL);
|
||||
if (!buf) {
|
||||
/* buffer is already mapped - that's an error */
|
||||
_mesa_error(ctx, GL_INVALID_OPERATION,
|
||||
"glDrawPixels(PBO is mapped)");
|
||||
return;
|
||||
return; /* error */
|
||||
}
|
||||
pixels = ADD_POINTERS(buf, pixels);
|
||||
}
|
||||
@@ -417,7 +415,7 @@ xmesa_DrawPixels_8R8G8B( struct gl_context *ctx,
|
||||
}
|
||||
|
||||
if (_mesa_is_bufferobj(unpack->BufferObj)) {
|
||||
ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj);
|
||||
ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj, MAP_INTERNAL);
|
||||
}
|
||||
}
|
||||
else {
|
||||
@@ -500,12 +498,10 @@ xmesa_DrawPixels_5R6G5B( struct gl_context *ctx,
|
||||
buf = (GLubyte *) ctx->Driver.MapBufferRange(ctx, 0,
|
||||
unpack->BufferObj->Size,
|
||||
GL_MAP_READ_BIT,
|
||||
unpack->BufferObj);
|
||||
unpack->BufferObj,
|
||||
MAP_INTERNAL);
|
||||
if (!buf) {
|
||||
/* buffer is already mapped - that's an error */
|
||||
_mesa_error(ctx, GL_INVALID_OPERATION,
|
||||
"glDrawPixels(PBO is mapped)");
|
||||
return;
|
||||
return; /* error */
|
||||
}
|
||||
pixels = ADD_POINTERS(buf, pixels);
|
||||
}
|
||||
@@ -550,7 +546,7 @@ xmesa_DrawPixels_5R6G5B( struct gl_context *ctx,
|
||||
}
|
||||
|
||||
if (unpack->BufferObj->Name) {
|
||||
ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj);
|
||||
ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj, MAP_INTERNAL);
|
||||
}
|
||||
}
|
||||
else {
|
||||
|
@@ -1458,7 +1458,8 @@ _ae_destroy_context(struct gl_context *ctx)
|
||||
static void
|
||||
check_vbo(AEcontext *actx, struct gl_buffer_object *vbo)
|
||||
{
|
||||
if (_mesa_is_bufferobj(vbo) && !_mesa_bufferobj_mapped(vbo)) {
|
||||
if (_mesa_is_bufferobj(vbo) &&
|
||||
!_mesa_bufferobj_mapped(vbo, MAP_INTERNAL)) {
|
||||
GLuint i;
|
||||
for (i = 0; i < actx->nr_vbos; i++)
|
||||
if (actx->vbo[i] == vbo)
|
||||
@@ -1633,7 +1634,8 @@ _ae_map_vbos(struct gl_context *ctx)
|
||||
ctx->Driver.MapBufferRange(ctx, 0,
|
||||
actx->vbo[i]->Size,
|
||||
GL_MAP_READ_BIT,
|
||||
actx->vbo[i]);
|
||||
actx->vbo[i],
|
||||
MAP_INTERNAL);
|
||||
|
||||
if (actx->nr_vbos)
|
||||
actx->mapped_vbos = GL_TRUE;
|
||||
@@ -1655,7 +1657,7 @@ _ae_unmap_vbos(struct gl_context *ctx)
|
||||
assert (!actx->NewState);
|
||||
|
||||
for (i = 0; i < actx->nr_vbos; i++)
|
||||
ctx->Driver.UnmapBuffer(ctx, actx->vbo[i]);
|
||||
ctx->Driver.UnmapBuffer(ctx, actx->vbo[i], MAP_INTERNAL);
|
||||
|
||||
actx->mapped_vbos = GL_FALSE;
|
||||
}
|
||||
@@ -1701,7 +1703,8 @@ _ae_ArrayElement(GLint elt)
|
||||
/* emit generic attribute elements */
|
||||
for (at = actx->attribs; at->func; at++) {
|
||||
const GLubyte *src
|
||||
= ADD_POINTERS(at->array->BufferObj->Pointer, at->array->Ptr)
|
||||
= ADD_POINTERS(at->array->BufferObj->Mappings[MAP_INTERNAL].Pointer,
|
||||
at->array->Ptr)
|
||||
+ elt * at->array->StrideB;
|
||||
at->func(at->index, src);
|
||||
}
|
||||
@@ -1709,7 +1712,8 @@ _ae_ArrayElement(GLint elt)
|
||||
/* emit conventional arrays elements */
|
||||
for (aa = actx->arrays; aa->offset != -1 ; aa++) {
|
||||
const GLubyte *src
|
||||
= ADD_POINTERS(aa->array->BufferObj->Pointer, aa->array->Ptr)
|
||||
= ADD_POINTERS(aa->array->BufferObj->Mappings[MAP_INTERNAL].Pointer,
|
||||
aa->array->Ptr)
|
||||
+ elt * aa->array->StrideB;
|
||||
CALL_by_offset(disp, (array_func), aa->offset, ((const void *) src));
|
||||
}
|
||||
|
@@ -69,7 +69,8 @@ _mesa_max_buffer_index(struct gl_context *ctx, GLuint count, GLenum type,
|
||||
if (_mesa_is_bufferobj(elementBuf)) {
|
||||
/* elements are in a user-defined buffer object. need to map it */
|
||||
map = ctx->Driver.MapBufferRange(ctx, 0, elementBuf->Size,
|
||||
GL_MAP_READ_BIT, elementBuf);
|
||||
GL_MAP_READ_BIT, elementBuf,
|
||||
MAP_INTERNAL);
|
||||
/* Actual address is the sum of pointers */
|
||||
indices = (const GLvoid *) ADD_POINTERS(map, (const GLubyte *) indices);
|
||||
}
|
||||
@@ -92,7 +93,7 @@ _mesa_max_buffer_index(struct gl_context *ctx, GLuint count, GLenum type,
|
||||
}
|
||||
|
||||
if (map) {
|
||||
ctx->Driver.UnmapBuffer(ctx, elementBuf);
|
||||
ctx->Driver.UnmapBuffer(ctx, elementBuf, MAP_INTERNAL);
|
||||
}
|
||||
|
||||
return max;
|
||||
|
@@ -207,11 +207,12 @@ static bool
|
||||
bufferobj_range_mapped(const struct gl_buffer_object *obj,
|
||||
GLintptr offset, GLsizeiptr size)
|
||||
{
|
||||
if (_mesa_bufferobj_mapped(obj)) {
|
||||
if (_mesa_bufferobj_mapped(obj, MAP_USER)) {
|
||||
const GLintptr end = offset + size;
|
||||
const GLintptr mapEnd = obj->Offset + obj->Length;
|
||||
const GLintptr mapEnd = obj->Mappings[MAP_USER].Offset +
|
||||
obj->Mappings[MAP_USER].Length;
|
||||
|
||||
if (!(end <= obj->Offset || offset >= mapEnd)) {
|
||||
if (!(end <= obj->Mappings[MAP_USER].Offset || offset >= mapEnd)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@@ -269,7 +270,7 @@ buffer_object_subdata_range_good(struct gl_context * ctx, GLenum target,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (bufObj->AccessFlags & GL_MAP_PERSISTENT_BIT)
|
||||
if (bufObj->Mappings[MAP_USER].AccessFlags & GL_MAP_PERSISTENT_BIT)
|
||||
return bufObj;
|
||||
|
||||
if (mappedRange) {
|
||||
@@ -279,7 +280,7 @@ buffer_object_subdata_range_good(struct gl_context * ctx, GLenum target,
|
||||
}
|
||||
}
|
||||
else {
|
||||
if (_mesa_bufferobj_mapped(bufObj)) {
|
||||
if (_mesa_bufferobj_mapped(bufObj, MAP_USER)) {
|
||||
_mesa_error(ctx, GL_INVALID_OPERATION, "%s", caller);
|
||||
return NULL;
|
||||
}
|
||||
@@ -503,7 +504,6 @@ _mesa_initialize_buffer_object( struct gl_context *ctx,
|
||||
obj->RefCount = 1;
|
||||
obj->Name = name;
|
||||
obj->Usage = GL_STATIC_DRAW_ARB;
|
||||
obj->AccessFlags = 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -675,33 +675,11 @@ _mesa_buffer_clear_subdata(struct gl_context *ctx,
|
||||
GLsizeiptr i;
|
||||
GLubyte *dest;
|
||||
|
||||
if (_mesa_bufferobj_mapped(bufObj)) {
|
||||
GLubyte *data = malloc(size);
|
||||
GLubyte *dataStart = data;
|
||||
if (data == NULL) {
|
||||
_mesa_error(ctx, GL_OUT_OF_MEMORY, "glClearBuffer[Sub]Data");
|
||||
return;
|
||||
}
|
||||
|
||||
if (clearValue == NULL) {
|
||||
/* Clear with zeros, per the spec */
|
||||
memset(data, 0, size);
|
||||
}
|
||||
else {
|
||||
for (i = 0; i < size/clearValueSize; ++i) {
|
||||
memcpy(data, clearValue, clearValueSize);
|
||||
data += clearValueSize;
|
||||
}
|
||||
}
|
||||
ctx->Driver.BufferSubData(ctx, offset, size, dataStart, bufObj);
|
||||
return;
|
||||
}
|
||||
|
||||
ASSERT(ctx->Driver.MapBufferRange);
|
||||
dest = ctx->Driver.MapBufferRange(ctx, offset, size,
|
||||
GL_MAP_WRITE_BIT |
|
||||
GL_MAP_INVALIDATE_RANGE_BIT,
|
||||
bufObj);
|
||||
bufObj, MAP_INTERNAL);
|
||||
|
||||
if (!dest) {
|
||||
_mesa_error(ctx, GL_OUT_OF_MEMORY, "glClearBuffer[Sub]Data");
|
||||
@@ -711,7 +689,7 @@ _mesa_buffer_clear_subdata(struct gl_context *ctx,
|
||||
if (clearValue == NULL) {
|
||||
/* Clear with zeros, per the spec */
|
||||
memset(dest, 0, size);
|
||||
ctx->Driver.UnmapBuffer(ctx, bufObj);
|
||||
ctx->Driver.UnmapBuffer(ctx, bufObj, MAP_INTERNAL);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -720,7 +698,7 @@ _mesa_buffer_clear_subdata(struct gl_context *ctx,
|
||||
dest += clearValueSize;
|
||||
}
|
||||
|
||||
ctx->Driver.UnmapBuffer(ctx, bufObj);
|
||||
ctx->Driver.UnmapBuffer(ctx, bufObj, MAP_INTERNAL);
|
||||
}
|
||||
|
||||
|
||||
@@ -731,16 +709,17 @@ _mesa_buffer_clear_subdata(struct gl_context *ctx,
|
||||
static void *
|
||||
_mesa_buffer_map_range( struct gl_context *ctx, GLintptr offset,
|
||||
GLsizeiptr length, GLbitfield access,
|
||||
struct gl_buffer_object *bufObj )
|
||||
struct gl_buffer_object *bufObj,
|
||||
gl_map_buffer_index index)
|
||||
{
|
||||
(void) ctx;
|
||||
assert(!_mesa_bufferobj_mapped(bufObj));
|
||||
assert(!_mesa_bufferobj_mapped(bufObj, index));
|
||||
/* Just return a direct pointer to the data */
|
||||
bufObj->Pointer = bufObj->Data + offset;
|
||||
bufObj->Length = length;
|
||||
bufObj->Offset = offset;
|
||||
bufObj->AccessFlags = access;
|
||||
return bufObj->Pointer;
|
||||
bufObj->Mappings[index].Pointer = bufObj->Data + offset;
|
||||
bufObj->Mappings[index].Length = length;
|
||||
bufObj->Mappings[index].Offset = offset;
|
||||
bufObj->Mappings[index].AccessFlags = access;
|
||||
return bufObj->Mappings[index].Pointer;
|
||||
}
|
||||
|
||||
|
||||
@@ -751,7 +730,8 @@ _mesa_buffer_map_range( struct gl_context *ctx, GLintptr offset,
|
||||
static void
|
||||
_mesa_buffer_flush_mapped_range( struct gl_context *ctx,
|
||||
GLintptr offset, GLsizeiptr length,
|
||||
struct gl_buffer_object *obj )
|
||||
struct gl_buffer_object *obj,
|
||||
gl_map_buffer_index index)
|
||||
{
|
||||
(void) ctx;
|
||||
(void) offset;
|
||||
@@ -769,14 +749,15 @@ _mesa_buffer_flush_mapped_range( struct gl_context *ctx,
|
||||
* \sa glUnmapBufferARB, dd_function_table::UnmapBuffer
|
||||
*/
|
||||
static GLboolean
|
||||
_mesa_buffer_unmap( struct gl_context *ctx, struct gl_buffer_object *bufObj )
|
||||
_mesa_buffer_unmap(struct gl_context *ctx, struct gl_buffer_object *bufObj,
|
||||
gl_map_buffer_index index)
|
||||
{
|
||||
(void) ctx;
|
||||
/* XXX we might assert here that bufObj->Pointer is non-null */
|
||||
bufObj->Pointer = NULL;
|
||||
bufObj->Length = 0;
|
||||
bufObj->Offset = 0;
|
||||
bufObj->AccessFlags = 0x0;
|
||||
bufObj->Mappings[index].Pointer = NULL;
|
||||
bufObj->Mappings[index].Length = 0;
|
||||
bufObj->Mappings[index].Offset = 0;
|
||||
bufObj->Mappings[index].AccessFlags = 0x0;
|
||||
return GL_TRUE;
|
||||
}
|
||||
|
||||
@@ -794,14 +775,11 @@ _mesa_copy_buffer_subdata(struct gl_context *ctx,
|
||||
{
|
||||
GLubyte *srcPtr, *dstPtr;
|
||||
|
||||
/* the buffers should not be mapped */
|
||||
assert(!_mesa_bufferobj_mapped(src));
|
||||
assert(!_mesa_bufferobj_mapped(dst));
|
||||
|
||||
if (src == dst) {
|
||||
srcPtr = dstPtr = ctx->Driver.MapBufferRange(ctx, 0, src->Size,
|
||||
GL_MAP_READ_BIT |
|
||||
GL_MAP_WRITE_BIT, src);
|
||||
GL_MAP_WRITE_BIT, src,
|
||||
MAP_INTERNAL);
|
||||
|
||||
if (!srcPtr)
|
||||
return;
|
||||
@@ -810,10 +788,12 @@ _mesa_copy_buffer_subdata(struct gl_context *ctx,
|
||||
dstPtr += writeOffset;
|
||||
} else {
|
||||
srcPtr = ctx->Driver.MapBufferRange(ctx, readOffset, size,
|
||||
GL_MAP_READ_BIT, src);
|
||||
GL_MAP_READ_BIT, src,
|
||||
MAP_INTERNAL);
|
||||
dstPtr = ctx->Driver.MapBufferRange(ctx, writeOffset, size,
|
||||
(GL_MAP_WRITE_BIT |
|
||||
GL_MAP_INVALIDATE_RANGE_BIT), dst);
|
||||
GL_MAP_INVALIDATE_RANGE_BIT), dst,
|
||||
MAP_INTERNAL);
|
||||
}
|
||||
|
||||
/* Note: the src and dst regions will never overlap. Trying to do so
|
||||
@@ -822,9 +802,9 @@ _mesa_copy_buffer_subdata(struct gl_context *ctx,
|
||||
if (srcPtr && dstPtr)
|
||||
memcpy(dstPtr, srcPtr, size);
|
||||
|
||||
ctx->Driver.UnmapBuffer(ctx, src);
|
||||
ctx->Driver.UnmapBuffer(ctx, src, MAP_INTERNAL);
|
||||
if (dst != src)
|
||||
ctx->Driver.UnmapBuffer(ctx, dst);
|
||||
ctx->Driver.UnmapBuffer(ctx, dst, MAP_INTERNAL);
|
||||
}
|
||||
|
||||
|
||||
@@ -1039,6 +1019,21 @@ _mesa_init_buffer_object_functions(struct dd_function_table *driver)
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
_mesa_buffer_unmap_all_mappings(struct gl_context *ctx,
|
||||
struct gl_buffer_object *bufObj)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < MAP_COUNT; i++) {
|
||||
if (_mesa_bufferobj_mapped(bufObj, i)) {
|
||||
ctx->Driver.UnmapBuffer(ctx, bufObj, i);
|
||||
ASSERT(bufObj->Mappings[i].Pointer == NULL);
|
||||
bufObj->Mappings[i].AccessFlags = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**********************************************************************/
|
||||
/* API Functions */
|
||||
@@ -1085,12 +1080,7 @@ _mesa_DeleteBuffers(GLsizei n, const GLuint *ids)
|
||||
|
||||
ASSERT(bufObj->Name == ids[i] || bufObj == &DummyBufferObject);
|
||||
|
||||
if (_mesa_bufferobj_mapped(bufObj)) {
|
||||
/* if mapped, unmap it now */
|
||||
ctx->Driver.UnmapBuffer(ctx, bufObj);
|
||||
bufObj->AccessFlags = 0;
|
||||
bufObj->Pointer = NULL;
|
||||
}
|
||||
_mesa_buffer_unmap_all_mappings(ctx, bufObj);
|
||||
|
||||
/* unbind any vertex pointers bound to this buffer */
|
||||
for (j = 0; j < Elements(vao->VertexBinding); j++) {
|
||||
@@ -1278,12 +1268,8 @@ _mesa_BufferStorage(GLenum target, GLsizeiptr size, const GLvoid *data,
|
||||
return;
|
||||
}
|
||||
|
||||
if (_mesa_bufferobj_mapped(bufObj)) {
|
||||
/* Unmap the existing buffer. We'll replace it now. Not an error. */
|
||||
ctx->Driver.UnmapBuffer(ctx, bufObj);
|
||||
bufObj->AccessFlags = 0;
|
||||
ASSERT(bufObj->Pointer == NULL);
|
||||
}
|
||||
_mesa_buffer_unmap_all_mappings(ctx, bufObj);
|
||||
|
||||
FLUSH_VERTICES(ctx, _NEW_BUFFER_OBJECT);
|
||||
|
||||
@@ -1355,12 +1341,8 @@ _mesa_BufferData(GLenum target, GLsizeiptrARB size,
|
||||
return;
|
||||
}
|
||||
|
||||
if (_mesa_bufferobj_mapped(bufObj)) {
|
||||
/* Unmap the existing buffer. We'll replace it now. Not an error. */
|
||||
ctx->Driver.UnmapBuffer(ctx, bufObj);
|
||||
bufObj->AccessFlags = 0;
|
||||
ASSERT(bufObj->Pointer == NULL);
|
||||
}
|
||||
_mesa_buffer_unmap_all_mappings(ctx, bufObj);
|
||||
|
||||
FLUSH_VERTICES(ctx, _NEW_BUFFER_OBJECT);
|
||||
|
||||
@@ -1594,7 +1576,7 @@ _mesa_MapBuffer(GLenum target, GLenum access)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (_mesa_bufferobj_mapped(bufObj)) {
|
||||
if (_mesa_bufferobj_mapped(bufObj, MAP_USER)) {
|
||||
_mesa_error(ctx, GL_INVALID_OPERATION, "glMapBufferARB(already mapped)");
|
||||
return NULL;
|
||||
}
|
||||
@@ -1606,7 +1588,8 @@ _mesa_MapBuffer(GLenum target, GLenum access)
|
||||
}
|
||||
|
||||
ASSERT(ctx->Driver.MapBufferRange);
|
||||
map = ctx->Driver.MapBufferRange(ctx, 0, bufObj->Size, accessFlags, bufObj);
|
||||
map = ctx->Driver.MapBufferRange(ctx, 0, bufObj->Size, accessFlags, bufObj,
|
||||
MAP_USER);
|
||||
if (!map) {
|
||||
_mesa_error(ctx, GL_OUT_OF_MEMORY, "glMapBufferARB(map failed)");
|
||||
return NULL;
|
||||
@@ -1616,10 +1599,10 @@ _mesa_MapBuffer(GLenum target, GLenum access)
|
||||
* This is important because other modules (like VBO) might call
|
||||
* the driver function directly.
|
||||
*/
|
||||
ASSERT(bufObj->Pointer == map);
|
||||
ASSERT(bufObj->Length == bufObj->Size);
|
||||
ASSERT(bufObj->Offset == 0);
|
||||
bufObj->AccessFlags = accessFlags;
|
||||
ASSERT(bufObj->Mappings[MAP_USER].Pointer == map);
|
||||
ASSERT(bufObj->Mappings[MAP_USER].Length == bufObj->Size);
|
||||
ASSERT(bufObj->Mappings[MAP_USER].Offset == 0);
|
||||
bufObj->Mappings[MAP_USER].AccessFlags = accessFlags;
|
||||
}
|
||||
|
||||
if (access == GL_WRITE_ONLY_ARB || access == GL_READ_WRITE_ARB)
|
||||
@@ -1647,7 +1630,7 @@ _mesa_MapBuffer(GLenum target, GLenum access)
|
||||
}
|
||||
#endif
|
||||
|
||||
return bufObj->Pointer;
|
||||
return bufObj->Mappings[MAP_USER].Pointer;
|
||||
}
|
||||
|
||||
|
||||
@@ -1663,7 +1646,7 @@ _mesa_UnmapBuffer(GLenum target)
|
||||
if (!bufObj)
|
||||
return GL_FALSE;
|
||||
|
||||
if (!_mesa_bufferobj_mapped(bufObj)) {
|
||||
if (!_mesa_bufferobj_mapped(bufObj, MAP_USER)) {
|
||||
_mesa_error(ctx, GL_INVALID_OPERATION, "glUnmapBufferARB");
|
||||
return GL_FALSE;
|
||||
}
|
||||
@@ -1704,11 +1687,11 @@ _mesa_UnmapBuffer(GLenum target)
|
||||
}
|
||||
#endif
|
||||
|
||||
status = ctx->Driver.UnmapBuffer( ctx, bufObj );
|
||||
bufObj->AccessFlags = 0;
|
||||
ASSERT(bufObj->Pointer == NULL);
|
||||
ASSERT(bufObj->Offset == 0);
|
||||
ASSERT(bufObj->Length == 0);
|
||||
status = ctx->Driver.UnmapBuffer(ctx, bufObj, MAP_USER);
|
||||
bufObj->Mappings[MAP_USER].AccessFlags = 0;
|
||||
ASSERT(bufObj->Mappings[MAP_USER].Pointer == NULL);
|
||||
ASSERT(bufObj->Mappings[MAP_USER].Offset == 0);
|
||||
ASSERT(bufObj->Mappings[MAP_USER].Length == 0);
|
||||
|
||||
return status;
|
||||
}
|
||||
@@ -1733,25 +1716,26 @@ _mesa_GetBufferParameteriv(GLenum target, GLenum pname, GLint *params)
|
||||
*params = bufObj->Usage;
|
||||
return;
|
||||
case GL_BUFFER_ACCESS_ARB:
|
||||
*params = simplified_access_mode(ctx, bufObj->AccessFlags);
|
||||
*params = simplified_access_mode(ctx,
|
||||
bufObj->Mappings[MAP_USER].AccessFlags);
|
||||
return;
|
||||
case GL_BUFFER_MAPPED_ARB:
|
||||
*params = _mesa_bufferobj_mapped(bufObj);
|
||||
*params = _mesa_bufferobj_mapped(bufObj, MAP_USER);
|
||||
return;
|
||||
case GL_BUFFER_ACCESS_FLAGS:
|
||||
if (!ctx->Extensions.ARB_map_buffer_range)
|
||||
goto invalid_pname;
|
||||
*params = bufObj->AccessFlags;
|
||||
*params = bufObj->Mappings[MAP_USER].AccessFlags;
|
||||
return;
|
||||
case GL_BUFFER_MAP_OFFSET:
|
||||
if (!ctx->Extensions.ARB_map_buffer_range)
|
||||
goto invalid_pname;
|
||||
*params = (GLint) bufObj->Offset;
|
||||
*params = (GLint) bufObj->Mappings[MAP_USER].Offset;
|
||||
return;
|
||||
case GL_BUFFER_MAP_LENGTH:
|
||||
if (!ctx->Extensions.ARB_map_buffer_range)
|
||||
goto invalid_pname;
|
||||
*params = (GLint) bufObj->Length;
|
||||
*params = (GLint) bufObj->Mappings[MAP_USER].Length;
|
||||
return;
|
||||
case GL_BUFFER_IMMUTABLE_STORAGE:
|
||||
if (!ctx->Extensions.ARB_buffer_storage)
|
||||
@@ -1797,25 +1781,26 @@ _mesa_GetBufferParameteri64v(GLenum target, GLenum pname, GLint64 *params)
|
||||
*params = bufObj->Usage;
|
||||
return;
|
||||
case GL_BUFFER_ACCESS_ARB:
|
||||
*params = simplified_access_mode(ctx, bufObj->AccessFlags);
|
||||
*params = simplified_access_mode(ctx,
|
||||
bufObj->Mappings[MAP_USER].AccessFlags);
|
||||
return;
|
||||
case GL_BUFFER_ACCESS_FLAGS:
|
||||
if (!ctx->Extensions.ARB_map_buffer_range)
|
||||
goto invalid_pname;
|
||||
*params = bufObj->AccessFlags;
|
||||
*params = bufObj->Mappings[MAP_USER].AccessFlags;
|
||||
return;
|
||||
case GL_BUFFER_MAPPED_ARB:
|
||||
*params = _mesa_bufferobj_mapped(bufObj);
|
||||
*params = _mesa_bufferobj_mapped(bufObj, MAP_USER);
|
||||
return;
|
||||
case GL_BUFFER_MAP_OFFSET:
|
||||
if (!ctx->Extensions.ARB_map_buffer_range)
|
||||
goto invalid_pname;
|
||||
*params = bufObj->Offset;
|
||||
*params = bufObj->Mappings[MAP_USER].Offset;
|
||||
return;
|
||||
case GL_BUFFER_MAP_LENGTH:
|
||||
if (!ctx->Extensions.ARB_map_buffer_range)
|
||||
goto invalid_pname;
|
||||
*params = bufObj->Length;
|
||||
*params = bufObj->Mappings[MAP_USER].Length;
|
||||
return;
|
||||
case GL_BUFFER_IMMUTABLE_STORAGE:
|
||||
if (!ctx->Extensions.ARB_buffer_storage)
|
||||
@@ -1853,7 +1838,7 @@ _mesa_GetBufferPointerv(GLenum target, GLenum pname, GLvoid **params)
|
||||
if (!bufObj)
|
||||
return;
|
||||
|
||||
*params = bufObj->Pointer;
|
||||
*params = bufObj->Mappings[MAP_USER].Pointer;
|
||||
}
|
||||
|
||||
|
||||
@@ -2061,7 +2046,7 @@ _mesa_MapBufferRange(GLenum target, GLintptr offset, GLsizeiptr length,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (_mesa_bufferobj_mapped(bufObj)) {
|
||||
if (_mesa_bufferobj_mapped(bufObj, MAP_USER)) {
|
||||
_mesa_error(ctx, GL_INVALID_OPERATION,
|
||||
"glMapBufferRange(buffer already mapped)");
|
||||
return NULL;
|
||||
@@ -2076,15 +2061,16 @@ _mesa_MapBufferRange(GLenum target, GLintptr offset, GLsizeiptr length,
|
||||
/* Mapping zero bytes should return a non-null pointer. */
|
||||
if (!length) {
|
||||
static long dummy = 0;
|
||||
bufObj->Pointer = &dummy;
|
||||
bufObj->Length = length;
|
||||
bufObj->Offset = offset;
|
||||
bufObj->AccessFlags = access;
|
||||
return bufObj->Pointer;
|
||||
bufObj->Mappings[MAP_USER].Pointer = &dummy;
|
||||
bufObj->Mappings[MAP_USER].Length = length;
|
||||
bufObj->Mappings[MAP_USER].Offset = offset;
|
||||
bufObj->Mappings[MAP_USER].AccessFlags = access;
|
||||
return bufObj->Mappings[MAP_USER].Pointer;
|
||||
}
|
||||
|
||||
ASSERT(ctx->Driver.MapBufferRange);
|
||||
map = ctx->Driver.MapBufferRange(ctx, offset, length, access, bufObj);
|
||||
map = ctx->Driver.MapBufferRange(ctx, offset, length, access, bufObj,
|
||||
MAP_USER);
|
||||
if (!map) {
|
||||
_mesa_error(ctx, GL_OUT_OF_MEMORY, "glMapBufferARB(map failed)");
|
||||
}
|
||||
@@ -2093,10 +2079,10 @@ _mesa_MapBufferRange(GLenum target, GLintptr offset, GLsizeiptr length,
|
||||
* This is important because other modules (like VBO) might call
|
||||
* the driver function directly.
|
||||
*/
|
||||
ASSERT(bufObj->Pointer == map);
|
||||
ASSERT(bufObj->Length == length);
|
||||
ASSERT(bufObj->Offset == offset);
|
||||
ASSERT(bufObj->AccessFlags == access);
|
||||
ASSERT(bufObj->Mappings[MAP_USER].Pointer == map);
|
||||
ASSERT(bufObj->Mappings[MAP_USER].Length == length);
|
||||
ASSERT(bufObj->Mappings[MAP_USER].Offset == offset);
|
||||
ASSERT(bufObj->Mappings[MAP_USER].AccessFlags == access);
|
||||
}
|
||||
|
||||
return map;
|
||||
@@ -2135,30 +2121,33 @@ _mesa_FlushMappedBufferRange(GLenum target, GLintptr offset, GLsizeiptr length)
|
||||
if (!bufObj)
|
||||
return;
|
||||
|
||||
if (!_mesa_bufferobj_mapped(bufObj)) {
|
||||
if (!_mesa_bufferobj_mapped(bufObj, MAP_USER)) {
|
||||
/* buffer is not mapped */
|
||||
_mesa_error(ctx, GL_INVALID_OPERATION,
|
||||
"glFlushMappedBufferRange(buffer is not mapped)");
|
||||
return;
|
||||
}
|
||||
|
||||
if ((bufObj->AccessFlags & GL_MAP_FLUSH_EXPLICIT_BIT) == 0) {
|
||||
if ((bufObj->Mappings[MAP_USER].AccessFlags &
|
||||
GL_MAP_FLUSH_EXPLICIT_BIT) == 0) {
|
||||
_mesa_error(ctx, GL_INVALID_OPERATION,
|
||||
"glFlushMappedBufferRange(GL_MAP_FLUSH_EXPLICIT_BIT not set)");
|
||||
return;
|
||||
}
|
||||
|
||||
if (offset + length > bufObj->Length) {
|
||||
if (offset + length > bufObj->Mappings[MAP_USER].Length) {
|
||||
_mesa_error(ctx, GL_INVALID_VALUE,
|
||||
"glFlushMappedBufferRange(offset %ld + length %ld > mapped length %ld)",
|
||||
(long)offset, (long)length, (long)bufObj->Length);
|
||||
(long)offset, (long)length,
|
||||
(long)bufObj->Mappings[MAP_USER].Length);
|
||||
return;
|
||||
}
|
||||
|
||||
ASSERT(bufObj->AccessFlags & GL_MAP_WRITE_BIT);
|
||||
ASSERT(bufObj->Mappings[MAP_USER].AccessFlags & GL_MAP_WRITE_BIT);
|
||||
|
||||
if (ctx->Driver.FlushMappedBufferRange)
|
||||
ctx->Driver.FlushMappedBufferRange(ctx, offset, length, bufObj);
|
||||
ctx->Driver.FlushMappedBufferRange(ctx, offset, length, bufObj,
|
||||
MAP_USER);
|
||||
}
|
||||
|
||||
|
||||
@@ -2812,7 +2801,7 @@ _mesa_InvalidateBufferSubData(GLuint buffer, GLintptr offset,
|
||||
* currently mapped by MapBufferRange, unless it was mapped
|
||||
* with MAP_PERSISTENT_BIT set in the MapBufferRange access flags."
|
||||
*/
|
||||
if (!(bufObj->AccessFlags & GL_MAP_PERSISTENT_BIT) &&
|
||||
if (!(bufObj->Mappings[MAP_USER].AccessFlags & GL_MAP_PERSISTENT_BIT) &&
|
||||
bufferobj_range_mapped(bufObj, offset, length)) {
|
||||
_mesa_error(ctx, GL_INVALID_OPERATION,
|
||||
"glInvalidateBufferSubData(intersection with mapped "
|
||||
|
@@ -37,19 +37,21 @@
 */


/** Is the given buffer object currently mapped? */
/** Is the given buffer object currently mapped by the GL user? */
static inline GLboolean
_mesa_bufferobj_mapped(const struct gl_buffer_object *obj)
_mesa_bufferobj_mapped(const struct gl_buffer_object *obj,
                       gl_map_buffer_index index)
{
   return obj->Pointer != NULL;
   return obj->Mappings[index].Pointer != NULL;
}

/** Can we not use this buffer while mapped? */
static inline GLboolean
_mesa_check_disallowed_mapping(const struct gl_buffer_object *obj)
{
   return _mesa_bufferobj_mapped(obj) &&
          !(obj->AccessFlags & GL_MAP_PERSISTENT_BIT);
   return _mesa_bufferobj_mapped(obj, MAP_USER) &&
          !(obj->Mappings[MAP_USER].AccessFlags &
            GL_MAP_PERSISTENT_BIT);
}

/**
@@ -109,6 +111,9 @@ _mesa_total_buffer_object_memory(struct gl_context *ctx);
extern void
_mesa_init_buffer_object_functions(struct dd_function_table *driver);

extern void
_mesa_buffer_unmap_all_mappings(struct gl_context *ctx,
                                struct gl_buffer_object *bufObj);

/*
 * API functions
|
@@ -605,14 +605,17 @@ struct dd_function_table {
    */
   void * (*MapBufferRange)( struct gl_context *ctx, GLintptr offset,
                             GLsizeiptr length, GLbitfield access,
                             struct gl_buffer_object *obj);
                             struct gl_buffer_object *obj,
                             gl_map_buffer_index index);

   void (*FlushMappedBufferRange)(struct gl_context *ctx,
                                  GLintptr offset, GLsizeiptr length,
                                  struct gl_buffer_object *obj);
                                  struct gl_buffer_object *obj,
                                  gl_map_buffer_index index);

   GLboolean (*UnmapBuffer)( struct gl_context *ctx,
                             struct gl_buffer_object *obj );
                             struct gl_buffer_object *obj,
                             gl_map_buffer_index index);
   /*@}*/

   /**
|
@@ -914,7 +914,8 @@ unpack_image(struct gl_context *ctx, GLuint dimensions,
|
||||
|
||||
map = (GLubyte *)
|
||||
ctx->Driver.MapBufferRange(ctx, 0, unpack->BufferObj->Size,
|
||||
GL_MAP_READ_BIT, unpack->BufferObj);
|
||||
GL_MAP_READ_BIT, unpack->BufferObj,
|
||||
MAP_INTERNAL);
|
||||
if (!map) {
|
||||
/* unable to map src buffer! */
|
||||
_mesa_error(ctx, GL_INVALID_OPERATION, "unable to map PBO");
|
||||
@@ -928,7 +929,7 @@ unpack_image(struct gl_context *ctx, GLuint dimensions,
|
||||
image = _mesa_unpack_image(dimensions, width, height, depth,
|
||||
format, type, src, unpack);
|
||||
|
||||
ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj);
|
||||
ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj, MAP_INTERNAL);
|
||||
|
||||
if (!image) {
|
||||
_mesa_error(ctx, GL_OUT_OF_MEMORY, "display list construction");
|
||||
|
@@ -1439,6 +1439,25 @@ struct gl_viewport_attrib
};


typedef enum {
   MAP_USER,
   MAP_INTERNAL,

   MAP_COUNT
} gl_map_buffer_index;


/**
 * Fields describing a mapped buffer range.
 */
struct gl_buffer_mapping {
   GLbitfield AccessFlags; /**< Mask of GL_MAP_x_BIT flags */
   GLvoid *Pointer; /**< User-space address of mapping */
   GLintptr Offset; /**< Mapped offset */
   GLsizeiptr Length; /**< Mapped length */
};


/**
 * GL_ARB_vertex/pixel_buffer_object buffer object
 */
@@ -1452,17 +1471,12 @@ struct gl_buffer_object
   GLbitfield StorageFlags; /**< GL_MAP_PERSISTENT_BIT, etc. */
   GLsizeiptrARB Size; /**< Size of buffer storage in bytes */
   GLubyte *Data; /**< Location of storage either in RAM or VRAM. */
   /** Fields describing a mapped buffer */
   /*@{*/
   GLbitfield AccessFlags; /**< Mask of GL_MAP_x_BIT flags */
   GLvoid *Pointer; /**< User-space address of mapping */
   GLintptr Offset; /**< Mapped offset */
   GLsizeiptr Length; /**< Mapped length */
   /*@}*/
   GLboolean DeletePending; /**< true if buffer object is removed from the hash */
   GLboolean Written; /**< Ever written to? (for debugging) */
   GLboolean Purgeable; /**< Is the buffer purgeable under memory pressure? */
   GLboolean Immutable; /**< GL_ARB_buffer_storage */

   struct gl_buffer_mapping Mappings[MAP_COUNT];
};
|
@@ -149,7 +149,8 @@ _mesa_map_pbo_source(struct gl_context *ctx,
|
||||
buf = (GLubyte *) ctx->Driver.MapBufferRange(ctx, 0,
|
||||
unpack->BufferObj->Size,
|
||||
GL_MAP_READ_BIT,
|
||||
unpack->BufferObj);
|
||||
unpack->BufferObj,
|
||||
MAP_INTERNAL);
|
||||
if (!buf)
|
||||
return NULL;
|
||||
|
||||
@@ -221,7 +222,7 @@ _mesa_unmap_pbo_source(struct gl_context *ctx,
|
||||
{
|
||||
ASSERT(unpack != &ctx->Pack); /* catch pack/unpack mismatch */
|
||||
if (_mesa_is_bufferobj(unpack->BufferObj)) {
|
||||
ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj);
|
||||
ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj, MAP_INTERNAL);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -246,7 +247,8 @@ _mesa_map_pbo_dest(struct gl_context *ctx,
|
||||
buf = (GLubyte *) ctx->Driver.MapBufferRange(ctx, 0,
|
||||
pack->BufferObj->Size,
|
||||
GL_MAP_WRITE_BIT,
|
||||
pack->BufferObj);
|
||||
pack->BufferObj,
|
||||
MAP_INTERNAL);
|
||||
if (!buf)
|
||||
return NULL;
|
||||
|
||||
@@ -317,7 +319,7 @@ _mesa_unmap_pbo_dest(struct gl_context *ctx,
|
||||
{
|
||||
ASSERT(pack != &ctx->Unpack); /* catch pack/unpack mismatch */
|
||||
if (_mesa_is_bufferobj(pack->BufferObj)) {
|
||||
ctx->Driver.UnmapBuffer(ctx, pack->BufferObj);
|
||||
ctx->Driver.UnmapBuffer(ctx, pack->BufferObj, MAP_INTERNAL);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -351,7 +353,8 @@ _mesa_validate_pbo_teximage(struct gl_context *ctx, GLuint dimensions,
|
||||
buf = (GLubyte *) ctx->Driver.MapBufferRange(ctx, 0,
|
||||
unpack->BufferObj->Size,
|
||||
GL_MAP_READ_BIT,
|
||||
unpack->BufferObj);
|
||||
unpack->BufferObj,
|
||||
MAP_INTERNAL);
|
||||
if (!buf) {
|
||||
_mesa_error(ctx, GL_INVALID_OPERATION, "%s%uD(PBO is mapped)", funcName,
|
||||
dimensions);
|
||||
@@ -393,7 +396,8 @@ _mesa_validate_pbo_compressed_teximage(struct gl_context *ctx,
|
||||
buf = (GLubyte*) ctx->Driver.MapBufferRange(ctx, 0,
|
||||
packing->BufferObj->Size,
|
||||
GL_MAP_READ_BIT,
|
||||
packing->BufferObj);
|
||||
packing->BufferObj,
|
||||
MAP_INTERNAL);
|
||||
if (!buf) {
|
||||
_mesa_error(ctx, GL_INVALID_OPERATION, "%s%uD(PBO is mapped)", funcName,
|
||||
dimensions);
|
||||
@@ -413,6 +417,6 @@ _mesa_unmap_teximage_pbo(struct gl_context *ctx,
|
||||
const struct gl_pixelstore_attrib *unpack)
|
||||
{
|
||||
if (_mesa_is_bufferobj(unpack->BufferObj)) {
|
||||
ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj);
|
||||
ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj, MAP_INTERNAL);
|
||||
}
|
||||
}
|
||||
|
@@ -186,10 +186,8 @@ delete_bufferobj_cb(GLuint id, void *data, void *userData)
{
struct gl_buffer_object *bufObj = (struct gl_buffer_object *) data;
struct gl_context *ctx = (struct gl_context *) userData;
if (_mesa_bufferobj_mapped(bufObj)) {
ctx->Driver.UnmapBuffer(ctx, bufObj);
bufObj->Pointer = NULL;
}

_mesa_buffer_unmap_all_mappings(ctx, bufObj);
_mesa_reference_buffer_object(ctx, &bufObj, NULL);
}

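_mesa_buffer_unmap_all_mappings() itself is outside this excerpt; with the interface above it can only amount to a loop over both slots, roughly:

/* Sketch of the helper used above; the real version lives in bufferobj.c. */
void
_mesa_buffer_unmap_all_mappings(struct gl_context *ctx,
                                struct gl_buffer_object *bufObj)
{
   int i;

   for (i = 0; i < MAP_COUNT; i++) {
      if (_mesa_bufferobj_mapped(bufObj, i)) {
         ctx->Driver.UnmapBuffer(ctx, bufObj, i);
         bufObj->Mappings[i].AccessFlags = 0;
         bufObj->Mappings[i].Pointer = NULL;
      }
   }
}
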
@@ -635,7 +635,8 @@ _mesa_get_teximage(struct gl_context *ctx,
*/
GLubyte *buf = (GLubyte *)
ctx->Driver.MapBufferRange(ctx, 0, ctx->Pack.BufferObj->Size,
GL_MAP_WRITE_BIT, ctx->Pack.BufferObj);
GL_MAP_WRITE_BIT, ctx->Pack.BufferObj,
MAP_INTERNAL);
if (!buf) {
/* out of memory or other unexpected error */
_mesa_error(ctx, GL_OUT_OF_MEMORY, "glGetTexImage(map PBO failed)");
@@ -664,7 +665,7 @@ _mesa_get_teximage(struct gl_context *ctx,
}

if (_mesa_is_bufferobj(ctx->Pack.BufferObj)) {
ctx->Driver.UnmapBuffer(ctx, ctx->Pack.BufferObj);
ctx->Driver.UnmapBuffer(ctx, ctx->Pack.BufferObj, MAP_INTERNAL);
}
}

@@ -689,7 +690,8 @@ _mesa_get_compressed_teximage(struct gl_context *ctx,
/* pack texture image into a PBO */
GLubyte *buf = (GLubyte *)
ctx->Driver.MapBufferRange(ctx, 0, ctx->Pack.BufferObj->Size,
GL_MAP_WRITE_BIT, ctx->Pack.BufferObj);
GL_MAP_WRITE_BIT, ctx->Pack.BufferObj,
MAP_INTERNAL);
if (!buf) {
/* out of memory or other unexpected error */
_mesa_error(ctx, GL_OUT_OF_MEMORY,
@@ -731,7 +733,7 @@ _mesa_get_compressed_teximage(struct gl_context *ctx,
}

if (_mesa_is_bufferobj(ctx->Pack.BufferObj)) {
ctx->Driver.UnmapBuffer(ctx, ctx->Pack.BufferObj);
ctx->Driver.UnmapBuffer(ctx, ctx->Pack.BufferObj, MAP_INTERNAL);
}
}

@@ -76,7 +76,7 @@ st_bufferobj_free(struct gl_context *ctx, struct gl_buffer_object *obj)
struct st_buffer_object *st_obj = st_buffer_object(obj);

assert(obj->RefCount == 0);
assert(st_obj->transfer == NULL);
_mesa_buffer_unmap_all_mappings(ctx, obj);

if (st_obj->buffer)
pipe_resource_reference(&st_obj->buffer, NULL);
@@ -310,7 +310,8 @@ st_bufferobj_data(struct gl_context *ctx,
static void *
st_bufferobj_map_range(struct gl_context *ctx,
GLintptr offset, GLsizeiptr length, GLbitfield access,
struct gl_buffer_object *obj)
struct gl_buffer_object *obj,
gl_map_buffer_index index)
{
struct pipe_context *pipe = st_context(ctx)->pipe;
struct st_buffer_object *st_obj = st_buffer_object(obj);
@@ -355,28 +356,29 @@ st_bufferobj_map_range(struct gl_context *ctx,
assert(offset < obj->Size);
assert(offset + length <= obj->Size);

obj->Pointer = pipe_buffer_map_range(pipe,
obj->Mappings[index].Pointer = pipe_buffer_map_range(pipe,
st_obj->buffer,
offset, length,
flags,
&st_obj->transfer);
if (obj->Pointer) {
obj->Offset = offset;
obj->Length = length;
obj->AccessFlags = access;
&st_obj->transfer[index]);
if (obj->Mappings[index].Pointer) {
obj->Mappings[index].Offset = offset;
obj->Mappings[index].Length = length;
obj->Mappings[index].AccessFlags = access;
}
else {
st_obj->transfer = NULL;
st_obj->transfer[index] = NULL;
}

return obj->Pointer;
return obj->Mappings[index].Pointer;
}


static void
st_bufferobj_flush_mapped_range(struct gl_context *ctx,
GLintptr offset, GLsizeiptr length,
struct gl_buffer_object *obj)
struct gl_buffer_object *obj,
gl_map_buffer_index index)
{
struct pipe_context *pipe = st_context(ctx)->pipe;
struct st_buffer_object *st_obj = st_buffer_object(obj);
@@ -384,14 +386,15 @@ st_bufferobj_flush_mapped_range(struct gl_context *ctx,
/* Subrange is relative to mapped range */
assert(offset >= 0);
assert(length >= 0);
assert(offset + length <= obj->Length);
assert(obj->Pointer);
assert(offset + length <= obj->Mappings[index].Length);
assert(obj->Mappings[index].Pointer);

if (!length)
return;

pipe_buffer_flush_mapped_range(pipe, st_obj->transfer,
obj->Offset + offset, length);
pipe_buffer_flush_mapped_range(pipe, st_obj->transfer[index],
obj->Mappings[index].Offset + offset,
length);
}


@@ -399,18 +402,19 @@ st_bufferobj_flush_mapped_range(struct gl_context *ctx,
* Called via glUnmapBufferARB().
*/
static GLboolean
st_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj)
st_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj,
gl_map_buffer_index index)
{
struct pipe_context *pipe = st_context(ctx)->pipe;
struct st_buffer_object *st_obj = st_buffer_object(obj);

if (obj->Length)
pipe_buffer_unmap(pipe, st_obj->transfer);
if (obj->Mappings[index].Length)
pipe_buffer_unmap(pipe, st_obj->transfer[index]);

st_obj->transfer = NULL;
obj->Pointer = NULL;
obj->Offset = 0;
obj->Length = 0;
st_obj->transfer[index] = NULL;
obj->Mappings[index].Pointer = NULL;
obj->Mappings[index].Offset = 0;
obj->Mappings[index].Length = 0;
return GL_TRUE;
}

@@ -43,7 +43,7 @@ struct st_buffer_object
{
struct gl_buffer_object Base;
struct pipe_resource *buffer; /* GPU storage */
struct pipe_transfer *transfer; /* In-progress map information */
struct pipe_transfer *transfer[MAP_COUNT];
};

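Keeping one pipe_transfer per slot is what allows a GL mapping and an internal Mesa mapping to be outstanding on the same buffer at the same time, which is the point of the new interface. A hypothetical use of the reworked hooks (ctx, obj and the access flags below are stand-ins, not taken from this diff):

/* Both slots can be live at once; each unmap releases only its own transfer. */
void *user_ptr = ctx->Driver.MapBufferRange(ctx, 0, obj->Size,
                                            GL_MAP_WRITE_BIT, obj, MAP_USER);
void *mesa_ptr = ctx->Driver.MapBufferRange(ctx, 0, obj->Size,
                                            GL_MAP_READ_BIT, obj, MAP_INTERNAL);

/* ... Mesa reads through mesa_ptr, the app keeps writing through user_ptr ... */

ctx->Driver.UnmapBuffer(ctx, obj, MAP_INTERNAL);   /* user_ptr stays valid */
ctx->Driver.UnmapBuffer(ctx, obj, MAP_USER);
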
@@ -279,17 +279,18 @@ static void bind_inputs( struct gl_context *ctx,
const void *ptr;

if (inputs[i]->BufferObj->Name) {
if (!inputs[i]->BufferObj->Pointer) {
if (!inputs[i]->BufferObj->Mappings[MAP_INTERNAL].Pointer) {
bo[*nr_bo] = inputs[i]->BufferObj;
(*nr_bo)++;
ctx->Driver.MapBufferRange(ctx, 0, inputs[i]->BufferObj->Size,
GL_MAP_READ_BIT,
inputs[i]->BufferObj);
inputs[i]->BufferObj,
MAP_INTERNAL);

assert(inputs[i]->BufferObj->Pointer);
assert(inputs[i]->BufferObj->Mappings[MAP_INTERNAL].Pointer);
}

ptr = ADD_POINTERS(inputs[i]->BufferObj->Pointer,
ptr = ADD_POINTERS(inputs[i]->BufferObj->Mappings[MAP_INTERNAL].Pointer,
inputs[i]->Ptr);
}
else
@@ -348,17 +349,19 @@ static void bind_indices( struct gl_context *ctx,
return;
}

if (_mesa_is_bufferobj(ib->obj) && !_mesa_bufferobj_mapped(ib->obj)) {
if (_mesa_is_bufferobj(ib->obj) &&
!_mesa_bufferobj_mapped(ib->obj, MAP_INTERNAL)) {
/* if the buffer object isn't mapped yet, map it now */
bo[*nr_bo] = ib->obj;
(*nr_bo)++;
ptr = ctx->Driver.MapBufferRange(ctx, (GLsizeiptr) ib->ptr,
ib->count * vbo_sizeof_ib_type(ib->type),
GL_MAP_READ_BIT, ib->obj);
assert(ib->obj->Pointer);
GL_MAP_READ_BIT, ib->obj,
MAP_INTERNAL);
assert(ib->obj->Mappings[MAP_INTERNAL].Pointer);
} else {
/* user-space elements, or buffer already mapped */
ptr = ADD_POINTERS(ib->obj->Pointer, ib->ptr);
ptr = ADD_POINTERS(ib->obj->Mappings[MAP_INTERNAL].Pointer, ib->ptr);
}

if (ib->type == GL_UNSIGNED_INT && VB->Primitive[0].basevertex == 0) {
@@ -403,7 +406,7 @@ static void unmap_vbos( struct gl_context *ctx,
{
GLuint i;
for (i = 0; i < nr_bo; i++) {
ctx->Driver.UnmapBuffer(ctx, bo[i]);
ctx->Driver.UnmapBuffer(ctx, bo[i], MAP_INTERNAL);
}
}

@@ -1118,8 +1118,8 @@ void vbo_exec_vtx_destroy( struct vbo_exec_context *exec )

/* Free the vertex buffer. Unmap first if needed.
*/
if (_mesa_bufferobj_mapped(exec->vtx.bufferobj)) {
ctx->Driver.UnmapBuffer(ctx, exec->vtx.bufferobj);
if (_mesa_bufferobj_mapped(exec->vtx.bufferobj, MAP_INTERNAL)) {
ctx->Driver.UnmapBuffer(ctx, exec->vtx.bufferobj, MAP_INTERNAL);
}
_mesa_reference_buffer_object(ctx, &exec->vtx.bufferobj, NULL);
}

@@ -101,7 +101,8 @@ vbo_get_minmax_index(struct gl_context *ctx,
if (_mesa_is_bufferobj(ib->obj)) {
GLsizeiptr size = MIN2(count * index_size, ib->obj->Size);
indices = ctx->Driver.MapBufferRange(ctx, (GLintptr) indices, size,
GL_MAP_READ_BIT, ib->obj);
GL_MAP_READ_BIT, ib->obj,
MAP_INTERNAL);
}

switch (ib->type) {
@@ -177,7 +178,7 @@ vbo_get_minmax_index(struct gl_context *ctx,
}

if (_mesa_is_bufferobj(ib->obj)) {
ctx->Driver.UnmapBuffer(ctx, ib->obj);
ctx->Driver.UnmapBuffer(ctx, ib->obj, MAP_INTERNAL);
}
}

@@ -229,13 +230,15 @@ check_array_data(struct gl_context *ctx, struct gl_client_array *array,
if (array->Enabled) {
const void *data = array->Ptr;
if (_mesa_is_bufferobj(array->BufferObj)) {
if (!array->BufferObj->Pointer) {
if (!array->BufferObj->Mappings[MAP_INTERNAL].Pointer) {
/* need to map now */
array->BufferObj->Pointer =
array->BufferObj->Mappings[MAP_INTERNAL].Pointer =
ctx->Driver.MapBufferRange(ctx, 0, array->BufferObj->Size,
GL_MAP_READ_BIT, array->BufferObj);
GL_MAP_READ_BIT, array->BufferObj,
MAP_INTERNAL);
}
data = ADD_POINTERS(data, array->BufferObj->Pointer);
data = ADD_POINTERS(data,
array->BufferObj->Mappings[MAP_INTERNAL].Pointer);
}
switch (array->Type) {
case GL_FLOAT:
@@ -273,8 +276,8 @@ unmap_array_buffer(struct gl_context *ctx, struct gl_client_array *array)
{
if (array->Enabled &&
_mesa_is_bufferobj(array->BufferObj) &&
_mesa_bufferobj_mapped(array->BufferObj)) {
ctx->Driver.UnmapBuffer(ctx, array->BufferObj);
_mesa_bufferobj_mapped(array->BufferObj, MAP_INTERNAL)) {
ctx->Driver.UnmapBuffer(ctx, array->BufferObj, MAP_INTERNAL);
}
}

@@ -295,7 +298,8 @@ check_draw_elements_data(struct gl_context *ctx, GLsizei count, GLenum elemType,
elemMap = ctx->Driver.MapBufferRange(ctx, 0,
ctx->Array.VAO->IndexBufferObj->Size,
GL_MAP_READ_BIT,
ctx->Array.VAO->IndexBufferObj);
ctx->Array.VAO->IndexBufferObj,
MAP_INTERNAL);
elements = ADD_POINTERS(elements, elemMap);
}

@@ -324,7 +328,8 @@ check_draw_elements_data(struct gl_context *ctx, GLsizei count, GLenum elemType,
}

if (_mesa_is_bufferobj(vao->IndexBufferObj)) {
ctx->Driver.UnmapBuffer(ctx, ctx->Array.VAO->IndexBufferObj);
ctx->Driver.UnmapBuffer(ctx, ctx->Array.VAO->IndexBufferObj,
MAP_INTERNAL);
}

for (k = 0; k < Elements(vao->_VertexAttrib); k++) {
@@ -374,7 +379,8 @@ print_draw_arrays(struct gl_context *ctx,

if (bufName) {
GLubyte *p = ctx->Driver.MapBufferRange(ctx, 0, bufObj->Size,
GL_MAP_READ_BIT, bufObj);
GL_MAP_READ_BIT, bufObj,
MAP_INTERNAL);
int offset = (int) (GLintptr) exec->array.inputs[i]->Ptr;
float *f = (float *) (p + offset);
int *k = (int *) f;
@@ -386,7 +392,7 @@ print_draw_arrays(struct gl_context *ctx,
for (i = 0; i < n; i++) {
printf(" float[%d] = 0x%08x %f\n", i, k[i], f[i]);
}
ctx->Driver.UnmapBuffer(ctx, bufObj);
ctx->Driver.UnmapBuffer(ctx, bufObj, MAP_INTERNAL);
}
}
}
@@ -885,7 +891,8 @@ dump_element_buffer(struct gl_context *ctx, GLenum type)
ctx->Driver.MapBufferRange(ctx, 0,
ctx->Array.VAO->IndexBufferObj->Size,
GL_MAP_READ_BIT,
ctx->Array.VAO->IndexBufferObj);
ctx->Array.VAO->IndexBufferObj,
MAP_INTERNAL);
switch (type) {
case GL_UNSIGNED_BYTE:
{
@@ -927,7 +934,8 @@ dump_element_buffer(struct gl_context *ctx, GLenum type)
;
}

ctx->Driver.UnmapBuffer(ctx, ctx->Array.VAO->IndexBufferObj);
ctx->Driver.UnmapBuffer(ctx, ctx->Array.VAO->IndexBufferObj,
MAP_INTERNAL);
}
#endif


@@ -220,9 +220,10 @@ vbo_exec_bind_arrays( struct gl_context *ctx )

if (_mesa_is_bufferobj(exec->vtx.bufferobj)) {
/* a real buffer obj: Ptr is an offset, not a pointer*/
assert(exec->vtx.bufferobj->Pointer); /* buf should be mapped */
assert(exec->vtx.bufferobj->Mappings[MAP_INTERNAL].Pointer);
assert(offset >= 0);
arrays[attr].Ptr = (GLubyte *)exec->vtx.bufferobj->Offset + offset;
arrays[attr].Ptr = (GLubyte *)
exec->vtx.bufferobj->Mappings[MAP_INTERNAL].Offset + offset;
}
else {
/* Ptr into ordinary app memory */
@@ -261,12 +262,15 @@ vbo_exec_vtx_unmap( struct vbo_exec_context *exec )
struct gl_context *ctx = exec->ctx;

if (ctx->Driver.FlushMappedBufferRange) {
GLintptr offset = exec->vtx.buffer_used - exec->vtx.bufferobj->Offset;
GLsizeiptr length = (exec->vtx.buffer_ptr - exec->vtx.buffer_map) * sizeof(float);
GLintptr offset = exec->vtx.buffer_used -
exec->vtx.bufferobj->Mappings[MAP_INTERNAL].Offset;
GLsizeiptr length = (exec->vtx.buffer_ptr - exec->vtx.buffer_map) *
sizeof(float);

if (length)
ctx->Driver.FlushMappedBufferRange(ctx, offset, length,
exec->vtx.bufferobj);
exec->vtx.bufferobj,
MAP_INTERNAL);
}

exec->vtx.buffer_used += (exec->vtx.buffer_ptr -
@@ -275,7 +279,7 @@ vbo_exec_vtx_unmap( struct vbo_exec_context *exec )
assert(exec->vtx.buffer_used <= VBO_VERT_BUFFER_SIZE);
assert(exec->vtx.buffer_ptr != NULL);

ctx->Driver.UnmapBuffer(ctx, exec->vtx.bufferobj);
ctx->Driver.UnmapBuffer(ctx, exec->vtx.bufferobj, MAP_INTERNAL);
exec->vtx.buffer_map = NULL;
exec->vtx.buffer_ptr = NULL;
exec->vtx.max_vert = 0;
@@ -312,7 +316,8 @@ vbo_exec_vtx_map( struct vbo_exec_context *exec )
(VBO_VERT_BUFFER_SIZE -
exec->vtx.buffer_used),
accessRange,
exec->vtx.bufferobj);
exec->vtx.bufferobj,
MAP_INTERNAL);
exec->vtx.buffer_ptr = exec->vtx.buffer_map;
}
else {
@@ -336,7 +341,8 @@ vbo_exec_vtx_map( struct vbo_exec_context *exec )
(GLfloat *)ctx->Driver.MapBufferRange(ctx,
0, VBO_VERT_BUFFER_SIZE,
accessRange,
exec->vtx.bufferobj);
exec->vtx.bufferobj,
MAP_INTERNAL);
}
else {
_mesa_error(ctx, GL_OUT_OF_MEMORY, "VBO allocation");

@@ -177,7 +177,7 @@ vbo_sw_primitive_restart(struct gl_context *ctx,
struct _mesa_prim temp_prim;
struct vbo_context *vbo = vbo_context(ctx);
vbo_draw_func draw_prims_func = vbo->draw_prims;
GLboolean map_ib = ib->obj->Name && !ib->obj->Pointer;
GLboolean map_ib = ib->obj->Name && !ib->obj->Mappings[MAP_INTERNAL].Pointer;
void *ptr;

/* If there is an indirect buffer, map it and extract the draw params */
@@ -186,7 +186,7 @@ vbo_sw_primitive_restart(struct gl_context *ctx,
struct _mesa_index_buffer new_ib = *ib;
const uint32_t *indirect_params;
if (!ctx->Driver.MapBufferRange(ctx, 0, indirect->Size, GL_MAP_READ_BIT,
indirect)) {
indirect, MAP_INTERNAL)) {

/* something went wrong with mapping, give up */
_mesa_error(ctx, GL_OUT_OF_MEMORY,
@@ -195,7 +195,8 @@ vbo_sw_primitive_restart(struct gl_context *ctx,
}

assert(nr_prims == 1);
indirect_params = (const uint32_t *) ADD_POINTERS(indirect->Pointer,
indirect_params = (const uint32_t *)
ADD_POINTERS(indirect->Mappings[MAP_INTERNAL].Pointer,
new_prim.indirect_offset);

new_prim.is_indirect = 0;
@@ -210,7 +211,7 @@ vbo_sw_primitive_restart(struct gl_context *ctx,
prims = &new_prim;
ib = &new_ib;

ctx->Driver.UnmapBuffer(ctx, indirect);
ctx->Driver.UnmapBuffer(ctx, indirect, MAP_INTERNAL);
}

/* Find the sub-primitives. These are regions in the index buffer which
@@ -218,17 +219,17 @@ vbo_sw_primitive_restart(struct gl_context *ctx,
*/
if (map_ib) {
ctx->Driver.MapBufferRange(ctx, 0, ib->obj->Size, GL_MAP_READ_BIT,
ib->obj);
ib->obj, MAP_INTERNAL);
}

ptr = ADD_POINTERS(ib->obj->Pointer, ib->ptr);
ptr = ADD_POINTERS(ib->obj->Mappings[MAP_INTERNAL].Pointer, ib->ptr);

sub_prims = find_sub_primitives(ptr, vbo_sizeof_ib_type(ib->type),
0, ib->count, restart_index,
&num_sub_prims);

if (map_ib) {
ctx->Driver.UnmapBuffer(ctx, ib->obj);
ctx->Driver.UnmapBuffer(ctx, ib->obj, MAP_INTERNAL);
}

/* Loop over the primitives, and use the located sub-primitives to draw

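The software primitive-restart fallback above and vbo_rebase_prims below share one guard: take the MAP_INTERNAL mapping only if Mesa does not already hold it, and release only what was taken here. Condensed, using the same identifiers as the surrounding code:

/* map_ib is true only when this code created the MAP_INTERNAL mapping. */
GLboolean map_ib = ib->obj->Name &&
                   !ib->obj->Mappings[MAP_INTERNAL].Pointer;

if (map_ib)
   ctx->Driver.MapBufferRange(ctx, 0, ib->obj->Size, GL_MAP_READ_BIT,
                              ib->obj, MAP_INTERNAL);

ptr = ADD_POINTERS(ib->obj->Mappings[MAP_INTERNAL].Pointer, ib->ptr);
/* ... scan the indices through ptr ... */

if (map_ib)
   ctx->Driver.UnmapBuffer(ctx, ib->obj, MAP_INTERNAL);
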
@@ -157,15 +157,16 @@ void vbo_rebase_prims( struct gl_context *ctx,
} else if (ib) {
/* Unfortunately need to adjust each index individually.
*/
GLboolean map_ib = ib->obj->Name && !ib->obj->Pointer;
GLboolean map_ib = ib->obj->Name &&
!ib->obj->Mappings[MAP_INTERNAL].Pointer;
void *ptr;

if (map_ib)
ctx->Driver.MapBufferRange(ctx, 0, ib->obj->Size, GL_MAP_READ_BIT,
ib->obj);
ib->obj, MAP_INTERNAL);


ptr = ADD_POINTERS(ib->obj->Pointer, ib->ptr);
ptr = ADD_POINTERS(ib->obj->Mappings[MAP_INTERNAL].Pointer, ib->ptr);

/* Some users might prefer it if we translated elements to
* GLuints here. Others wouldn't...
@@ -183,7 +184,7 @@ void vbo_rebase_prims( struct gl_context *ctx,
}

if (map_ib)
ctx->Driver.UnmapBuffer(ctx, ib->obj);
ctx->Driver.UnmapBuffer(ctx, ib->obj, MAP_INTERNAL);

tmp_ib.obj = ctx->Shared->NullBufferObj;
tmp_ib.ptr = tmp_indices;

@@ -253,7 +253,8 @@ vbo_save_map_vertex_store(struct gl_context *ctx,
GLsizeiptr size = vertex_store->bufferobj->Size - offset;
GLfloat *range = (GLfloat *)
ctx->Driver.MapBufferRange(ctx, offset, size, access,
vertex_store->bufferobj);
vertex_store->bufferobj,
MAP_INTERNAL);
if (range) {
/* compute address of start of whole buffer (needed elsewhere) */
vertex_store->buffer = range - vertex_store->used;
@@ -279,13 +280,14 @@ vbo_save_unmap_vertex_store(struct gl_context *ctx,
if (vertex_store->bufferobj->Size > 0) {
GLintptr offset = 0;
GLsizeiptr length = vertex_store->used * sizeof(GLfloat)
- vertex_store->bufferobj->Offset;
- vertex_store->bufferobj->Mappings[MAP_INTERNAL].Offset;

/* Explicitly flush the region we wrote to */
ctx->Driver.FlushMappedBufferRange(ctx, offset, length,
vertex_store->bufferobj);
vertex_store->bufferobj,
MAP_INTERNAL);

ctx->Driver.UnmapBuffer(ctx, vertex_store->bufferobj);
ctx->Driver.UnmapBuffer(ctx, vertex_store->bufferobj, MAP_INTERNAL);
}
vertex_store->buffer = NULL;
}
@@ -1118,6 +1120,7 @@ _save_OBE_DrawElements(GLenum mode, GLsizei count, GLenum type,
{
GET_CURRENT_CONTEXT(ctx);
struct vbo_save_context *save = &vbo_context(ctx)->save;
struct gl_buffer_object *indexbuf = ctx->Array.VAO->IndexBufferObj;
GLint i;

if (!_mesa_is_valid_prim_mode(ctx, mode)) {
@@ -1140,9 +1143,9 @@ _save_OBE_DrawElements(GLenum mode, GLsizei count, GLenum type,

_ae_map_vbos(ctx);

if (_mesa_is_bufferobj(ctx->Array.VAO->IndexBufferObj))
if (_mesa_is_bufferobj(indexbuf))
indices =
ADD_POINTERS(ctx->Array.VAO->IndexBufferObj->Pointer, indices);
ADD_POINTERS(indexbuf->Mappings[MAP_INTERNAL].Pointer, indices);

vbo_save_NotifyBegin(ctx, (mode | VBO_SAVE_PRIM_WEAK |
VBO_SAVE_PRIM_NO_CURRENT_UPDATE));

@@ -225,7 +225,8 @@ vbo_save_loopback_vertex_list(struct gl_context *ctx,
ctx->Driver.MapBufferRange(ctx, 0,
list->vertex_store->bufferobj->Size,
GL_MAP_READ_BIT, /* ? */
list->vertex_store->bufferobj);
list->vertex_store->bufferobj,
MAP_INTERNAL);

vbo_loopback_vertex_list(ctx,
(const GLfloat *)(buffer + list->buffer_offset),
@@ -235,7 +236,8 @@ vbo_save_loopback_vertex_list(struct gl_context *ctx,
list->wrap_count,
list->vertex_size);

ctx->Driver.UnmapBuffer(ctx, list->vertex_store->bufferobj);
ctx->Driver.UnmapBuffer(ctx, list->vertex_store->bufferobj,
MAP_INTERNAL);
}


@@ -451,10 +451,13 @@ replay_init( struct copy_context *copy )
copy->varying[j].size = attr_size(copy->array[i]);
copy->vertex_size += attr_size(copy->array[i]);

if (_mesa_is_bufferobj(vbo) && !_mesa_bufferobj_mapped(vbo))
ctx->Driver.MapBufferRange(ctx, 0, vbo->Size, GL_MAP_READ_BIT, vbo);
if (_mesa_is_bufferobj(vbo) &&
!_mesa_bufferobj_mapped(vbo, MAP_INTERNAL))
ctx->Driver.MapBufferRange(ctx, 0, vbo->Size, GL_MAP_READ_BIT, vbo,
MAP_INTERNAL);

copy->varying[j].src_ptr = ADD_POINTERS(vbo->Pointer,
copy->varying[j].src_ptr =
ADD_POINTERS(vbo->Mappings[MAP_INTERNAL].Pointer,
copy->array[i]->Ptr);

copy->dstarray_ptr[i] = &copy->varying[j].dstarray;
@@ -466,11 +469,12 @@ replay_init( struct copy_context *copy )
* do it internally.
*/
if (_mesa_is_bufferobj(copy->ib->obj) &&
!_mesa_bufferobj_mapped(copy->ib->obj))
!_mesa_bufferobj_mapped(copy->ib->obj, MAP_INTERNAL))
ctx->Driver.MapBufferRange(ctx, 0, copy->ib->obj->Size, GL_MAP_READ_BIT,
copy->ib->obj);
copy->ib->obj, MAP_INTERNAL);

srcptr = (const GLubyte *) ADD_POINTERS(copy->ib->obj->Pointer,
srcptr = (const GLubyte *)
ADD_POINTERS(copy->ib->obj->Mappings[MAP_INTERNAL].Pointer,
copy->ib->ptr);

switch (copy->ib->type) {
@@ -572,15 +576,15 @@ replay_finish( struct copy_context *copy )
*/
for (i = 0; i < copy->nr_varying; i++) {
struct gl_buffer_object *vbo = copy->varying[i].array->BufferObj;
if (_mesa_is_bufferobj(vbo) && _mesa_bufferobj_mapped(vbo))
ctx->Driver.UnmapBuffer(ctx, vbo);
if (_mesa_is_bufferobj(vbo) && _mesa_bufferobj_mapped(vbo, MAP_INTERNAL))
ctx->Driver.UnmapBuffer(ctx, vbo, MAP_INTERNAL);
}

/* Unmap index buffer:
*/
if (_mesa_is_bufferobj(copy->ib->obj) &&
_mesa_bufferobj_mapped(copy->ib->obj)) {
ctx->Driver.UnmapBuffer(ctx, copy->ib->obj);
_mesa_bufferobj_mapped(copy->ib->obj, MAP_INTERNAL)) {
ctx->Driver.UnmapBuffer(ctx, copy->ib->obj, MAP_INTERNAL);
}
}