[intel] Fix build for GEM. TTM is now disabled, and fencing is gone.
Fencing was used in two places: ensuring that we didn't get too many frames ahead of ourselves, and implementing glFinish. glFinish will now be satisfied by waiting on the buffers, just as we would for CPU access to them. Preventing us from getting too far ahead is now the responsibility of the execution manager (the kernel).
This commit is contained in:
@@ -90,27 +90,6 @@ dri_bo_unmap(dri_bo *buf)
|
||||
return buf->bufmgr->bo_unmap(buf);
|
||||
}
|
||||
|
||||
void
|
||||
dri_fence_wait(dri_fence *fence)
|
||||
{
|
||||
fence->bufmgr->fence_wait(fence);
|
||||
}
|
||||
|
||||
void
|
||||
dri_fence_reference(dri_fence *fence)
|
||||
{
|
||||
fence->bufmgr->fence_reference(fence);
|
||||
}
|
||||
|
||||
void
|
||||
dri_fence_unreference(dri_fence *fence)
|
||||
{
|
||||
if (fence == NULL)
|
||||
return;
|
||||
|
||||
fence->bufmgr->fence_unreference(fence);
|
||||
}
|
||||
|
||||
void
|
||||
dri_bo_subdata(dri_bo *bo, unsigned long offset,
|
||||
unsigned long size, const void *data)
|
||||
@@ -153,9 +132,9 @@ void *dri_process_relocs(dri_bo *batch_buf)
|
||||
return batch_buf->bufmgr->process_relocs(batch_buf);
|
||||
}
|
||||
|
||||
void dri_post_submit(dri_bo *batch_buf, dri_fence **last_fence)
|
||||
void dri_post_submit(dri_bo *batch_buf)
|
||||
{
|
||||
batch_buf->bufmgr->post_submit(batch_buf, last_fence);
|
||||
batch_buf->bufmgr->post_submit(batch_buf);
|
||||
}
|
||||
|
||||
void
|
||||
|
@@ -38,7 +38,6 @@
|
||||
|
||||
typedef struct _dri_bufmgr dri_bufmgr;
|
||||
typedef struct _dri_bo dri_bo;
|
||||
typedef struct _dri_fence dri_fence;
|
||||
|
||||
struct _dri_bo {
|
||||
/**
|
||||
@@ -61,18 +60,6 @@ struct _dri_bo {
|
||||
dri_bufmgr *bufmgr;
|
||||
};
|
||||
|
||||
struct _dri_fence {
|
||||
/**
|
||||
* This is an ORed mask of DRM_BO_FLAG_READ, DRM_BO_FLAG_WRITE, and
|
||||
* DRM_FLAG_EXE indicating the operations associated with this fence.
|
||||
*
|
||||
* It is constant for the life of the fence object.
|
||||
*/
|
||||
unsigned int type;
|
||||
/** Buffer manager context associated with this fence */
|
||||
dri_bufmgr *bufmgr;
|
||||
};
|
||||
|
||||
/**
|
||||
* Context for a buffer manager instance.
|
||||
*
|
||||
@@ -113,28 +100,15 @@ struct _dri_bufmgr {
|
||||
/**
|
||||
* Maps the buffer into userspace.
|
||||
*
|
||||
* This function will block waiting for any existing fence on the buffer to
|
||||
* clear, first. The resulting mapping is available at buf->virtual.
|
||||
\ */
|
||||
* This function will block waiting for any existing execution on the
|
||||
* buffer to complete, first. The resulting mapping is available at
|
||||
* buf->virtual.
|
||||
*/
|
||||
int (*bo_map)(dri_bo *buf, GLboolean write_enable);
|
||||
|
||||
/** Reduces the refcount on the userspace mapping of the buffer object. */
|
||||
int (*bo_unmap)(dri_bo *buf);
|
||||
|
||||
/** Takes a reference on a fence object */
|
||||
void (*fence_reference)(dri_fence *fence);
|
||||
|
||||
/**
|
||||
* Releases a reference on a fence object, freeing the data if
|
||||
* rerefences remain.
|
||||
*/
|
||||
void (*fence_unreference)(dri_fence *fence);
|
||||
|
||||
/**
|
||||
* Blocks until the given fence is signaled.
|
||||
*/
|
||||
void (*fence_wait)(dri_fence *fence);
|
||||
|
||||
/**
|
||||
* Tears down the buffer manager instance.
|
||||
*/
|
||||
@@ -179,7 +153,7 @@ struct _dri_bufmgr {
|
||||
*/
|
||||
void *(*process_relocs)(dri_bo *batch_buf);
|
||||
|
||||
void (*post_submit)(dri_bo *batch_buf, dri_fence **fence);
|
||||
void (*post_submit)(dri_bo *batch_buf);
|
||||
|
||||
int (*check_aperture_space)(dri_bo *bo);
|
||||
GLboolean debug; /**< Enables verbose debugging printouts */
|
||||
@@ -194,9 +168,6 @@ void dri_bo_reference(dri_bo *bo);
|
||||
void dri_bo_unreference(dri_bo *bo);
|
||||
int dri_bo_map(dri_bo *buf, GLboolean write_enable);
|
||||
int dri_bo_unmap(dri_bo *buf);
|
||||
void dri_fence_wait(dri_fence *fence);
|
||||
void dri_fence_reference(dri_fence *fence);
|
||||
void dri_fence_unreference(dri_fence *fence);
|
||||
|
||||
void dri_bo_subdata(dri_bo *bo, unsigned long offset,
|
||||
unsigned long size, const void *data);
|
||||
@@ -221,7 +192,7 @@ int dri_emit_reloc(dri_bo *reloc_buf, uint64_t flags, GLuint delta,
|
||||
GLuint offset, dri_bo *target_buf);
|
||||
void *dri_process_relocs(dri_bo *batch_buf);
|
||||
void dri_post_process_relocs(dri_bo *batch_buf);
|
||||
void dri_post_submit(dri_bo *batch_buf, dri_fence **last_fence);
|
||||
void dri_post_submit(dri_bo *batch_buf);
|
||||
int dri_bufmgr_check_aperture_space(dri_bo *bo);
|
||||
|
||||
#endif
|
||||
|
@@ -170,15 +170,6 @@ typedef struct _dri_bo_fake {
|
||||
void *invalidate_ptr;
|
||||
} dri_bo_fake;
|
||||
|
||||
typedef struct _dri_fence_fake {
|
||||
dri_fence fence;
|
||||
|
||||
const char *name;
|
||||
unsigned int refcount;
|
||||
unsigned int fence_cookie;
|
||||
GLboolean flushed;
|
||||
} dri_fence_fake;
|
||||
|
||||
static int clear_fenced(dri_bufmgr_fake *bufmgr_fake,
|
||||
unsigned int fence_cookie);
|
||||
|
||||
@@ -898,63 +889,16 @@ dri_fake_bo_validate(dri_bo *bo, uint64_t flags)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static dri_fence *
|
||||
dri_fake_fence_validated(dri_bufmgr *bufmgr, const char *name,
|
||||
GLboolean flushed)
|
||||
static void
|
||||
dri_fake_fence_validated(dri_bufmgr *bufmgr)
|
||||
{
|
||||
dri_fence_fake *fence_fake;
|
||||
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
|
||||
unsigned int cookie;
|
||||
|
||||
fence_fake = malloc(sizeof(*fence_fake));
|
||||
if (!fence_fake)
|
||||
return NULL;
|
||||
|
||||
fence_fake->refcount = 1;
|
||||
fence_fake->name = name;
|
||||
fence_fake->flushed = flushed;
|
||||
fence_fake->fence.bufmgr = bufmgr;
|
||||
|
||||
cookie = _fence_emit_internal(bufmgr_fake);
|
||||
fence_fake->fence_cookie = cookie;
|
||||
fence_blocks(bufmgr_fake, cookie);
|
||||
|
||||
DBG("drm_fence_validated: 0x%08x cookie\n", fence_fake->fence_cookie);
|
||||
|
||||
return &fence_fake->fence;
|
||||
}
|
||||
|
||||
static void
|
||||
dri_fake_fence_reference(dri_fence *fence)
|
||||
{
|
||||
dri_fence_fake *fence_fake = (dri_fence_fake *)fence;
|
||||
|
||||
++fence_fake->refcount;
|
||||
}
|
||||
|
||||
static void
|
||||
dri_fake_fence_unreference(dri_fence *fence)
|
||||
{
|
||||
dri_fence_fake *fence_fake = (dri_fence_fake *)fence;
|
||||
|
||||
if (!fence)
|
||||
return;
|
||||
|
||||
if (--fence_fake->refcount == 0) {
|
||||
free(fence);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
dri_fake_fence_wait(dri_fence *fence)
|
||||
{
|
||||
dri_fence_fake *fence_fake = (dri_fence_fake *)fence;
|
||||
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)fence->bufmgr;
|
||||
|
||||
DBG("drm_fence_wait: 0x%08x cookie\n", fence_fake->fence_cookie);
|
||||
|
||||
_fence_wait_internal(bufmgr_fake, fence_fake->fence_cookie);
|
||||
DBG("drm_fence_validated: 0x%08x cookie\n", cookie);
|
||||
}
|
||||
|
||||
static void
|
||||
@@ -1156,19 +1100,9 @@ dri_bo_fake_post_submit(dri_bo *bo)
|
||||
|
||||
|
||||
static void
|
||||
dri_fake_post_submit(dri_bo *batch_buf, dri_fence **last_fence)
|
||||
dri_fake_post_submit(dri_bo *batch_buf)
|
||||
{
|
||||
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)batch_buf->bufmgr;
|
||||
dri_fence *fo;
|
||||
|
||||
fo = dri_fake_fence_validated(batch_buf->bufmgr, "Batch fence", GL_TRUE);
|
||||
|
||||
if (bufmgr_fake->performed_rendering) {
|
||||
dri_fence_unreference(*last_fence);
|
||||
*last_fence = fo;
|
||||
} else {
|
||||
dri_fence_unreference(fo);
|
||||
}
|
||||
dri_fake_fence_validated(batch_buf->bufmgr);
|
||||
|
||||
dri_bo_fake_post_submit(batch_buf);
|
||||
}
|
||||
@@ -1224,9 +1158,6 @@ dri_bufmgr_fake_init(unsigned long low_offset, void *low_virtual,
|
||||
bufmgr_fake->bufmgr.bo_unreference = dri_fake_bo_unreference;
|
||||
bufmgr_fake->bufmgr.bo_map = dri_fake_bo_map;
|
||||
bufmgr_fake->bufmgr.bo_unmap = dri_fake_bo_unmap;
|
||||
bufmgr_fake->bufmgr.fence_wait = dri_fake_fence_wait;
|
||||
bufmgr_fake->bufmgr.fence_reference = dri_fake_fence_reference;
|
||||
bufmgr_fake->bufmgr.fence_unreference = dri_fake_fence_unreference;
|
||||
bufmgr_fake->bufmgr.destroy = dri_fake_destroy;
|
||||
bufmgr_fake->bufmgr.emit_reloc = dri_fake_emit_reloc;
|
||||
bufmgr_fake->bufmgr.process_relocs = dri_fake_process_relocs;
|
||||
|
@@ -52,7 +52,7 @@ DRIVER_SOURCES = \
|
||||
intel_tris.c \
|
||||
intel_fbo.c \
|
||||
intel_depthstencil.c \
|
||||
intel_bufmgr_ttm.c
|
||||
intel_bufmgr_gem.c
|
||||
|
||||
C_SOURCES = \
|
||||
$(COMMON_SOURCES) \
|
||||
|
@@ -99,7 +99,6 @@ intel_batchbuffer_alloc(struct intel_context *intel)
|
||||
struct intel_batchbuffer *batch = calloc(sizeof(*batch), 1);
|
||||
|
||||
batch->intel = intel;
|
||||
batch->last_fence = NULL;
|
||||
intel_batchbuffer_reset(batch);
|
||||
|
||||
return batch;
|
||||
@@ -108,11 +107,6 @@ intel_batchbuffer_alloc(struct intel_context *intel)
|
||||
void
|
||||
intel_batchbuffer_free(struct intel_batchbuffer *batch)
|
||||
{
|
||||
if (batch->last_fence) {
|
||||
dri_fence_wait(batch->last_fence);
|
||||
dri_fence_unreference(batch->last_fence);
|
||||
batch->last_fence = NULL;
|
||||
}
|
||||
if (batch->map) {
|
||||
dri_bo_unmap(batch->buf);
|
||||
batch->map = NULL;
|
||||
@@ -152,7 +146,7 @@ do_flush_locked(struct intel_batchbuffer *batch,
|
||||
used,
|
||||
batch->cliprect_mode != LOOP_CLIPRECTS,
|
||||
allow_unlock,
|
||||
execbuf, &batch->last_fence);
|
||||
execbuf);
|
||||
} else {
|
||||
dri_process_relocs(batch->buf);
|
||||
intel_batch_ioctl(batch->intel,
|
||||
@@ -163,7 +157,7 @@ do_flush_locked(struct intel_batchbuffer *batch,
|
||||
}
|
||||
}
|
||||
|
||||
dri_post_submit(batch->buf, &batch->last_fence);
|
||||
dri_post_submit(batch->buf);
|
||||
|
||||
if (intel->numClipRects == 0 &&
|
||||
batch->cliprect_mode == LOOP_CLIPRECTS) {
|
||||
@@ -243,9 +237,13 @@ _intel_batchbuffer_flush(struct intel_batchbuffer *batch, const char *file,
|
||||
UNLOCK_HARDWARE(intel);
|
||||
|
||||
if (INTEL_DEBUG & DEBUG_SYNC) {
|
||||
int irq;
|
||||
|
||||
fprintf(stderr, "waiting for idle\n");
|
||||
if (batch->last_fence != NULL)
|
||||
dri_fence_wait(batch->last_fence);
|
||||
LOCK_HARDWARE(intel);
|
||||
irq = intelEmitIrqLocked(intel);
|
||||
UNLOCK_HARDWARE(intel);
|
||||
intelWaitIrq(intel, irq);
|
||||
}
|
||||
|
||||
/* Reset the buffer:
|
||||
@@ -253,14 +251,6 @@ _intel_batchbuffer_flush(struct intel_batchbuffer *batch, const char *file,
|
||||
intel_batchbuffer_reset(batch);
|
||||
}
|
||||
|
||||
void
|
||||
intel_batchbuffer_finish(struct intel_batchbuffer *batch)
|
||||
{
|
||||
intel_batchbuffer_flush(batch);
|
||||
if (batch->last_fence != NULL)
|
||||
dri_fence_wait(batch->last_fence);
|
||||
}
|
||||
|
||||
|
||||
/* This is the only way buffers get added to the validate list.
|
||||
*/
|
||||
|
@@ -40,7 +40,6 @@ struct intel_batchbuffer
|
||||
struct intel_context *intel;
|
||||
|
||||
dri_bo *buf;
|
||||
dri_fence *last_fence;
|
||||
|
||||
GLubyte *map;
|
||||
GLubyte *ptr;
|
||||
@@ -58,8 +57,6 @@ struct intel_batchbuffer *intel_batchbuffer_alloc(struct intel_context
|
||||
void intel_batchbuffer_free(struct intel_batchbuffer *batch);
|
||||
|
||||
|
||||
void intel_batchbuffer_finish(struct intel_batchbuffer *batch);
|
||||
|
||||
void _intel_batchbuffer_flush(struct intel_batchbuffer *batch,
|
||||
const char *file, int line);
|
||||
|
||||
|
@@ -66,14 +66,6 @@ intelCopyBuffer(const __DRIdrawablePrivate * dPriv,
|
||||
|
||||
intelScreen = intel->intelScreen;
|
||||
|
||||
if (intel->last_swap_fence) {
|
||||
dri_fence_wait(intel->last_swap_fence);
|
||||
dri_fence_unreference(intel->last_swap_fence);
|
||||
intel->last_swap_fence = NULL;
|
||||
}
|
||||
intel->last_swap_fence = intel->first_swap_fence;
|
||||
intel->first_swap_fence = NULL;
|
||||
|
||||
/* The LOCK_HARDWARE is required for the cliprects. Buffer offsets
|
||||
* should work regardless.
|
||||
*/
|
||||
@@ -163,12 +155,7 @@ intelCopyBuffer(const __DRIdrawablePrivate * dPriv,
|
||||
ADVANCE_BATCH();
|
||||
}
|
||||
|
||||
if (intel->first_swap_fence)
|
||||
dri_fence_unreference(intel->first_swap_fence);
|
||||
intel_batchbuffer_flush(intel->batch);
|
||||
intel->first_swap_fence = intel->batch->last_fence;
|
||||
if (intel->first_swap_fence)
|
||||
dri_fence_reference(intel->first_swap_fence);
|
||||
}
|
||||
|
||||
UNLOCK_HARDWARE(intel);
|
||||
|
@@ -127,15 +127,6 @@ typedef struct _dri_bo_gem {
|
||||
void *virtual;
|
||||
} dri_bo_gem;
|
||||
|
||||
typedef struct _dri_fence_gem
|
||||
{
|
||||
dri_fence fence;
|
||||
|
||||
int refcount;
|
||||
const char *name;
|
||||
drmFence drm_fence;
|
||||
} dri_fence_gem;
|
||||
|
||||
static int
|
||||
logbase2(int n)
|
||||
{
|
||||
@@ -526,58 +517,6 @@ dri_gem_bo_unmap(dri_bo *bo)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
dri_gem_fence_reference(dri_fence *fence)
|
||||
{
|
||||
dri_fence_gem *fence_gem = (dri_fence_gem *)fence;
|
||||
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)fence->bufmgr;
|
||||
|
||||
++fence_gem->refcount;
|
||||
DBG("fence_reference: %p (%s)\n", &fence_gem->fence, fence_gem->name);
|
||||
}
|
||||
|
||||
static void
|
||||
dri_gem_fence_unreference(dri_fence *fence)
|
||||
{
|
||||
dri_fence_gem *fence_gem = (dri_fence_gem *)fence;
|
||||
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)fence->bufmgr;
|
||||
|
||||
if (!fence)
|
||||
return;
|
||||
|
||||
DBG("fence_unreference: %p (%s)\n", &fence_gem->fence, fence_gem->name);
|
||||
|
||||
if (--fence_gem->refcount == 0) {
|
||||
int ret;
|
||||
|
||||
ret = drmFenceUnreference(bufmgr_gem->fd, &fence_gem->drm_fence);
|
||||
if (ret != 0) {
|
||||
fprintf(stderr, "drmFenceUnreference failed (%s): %s\n",
|
||||
fence_gem->name, strerror(-ret));
|
||||
}
|
||||
|
||||
free(fence);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
dri_gem_fence_wait(dri_fence *fence)
|
||||
{
|
||||
dri_fence_gem *fence_gem = (dri_fence_gem *)fence;
|
||||
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)fence->bufmgr;
|
||||
int ret;
|
||||
|
||||
ret = drmFenceWait(bufmgr_gem->fd, DRM_FENCE_FLAG_WAIT_LAZY, &fence_gem->drm_fence, 0);
|
||||
if (ret != 0) {
|
||||
fprintf(stderr, "%s:%d: Error waiting for fence %s: %s.\n",
|
||||
__FILE__, __LINE__, fence_gem->name, strerror(-ret));
|
||||
abort();
|
||||
}
|
||||
|
||||
DBG("fence_wait: %p (%s)\n", &fence_gem->fence, fence_gem->name);
|
||||
}
|
||||
|
||||
static void
|
||||
dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr)
|
||||
{
|
||||
@@ -717,7 +656,7 @@ intel_update_buffer_offsets (dri_bufmgr_gem *bufmgr_gem)
|
||||
}
|
||||
|
||||
static void
|
||||
dri_gem_post_submit(dri_bo *batch_buf, dri_fence **last_fence)
|
||||
dri_gem_post_submit(dri_bo *batch_buf)
|
||||
{
|
||||
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)batch_buf->bufmgr;
|
||||
int i;
|
||||
@@ -773,9 +712,6 @@ dri_gem_check_aperture_space(dri_bo *bo)
|
||||
* and manage map buffer objections.
|
||||
*
|
||||
* \param fd File descriptor of the opened DRM device.
|
||||
* \param fence_type Driver-specific fence type used for fences with no flush.
|
||||
* \param fence_type_flush Driver-specific fence type used for fences with a
|
||||
* flush.
|
||||
*/
|
||||
dri_bufmgr *
|
||||
intel_bufmgr_gem_init(int fd, int batch_size)
|
||||
@@ -800,9 +736,6 @@ intel_bufmgr_gem_init(int fd, int batch_size)
|
||||
bufmgr_gem->bufmgr.bo_unreference = dri_gem_bo_unreference;
|
||||
bufmgr_gem->bufmgr.bo_map = dri_gem_bo_map;
|
||||
bufmgr_gem->bufmgr.bo_unmap = dri_gem_bo_unmap;
|
||||
bufmgr_gem->bufmgr.fence_reference = dri_gem_fence_reference;
|
||||
bufmgr_gem->bufmgr.fence_unreference = dri_gem_fence_unreference;
|
||||
bufmgr_gem->bufmgr.fence_wait = dri_gem_fence_wait;
|
||||
bufmgr_gem->bufmgr.destroy = dri_bufmgr_gem_destroy;
|
||||
bufmgr_gem->bufmgr.emit_reloc = dri_gem_emit_reloc;
|
||||
bufmgr_gem->bufmgr.process_relocs = dri_gem_process_reloc;
|
||||
|
@@ -4,13 +4,10 @@
|
||||
|
||||
#include "dri_bufmgr.h"
|
||||
|
||||
extern dri_bo *intel_gem_bo_create_from_handle(dri_bufmgr *bufmgr, const char *name,
|
||||
extern dri_bo *intel_gem_bo_create_from_handle(dri_bufmgr *bufmgr,
|
||||
const char *name,
|
||||
unsigned int handle);
|
||||
|
||||
dri_fence *intel_gem_fence_create_from_arg(dri_bufmgr *bufmgr, const char *name,
|
||||
drm_fence_arg_t *arg);
|
||||
|
||||
|
||||
dri_bufmgr *intel_bufmgr_gem_init(int fd, int batch_size);
|
||||
|
||||
void
|
||||
|
@@ -59,7 +59,7 @@
|
||||
#include "intel_buffer_objects.h"
|
||||
#include "intel_fbo.h"
|
||||
#include "intel_decode.h"
|
||||
#include "intel_bufmgr_ttm.h"
|
||||
#include "intel_bufmgr_gem.h"
|
||||
|
||||
#include "drirenderbuffer.h"
|
||||
#include "vblank.h"
|
||||
@@ -368,12 +368,16 @@ intelFlush(GLcontext * ctx)
|
||||
void
|
||||
intelFinish(GLcontext * ctx)
|
||||
{
|
||||
struct intel_context *intel = intel_context(ctx);
|
||||
struct gl_framebuffer *fb = ctx->DrawBuffer;
|
||||
int i;
|
||||
|
||||
intelFlush(ctx);
|
||||
if (intel->batch->last_fence) {
|
||||
dri_fence_wait(intel->batch->last_fence);
|
||||
dri_fence_unreference(intel->batch->last_fence);
|
||||
intel->batch->last_fence = NULL;
|
||||
|
||||
for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
|
||||
/* XXX: Wait on buffer idle */
|
||||
}
|
||||
if (fb->_DepthBuffer) {
|
||||
/* XXX: Wait on buffer idle */
|
||||
}
|
||||
}
|
||||
|
||||
@@ -439,28 +443,25 @@ static GLboolean
|
||||
intel_init_bufmgr(struct intel_context *intel)
|
||||
{
|
||||
intelScreenPrivate *intelScreen = intel->intelScreen;
|
||||
GLboolean ttm_disable = getenv("INTEL_NO_TTM") != NULL;
|
||||
GLboolean ttm_supported;
|
||||
GLboolean gem_disable = getenv("INTEL_NO_GEM") != NULL;
|
||||
GLboolean gem_supported;
|
||||
|
||||
/* If we've got a new enough DDX that's initializing TTM and giving us
|
||||
/* If we've got a new enough DDX that's initializing GEM and giving us
|
||||
* object handles for the shared buffers, use that.
|
||||
*/
|
||||
intel->ttm = GL_FALSE;
|
||||
if (intel->intelScreen->driScrnPriv->dri2.enabled)
|
||||
ttm_supported = GL_TRUE;
|
||||
gem_supported = GL_TRUE;
|
||||
else if (intel->intelScreen->driScrnPriv->ddx_version.minor >= 9 &&
|
||||
intel->intelScreen->drmMinor >= 11 &&
|
||||
intel->intelScreen->front.bo_handle != -1)
|
||||
ttm_supported = GL_TRUE;
|
||||
gem_supported = GL_TRUE;
|
||||
else
|
||||
ttm_supported = GL_FALSE;
|
||||
gem_supported = GL_FALSE;
|
||||
|
||||
if (!ttm_disable && ttm_supported) {
|
||||
if (!gem_disable && gem_supported) {
|
||||
int bo_reuse_mode;
|
||||
intel->bufmgr = intel_bufmgr_ttm_init(intel->driFd,
|
||||
DRM_FENCE_TYPE_EXE,
|
||||
DRM_FENCE_TYPE_EXE |
|
||||
DRM_I915_FENCE_TYPE_RW,
|
||||
intel->bufmgr = intel_bufmgr_gem_init(intel->driFd,
|
||||
BATCH_SZ);
|
||||
if (intel->bufmgr != NULL)
|
||||
intel->ttm = GL_TRUE;
|
||||
@@ -470,16 +471,16 @@ intel_init_bufmgr(struct intel_context *intel)
|
||||
case DRI_CONF_BO_REUSE_DISABLED:
|
||||
break;
|
||||
case DRI_CONF_BO_REUSE_ALL:
|
||||
intel_ttm_enable_bo_reuse(intel->bufmgr);
|
||||
intel_gem_enable_bo_reuse(intel->bufmgr);
|
||||
break;
|
||||
}
|
||||
}
|
||||
/* Otherwise, use the classic buffer manager. */
|
||||
if (intel->bufmgr == NULL) {
|
||||
if (ttm_disable) {
|
||||
fprintf(stderr, "TTM buffer manager disabled. Using classic.\n");
|
||||
if (gem_disable) {
|
||||
fprintf(stderr, "GEM disabled. Using classic.\n");
|
||||
} else {
|
||||
fprintf(stderr, "Failed to initialize TTM buffer manager. "
|
||||
fprintf(stderr, "Failed to initialize GEM. "
|
||||
"Falling back to classic.\n");
|
||||
}
|
||||
|
||||
@@ -663,8 +664,6 @@ intelInitContext(struct intel_context *intel,
|
||||
intel_recreate_static_regions(intel);
|
||||
|
||||
intel->batch = intel_batchbuffer_alloc(intel);
|
||||
intel->last_swap_fence = NULL;
|
||||
intel->first_swap_fence = NULL;
|
||||
|
||||
intel_bufferobj_init(intel);
|
||||
intel_fbo_init(intel);
|
||||
@@ -718,17 +717,6 @@ intelDestroyContext(__DRIcontextPrivate * driContextPriv)
|
||||
|
||||
intel_batchbuffer_free(intel->batch);
|
||||
|
||||
if (intel->last_swap_fence) {
|
||||
dri_fence_wait(intel->last_swap_fence);
|
||||
dri_fence_unreference(intel->last_swap_fence);
|
||||
intel->last_swap_fence = NULL;
|
||||
}
|
||||
if (intel->first_swap_fence) {
|
||||
dri_fence_wait(intel->first_swap_fence);
|
||||
dri_fence_unreference(intel->first_swap_fence);
|
||||
intel->first_swap_fence = NULL;
|
||||
}
|
||||
|
||||
if (release_texture_heaps) {
|
||||
/* This share group is about to go away, free our private
|
||||
* texture object data.
|
||||
|
@@ -174,9 +174,6 @@ struct intel_context
|
||||
*/
|
||||
GLboolean ttm;
|
||||
|
||||
dri_fence *last_swap_fence;
|
||||
dri_fence *first_swap_fence;
|
||||
|
||||
struct intel_batchbuffer *batch;
|
||||
GLboolean no_batch_wrap;
|
||||
unsigned batch_id;
|
||||
|
@@ -43,7 +43,7 @@
|
||||
#include "drm.h"
|
||||
#include "i915_drm.h"
|
||||
|
||||
#include "intel_bufmgr_ttm.h"
|
||||
#include "intel_bufmgr_gem.h"
|
||||
|
||||
#define FILE_DEBUG_FLAG DEBUG_IOCTL
|
||||
|
||||
@@ -151,10 +151,8 @@ void
|
||||
intel_exec_ioctl(struct intel_context *intel,
|
||||
GLuint used,
|
||||
GLboolean ignore_cliprects, GLboolean allow_unlock,
|
||||
struct drm_i915_gem_execbuffer *execbuf,
|
||||
dri_fence **fence)
|
||||
struct drm_i915_gem_execbuffer *execbuf)
|
||||
{
|
||||
dri_fence *fo;
|
||||
int ret;
|
||||
|
||||
assert(intel->locked);
|
||||
@@ -163,10 +161,6 @@ intel_exec_ioctl(struct intel_context *intel,
|
||||
if (intel->no_hw)
|
||||
return;
|
||||
|
||||
if (*fence) {
|
||||
dri_fence_unreference(*fence);
|
||||
}
|
||||
|
||||
memset(&execbuf, 0, sizeof(execbuf));
|
||||
|
||||
execbuf->batch_start_offset = 0;
|
||||
@@ -187,13 +181,4 @@ intel_exec_ioctl(struct intel_context *intel,
|
||||
UNLOCK_HARDWARE(intel);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
fo = intel_ttm_fence_create_from_arg(intel->bufmgr, "fence buffers",
|
||||
&execbuf.fence_arg);
|
||||
if (!fo) {
|
||||
fprintf(stderr, "failed to fence handle: %08x\n", execbuf.fence_arg.handle);
|
||||
UNLOCK_HARDWARE(intel);
|
||||
exit(1);
|
||||
}
|
||||
*fence = fo;
|
||||
}
|
||||
|
@@ -41,7 +41,6 @@ void intel_batch_ioctl( struct intel_context *intel,
|
||||
void intel_exec_ioctl(struct intel_context *intel,
|
||||
GLuint used,
|
||||
GLboolean ignore_cliprects, GLboolean allow_unlock,
|
||||
struct drm_i915_gem_execbuffer *execbuf,
|
||||
dri_fence **fence);
|
||||
struct drm_i915_gem_execbuffer *execbuf);
|
||||
|
||||
#endif
|
||||
|
@@ -44,7 +44,7 @@
|
||||
#include "intel_blit.h"
|
||||
#include "intel_buffer_objects.h"
|
||||
#include "dri_bufmgr.h"
|
||||
#include "intel_bufmgr_ttm.h"
|
||||
#include "intel_bufmgr_gem.h"
|
||||
#include "intel_batchbuffer.h"
|
||||
|
||||
#define FILE_DEBUG_FLAG DEBUG_REGION
|
||||
@@ -121,7 +121,7 @@ intel_region_alloc_for_handle(struct intel_context *intel,
|
||||
{
|
||||
dri_bo *buffer;
|
||||
|
||||
buffer = intel_ttm_bo_create_from_handle(intel->bufmgr, "region", handle);
|
||||
buffer = intel_gem_bo_create_from_handle(intel->bufmgr, "region", handle);
|
||||
|
||||
return intel_region_alloc_internal(intel,
|
||||
cpp, pitch, height, tiled, buffer);
|
||||
@@ -440,7 +440,7 @@ intel_recreate_static(struct intel_context *intel,
|
||||
|
||||
if (intel->ttm) {
|
||||
assert(region_desc->bo_handle != -1);
|
||||
region->buffer = intel_ttm_bo_create_from_handle(intel->bufmgr,
|
||||
region->buffer = intel_gem_bo_create_from_handle(intel->bufmgr,
|
||||
name,
|
||||
region_desc->bo_handle);
|
||||
} else {
|
||||
|
@@ -49,7 +49,7 @@
|
||||
#include "i830_dri.h"
|
||||
#include "intel_regions.h"
|
||||
#include "intel_batchbuffer.h"
|
||||
#include "intel_bufmgr_ttm.h"
|
||||
#include "intel_bufmgr_gem.h"
|
||||
|
||||
PUBLIC const char __driConfigOptions[] =
|
||||
DRI_CONF_BEGIN
|
||||
|
Reference in New Issue
Block a user