i915/sync: Replace prefix 'intel_sync' -> 'intel_gl_sync'

This is a port of the corresponding i965 patch,
i.e. commit 2516d83 ("i965/sync: Replace prefix 'intel_sync' -> 'intel_gl_sync'").

The only difference compared to the i965 patch is that intel_check_sync() was renamed
to intel_gl_check_sync() here, as that is more appropriate.

The original commit message by Chad Versace follows:

"I'm about to implement DRI2_Fenc in intel_syncobj.c.  To prevent
madness, we need to prefix functions for GL_ARB_sync with 'gl' and
functions for DRI2_Fence with 'dri'. Otherwise, the file will become
a jumble of similiarly named functions.

For example:
    old-name:      intel_client_wait_sync()
    new-name:      intel_gl_client_wait_sync()
    soon-to-come:  intel_dri_client_wait_sync()

I wrote this renaming commit separately from the commit that implements
DRI2_Fence because I wanted the latter diff to be reviewable."

[Emil Velikov: rename the outstanding intel_sync instances]
Signed-off-by: Emil Velikov <emil.velikov@collabora.com>
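
As an aside for readers of this log, here is a minimal, self-contained sketch of the
gl/dri naming split described above. It is not part of this commit: the dri-prefixed
function and the tiny hook table are hypothetical stand-ins, since the real GL hooks
live in Mesa's dd_function_table and the DRI2 ones in the __DRI2fenceExtension.

    #include <stdio.h>

    /* GL_ARB_sync path, as renamed by this patch. */
    static void intel_gl_client_wait_sync(void)  { puts("GL_ARB_sync: client wait"); }

    /* DRI2_Fence path, to be added by the follow-up patch (name illustrative). */
    static void intel_dri_client_wait_sync(void) { puts("DRI2_Fence: client wait"); }

    /* Hypothetical hook table, standing in for dd_function_table / __DRI2fenceExtension. */
    struct sync_hooks {
       void (*client_wait_sync)(void);
    };

    int main(void)
    {
       struct sync_hooks gl_hooks  = { intel_gl_client_wait_sync };
       struct sync_hooks dri_hooks = { intel_dri_client_wait_sync };

       gl_hooks.client_wait_sync();   /* driver hook reached via the GL API */
       dri_hooks.client_wait_sync();  /* driver hook reached via the DRI2 extension */
       return 0;
    }

With distinct prefixes, both families can be registered from the same file without
their names colliding or becoming ambiguous.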
Author:    Mauro Rossi
Date:      2016-07-14 05:33:15 +02:00
Committer: Emil Velikov
Parent:    284795616a
Commit:    19fa29a592

2 changed files with 32 additions and 28 deletions


@@ -104,13 +104,6 @@ extern void intelFallback(struct intel_context *intel, GLbitfield bit,
 #endif
 #endif

-struct intel_sync_object {
-   struct gl_sync_object Base;
-
-   /** Batch associated with this sync object */
-   drm_intel_bo *bo;
-};
-
 struct intel_batchbuffer {
    /** Current batchbuffer being queued up. */
    drm_intel_bo *bo;


@@ -45,12 +45,19 @@
 #include "intel_batchbuffer.h"
 #include "intel_reg.h"

+struct intel_gl_sync_object {
+   struct gl_sync_object Base;
+
+   /** Batch associated with this sync object */
+   drm_intel_bo *bo;
+};
+
 static struct gl_sync_object *
-intel_new_sync_object(struct gl_context *ctx, GLuint id)
+intel_gl_new_sync_object(struct gl_context *ctx, GLuint id)
 {
-   struct intel_sync_object *sync;
+   struct intel_gl_sync_object *sync;

-   sync = calloc(1, sizeof(struct intel_sync_object));
+   sync = calloc(1, sizeof(*sync));
    if (!sync)
       return NULL;

@@ -58,9 +65,9 @@ intel_new_sync_object(struct gl_context *ctx, GLuint id)
 }

 static void
-intel_delete_sync_object(struct gl_context *ctx, struct gl_sync_object *s)
+intel_gl_delete_sync_object(struct gl_context *ctx, struct gl_sync_object *s)
 {
-   struct intel_sync_object *sync = (struct intel_sync_object *)s;
+   struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;

    if (sync->bo)
       drm_intel_bo_unreference(sync->bo);
@@ -69,11 +76,11 @@ intel_delete_sync_object(struct gl_context *ctx, struct gl_sync_object *s)
 }

 static void
-intel_fence_sync(struct gl_context *ctx, struct gl_sync_object *s,
-                 GLenum condition, GLbitfield flags)
+intel_gl_fence_sync(struct gl_context *ctx, struct gl_sync_object *s,
+                    GLenum condition, GLbitfield flags)
 {
    struct intel_context *intel = intel_context(ctx);
-   struct intel_sync_object *sync = (struct intel_sync_object *)s;
+   struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;

    assert(condition == GL_SYNC_GPU_COMMANDS_COMPLETE);
    intel_batchbuffer_emit_mi_flush(intel);
@@ -84,10 +91,11 @@ intel_fence_sync(struct gl_context *ctx, struct gl_sync_object *s,
    intel_flush(ctx);
 }

-static void intel_client_wait_sync(struct gl_context *ctx, struct gl_sync_object *s,
-                                   GLbitfield flags, GLuint64 timeout)
+static void
+intel_gl_client_wait_sync(struct gl_context *ctx, struct gl_sync_object *s,
+                          GLbitfield flags, GLuint64 timeout)
 {
-   struct intel_sync_object *sync = (struct intel_sync_object *)s;
+   struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;

    if (sync->bo && drm_intel_gem_bo_wait(sync->bo, timeout) == 0) {
       s->StatusFlag = 1;
@@ -101,14 +109,16 @@ static void intel_client_wait_sync(struct gl_context *ctx, struct gl_sync_object
  * any batchbuffers coming after this waitsync will naturally not occur until
  * the previous one is done.
  */
-static void intel_server_wait_sync(struct gl_context *ctx, struct gl_sync_object *s,
-                                   GLbitfield flags, GLuint64 timeout)
+static void
+intel_gl_server_wait_sync(struct gl_context *ctx, struct gl_sync_object *s,
+                          GLbitfield flags, GLuint64 timeout)
 {
 }

-static void intel_check_sync(struct gl_context *ctx, struct gl_sync_object *s)
+static void
+intel_gl_check_sync(struct gl_context *ctx, struct gl_sync_object *s)
 {
-   struct intel_sync_object *sync = (struct intel_sync_object *)s;
+   struct intel_gl_sync_object *sync = (struct intel_gl_sync_object *)s;

    if (sync->bo && !drm_intel_bo_busy(sync->bo)) {
       drm_intel_bo_unreference(sync->bo);
@@ -117,12 +127,13 @@ static void intel_check_sync(struct gl_context *ctx, struct gl_sync_object *s)
    }
 }

-void intel_init_syncobj_functions(struct dd_function_table *functions)
+void
+intel_init_syncobj_functions(struct dd_function_table *functions)
 {
-   functions->NewSyncObject = intel_new_sync_object;
-   functions->DeleteSyncObject = intel_delete_sync_object;
-   functions->FenceSync = intel_fence_sync;
-   functions->CheckSync = intel_check_sync;
-   functions->ClientWaitSync = intel_client_wait_sync;
-   functions->ServerWaitSync = intel_server_wait_sync;
+   functions->NewSyncObject = intel_gl_new_sync_object;
+   functions->DeleteSyncObject = intel_gl_delete_sync_object;
+   functions->FenceSync = intel_gl_fence_sync;
+   functions->CheckSync = intel_gl_check_sync;
+   functions->ClientWaitSync = intel_gl_client_wait_sync;
+   functions->ServerWaitSync = intel_gl_server_wait_sync;
 }