radeon/r200: make radeon_context subclass of gl_context
radeon_context now contains a gl_context, rather than a pointer to one. This will allow some minor core Mesa clean-up.
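To make the mechanical substitutions in the diff below easier to follow (every `radeon->glCtx` pointer use becomes `&radeon->glCtx`, and `glCtx->` member access becomes `glCtx.`), here is a minimal sketch of the embedding idiom involved. The struct bodies are trimmed to the one field that matters, and the names radeon_context_before, radeon_to_gl, and gl_to_radeon are illustrative only, not code from this commit.

/* Stand-in for core Mesa's context type; only here so the sketch compiles. */
struct gl_context {
   int placeholder;
};

/* Before: the driver context held a pointer to a separately allocated
 * gl_context, created with _mesa_create_context(). */
struct radeon_context_before {
   struct gl_context *glCtx;
};

/* After: the gl_context is embedded as the first member and initialized in
 * place with _mesa_initialize_context(), so a radeon_context "is a"
 * gl_context. */
struct radeon_context {
   struct gl_context glCtx;   /**< base class, must be first */
};

/* Upcast: take the address of the embedded base. */
static inline struct gl_context *radeon_to_gl(struct radeon_context *radeon)
{
   return &radeon->glCtx;
}

/* Downcast: because glCtx is the first member, the driver context and its
 * embedded gl_context share an address. */
static inline struct radeon_context *gl_to_radeon(struct gl_context *ctx)
{
   return (struct radeon_context *) ctx;
}

Context teardown changes accordingly: the driver now calls _mesa_free_context_data(&radeon->glCtx) in place of _mesa_destroy_context(), as the radeonDestroyContext hunk below shows.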
@@ -525,7 +525,7 @@ unsigned r200_blit(struct gl_context *ctx,
 }
 
 /* Flush is needed to make sure that source buffer has correct data */
-radeonFlush(r200->radeon.glCtx);
+radeonFlush(&r200->radeon.glCtx);
 
 rcommonEnsureCmdBufSpace(&r200->radeon, 102, __FUNCTION__);
 
@@ -56,7 +56,7 @@ void r200SetUpAtomList( r200ContextPtr rmesa )
 {
 int i, mtu;
 
-mtu = rmesa->radeon.glCtx->Const.MaxTextureUnits;
+mtu = rmesa->radeon.glCtx.Const.MaxTextureUnits;
 
 make_empty_list(&rmesa->radeon.hw.atomlist);
 rmesa->radeon.hw.atomlist.name = "atom-list";
@@ -201,7 +201,7 @@ GLushort *r200AllocEltsOpenEnded( r200ContextPtr rmesa,
 retval = rmesa->radeon.tcl.elt_dma_bo->ptr + rmesa->radeon.tcl.elt_dma_offset;
 
 assert(!rmesa->radeon.dma.flush);
-rmesa->radeon.glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
+rmesa->radeon.glCtx.Driver.NeedFlush |= FLUSH_STORED_VERTICES;
 rmesa->radeon.dma.flush = r200FlushElts;
 
 return retval;
@@ -295,7 +295,7 @@ GLboolean r200CreateContext( gl_api api,
 * setting allow larger textures.
 */
 
-ctx = rmesa->radeon.glCtx;
+ctx = &rmesa->radeon.glCtx;
 ctx->Const.MaxTextureUnits = driQueryOptioni (&rmesa->radeon.optionCache,
 "texture_units");
 ctx->Const.MaxTextureImageUnits = ctx->Const.MaxTextureUnits;
@@ -406,7 +406,7 @@ GLboolean r200CreateContext( gl_api api,
 others get the bit ordering right but don't actually do YUV-RGB conversion */
 ctx->Extensions.MESA_ycbcr_texture = true;
 }
-if (rmesa->radeon.glCtx->Mesa_DXTn) {
+if (rmesa->radeon.glCtx.Mesa_DXTn) {
 ctx->Extensions.EXT_texture_compression_s3tc = true;
 ctx->Extensions.S3_s3tc = true;
 }
@@ -458,7 +458,7 @@ GLboolean r200CreateContext( gl_api api,
 rmesa->radeon.radeonScreen->chip_flags &= ~RADEON_CHIPSET_TCL;
 fprintf(stderr, "Disabling HW TCL support\n");
 }
-TCL_FALLBACK(rmesa->radeon.glCtx, R200_TCL_FALLBACK_TCL_DISABLE, 1);
+TCL_FALLBACK(&rmesa->radeon.glCtx, R200_TCL_FALLBACK_TCL_DISABLE, 1);
 }
 
 _mesa_compute_version(ctx);
@@ -76,7 +76,7 @@ void r200SetUpAtomList( r200ContextPtr rmesa );
 #define R200_NEWPRIM( rmesa ) \
 do { \
 if ( rmesa->radeon.dma.flush ) \
-rmesa->radeon.dma.flush( rmesa->radeon.glCtx ); \
+rmesa->radeon.dma.flush( &rmesa->radeon.glCtx ); \
 } while (0)
 
 /* Can accomodate several state changes and primitive changes without
@@ -55,7 +55,7 @@ extern void r200Fallback( struct gl_context *ctx, GLuint bit, GLboolean mode );
 #define FALLBACK( rmesa, bit, mode ) do { \
 if ( 0 ) fprintf( stderr, "FALLBACK in %s: #%d=%d\n", \
 __FUNCTION__, bit, mode ); \
-r200Fallback( rmesa->radeon.glCtx, bit, mode ); \
+r200Fallback( &rmesa->radeon.glCtx, bit, mode ); \
 } while (0)
 
 extern void r200LightingSpaceChange( struct gl_context *ctx );
@@ -617,7 +617,7 @@ static void cube_emit_cs(struct gl_context *ctx, struct radeon_state_atom *atom)
 */
 void r200InitState( r200ContextPtr rmesa )
 {
-struct gl_context *ctx = rmesa->radeon.glCtx;
+struct gl_context *ctx = &rmesa->radeon.glCtx;
 GLuint i;
 
 rmesa->radeon.Fallback = 0;
@@ -153,7 +153,7 @@ static GLushort *r200AllocElts( r200ContextPtr rmesa, GLuint nr )
 }
 else {
 if (rmesa->radeon.dma.flush)
-rmesa->radeon.dma.flush( rmesa->radeon.glCtx );
+rmesa->radeon.dma.flush( &rmesa->radeon.glCtx );
 
 r200EmitAOS( rmesa,
 rmesa->radeon.tcl.aos_count, 0 );
@@ -312,7 +312,7 @@ static GLuint r200EnsureEmitSize( struct gl_context * ctx , GLubyte* vimap_rev )
 state_size = radeonCountStateEmitSize( &rmesa->radeon );
 /* vtx may be changed in r200EmitArrays so account for it if not dirty */
 if (!rmesa->hw.vtx.dirty)
-state_size += rmesa->hw.vtx.check(rmesa->radeon.glCtx, &rmesa->hw.vtx);
+state_size += rmesa->hw.vtx.check(&rmesa->radeon.glCtx, &rmesa->hw.vtx);
 /* predict size for elements */
 for (i = 0; i < VB->PrimitiveCount; ++i)
 {
@@ -546,7 +546,7 @@ static void transition_to_hwtnl( struct gl_context *ctx )
 tnl->Driver.NotifyMaterialChange = r200UpdateMaterial;
 
 if ( rmesa->radeon.dma.flush )
-rmesa->radeon.dma.flush( rmesa->radeon.glCtx );
+rmesa->radeon.dma.flush( &rmesa->radeon.glCtx );
 
 rmesa->radeon.dma.flush = NULL;
 
@@ -613,7 +613,7 @@ void r200TclFallback( struct gl_context *ctx, GLuint bit, GLboolean mode )
 if (oldfallback == 0) {
 /* We have to flush before transition */
 if ( rmesa->radeon.dma.flush )
-rmesa->radeon.dma.flush( rmesa->radeon.glCtx );
+rmesa->radeon.dma.flush( &rmesa->radeon.glCtx );
 
 if (R200_DEBUG & RADEON_FALLBACKS)
 fprintf(stderr, "R200 begin tcl fallback %s\n",
@@ -626,7 +626,7 @@ void r200TclFallback( struct gl_context *ctx, GLuint bit, GLboolean mode )
 if (oldfallback == bit) {
 /* We have to flush before transition */
 if ( rmesa->radeon.dma.flush )
-rmesa->radeon.dma.flush( rmesa->radeon.glCtx );
+rmesa->radeon.dma.flush( &rmesa->radeon.glCtx );
 
 if (R200_DEBUG & RADEON_FALLBACKS)
 fprintf(stderr, "R200 end tcl fallback %s\n",
@@ -422,7 +422,7 @@ static void r200DeleteTexture(struct gl_context * ctx, struct gl_texture_object
 if (rmesa) {
 int i;
 radeon_firevertices(&rmesa->radeon);
-for ( i = 0 ; i < rmesa->radeon.glCtx->Const.MaxTextureUnits ; i++ ) {
+for ( i = 0 ; i < rmesa->radeon.glCtx.Const.MaxTextureUnits ; i++ ) {
 if ( t == rmesa->state.texture.unit[i].texobj ) {
 rmesa->state.texture.unit[i].texobj = NULL;
 rmesa->hw.tex[i].dirty = GL_FALSE;
@@ -742,9 +742,9 @@ void r200SetTexBuffer2(__DRIcontext *pDRICtx, GLint target, GLint texture_format
 radeon = pDRICtx->driverPrivate;
 
 rfb = dPriv->driverPrivate;
-texUnit = &radeon->glCtx->Texture.Unit[radeon->glCtx->Texture.CurrentUnit];
-texObj = _mesa_select_tex_object(radeon->glCtx, texUnit, target);
-texImage = _mesa_get_tex_image(radeon->glCtx, texObj, target, 0);
+texUnit = &radeon->glCtx.Texture.Unit[radeon->glCtx.Texture.CurrentUnit];
+texObj = _mesa_select_tex_object(&radeon->glCtx, texUnit, target);
+texImage = _mesa_get_tex_image(&radeon->glCtx, texObj, target, 0);
 
 rImage = get_radeon_texture_image(texImage);
 t = radeon_tex_obj(texObj);
@@ -759,7 +759,7 @@ void r200SetTexBuffer2(__DRIcontext *pDRICtx, GLint target, GLint texture_format
 return;
 }
 
-_mesa_lock_texture(radeon->glCtx, texObj);
+_mesa_lock_texture(&radeon->glCtx, texObj);
 if (t->bo) {
 radeon_bo_unref(t->bo);
 t->bo = NULL;
@@ -806,7 +806,7 @@ void r200SetTexBuffer2(__DRIcontext *pDRICtx, GLint target, GLint texture_format
 break;
 }
 
-_mesa_init_teximage_fields(radeon->glCtx, texImage,
+_mesa_init_teximage_fields(&radeon->glCtx, texImage,
 rb->base.Base.Width, rb->base.Base.Height,
 1, 0,
 rb->cpp, texFormat);
@@ -831,7 +831,7 @@ void r200SetTexBuffer2(__DRIcontext *pDRICtx, GLint target, GLint texture_format
 }
 
 t->validated = GL_TRUE;
-_mesa_unlock_texture(radeon->glCtx, texObj);
+_mesa_unlock_texture(&radeon->glCtx, texObj);
 return;
 }
 
@@ -1046,7 +1046,7 @@ static void disable_tex_obj_state( r200ContextPtr rmesa,
 R200_STATECHANGE( rmesa, ctx );
 rmesa->hw.ctx.cmd[CTX_PP_CNTL] &= ~(R200_TEX_0_ENABLE << unit);
 if (rmesa->radeon.TclFallback & (R200_TCL_FALLBACK_TEXGEN_0<<unit)) {
-TCL_FALLBACK( rmesa->radeon.glCtx, (R200_TCL_FALLBACK_TEXGEN_0<<unit), GL_FALSE);
+TCL_FALLBACK( &rmesa->radeon.glCtx, (R200_TCL_FALLBACK_TEXGEN_0<<unit), GL_FALSE);
 }
 
 /* Actually want to keep all units less than max active texture
@@ -107,7 +107,7 @@ void rcommonBeginBatch(radeonContextPtr rmesa,
 static INLINE void radeon_firevertices(radeonContextPtr radeon)
 {
 if (radeon->cmdbuf.cs->cdw || radeon->dma.flush )
-radeon->glCtx->Driver.Flush(radeon->glCtx); /* +r6/r7 */
+radeon->glCtx.Driver.Flush(&radeon->glCtx); /* +r6/r7 */
 }
 
 #endif
@@ -83,7 +83,7 @@ void radeonSetCliprects(radeonContextPtr radeon)
 
 if ((draw_rfb->base.Width != drawable->w) ||
 (draw_rfb->base.Height != drawable->h)) {
-_mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
+_mesa_resize_framebuffer(&radeon->glCtx, &draw_rfb->base,
 drawable->w, drawable->h);
 draw_rfb->base.Initialized = GL_TRUE;
 }
@@ -91,14 +91,14 @@ void radeonSetCliprects(radeonContextPtr radeon)
 if (drawable != readable) {
 if ((read_rfb->base.Width != readable->w) ||
 (read_rfb->base.Height != readable->h)) {
-_mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
+_mesa_resize_framebuffer(&radeon->glCtx, &read_rfb->base,
 readable->w, readable->h);
 read_rfb->base.Initialized = GL_TRUE;
 }
 }
 
 if (radeon->state.scissor.enabled)
-radeonUpdateScissor(radeon->glCtx);
+radeonUpdateScissor(&radeon->glCtx);
 
 }
 
@@ -428,7 +428,7 @@ void radeon_viewport(struct gl_context *ctx, GLint x, GLint y, GLsizei width, GL
 old_viewport = ctx->Driver.Viewport;
 ctx->Driver.Viewport = NULL;
 radeon_window_moved(radeon);
-radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
+radeon_draw_buffer(ctx, radeon->glCtx.DrawBuffer);
 ctx->Driver.Viewport = old_viewport;
 }
 
@@ -440,7 +440,7 @@ static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state
 if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE) )
 return;
 
-dwords = (*state->check) (radeon->glCtx, state);
+dwords = (*state->check) (&radeon->glCtx, state);
 
 fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);
 
@@ -478,7 +478,7 @@ GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
 goto out;
 foreach(atom, &radeon->hw.atomlist) {
 if (atom->dirty) {
-const GLuint atom_size = atom->check(radeon->glCtx, atom);
+const GLuint atom_size = atom->check(&radeon->glCtx, atom);
 dwords += atom_size;
 if (RADEON_CMDBUF && atom_size) {
 radeon_print_state_atom(radeon, atom);
@@ -487,7 +487,7 @@ GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
 }
 } else {
 foreach(atom, &radeon->hw.atomlist) {
-const GLuint atom_size = atom->check(radeon->glCtx, atom);
+const GLuint atom_size = atom->check(&radeon->glCtx, atom);
 dwords += atom_size;
 if (RADEON_CMDBUF && atom_size) {
 radeon_print_state_atom(radeon, atom);
@@ -505,13 +505,13 @@ static INLINE void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state
 BATCH_LOCALS(radeon);
 int dwords;
 
-dwords = (*atom->check) (radeon->glCtx, atom);
+dwords = (*atom->check) (&radeon->glCtx, atom);
 if (dwords) {
 
 radeon_print_state_atom(radeon, atom);
 
 if (atom->emit) {
-(*atom->emit)(radeon->glCtx, atom);
+(*atom->emit)(&radeon->glCtx, atom);
 } else {
 BEGIN_BATCH_NO_AUTOSTATE(dwords);
 OUT_BATCH_TABLE(atom->cmd, dwords);
@@ -666,7 +666,7 @@ int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
 fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);
 }
 
-radeonEmitQueryEnd(rmesa->glCtx);
+radeonEmitQueryEnd(&rmesa->glCtx);
 
 if (rmesa->cmdbuf.cs->cdw) {
 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
@@ -675,7 +675,7 @@ int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
 radeon_cs_erase(rmesa->cmdbuf.cs);
 rmesa->cmdbuf.flushing = 0;
 
-if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
+if (radeon_revalidate_bos(&rmesa->glCtx) == GL_FALSE) {
 fprintf(stderr,"failed to revalidate buffers\n");
 }
 
@@ -751,7 +751,7 @@ void rcommonInitCmdBuf(radeonContextPtr rmesa)
 rmesa->cmdbuf.size = size;
 
 radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
-(void (*)(void *))rmesa->glCtx->Driver.Flush, rmesa->glCtx);
+(void (*)(void *))rmesa->glCtx.Driver.Flush, &rmesa->glCtx);
 
 
 if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO,
@@ -143,15 +143,16 @@ GLboolean radeonInitContext(radeonContextPtr radeon,
 radeon->radeonScreen = screen;
 /* Allocate and initialize the Mesa context */
 if (sharedContextPrivate)
-shareCtx = ((radeonContextPtr)sharedContextPrivate)->glCtx;
+shareCtx = &((radeonContextPtr)sharedContextPrivate)->glCtx;
 else
 shareCtx = NULL;
-radeon->glCtx = _mesa_create_context(API_OPENGL, glVisual, shareCtx,
-functions, (void *)radeon);
-if (!radeon->glCtx)
+
+if (!_mesa_initialize_context(&radeon->glCtx, API_OPENGL,
+glVisual, shareCtx,
+functions, (void *)radeon))
 return GL_FALSE;
 
-ctx = radeon->glCtx;
+ctx = &radeon->glCtx;
 driContextPriv->driverPrivate = radeon;
 
 _mesa_meta_init(ctx);
@@ -223,7 +224,7 @@ void radeonDestroyContext(__DRIcontext *driContextPriv )
 
 assert(radeon);
 
-_mesa_meta_free(radeon->glCtx);
+_mesa_meta_free(&radeon->glCtx);
 
 if (radeon == current) {
 _mesa_make_current(NULL, NULL, NULL);
@@ -235,17 +236,17 @@ void radeonDestroyContext(__DRIcontext *driContextPriv )
 }
 
 radeonFreeDmaRegions(radeon);
-radeonReleaseArrays(radeon->glCtx, ~0);
+radeonReleaseArrays(&radeon->glCtx, ~0);
 if (radeon->vtbl.free_context)
-radeon->vtbl.free_context(radeon->glCtx);
-_swsetup_DestroyContext( radeon->glCtx );
-_tnl_DestroyContext( radeon->glCtx );
-_vbo_DestroyContext( radeon->glCtx );
-_swrast_DestroyContext( radeon->glCtx );
+radeon->vtbl.free_context(&radeon->glCtx);
+_swsetup_DestroyContext( &radeon->glCtx );
+_tnl_DestroyContext( &radeon->glCtx );
+_vbo_DestroyContext( &radeon->glCtx );
+_swrast_DestroyContext( &radeon->glCtx );
 
 /* free atom list */
-/* free the Mesa context */
-_mesa_destroy_context(radeon->glCtx);
+/* free the Mesa context data */
+_mesa_free_context_data(&radeon->glCtx);
 
 /* _mesa_destroy_context() might result in calls to functions that
 * depend on the DriverCtx, so don't set it to NULL before.
@@ -277,7 +278,7 @@ GLboolean radeonUnbindContext(__DRIcontext * driContextPriv)
 
 if (RADEON_DEBUG & RADEON_DRI)
 fprintf(stderr, "%s ctx %p\n", __FUNCTION__,
-radeon->glCtx);
+&radeon->glCtx);
 
 /* Unset current context and dispath table */
 _mesa_make_current(NULL, NULL, NULL);
@@ -316,7 +317,7 @@ void radeon_prepare_render(radeonContextPtr radeon)
 radeon_update_renderbuffers(driContext, drawable, GL_FALSE);
 
 /* Intel driver does the equivalent of this, no clue if it is needed:*/
-radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer);
+radeon_draw_buffer(&radeon->glCtx, radeon->glCtx.DrawBuffer);
 
 driContext->dri2.draw_stamp = drawable->dri2.stamp;
 }
@@ -549,7 +550,7 @@ radeon_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable,
 }
 }
 
-driUpdateFramebufferSize(radeon->glCtx, drawable);
+driUpdateFramebufferSize(&radeon->glCtx, drawable);
 }
 
 /* Force the context `c' to be the current context and associate with it
@@ -584,7 +585,7 @@ GLboolean radeonMakeCurrent(__DRIcontext * driContextPriv,
 }
 
 if(driDrawPriv == NULL && driReadPriv == NULL) {
-drfb = _mesa_create_framebuffer(&radeon->glCtx->Visual);
+drfb = _mesa_create_framebuffer(&radeon->glCtx.Visual);
 readfb = drfb;
 }
 else {
@@ -602,25 +603,25 @@ GLboolean radeonMakeCurrent(__DRIcontext * driContextPriv,
 &(radeon_get_renderbuffer(drfb, BUFFER_DEPTH)->base.Base));
 
 if (RADEON_DEBUG & RADEON_DRI)
-fprintf(stderr, "%s ctx %p dfb %p rfb %p\n", __FUNCTION__, radeon->glCtx, drfb, readfb);
+fprintf(stderr, "%s ctx %p dfb %p rfb %p\n", __FUNCTION__, &radeon->glCtx, drfb, readfb);
 
 if(driDrawPriv)
-driUpdateFramebufferSize(radeon->glCtx, driDrawPriv);
+driUpdateFramebufferSize(&radeon->glCtx, driDrawPriv);
 if (driReadPriv != driDrawPriv)
-driUpdateFramebufferSize(radeon->glCtx, driReadPriv);
+driUpdateFramebufferSize(&radeon->glCtx, driReadPriv);
 
-_mesa_make_current(radeon->glCtx, drfb, readfb);
+_mesa_make_current(&radeon->glCtx, drfb, readfb);
 if (driDrawPriv == NULL && driReadPriv == NULL)
 _mesa_reference_framebuffer(&drfb, NULL);
 
-_mesa_update_state(radeon->glCtx);
+_mesa_update_state(&radeon->glCtx);
 
-if (radeon->glCtx->DrawBuffer == drfb) {
+if (radeon->glCtx.DrawBuffer == drfb) {
 if(driDrawPriv != NULL) {
 radeon_window_moved(radeon);
 }
 
-radeon_draw_buffer(radeon->glCtx, drfb);
+radeon_draw_buffer(&radeon->glCtx, drfb);
 }
 
 
@@ -384,7 +384,7 @@ struct radeon_cmdbuf {
 };
 
 struct radeon_context {
-struct gl_context *glCtx;
+struct gl_context glCtx; /**< base class, must be first */
 radeonScreenPtr radeonScreen; /* Screen private DRI data */
 
 /* Texture object bookkeeping
@@ -257,7 +257,7 @@ r100CreateContext( gl_api api,
 * setting allow larger textures.
 */
 
-ctx = rmesa->radeon.glCtx;
+ctx = &rmesa->radeon.glCtx;
 ctx->Const.MaxTextureUnits = driQueryOptioni (&rmesa->radeon.optionCache,
 "texture_units");
 ctx->Const.MaxTextureImageUnits = ctx->Const.MaxTextureUnits;
@@ -357,7 +357,7 @@ r100CreateContext( gl_api api,
 ctx->Extensions.EXT_framebuffer_object = true;
 ctx->Extensions.ARB_texture_cube_map = true;
 
-if (rmesa->radeon.glCtx->Mesa_DXTn) {
+if (rmesa->radeon.glCtx.Mesa_DXTn) {
 ctx->Extensions.EXT_texture_compression_s3tc = true;
 ctx->Extensions.S3_s3tc = true;
 }
@@ -403,7 +403,7 @@ r100CreateContext( gl_api api,
 rmesa->radeon.radeonScreen->chip_flags &= ~RADEON_CHIPSET_TCL;
 fprintf(stderr, "Disabling HW TCL support\n");
 }
-TCL_FALLBACK(rmesa->radeon.glCtx, RADEON_TCL_FALLBACK_TCL_DISABLE, 1);
+TCL_FALLBACK(&rmesa->radeon.glCtx, RADEON_TCL_FALLBACK_TCL_DISABLE, 1);
 }
 
 if (rmesa->radeon.radeonScreen->chip_flags & RADEON_CHIPSET_TCL) {
@@ -270,7 +270,7 @@ void radeonAllocDmaRegion(radeonContextPtr rmesa,
 fprintf(stderr, "%s %d\n", __FUNCTION__, bytes);
 
 if (rmesa->dma.flush)
-rmesa->dma.flush(rmesa->glCtx);
+rmesa->dma.flush(&rmesa->glCtx);
 
 assert(rmesa->dma.current_used == rmesa->dma.current_vertexptr);
 
@@ -459,7 +459,7 @@ rcommonAllocDmaLowVerts( radeonContextPtr rmesa, int nverts, int vsize )
 if(is_empty_list(&rmesa->dma.reserved)
 ||rmesa->dma.current_vertexptr + bytes > first_elem(&rmesa->dma.reserved)->bo->size) {
 if (rmesa->dma.flush) {
-rmesa->dma.flush(rmesa->glCtx);
+rmesa->dma.flush(&rmesa->glCtx);
 }
 
 radeonRefillCurrentDmaRegion(rmesa, bytes);
@@ -469,7 +469,7 @@ rcommonAllocDmaLowVerts( radeonContextPtr rmesa, int nverts, int vsize )
 
 if (!rmesa->dma.flush) {
 /* if cmdbuf flushed DMA restart */
-rmesa->glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
+rmesa->glCtx.Driver.NeedFlush |= FLUSH_STORED_VERTICES;
 rmesa->dma.flush = rcommon_flush_last_swtcl_prim;
 }
 
@@ -499,7 +499,7 @@ void radeonReleaseArrays( struct gl_context *ctx, GLuint newinputs )
 fprintf(stderr, "%s\n", __FUNCTION__);
 
 if (radeon->dma.flush) {
-radeon->dma.flush(radeon->glCtx);
+radeon->dma.flush(&radeon->glCtx);
 }
 for (i = 0; i < radeon->tcl.aos_count; i++) {
 if (radeon->tcl.aos[i].bo) {
@@ -596,7 +596,7 @@ radeon_image_target_renderbuffer_storage(struct gl_context *ctx,
 rb->Width = image->width;
 rb->Height = image->height;
 rb->Format = image->format;
-rb->_BaseFormat = _mesa_base_fbo_format(radeon->glCtx,
+rb->_BaseFormat = _mesa_base_fbo_format(&radeon->glCtx,
 image->internal_format);
 }
 
@@ -944,18 +944,18 @@ radeon_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
 
 void radeon_fbo_init(struct radeon_context *radeon)
 {
-radeon->glCtx->Driver.NewFramebuffer = radeon_new_framebuffer;
-radeon->glCtx->Driver.NewRenderbuffer = radeon_new_renderbuffer;
-radeon->glCtx->Driver.MapRenderbuffer = radeon_map_renderbuffer;
-radeon->glCtx->Driver.UnmapRenderbuffer = radeon_unmap_renderbuffer;
-radeon->glCtx->Driver.BindFramebuffer = radeon_bind_framebuffer;
-radeon->glCtx->Driver.FramebufferRenderbuffer = radeon_framebuffer_renderbuffer;
-radeon->glCtx->Driver.RenderTexture = radeon_render_texture;
-radeon->glCtx->Driver.FinishRenderTexture = radeon_finish_render_texture;
-radeon->glCtx->Driver.ResizeBuffers = radeon_resize_buffers;
-radeon->glCtx->Driver.ValidateFramebuffer = radeon_validate_framebuffer;
-radeon->glCtx->Driver.BlitFramebuffer = _mesa_meta_BlitFramebuffer;
-radeon->glCtx->Driver.EGLImageTargetRenderbufferStorage =
+radeon->glCtx.Driver.NewFramebuffer = radeon_new_framebuffer;
+radeon->glCtx.Driver.NewRenderbuffer = radeon_new_renderbuffer;
+radeon->glCtx.Driver.MapRenderbuffer = radeon_map_renderbuffer;
+radeon->glCtx.Driver.UnmapRenderbuffer = radeon_unmap_renderbuffer;
+radeon->glCtx.Driver.BindFramebuffer = radeon_bind_framebuffer;
+radeon->glCtx.Driver.FramebufferRenderbuffer = radeon_framebuffer_renderbuffer;
+radeon->glCtx.Driver.RenderTexture = radeon_render_texture;
+radeon->glCtx.Driver.FinishRenderTexture = radeon_finish_render_texture;
+radeon->glCtx.Driver.ResizeBuffers = radeon_resize_buffers;
+radeon->glCtx.Driver.ValidateFramebuffer = radeon_validate_framebuffer;
+radeon->glCtx.Driver.BlitFramebuffer = _mesa_meta_BlitFramebuffer;
+radeon->glCtx.Driver.EGLImageTargetRenderbufferStorage =
 radeon_image_target_renderbuffer_storage;
 }
 
@@ -64,7 +64,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
 void radeonSetUpAtomList( r100ContextPtr rmesa )
 {
-int i, mtu = rmesa->radeon.glCtx->Const.MaxTextureUnits;
+int i, mtu = rmesa->radeon.glCtx.Const.MaxTextureUnits;
 
 make_empty_list(&rmesa->radeon.hw.atomlist);
 rmesa->radeon.hw.atomlist.name = "atom-list";
@@ -208,7 +208,7 @@ void radeonFlushElts( struct gl_context *ctx )
 
 if (RADEON_DEBUG & RADEON_SYNC) {
 fprintf(stderr, "%s: Syncing\n", __FUNCTION__);
-radeonFinish( rmesa->radeon.glCtx );
+radeonFinish( &rmesa->radeon.glCtx );
 }
 
 }
@@ -267,7 +267,7 @@ GLushort *radeonAllocEltsOpenEnded( r100ContextPtr rmesa,
 __FUNCTION__, primitive);
 
 assert(!rmesa->radeon.dma.flush);
-rmesa->radeon.glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
+rmesa->radeon.glCtx.Driver.NeedFlush |= FLUSH_STORED_VERTICES;
 rmesa->radeon.dma.flush = radeonFlushElts;
 
 return retval;
@@ -92,7 +92,7 @@ extern void radeonSetUpAtomList( r100ContextPtr rmesa );
 #define RADEON_NEWPRIM( rmesa ) \
 do { \
 if ( rmesa->radeon.dma.flush ) \
-rmesa->radeon.dma.flush( rmesa->radeon.glCtx ); \
+rmesa->radeon.dma.flush( &rmesa->radeon.glCtx ); \
 } while (0)
 
 /* Can accomodate several state changes and primitive changes without
@@ -160,7 +160,7 @@ static void calculate_miptree_layout(radeonContextPtr rmesa, radeon_mipmap_tree
 {
 GLuint curOffset, i, face, level;
 
-assert(mt->numLevels <= rmesa->glCtx->Const.MaxTextureLevels);
+assert(mt->numLevels <= rmesa->glCtx.Const.MaxTextureLevels);
 
 curOffset = 0;
 for(face = 0; face < mt->faces; face++) {
@@ -111,7 +111,7 @@ static void radeonBeginQuery(struct gl_context *ctx, struct gl_query_object *q)
 assert(radeon->query.current == NULL);
 
 if (radeon->dma.flush)
-radeon->dma.flush(radeon->glCtx);
+radeon->dma.flush(&radeon->glCtx);
 
 if (!query->bo) {
 query->bo = radeon_bo_open(radeon->radeonScreen->bom, 0, RADEON_QUERY_PAGE_SIZE, RADEON_QUERY_PAGE_SIZE, RADEON_GEM_DOMAIN_GTT, 0);
@@ -151,7 +151,7 @@ static void radeonEndQuery(struct gl_context *ctx, struct gl_query_object *q)
 radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d\n", __FUNCTION__, q->Id);
 
 if (radeon->dma.flush)
-radeon->dma.flush(radeon->glCtx);
+radeon->dma.flush(&radeon->glCtx);
 radeonEmitQueryEnd(ctx);
 
 radeon->query.current = NULL;
@@ -192,7 +192,7 @@ radeonDRI2Flush(__DRIdrawable *drawable)
 radeonContextPtr rmesa;
 
 rmesa = (radeonContextPtr) drawable->driContextPriv->driverPrivate;
-radeonFlush(rmesa->glCtx);
+radeonFlush(&rmesa->glCtx);
 }
 
 static const struct __DRI2flushExtensionRec radeonFlushExtension = {
@@ -267,9 +267,9 @@ radeon_create_image_from_renderbuffer(__DRIcontext *context,
 struct gl_renderbuffer *rb;
 struct radeon_renderbuffer *rrb;
 
-rb = _mesa_lookup_renderbuffer(radeon->glCtx, renderbuffer);
+rb = _mesa_lookup_renderbuffer(&radeon->glCtx, renderbuffer);
 if (!rb) {
-_mesa_error(radeon->glCtx,
+_mesa_error(&radeon->glCtx,
 GL_INVALID_OPERATION, "glRenderbufferExternalMESA");
 return NULL;
 }
@@ -1850,7 +1850,7 @@ void radeonUploadTexMatrix( r100ContextPtr rmesa,
 int idx = TEXMAT_0 + unit;
 float *dest = ((float *)RADEON_DB_STATE( mat[idx] )) + MAT_ELT_0;
 int i;
-struct gl_texture_unit tUnit = rmesa->radeon.glCtx->Texture.Unit[unit];
+struct gl_texture_unit tUnit = rmesa->radeon.glCtx.Texture.Unit[unit];
 GLfloat *src = rmesa->tmpmat[unit].m;
 
 rmesa->TexMatColSwap &= ~(1 << unit);
@@ -57,7 +57,7 @@ extern void radeonFallback( struct gl_context *ctx, GLuint bit, GLboolean mode )
 #define FALLBACK( rmesa, bit, mode ) do { \
 if ( 0 ) fprintf( stderr, "FALLBACK in %s: #%d=%d\n", \
 __FUNCTION__, bit, mode ); \
-radeonFallback( rmesa->radeon.glCtx, bit, mode ); \
+radeonFallback( &rmesa->radeon.glCtx, bit, mode ); \
 } while (0)
 
 
@@ -503,7 +503,7 @@ static void tex_emit_cs(struct gl_context *ctx, struct radeon_state_atom *atom)
 */
 void radeonInitState( r100ContextPtr rmesa )
 {
-struct gl_context *ctx = rmesa->radeon.glCtx;
+struct gl_context *ctx = &rmesa->radeon.glCtx;
 GLuint i;
 
 rmesa->radeon.Fallback = 0;
@@ -147,7 +147,7 @@ static GLboolean discrete_prim[0x10] = {
 static GLushort *radeonAllocElts( r100ContextPtr rmesa, GLuint nr )
 {
 if (rmesa->radeon.dma.flush)
-rmesa->radeon.dma.flush( rmesa->radeon.glCtx );
+rmesa->radeon.dma.flush( &rmesa->radeon.glCtx );
 
 radeonEmitAOS( rmesa,
 rmesa->radeon.tcl.aos_count, 0 );
@@ -314,7 +314,7 @@ static GLuint radeonEnsureEmitSize( struct gl_context * ctx , GLuint inputs )
 state_size = radeonCountStateEmitSize( &rmesa->radeon );
 /* tcl may be changed in radeonEmitArrays so account for it if not dirty */
 if (!rmesa->hw.tcl.dirty)
-state_size += rmesa->hw.tcl.check( rmesa->radeon.glCtx, &rmesa->hw.tcl );
+state_size += rmesa->hw.tcl.check( &rmesa->radeon.glCtx, &rmesa->hw.tcl );
 /* predict size for elements */
 for (i = 0; i < VB->PrimitiveCount; ++i)
 {
@@ -500,7 +500,7 @@ static void transition_to_hwtnl( struct gl_context *ctx )
 tnl->Driver.NotifyMaterialChange = radeonUpdateMaterial;
 
 if ( rmesa->radeon.dma.flush )
-rmesa->radeon.dma.flush( rmesa->radeon.glCtx );
+rmesa->radeon.dma.flush( &rmesa->radeon.glCtx );
 
 rmesa->radeon.dma.flush = NULL;
 rmesa->swtcl.vertex_format = 0;
@@ -364,7 +364,7 @@ static void radeonDeleteTexture( struct gl_context *ctx,
 
 if ( rmesa ) {
 radeon_firevertices(&rmesa->radeon);
-for ( i = 0 ; i < rmesa->radeon.glCtx->Const.MaxTextureUnits ; i++ ) {
+for ( i = 0 ; i < rmesa->radeon.glCtx.Const.MaxTextureUnits ; i++ ) {
 if ( t == rmesa->state.texture.unit[i].texobj ) {
 rmesa->state.texture.unit[i].texobj = NULL;
 rmesa->hw.tex[i].dirty = GL_FALSE;
@@ -619,9 +619,9 @@ void radeonSetTexBuffer2(__DRIcontext *pDRICtx, GLint target, GLint texture_form
 radeon = pDRICtx->driverPrivate;
 
 rfb = dPriv->driverPrivate;
-texUnit = _mesa_get_current_tex_unit(radeon->glCtx);
-texObj = _mesa_select_tex_object(radeon->glCtx, texUnit, target);
-texImage = _mesa_get_tex_image(radeon->glCtx, texObj, target, 0);
+texUnit = _mesa_get_current_tex_unit(&radeon->glCtx);
+texObj = _mesa_select_tex_object(&radeon->glCtx, texUnit, target);
+texImage = _mesa_get_tex_image(&radeon->glCtx, texObj, target, 0);
 
 rImage = get_radeon_texture_image(texImage);
 t = radeon_tex_obj(texObj);
@@ -636,7 +636,7 @@ void radeonSetTexBuffer2(__DRIcontext *pDRICtx, GLint target, GLint texture_form
 return;
 }
 
-_mesa_lock_texture(radeon->glCtx, texObj);
+_mesa_lock_texture(&radeon->glCtx, texObj);
 if (t->bo) {
 radeon_bo_unref(t->bo);
 t->bo = NULL;
@@ -681,7 +681,7 @@ void radeonSetTexBuffer2(__DRIcontext *pDRICtx, GLint target, GLint texture_form
 break;
 }
 
-_mesa_init_teximage_fields(radeon->glCtx, texImage,
+_mesa_init_teximage_fields(&radeon->glCtx, texImage,
 rb->base.Base.Width, rb->base.Base.Height,
 1, 0,
 rb->cpp, texFormat);
@@ -706,7 +706,7 @@ void radeonSetTexBuffer2(__DRIcontext *pDRICtx, GLint target, GLint texture_form
 (texImage->HeightLog2 << RADEON_TXFORMAT_HEIGHT_SHIFT));
 }
 t->validated = GL_TRUE;
-_mesa_unlock_texture(radeon->glCtx, texObj);
+_mesa_unlock_texture(&radeon->glCtx, texObj);
 return;
 }
 
@@ -747,7 +747,7 @@ static void disable_tex_obj_state( r100ContextPtr rmesa,
 RADEON_Q_BIT(unit));
 
 if (rmesa->radeon.TclFallback & (RADEON_TCL_FALLBACK_TEXGEN_0<<unit)) {
-TCL_FALLBACK( rmesa->radeon.glCtx, (RADEON_TCL_FALLBACK_TEXGEN_0<<unit), GL_FALSE);
+TCL_FALLBACK( &rmesa->radeon.glCtx, (RADEON_TCL_FALLBACK_TEXGEN_0<<unit), GL_FALSE);
 rmesa->recheck_texgen[unit] = GL_TRUE;
 }
 