i965: Add support for streaming indirect state rather than caching objects.

Eric Anholt
2010-03-11 14:33:00 -08:00
parent f5bb775fd1
commit 321014156b
5 changed files with 66 additions and 1 deletion

View File

@@ -185,6 +185,11 @@ GLboolean brw_cached_batch_struct( struct brw_context *brw,
                                    GLuint sz );
 void brw_destroy_batch_cache( struct brw_context *brw );
 void brw_clear_batch_cache( struct brw_context *brw );
+void *brw_state_batch(struct brw_context *brw,
+                      int size,
+                      int alignment,
+                      drm_intel_bo **out_bo,
+                      uint32_t *out_offset);
 
 /* brw_wm_surface_state.c */
 drm_intel_bo *
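
The new helper lets state emit code stream objects into the batch instead of
allocating a fresh BO per object. A hypothetical caller might look like the
sketch below (brw_emit_foo_state, struct foo_state, and foo_bo are invented
names for illustration; only brw_state_batch() itself is from this patch, and
in the driver the BO pointer would live in brw's per-unit state rather than a
static local):

    /* Hypothetical caller sketch, not part of this commit. */
    static void brw_emit_foo_state(struct brw_context *brw)
    {
       static drm_intel_bo *foo_bo = NULL; /* ends up referencing batch->buf */
       uint32_t foo_offset;
       struct foo_state *foo;

       foo = brw_state_batch(brw, sizeof(*foo), 32, &foo_bo, &foo_offset);
       memset(foo, 0, sizeof(*foo));
       /* ... fill in *foo, then point the batch at it via foo_offset ... */
    }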

View File

@@ -97,3 +97,52 @@ void brw_destroy_batch_cache( struct brw_context *brw )
 {
    brw_clear_batch_cache(brw);
 }
+
+/**
+ * Allocates a block of space in the batchbuffer for indirect state.
+ *
+ * We don't want to allocate separate BOs for every bit of indirect
+ * state in the driver.  It means overallocating by a significant
+ * margin (4096 bytes, even if the object is just a 20-byte surface
+ * state), and more buffers to walk and count for aperture size checking.
+ *
+ * However, due to the restrictions imposed by the aperture size
+ * checking performance hacks, we can't have the batch point at a
+ * separate indirect state buffer, because once the batch points at
+ * it, no more relocations can be added to it.  So, we sneak these
+ * buffers in at the top of the batchbuffer.
+ */
+void *
+brw_state_batch(struct brw_context *brw,
+                int size,
+                int alignment,
+                drm_intel_bo **out_bo,
+                uint32_t *out_offset)
+{
+   struct intel_batchbuffer *batch = brw->intel.batch;
+   uint32_t offset;
+
+   assert(size < batch->buf->size);
+   offset = ROUND_DOWN_TO(batch->state_batch_offset - size, alignment);
+
+   /* If allocating from the top would wrap below the batchbuffer, or
+    * if the batch's used space (plus the reserved pad) collides with our
+    * space, then flush and try again.
+    */
+   if (batch->state_batch_offset < size ||
+       offset < batch->ptr - batch->map + batch->reserved_space) {
+      intel_batchbuffer_flush(batch);
+      offset = ROUND_DOWN_TO(batch->state_batch_offset - size, alignment);
+   }
+
+   batch->state_batch_offset = offset;
+
+   if (*out_bo != batch->buf) {
+      drm_intel_bo_unreference(*out_bo);
+      drm_intel_bo_reference(batch->buf);
+      *out_bo = batch->buf;
+   }
+
+   *out_offset = offset;
+   return batch->map + offset;
+}

View File

@@ -49,6 +49,7 @@ intel_batchbuffer_reset(struct intel_batchbuffer *batch)
    batch->ptr = batch->map;
    batch->reserved_space = BATCH_RESERVED;
    batch->dirty_state = ~0;
+   batch->state_batch_offset = batch->size;
 }
 
 struct intel_batchbuffer *
@@ -84,6 +85,12 @@ do_flush_locked(struct intel_batchbuffer *batch, GLuint used)
    int x_off = 0, y_off = 0;
 
    drm_intel_bo_subdata(batch->buf, 0, used, batch->buffer);
+   if (batch->state_batch_offset != batch->size) {
+      drm_intel_bo_subdata(batch->buf,
+                           batch->state_batch_offset,
+                           batch->size - batch->state_batch_offset,
+                           batch->buffer + batch->state_batch_offset);
+   }
 
    batch->ptr = NULL;
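
The flush path now uploads the two live ends of the CPU-side buffer and skips
the unused middle. Schematically (an illustration, not part of the patch):

    /*   0            used            state_batch_offset          size
     *   | commands.. | unused gap... | streamed indirect state.. |
     *
     * The first drm_intel_bo_subdata() uploads [0, used); the second,
     * added above, uploads [state_batch_offset, size).
     */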

View File

@@ -23,6 +23,7 @@ struct intel_batchbuffer
    GLubyte *ptr;
 
    GLuint size;
+   uint32_t state_batch_offset;
 
 #ifdef DEBUG
    /** Tracking of BEGIN_BATCH()/OUT_BATCH()/ADVANCE_BATCH() debugging */
@@ -92,7 +93,8 @@ static INLINE uint32_t float_as_int(float f)
 static INLINE GLint
 intel_batchbuffer_space(struct intel_batchbuffer *batch)
 {
-   return (batch->size - batch->reserved_space) - (batch->ptr - batch->map);
+   return (batch->state_batch_offset - batch->reserved_space) -
+      (batch->ptr - batch->map);
 }
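
A worked example with assumed numbers: a 16384-byte batch, BATCH_RESERVED of
16, 2048 bytes of commands emitted, and 1024 bytes of indirect state streamed
into the top.

    /* state_batch_offset = 16384 - 1024 = 15360
     * space = (15360 - 16) - 2048 = 13296 bytes left for commands
     *
     * Before this change the calculation used batch->size, so the same
     * batch would have reported (16384 - 16) - 2048 = 14320 bytes.
     */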

View File

@@ -261,6 +261,8 @@ extern char *__progname;
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
 #define ALIGN(value, alignment) ((value + alignment - 1) & ~(alignment - 1))
+#define ROUND_DOWN_TO(value, alignment) (ALIGN(value - alignment + 1, \
+                                         alignment))
 #define IS_POWER_OF_TWO(val) (((val) & (val - 1)) == 0)
 
 static INLINE uint32_t
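
A quick self-contained check of the two alignment macros; note the
aligned-input case, which the add-one-then-ALIGN formulation handles
correctly. The main() harness is illustration, not part of the patch.

    #include <assert.h>

    #define ALIGN(value, alignment) ((value + alignment - 1) & ~(alignment - 1))
    #define ROUND_DOWN_TO(value, alignment) (ALIGN(value - alignment + 1, \
                                             alignment))

    int main(void)
    {
       assert(ALIGN(20, 8) == 24);          /* rounds up */
       assert(ROUND_DOWN_TO(20, 8) == 16);  /* rounds down */
       assert(ROUND_DOWN_TO(16, 8) == 16);  /* aligned input is unchanged */
       assert(ROUND_DOWN_TO(7, 8) == 0);
       return 0;
    }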