i915: Remove gen6+ batchbuffer support.

While i915 does have hardware contexts in hardware, we don't expect there
to ever be SW support for them (given that support hasn't even made it back
to gen5 or gen4).

Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Author:       Eric Anholt
Date:         2013-06-20 15:18:05 -07:00
Committed by: Kenneth Graunke
Parent:       c25e3c34d6
Commit:       f6426509dc

6 changed files with 25 additions and 91 deletions
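In short, every batchbuffer entry point loses its ring-selection argument, since gen2/gen3 hardware only has the render ring. A rough before/after sketch of the central prototype (distilled from the hunks below, not copied verbatim from any one file):

/* Before: callers had to name the target ring, and gen6+ code flushed
 * whenever a batch switched between render and blit work. */
void intel_batchbuffer_data(struct intel_context *intel,
                            const void *data, GLuint bytes, bool is_blit);

/* After: the is_blit flag, the gen >= 6 flush-on-ring-switch path, the
 * hw_ctx/SOL-reset plumbing, and the BEGIN_BATCH_BLT()/*_TILED() macros
 * are all gone. */
void intel_batchbuffer_data(struct intel_context *intel,
                            const void *data, GLuint bytes);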


@@ -366,7 +366,7 @@ i830_emit_invarient_state(struct intel_context *intel)
 #define emit( intel, state, size )       \
-   intel_batchbuffer_data(intel, state, size, false)
+   intel_batchbuffer_data(intel, state, size)
 
 static GLuint
 get_dirty(struct i830_hw_state *state)
@@ -431,8 +431,8 @@ i830_emit_state(struct intel_context *intel)
    * batchbuffer fills up.
    */
   intel_batchbuffer_require_space(intel,
-                                  get_state_size(state) + INTEL_PRIM_EMIT_SIZE,
-                                  false);
+                                  get_state_size(state) +
+                                  INTEL_PRIM_EMIT_SIZE);
   count = 0;
 again:
   aper_count = 0;


@@ -223,7 +223,7 @@ i915_emit_invarient_state(struct intel_context *intel)
 #define emit(intel, state, size )       \
-   intel_batchbuffer_data(intel, state, size, false)
+   intel_batchbuffer_data(intel, state, size)
 
 static GLuint
 get_dirty(struct i915_hw_state *state)
@@ -306,8 +306,8 @@ i915_emit_state(struct intel_context *intel)
    * batchbuffer fills up.
    */
   intel_batchbuffer_require_space(intel,
-                                  get_state_size(state) + INTEL_PRIM_EMIT_SIZE,
-                                  false);
+                                  get_state_size(state) +
+                                  INTEL_PRIM_EMIT_SIZE);
   count = 0;
 again:
   if (intel->batch.bo == NULL) {


@@ -59,7 +59,6 @@ intel_batchbuffer_reset(struct intel_context *intel)
    intel->batch.reserved_space = BATCH_RESERVED;
    intel->batch.state_batch_offset = intel->batch.bo->size;
    intel->batch.used = 0;
-   intel->batch.needs_sol_reset = false;
 }
 
 void
@@ -127,20 +126,11 @@ do_flush_locked(struct intel_context *intel)
    }
 
    if (!intel->intelScreen->no_hw) {
-      int flags = I915_EXEC_RENDER;
-      if (batch->needs_sol_reset)
-         flags |= I915_EXEC_GEN7_SOL_RESET;
-
       if (ret == 0) {
          if (unlikely(INTEL_DEBUG & DEBUG_AUB) && intel->vtbl.annotate_aub)
            intel->vtbl.annotate_aub(intel);
-         if (intel->hw_ctx == NULL || batch->is_blit) {
-            ret = drm_intel_bo_mrb_exec(batch->bo, 4 * batch->used, NULL, 0, 0,
-                                        flags);
-         } else {
-            ret = drm_intel_gem_bo_context_exec(batch->bo, intel->hw_ctx,
-                                                4 * batch->used, flags);
-         }
+         ret = drm_intel_bo_mrb_exec(batch->bo, 4 * batch->used, NULL, 0, 0,
+                                     I915_EXEC_RENDER);
       }
    }
@@ -259,10 +249,10 @@ intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
 void
 intel_batchbuffer_data(struct intel_context *intel,
-                       const void *data, GLuint bytes, bool is_blit)
+                       const void *data, GLuint bytes)
 {
    assert((bytes & 3) == 0);
-   intel_batchbuffer_require_space(intel, bytes, is_blit);
+   intel_batchbuffer_require_space(intel, bytes);
    __memcpy(intel->batch.map + intel->batch.used, data, bytes);
    intel->batch.used += bytes >> 2;
 }


@@ -40,7 +40,7 @@ int _intel_batchbuffer_flush(struct intel_context *intel,
  * intel_buffer_dword() calls.
  */
 void intel_batchbuffer_data(struct intel_context *intel,
-                            const void *data, GLuint bytes, bool is_blit);
+                            const void *data, GLuint bytes);
 
 bool intel_batchbuffer_emit_reloc(struct intel_context *intel,
                                   drm_intel_bo *buffer,
@@ -95,16 +95,8 @@ intel_batchbuffer_emit_float(struct intel_context *intel, float f)
 static INLINE void
 intel_batchbuffer_require_space(struct intel_context *intel,
-                                GLuint sz, int is_blit)
+                                GLuint sz)
 {
-   if (intel->gen >= 6 &&
-       intel->batch.is_blit != is_blit && intel->batch.used) {
-      intel_batchbuffer_flush(intel);
-   }
-
-   intel->batch.is_blit = is_blit;
-
 #ifdef DEBUG
    assert(sz < intel->maxBatchSize - BATCH_RESERVED);
 #endif
@@ -113,9 +105,9 @@ intel_batchbuffer_require_space(struct intel_context *intel,
 }
 
 static INLINE void
-intel_batchbuffer_begin(struct intel_context *intel, int n, bool is_blit)
+intel_batchbuffer_begin(struct intel_context *intel, int n)
 {
-   intel_batchbuffer_require_space(intel, n * 4, is_blit);
+   intel_batchbuffer_require_space(intel, n * 4);
 
    intel->batch.emit = intel->batch.used;
 #ifdef DEBUG
@@ -143,8 +135,7 @@ intel_batchbuffer_advance(struct intel_context *intel)
  */
 #define BATCH_LOCALS
 
-#define BEGIN_BATCH(n) intel_batchbuffer_begin(intel, n, false)
-#define BEGIN_BATCH_BLT(n) intel_batchbuffer_begin(intel, n, true)
+#define BEGIN_BATCH(n) intel_batchbuffer_begin(intel, n)
 #define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel, d)
 #define OUT_BATCH_F(f) intel_batchbuffer_emit_float(intel,f)
 #define OUT_RELOC(buf, read_domains, write_domain, delta) do { \
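With only the render ring left, blitter packets go through the same BEGIN_BATCH()/OUT_BATCH()/ADVANCE_BATCH() sequence as everything else. A minimal usage sketch (variable names are placeholders; the packet layout mirrors the intelClearWithBlit() hunk further down):

   /* Emit a 6-dword blit packet with the simplified macros. */
   BEGIN_BATCH(6);
   OUT_BATCH(CMD | (6 - 2));      /* blit command header               */
   OUT_BATCH(BR13);               /* pitch / bpp / raster-op word      */
   OUT_BATCH((y1 << 16) | x1);    /* destination top-left              */
   OUT_BATCH((y2 << 16) | x2);    /* destination bottom-right          */
   OUT_RELOC(dst_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
   OUT_BATCH(clear_value);        /* value written to the cleared area */
   ADVANCE_BATCH();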


@@ -90,47 +90,6 @@ br13_for_cpp(int cpp)
    }
 }
 
-/**
- * Emits the packet for switching the blitter from X to Y tiled or back.
- *
- * This has to be called in a single BEGIN_BATCH_BLT_TILED() /
- * ADVANCE_BATCH_TILED().  This is because BCS_SWCTRL is saved and restored as
- * part of the power context, not a render context, and if the batchbuffer was
- * to get flushed between setting and blitting, or blitting and restoring, our
- * tiling state would leak into other unsuspecting applications (like the X
- * server).
- */
-static void
-set_blitter_tiling(struct intel_context *intel,
-                   bool dst_y_tiled, bool src_y_tiled)
-{
-   assert(intel->gen >= 6);
-
-   /* Idle the blitter before we update how tiling is interpreted. */
-   OUT_BATCH(MI_FLUSH_DW);
-   OUT_BATCH(0);
-   OUT_BATCH(0);
-   OUT_BATCH(0);
-   OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
-   OUT_BATCH(BCS_SWCTRL);
-   OUT_BATCH((BCS_SWCTRL_DST_Y | BCS_SWCTRL_SRC_Y) << 16 |
-             (dst_y_tiled ? BCS_SWCTRL_DST_Y : 0) |
-             (src_y_tiled ? BCS_SWCTRL_SRC_Y : 0));
-}
-
-#define BEGIN_BATCH_BLT_TILED(n, dst_y_tiled, src_y_tiled) do {        \
-      BEGIN_BATCH_BLT(n + ((dst_y_tiled || src_y_tiled) ? 14 : 0));    \
-      if (dst_y_tiled || src_y_tiled)                                  \
-         set_blitter_tiling(intel, dst_y_tiled, src_y_tiled);          \
-   } while (0)
-
-#define ADVANCE_BATCH_TILED(dst_y_tiled, src_y_tiled) do {             \
-      if (dst_y_tiled || src_y_tiled)                                  \
-         set_blitter_tiling(intel, false, false);                      \
-      ADVANCE_BATCH();                                                 \
-   } while (0)
-
 /**
  * Implements a rectangular block transfer (blit) of pixels between two
  * miptrees.
@@ -286,7 +245,7 @@ intelEmitCopyBlit(struct intel_context *intel,
       if (src_offset & 4095)
          return false;
    }
-   if ((dst_y_tiled || src_y_tiled) && intel->gen < 6)
+   if (dst_y_tiled || src_y_tiled)
       return false;
 
    /* do space check before going any further */
@@ -305,7 +264,7 @@ intelEmitCopyBlit(struct intel_context *intel,
    if (pass >= 2)
      return false;
 
-   intel_batchbuffer_require_space(intel, 8 * 4, true);
+   intel_batchbuffer_require_space(intel, 8 * 4);
   DBG("%s src:buf(%p)/%d+%d %d,%d dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
       __FUNCTION__,
       src_buffer, src_pitch, src_offset, src_x, src_y,
@@ -356,7 +315,7 @@ intelEmitCopyBlit(struct intel_context *intel,
    assert(dst_x < dst_x2);
    assert(dst_y < dst_y2);
 
-   BEGIN_BATCH_BLT_TILED(8, dst_y_tiled, src_y_tiled);
+   BEGIN_BATCH(8);
   OUT_BATCH(CMD | (8 - 2));
   OUT_BATCH(BR13 | (uint16_t)dst_pitch);
@@ -371,7 +330,7 @@ intelEmitCopyBlit(struct intel_context *intel,
             I915_GEM_DOMAIN_RENDER, 0,
             src_offset);
 
-   ADVANCE_BATCH_TILED(dst_y_tiled, src_y_tiled);
+   ADVANCE_BATCH();
 
    intel_batchbuffer_emit_mi_flush(intel);
@@ -531,7 +490,7 @@ intelClearWithBlit(struct gl_context *ctx, GLbitfield mask)
             intel_batchbuffer_flush(intel);
          }
 
-         BEGIN_BATCH_BLT(6);
+         BEGIN_BATCH(6);
          OUT_BATCH(CMD | (6 - 2));
          OUT_BATCH(BR13);
          OUT_BATCH((y1 << 16) | x1);
@@ -589,7 +548,7 @@ intelEmitImmediateColorExpandBlit(struct intel_context *intel,
    intel_batchbuffer_require_space(intel,
                                    (8 * 4) +
                                    (3 * 4) +
-                                   dwords * 4, true);
+                                   dwords * 4);
 
    opcode = XY_SETUP_BLT_CMD;
    if (cpp == 4)
@@ -602,7 +561,7 @@ intelEmitImmediateColorExpandBlit(struct intel_context *intel,
    if (dst_tiling != I915_TILING_NONE)
       blit_cmd |= XY_DST_TILED;
 
-   BEGIN_BATCH_BLT(8 + 3);
+   BEGIN_BATCH(8 + 3);
    OUT_BATCH(opcode | (8 - 2));
    OUT_BATCH(br13);
    OUT_BATCH((0 << 16) | 0); /* clip x1, y1 */
@@ -619,7 +578,7 @@ intelEmitImmediateColorExpandBlit(struct intel_context *intel,
    OUT_BATCH(((y + h) << 16) | (x + w));
    ADVANCE_BATCH();
 
-   intel_batchbuffer_data(intel, src_bits, dwords * 4, true);
+   intel_batchbuffer_data(intel, src_bits, dwords * 4);
 
    intel_batchbuffer_emit_mi_flush(intel);
@@ -716,9 +675,7 @@ intel_miptree_set_alpha_to_one(struct intel_context *intel,
       intel_batchbuffer_flush(intel);
    }
 
-   bool dst_y_tiled = region->tiling == I915_TILING_Y;
-
-   BEGIN_BATCH_BLT_TILED(6, dst_y_tiled, false);
+   BEGIN_BATCH(6);
    OUT_BATCH(CMD | (6 - 2));
    OUT_BATCH(BR13);
    OUT_BATCH((y << 16) | x);
@@ -727,7 +684,7 @@ intel_miptree_set_alpha_to_one(struct intel_context *intel,
             I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
             0);
    OUT_BATCH(0xffffffff); /* white, but only alpha gets written */
-   ADVANCE_BATCH_TILED(dst_y_tiled, false);
+   ADVANCE_BATCH();
 
    intel_batchbuffer_emit_mi_flush(intel);
 }


@@ -129,8 +129,6 @@ struct intel_batchbuffer {
 #define BATCH_SZ (8192*sizeof(uint32_t))
 
    uint32_t state_batch_offset;
-   bool is_blit;
-   bool needs_sol_reset;
 };
 
 /**
@@ -186,8 +184,6 @@ struct intel_context
    bool is_945;
    bool has_swizzling;
 
-   drm_intel_context *hw_ctx;
-
    struct intel_batchbuffer batch;
 
    drm_intel_bo *first_post_swapbuffers_batch;