i915: Drop all has_llc code.

i915 never has llc.

Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Author: Eric Anholt
Date: 2013-06-20 15:03:19 -07:00
Committed-by: Kenneth Graunke
Parent: be63c1c993
Commit: d71b7301ec

8 changed files with 10 additions and 226 deletions
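
For background: the hw_has_llc flag removed below was filled in at screen-creation time by asking the kernel for I915_PARAM_HAS_LLC (see the intelInitScreen2 hunk). A minimal stand-alone sketch of that query, assuming a raw DRM fd instead of the driver's intel_get_param() helper, looks like this; on the Gen2/Gen3 hardware the i915 driver serves, the kernel always reports 0, which is why every path keyed on the flag is dead code:

/* Hypothetical helper, not the driver's own code. */
#include <stdbool.h>
#include <xf86drm.h>
#include <i915_drm.h>

static bool
query_has_llc(int drm_fd)
{
   int value = 0;
   struct drm_i915_getparam gp = {
      .param = I915_PARAM_HAS_LLC,
      .value = &value,
   };

   /* The ioctl fails on kernels that predate the parameter. */
   if (drmIoctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
      return false;

   return value != 0;   /* never true on i915-class (Gen2/Gen3) GPUs */
}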


@@ -40,10 +40,8 @@ intel_batchbuffer_init(struct intel_context *intel)
 {
    intel_batchbuffer_reset(intel);
 
-   if (!intel->has_llc) {
-      intel->batch.cpu_map = malloc(intel->maxBatchSize);
-      intel->batch.map = intel->batch.cpu_map;
-   }
+   intel->batch.cpu_map = malloc(intel->maxBatchSize);
+   intel->batch.map = intel->batch.cpu_map;
 }
 
 static void
@@ -57,10 +55,6 @@ intel_batchbuffer_reset(struct intel_context *intel)
 
    intel->batch.bo = drm_intel_bo_alloc(intel->bufmgr, "batchbuffer",
                                         intel->maxBatchSize, 4096);
-   if (intel->has_llc) {
-      drm_intel_bo_map(intel->batch.bo, true);
-      intel->batch.map = intel->batch.bo->virtual;
-   }
 
    intel->batch.reserved_space = BATCH_RESERVED;
    intel->batch.state_batch_offset = intel->batch.bo->size;
@@ -124,16 +118,12 @@ do_flush_locked(struct intel_context *intel)
    struct intel_batchbuffer *batch = &intel->batch;
    int ret = 0;
 
-   if (intel->has_llc) {
-      drm_intel_bo_unmap(batch->bo);
-   } else {
-      ret = drm_intel_bo_subdata(batch->bo, 0, 4*batch->used, batch->map);
-      if (ret == 0 && batch->state_batch_offset != batch->bo->size) {
-         ret = drm_intel_bo_subdata(batch->bo,
-                                    batch->state_batch_offset,
-                                    batch->bo->size - batch->state_batch_offset,
-                                    (char *)batch->map + batch->state_batch_offset);
-      }
+   ret = drm_intel_bo_subdata(batch->bo, 0, 4*batch->used, batch->map);
+   if (ret == 0 && batch->state_batch_offset != batch->bo->size) {
+      ret = drm_intel_bo_subdata(batch->bo,
+                                 batch->state_batch_offset,
+                                 batch->bo->size - batch->state_batch_offset,
+                                 (char *)batch->map + batch->state_batch_offset);
    }
 
    if (!intel->intelScreen->no_hw) {
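
Taken together, the three hunks above leave the batchbuffer with a single CPU path: commands accumulate in a malloc'ed shadow buffer and are copied into the batch BO with drm_intel_bo_subdata() at flush time, instead of being written through a persistent LLC-coherent mapping. A condensed, hypothetical sketch of the remaining flow (invented names, no state batch, no error handling):

#include <stdint.h>
#include <intel_bufmgr.h>

struct batch {
   drm_intel_bo *bo;    /* GPU buffer object, written only at flush time */
   uint32_t *cpu_map;   /* malloc'ed shadow buffer commands are appended to */
   int used;            /* dwords emitted so far */
};

static void
batch_flush(struct batch *b)
{
   /* Upload the whole shadow buffer in one subdata call, then execute it. */
   drm_intel_bo_subdata(b->bo, 0, 4 * b->used, b->cpu_map);
   drm_intel_bo_exec(b->bo, 4 * b->used, NULL, 0, 0);
   b->used = 0;
}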


@@ -493,7 +493,6 @@ intelInitContext(struct intel_context *intel,
       intel->is_945 = true;
    }
 
-   intel->has_llc = intel->intelScreen->hw_has_llc;
    intel->has_swizzling = intel->intelScreen->hw_has_swizzling;
 
    memset(&ctx->TextureFormatSupported,


@@ -882,14 +882,8 @@ intel_miptree_map(struct intel_context *intel,
    }
 
    /* See intel_miptree_blit() for details on the 32k pitch limit. */
-   if (intel->has_llc &&
-       !(mode & GL_MAP_WRITE_BIT) &&
-       !mt->compressed &&
-       mt->region->tiling == I915_TILING_X &&
-       mt->region->pitch < 32768) {
-      intel_miptree_map_blit(intel, mt, map, level, slice);
-   } else if (mt->region->tiling != I915_TILING_NONE &&
-              mt->region->bo->size >= intel->max_gtt_map_object_size) {
+   if (mt->region->tiling != I915_TILING_NONE &&
+       mt->region->bo->size >= intel->max_gtt_map_object_size) {
       assert(mt->region->pitch < 32768);
       intel_miptree_map_blit(intel, mt, map, level, slice);
    } else {


@@ -1171,12 +1171,6 @@ __DRIconfig **intelInitScreen2(__DRIscreen *psp)
       intelScreen->gen = 2;
    }
 
-   int has_llc = 0;
-   bool success = intel_get_param(intelScreen->driScrnPriv, I915_PARAM_HAS_LLC,
-                                  &has_llc);
-   if (success && has_llc)
-      intelScreen->hw_has_llc = true;
-
    intelScreen->hw_has_swizzling = intel_detect_swizzling(intelScreen);
 
    set_max_gl_versions(intelScreen);


@@ -49,7 +49,6 @@ struct intel_screen
 
    bool no_hw;
 
-   bool hw_has_llc;
    bool hw_has_swizzling;
 
    bool no_vbo;


@@ -68,15 +68,4 @@ bool
 intel_tex_image_s8z24_create_renderbuffers(struct intel_context *intel,
                                            struct intel_texture_image *image);
 
-bool
-intel_texsubimage_tiled_memcpy(struct gl_context *ctx,
-                               GLuint dims,
-                               struct gl_texture_image *texImage,
-                               GLint xoffset, GLint yoffset, GLint zoffset,
-                               GLsizei width, GLsizei height, GLsizei depth,
-                               GLenum format, GLenum type,
-                               const GLvoid *pixels,
-                               const struct gl_pixelstore_attrib *packing,
-                               bool for_glTexImage);
-
 #endif


@@ -187,22 +187,10 @@ intelTexImage(struct gl_context * ctx,
              GLenum format, GLenum type, const void *pixels,
              const struct gl_pixelstore_attrib *unpack)
 {
-   bool ok;
-
    DBG("%s target %s level %d %dx%dx%d\n", __FUNCTION__,
        _mesa_lookup_enum_by_nr(texImage->TexObject->Target),
        texImage->Level, texImage->Width, texImage->Height, texImage->Depth);
 
-   ok = intel_texsubimage_tiled_memcpy(ctx, dims, texImage,
-                                       0, 0, 0, /*x,y,z offsets*/
-                                       texImage->Width,
-                                       texImage->Height,
-                                       texImage->Depth,
-                                       format, type, pixels, unpack,
-                                       true /*for_glTexImage*/);
-   if (ok)
-      return;
-
    /* Attempt to use the blitter for PBO image uploads.
     */
    if (dims <= 2 &&


@@ -126,165 +126,6 @@ err:
    return false;
 }
 
-/**
- * \brief A fast path for glTexImage and glTexSubImage.
- *
- * \param for_glTexImage Was this called from glTexImage or glTexSubImage?
- *
- * This fast path is taken when the hardware natively supports the texture
- * format (such as GL_BGRA) and when the texture memory is X-tiled. It uploads
- * the texture data by mapping the texture memory without a GTT fence, thus
- * acquiring a tiled view of the memory, and then memcpy'ing sucessive
- * subspans within each tile.
- *
- * This is a performance win over the conventional texture upload path because
- * it avoids the performance penalty of writing through the write-combine
- * buffer. In the conventional texture upload path,
- * texstore.c:store_texsubimage(), the texture memory is mapped through a GTT
- * fence, thus acquiring a linear view of the memory, then each row in the
- * image is memcpy'd. In this fast path, we replace each row's memcpy with
- * a sequence of memcpy's over each bit6 swizzle span in the row.
- *
- * This fast path's use case is Google Chrome's paint rectangles. Chrome (as
- * of version 21) renders each page as a tiling of 256x256 GL_BGRA textures.
- * Each page's content is initially uploaded with glTexImage2D and damaged
- * regions are updated with glTexSubImage2D. On some workloads, the
- * performance gain of this fastpath on Sandybridge is over 5x.
- */
-bool
-intel_texsubimage_tiled_memcpy(struct gl_context * ctx,
-                               GLuint dims,
-                               struct gl_texture_image *texImage,
-                               GLint xoffset, GLint yoffset, GLint zoffset,
-                               GLsizei width, GLsizei height, GLsizei depth,
-                               GLenum format, GLenum type,
-                               const GLvoid *pixels,
-                               const struct gl_pixelstore_attrib *packing,
-                               bool for_glTexImage)
-{
-   struct intel_context *intel = intel_context(ctx);
-   struct intel_texture_image *image = intel_texture_image(texImage);
-
-   /* The miptree's buffer. */
-   drm_intel_bo *bo;
-
-   int error = 0;
-
-   /* This fastpath is restricted to a specific texture type: level 0 of
-    * a 2D BGRA texture. It could be generalized to support more types by
-    * varying the arithmetic loop below.
-    */
-   if (!intel->has_llc ||
-       format != GL_BGRA ||
-       type != GL_UNSIGNED_BYTE ||
-       texImage->TexFormat != MESA_FORMAT_ARGB8888 ||
-       texImage->TexObject->Target != GL_TEXTURE_2D ||
-       texImage->Level != 0 ||
-       pixels == NULL ||
-       _mesa_is_bufferobj(packing->BufferObj) ||
-       packing->Alignment > 4 ||
-       packing->SkipPixels > 0 ||
-       packing->SkipRows > 0 ||
-       (packing->RowLength != 0 && packing->RowLength != width) ||
-       packing->SwapBytes ||
-       packing->LsbFirst ||
-       packing->Invert)
-      return false;
-
-   if (for_glTexImage)
-      ctx->Driver.AllocTextureImageBuffer(ctx, texImage);
-
-   if (!image->mt ||
-       image->mt->region->tiling != I915_TILING_X) {
-      /* The algorithm below is written only for X-tiled memory. */
-      return false;
-   }
-
-   bo = image->mt->region->bo;
-
-   if (drm_intel_bo_references(intel->batch.bo, bo)) {
-      perf_debug("Flushing before mapping a referenced bo.\n");
-      intel_batchbuffer_flush(intel);
-   }
-
-   if (unlikely(intel->perf_debug)) {
-      if (drm_intel_bo_busy(bo)) {
-         perf_debug("Mapping a busy BO, causing a stall on the GPU.\n");
-      }
-   }
-
-   error = drm_intel_bo_map(bo, true /*write_enable*/);
-   if (error || bo->virtual == NULL) {
-      DBG("%s: failed to map bo\n", __FUNCTION__);
-      return false;
-   }
-
-   /* We postponed printing this message until having committed to executing
-    * the function.
-    */
-   DBG("%s: level=%d offset=(%d,%d) (w,h)=(%d,%d)\n",
-       __FUNCTION__, texImage->Level, xoffset, yoffset, width, height);
-
-   /* In the tiling algorithm below, some variables are in units of pixels,
-    * others are in units of bytes, and others (such as height) are unitless.
-    * Each variable name is suffixed with its units.
-    */
-   const uint32_t x_max_pixels = xoffset + width;
-   const uint32_t y_max_pixels = yoffset + height;
-
-   const uint32_t tile_size_bytes = 4096;
-   const uint32_t tile_width_bytes = 512;
-   const uint32_t tile_width_pixels = 128;
-   const uint32_t tile_height = 8;
-
-   const uint32_t cpp = 4; /* chars per pixel of GL_BGRA */
-   const uint32_t swizzle_width_pixels = 16;
-
-   const uint32_t stride_bytes = image->mt->region->pitch;
-   const uint32_t width_tiles = stride_bytes / tile_width_bytes;
-
-   for (uint32_t y_pixels = yoffset; y_pixels < y_max_pixels; ++y_pixels) {
-      const uint32_t y_offset_bytes = (y_pixels / tile_height) * width_tiles * tile_size_bytes
-                                    + (y_pixels % tile_height) * tile_width_bytes;
-
-      for (uint32_t x_pixels = xoffset; x_pixels < x_max_pixels; x_pixels += swizzle_width_pixels) {
-         const uint32_t x_offset_bytes = (x_pixels / tile_width_pixels) * tile_size_bytes
-                                       + (x_pixels % tile_width_pixels) * cpp;
-
-         intptr_t offset_bytes = y_offset_bytes + x_offset_bytes;
-         if (intel->has_swizzling) {
-#if 0
-            /* Clear, unoptimized version. */
-            bool bit6 = (offset_bytes >> 6) & 1;
-            bool bit9 = (offset_bytes >> 9) & 1;
-            bool bit10 = (offset_bytes >> 10) & 1;
-
-            if (bit9 ^ bit10)
-               offset_bytes ^= (1 << 6);
-#else
-            /* Optimized, obfuscated version. */
-            offset_bytes ^= ((offset_bytes >> 3) ^ (offset_bytes >> 4))
-                          & (1 << 6);
-#endif
-         }
-
-         const uint32_t swizzle_bound_pixels = ALIGN(x_pixels + 1, swizzle_width_pixels);
-         const uint32_t memcpy_bound_pixels = MIN2(x_max_pixels, swizzle_bound_pixels);
-         const uint32_t copy_size = cpp * (memcpy_bound_pixels - x_pixels);
-
-         memcpy(bo->virtual + offset_bytes, pixels, copy_size);
-         pixels += copy_size;
-
-         x_pixels -= (x_pixels % swizzle_width_pixels);
-      }
-   }
-
-   drm_intel_bo_unmap(bo);
-
-   return true;
-}
-
 static void
 intelTexSubImage(struct gl_context * ctx,
                  GLuint dims,
@@ -295,16 +136,6 @@ intelTexSubImage(struct gl_context * ctx,
                 const GLvoid * pixels,
                 const struct gl_pixelstore_attrib *packing)
 {
-   bool ok;
-
-   ok = intel_texsubimage_tiled_memcpy(ctx, dims, texImage,
-                                       xoffset, yoffset, zoffset,
-                                       width, height, depth,
-                                       format, type, pixels, packing,
-                                       false /*for_glTexImage*/);
-   if (ok)
-      return;
-
    /* The intel_blit_texsubimage() function only handles 2D images */
    if (dims != 2 || !intel_blit_texsubimage(ctx, texImage,
                                             xoffset, yoffset,
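
For reference, the X-tiling arithmetic in the fast path removed above can be summarized in a small stand-alone sketch (hypothetical function name; the constants and the bit6 swizzle XOR are taken directly from the deleted code). An X tile is 4096 bytes laid out as 8 rows of 512 bytes, i.e. 128 GL_BGRA pixels per tile row:

#include <stdbool.h>
#include <stdint.h>

static uint32_t
xtile_offset_bytes(uint32_t x_pixels, uint32_t y_pixels,
                   uint32_t stride_bytes, bool has_swizzling)
{
   const uint32_t tile_size_bytes   = 4096;
   const uint32_t tile_width_bytes  = 512;
   const uint32_t tile_width_pixels = 128;
   const uint32_t tile_height       = 8;
   const uint32_t cpp               = 4;   /* bytes per GL_BGRA pixel */
   const uint32_t width_tiles       = stride_bytes / tile_width_bytes;

   /* Row of tiles, then row within that tile... */
   uint32_t offset = (y_pixels / tile_height) * width_tiles * tile_size_bytes
                   + (y_pixels % tile_height) * tile_width_bytes;

   /* ...then column of tiles, then byte column within the tile row. */
   offset += (x_pixels / tile_width_pixels) * tile_size_bytes
           + (x_pixels % tile_width_pixels) * cpp;

   /* Bit6 swizzling XORs address bits 9 and 10 into bit 6; apply the same
    * transformation the hardware does before writing through the CPU map. */
   if (has_swizzling)
      offset ^= ((offset >> 3) ^ (offset >> 4)) & (1 << 6);

   return offset;
}

For example, with a 1024-byte stride (two tiles across), pixel (130, 3) maps to (3 % 8) * 512 + (130 / 128) * 4096 + (130 % 128) * 4 = 1536 + 4096 + 8 = 5640 bytes before swizzling; the removed loop then memcpy'd at most one 16-pixel swizzle span at a time starting from such an offset.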