iris: bufmgr updates.

Drop BO_ALLOC_BUSY (best not to hand people a loaded gun...).
Drop the vestiges of alignment.
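Note: the net API change is visible in the first hunk below: iris_bo_alloc() loses its trailing alignment argument. A minimal sketch of a call site after this commit (the buffer name and size here are made up for illustration):

   /* Hypothetical call site -- name and size are illustrative only. */
   struct iris_bo *bo = iris_bo_alloc(bufmgr, "some buffer", 64 * 1024);

   /* Before this commit, the same call carried an explicit alignment,
    * e.g. iris_bo_alloc(bufmgr, "some buffer", 64 * 1024, 4096); */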
@@ -95,7 +95,7 @@ create_batch_buffer(struct iris_bufmgr *bufmgr,
                     struct iris_batch_buffer *buf,
                     const char *name, unsigned size)
 {
-   buf->bo = iris_bo_alloc(bufmgr, name, size, 4096);
+   buf->bo = iris_bo_alloc(bufmgr, name, size);
    buf->bo->kflags |= EXEC_OBJECT_CAPTURE;
    buf->map = iris_bo_map(NULL, buf->bo, MAP_READ | MAP_WRITE);
    buf->map_next = buf->map;
@@ -164,7 +164,6 @@ add_exec_bo(struct iris_batch *batch, struct iris_bo *bo)
    batch->validation_list[batch->exec_count] =
       (struct drm_i915_gem_exec_object2) {
         .handle = bo->gem_handle,
-        .alignment = bo->align,
         .offset = bo->gtt_offset,
         .flags = bo->kflags,
      };
@@ -300,7 +299,7 @@ grow_buffer(struct iris_batch *batch,
    const unsigned existing_bytes = buffer_bytes_used(buf);
 
    struct iris_bo *new_bo =
-      iris_bo_alloc(bufmgr, bo->name, new_size, bo->align);
+      iris_bo_alloc(bufmgr, bo->name, new_size);
 
    buf->map = iris_bo_map(NULL, new_bo, MAP_READ | MAP_WRITE);
    buf->map_next = buf->map + existing_bytes;
@@ -239,7 +239,7 @@ bo_alloc_internal(struct iris_bufmgr *bufmgr,
                   uint64_t size,
                   unsigned flags,
                   uint32_t tiling_mode,
-                  uint32_t stride, uint64_t alignment)
+                  uint32_t stride)
 {
    struct iris_bo *bo;
    unsigned int page_size = getpagesize();
@@ -247,20 +247,11 @@ bo_alloc_internal(struct iris_bufmgr *bufmgr,
    struct bo_cache_bucket *bucket;
    bool alloc_from_cache;
    uint64_t bo_size;
-   bool busy = false;
    bool zeroed = false;
 
-   if (flags & BO_ALLOC_BUSY)
-      busy = true;
-
    if (flags & BO_ALLOC_ZEROED)
       zeroed = true;
 
-   /* BUSY does doesn't really jive with ZEROED as we have to wait for it to
-    * be idle before we can memset.  Just disallow that combination.
-    */
-   assert(!(busy && zeroed));
-
    /* Round the allocated size up to a power of two number of pages. */
    bucket = bucket_for_size(bufmgr, size);
@@ -280,31 +271,13 @@ bo_alloc_internal(struct iris_bufmgr *bufmgr,
 retry:
    alloc_from_cache = false;
    if (bucket != NULL && !list_empty(&bucket->head)) {
-      if (busy && !zeroed) {
-         /* Allocate new render-target BOs from the tail (MRU)
-          * of the list, as it will likely be hot in the GPU
-          * cache and in the aperture for us.  If the caller
-          * asked us to zero the buffer, we don't want this
-          * because we are going to mmap it.
-          */
-         bo = LIST_ENTRY(struct iris_bo, bucket->head.prev, head);
-         list_del(&bo->head);
-         alloc_from_cache = true;
-         bo->align = alignment;
-      } else {
-         assert(alignment == 0);
-         /* For non-render-target BOs (where we're probably
-          * going to map it first thing in order to fill it
-          * with data), check if the last BO in the cache is
-          * unbusy, and only reuse in that case.  Otherwise,
-          * allocating a new buffer is probably faster than
-          * waiting for the GPU to finish.
-          */
-         bo = LIST_ENTRY(struct iris_bo, bucket->head.next, head);
-         if (!iris_bo_busy(bo)) {
-            alloc_from_cache = true;
-            list_del(&bo->head);
-         }
-      }
+      /* If the last BO in the cache is idle, then reuse it.  Otherwise,
+       * allocate a fresh buffer to avoid stalling.
+       */
+      bo = LIST_ENTRY(struct iris_bo, bucket->head.next, head);
+      if (!iris_bo_busy(bo)) {
+         alloc_from_cache = true;
+         list_del(&bo->head);
+      }
    }
 
    if (alloc_from_cache) {
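With BO_ALLOC_BUSY gone, the two cache-reuse paths above collapse into one policy: take the BO at the head of the bucket and reuse it only if the GPU is done with it, otherwise allocate fresh rather than stall. A side effect is that every cache hit is now idle, so the BO_ALLOC_ZEROED memset can never block (the condition the deleted assert was guarding against). A standalone sketch of that policy, with made-up types standing in for the driver's list and busy-query helpers:

   /* Standalone sketch (not driver code) of the unified reuse policy. */
   #include <stdbool.h>
   #include <stddef.h>

   struct sketch_bo {
      struct sketch_bo *next;   /* bucket free list                     */
      bool busy;                /* stand-in for an iris_bo_busy() query */
   };

   /* Pop the head of a cache bucket only if the GPU is done with it.
    * Returning NULL tells the caller to allocate a fresh buffer rather
    * than wait, which is exactly the trade the new code makes. */
   static struct sketch_bo *
   cache_pop_if_idle(struct sketch_bo **head)
   {
      struct sketch_bo *bo = *head;
      if (bo == NULL || bo->busy)
         return NULL;
      *head = bo->next;
      return bo;
   }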
@@ -352,7 +325,6 @@ retry:
    bo->gem_handle = create.handle;
 
    bo->bufmgr = bufmgr;
-   bo->align = alignment;
 
    bo->tiling_mode = I915_TILING_NONE;
    bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
@@ -397,17 +369,18 @@ err:
 
 struct iris_bo *
 iris_bo_alloc(struct iris_bufmgr *bufmgr,
-              const char *name, uint64_t size, uint64_t alignment)
+              const char *name,
+              uint64_t size)
 {
-   return bo_alloc_internal(bufmgr, name, size, 0, I915_TILING_NONE, 0, 0);
+   return bo_alloc_internal(bufmgr, name, size, 0, I915_TILING_NONE, 0);
 }
 
 struct iris_bo *
 iris_bo_alloc_tiled(struct iris_bufmgr *bufmgr, const char *name,
                     uint64_t size, uint32_t tiling_mode, uint32_t pitch,
                     unsigned flags)
 {
-   return bo_alloc_internal(bufmgr, name, size, flags, tiling_mode, pitch, 0);
+   return bo_alloc_internal(bufmgr, name, size, flags, tiling_mode, pitch);
 }
 
 /**
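Both public allocators now forward to the same internal entry point, and plain iris_bo_alloc() hardcodes flags = 0. As the code stands, that means BO_ALLOC_ZEROED can only be requested through the tiled path; a hypothetical zeroed-but-linear allocation (name and size illustrative) would look like this:

   /* Hypothetical: a linear but zeroed allocation goes through the
    * tiled entry point, since iris_bo_alloc() passes flags = 0. */
   struct iris_bo *zeroed =
      iris_bo_alloc_tiled(bufmgr, "cleared buffer", 4096,
                          I915_TILING_NONE, 0, BO_ALLOC_ZEROED);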
@@ -418,7 +391,7 @@ iris_bo_alloc_tiled(struct iris_bufmgr *bufmgr, const char *name,
  */
 struct iris_bo *
 iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
                              const char *name, unsigned int handle)
 {
    struct iris_bo *bo;
 
@@ -44,13 +44,6 @@ struct iris_bo {
     */
    uint64_t size;
 
-   /**
-    * Alignment requirement for object
-    *
-    * Used for GTT mapping & pinning the object.
-    */
-   uint64_t align;
-
    /** Buffer manager context associated with this buffer object */
    struct iris_bufmgr *bufmgr;
 
@@ -152,8 +145,7 @@ struct iris_bo {
    bool cache_coherent;
 };
 
-#define BO_ALLOC_BUSY       (1<<0)
-#define BO_ALLOC_ZEROED     (1<<1)
+#define BO_ALLOC_ZEROED     (1<<0)
 
 /**
  * Allocate a buffer object.
@@ -162,8 +154,9 @@ struct iris_bo {
  * address space or graphics device aperture.  They must be mapped
  * using iris_bo_map() to be used by the CPU.
  */
-struct iris_bo *iris_bo_alloc(struct iris_bufmgr *bufmgr, const char *name,
-                              uint64_t size, uint64_t alignment);
+struct iris_bo *iris_bo_alloc(struct iris_bufmgr *bufmgr,
+                              const char *name,
+                              uint64_t size);
 
 /**
  * Allocate a tiled buffer object.
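One thing to watch in the header hunks: BO_ALLOC_ZEROED moves from bit 1 to bit 0. That is safe for code that uses the name, but any out-of-tree caller that hard-coded (1<<1) would now silently request nothing. A defensive guard, not part of this patch and assuming iris_bufmgr.h's define is in scope, could pin the name to its audited value at compile time:

   /* Hypothetical guard (C11), not in this commit: fail the build if
    * the flag's value drifts after the BO_ALLOC_BUSY removal. */
   #include <assert.h>
   static_assert(BO_ALLOC_ZEROED == (1 << 0),
                 "BO_ALLOC_* bits renumbered; re-audit callers");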