anv/allocator: Make the block pool double-ended
This allows us to allocate from either side of the block pool in a consistent way. If you use the existing block_pool_alloc function, you will get non-negative offsets from the front of the pool as before. If you use the new block_pool_alloc_back function, you will get a negative offset that corresponds to a block in the "back" of the pool; either kind of offset is valid relative to the pool's map pointer.
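A minimal usage sketch of the resulting API (an illustration, not code from the commit; it assumes an already-initialized anv_device named device, the private header name, and the 4 KB block size used elsewhere in the driver):

#include <stdint.h>
#include "anv_private.h"   /* header name assumed; declares the pool API below */

/* Illustration only: shows how front and back allocations are meant to be
 * mixed once this commit lands. */
void example(struct anv_device *device)
{
   struct anv_block_pool pool;
   anv_block_pool_init(&pool, device, 4096);

   int32_t front = anv_block_pool_alloc(&pool);       /* >= 0, front of pool */
   int32_t back = anv_block_pool_alloc_back(&pool);   /* < 0, back of pool */

   /* Both offsets are relative to pool.map, whichever side they came from. */
   char *front_ptr = (char *)pool.map + front;
   char *back_ptr = (char *)pool.map + back;
   (void)front_ptr;
   (void)back_ptr;

   /* anv_block_pool_free() routes negative offsets to the back free list. */
   anv_block_pool_free(&pool, front);
   anv_block_pool_free(&pool, back);

   anv_block_pool_finish(&pool);
}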
@@ -241,7 +241,7 @@ anv_ptr_free_list_push(void **list, void *elem)
 }
 
 static uint32_t
-anv_block_pool_grow(struct anv_block_pool *pool, uint32_t old_size);
+anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state);
 
 void
 anv_block_pool_init(struct anv_block_pool *pool,
@@ -252,8 +252,10 @@ anv_block_pool_init(struct anv_block_pool *pool,
    pool->device = device;
    pool->bo.gem_handle = 0;
    pool->bo.offset = 0;
+   pool->bo.size = 0;
    pool->block_size = block_size;
    pool->free_list = ANV_FREE_LIST_EMPTY;
+   pool->back_free_list = ANV_FREE_LIST_EMPTY;
 
    pool->fd = memfd_create("block pool", MFD_CLOEXEC);
    if (pool->fd == -1)
@@ -269,9 +271,13 @@ anv_block_pool_init(struct anv_block_pool *pool,
    anv_vector_init(&pool->mmap_cleanups,
                    round_to_power_of_two(sizeof(struct anv_mmap_cleanup)), 128);
 
-   /* Immediately grow the pool so we'll have a backing bo. */
    pool->state.next = 0;
-   pool->state.end = anv_block_pool_grow(pool, 0);
+   pool->state.end = 0;
+   pool->back_state.next = 0;
+   pool->back_state.end = 0;
+
+   /* Immediately grow the pool so we'll have a backing bo. */
+   pool->state.end = anv_block_pool_grow(pool, &pool->state);
 }
 
 void
@@ -291,8 +297,34 @@ anv_block_pool_finish(struct anv_block_pool *pool)
    close(pool->fd);
 }
 
+#define PAGE_SIZE 4096
+
+/** Grows and re-centers the block pool.
+ *
+ * We grow the block pool in one or both directions in such a way that the
+ * following conditions are met:
+ *
+ *  1) The size of the entire pool is always a power of two.
+ *
+ *  2) The pool only ever grows.  Neither end can get shortened.
+ *
+ *  3) At the end of the allocation, we have about twice as much space
+ *     allocated for each end as we have used.  This way the pool doesn't
+ *     grow too far in one direction or the other.
+ *
+ *  4) If _alloc_back() has never been called, then the back portion of
+ *     the pool retains a size of zero.  (This makes it easier for users of
+ *     the block pool that only want a one-sided pool.)
+ *
+ *  5) We have enough space allocated for at least one more block in
+ *     whichever side `state` points to.
+ *
+ *  6) The center of the pool is always aligned to both the block_size of
+ *     the pool and a 4K CPU page.
+ */
 static uint32_t
-anv_block_pool_grow(struct anv_block_pool *pool, uint32_t old_size)
+anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state)
 {
    size_t size;
    void *map;
@@ -301,8 +333,39 @@ anv_block_pool_grow(struct anv_block_pool *pool, uint32_t old_size)
 
    pthread_mutex_lock(&pool->device->mutex);
 
+   assert(state == &pool->state || state == &pool->back_state);
+
+   /* Gather a little usage information on the pool.  Since we may have
+    * threads waiting in queue to get some storage while we resize, it's
+    * actually possible that total_used will be larger than old_size.  In
+    * particular, block_pool_alloc() increments state->next prior to
+    * calling block_pool_grow, so this ensures that we get enough space for
+    * whichever side tries to grow the pool.
+    *
+    * We align to a page size because it makes it easier to do our
+    * calculations later in such a way that we stay page-aligned.
+    */
+   uint32_t back_used = align_u32(pool->back_state.next, PAGE_SIZE);
+   uint32_t front_used = align_u32(pool->state.next, PAGE_SIZE);
+   uint32_t total_used = front_used + back_used;
+
+   assert(state == &pool->state || back_used > 0);
+
+   size_t old_size = pool->bo.size;
+
+   if (old_size != 0 &&
+       back_used * 2 <= pool->center_bo_offset &&
+       front_used * 2 <= (old_size - pool->center_bo_offset)) {
+      /* If we're in this case then this isn't the first allocation and we
+       * already have enough space on both sides to hold double what we
+       * have allocated.  There's nothing for us to do.
+       */
+      goto done;
+   }
+
    if (old_size == 0) {
-      size = 32 * pool->block_size;
+      /* This is the first allocation */
+      size = MAX2(32 * pool->block_size, PAGE_SIZE);
    } else {
       size = old_size * 2;
    }
@@ -313,6 +376,35 @@ anv_block_pool_grow(struct anv_block_pool *pool, uint32_t old_size)
     */
    assert(size <= (1u << 31));
 
+   /* We compute a new center_bo_offset such that, when we double the size
+    * of the pool, we maintain the ratio of how much is used by each side.
+    * This way things should remain more-or-less balanced.
+    */
+   uint32_t center_bo_offset;
+   if (back_used == 0) {
+      /* If we're in this case then we have never called alloc_back().  In
+       * this case, we want to keep the offset at 0 to make things as
+       * simple as possible for users that don't care about back
+       * allocations.
+       */
+      center_bo_offset = 0;
+   } else {
+      center_bo_offset = ((uint64_t)size * back_used) / total_used;
+
+      /* Align down to a multiple of both the block size and page size */
+      uint32_t granularity = MAX2(pool->block_size, PAGE_SIZE);
+      assert(util_is_power_of_two(granularity));
+      center_bo_offset &= ~(granularity - 1);
+
+      assert(center_bo_offset >= back_used);
+   }
+
+   assert(center_bo_offset % pool->block_size == 0);
+   assert(center_bo_offset % PAGE_SIZE == 0);
+
+   /* Assert that we only ever grow the pool */
+   assert(center_bo_offset >= pool->back_state.end);
+   assert(size - center_bo_offset >= pool->state.end);
+
    cleanup = anv_vector_add(&pool->mmap_cleanups);
    if (!cleanup)
       goto fail;
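As a rough sanity check of the re-centering rule in the hunk above, the following standalone sketch (a hypothetical helper, not part of the commit; only the defines and the arithmetic are taken from the diff) reproduces the center_bo_offset computation with example numbers:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096
#define MAX2(a, b) ((a) > (b) ? (a) : (b))

static uint32_t align_u32(uint32_t v, uint32_t a)
{
   return (v + a - 1) & ~(a - 1);
}

/* Hypothetical standalone version of the center_bo_offset computation:
 * split the doubled pool in proportion to current back/front usage, then
 * align the split point down to both the block size and a page.
 */
static uint32_t pick_center(uint32_t size, uint32_t block_size,
                            uint32_t back_next, uint32_t front_next)
{
   uint32_t back_used = align_u32(back_next, PAGE_SIZE);
   uint32_t front_used = align_u32(front_next, PAGE_SIZE);
   uint32_t total_used = back_used + front_used;

   if (back_used == 0)
      return 0;   /* a one-sided pool stays one-sided */

   uint32_t center = ((uint64_t)size * back_used) / total_used;
   uint32_t granularity = MAX2(block_size, PAGE_SIZE);
   center &= ~(granularity - 1);
   assert(center >= back_used);
   return center;
}

int main(void)
{
   /* Example: 8 KB used in back, 24 KB used in front, pool growing to
    * 128 KB.  The center lands at 128 KB * 8/32 = 32 KB, which is already
    * aligned to both the 4 KB block size and the page size.
    */
   printf("%u\n", (unsigned)pick_center(128 * 1024, 4096, 8 * 1024, 24 * 1024));
   return 0;
}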
@@ -320,7 +412,7 @@ anv_block_pool_grow(struct anv_block_pool *pool, uint32_t old_size)
 
    /* First try to see if mremap can grow the map in place. */
    map = MAP_FAILED;
-   if (old_size > 0)
+   if (old_size > 0 && center_bo_offset == 0)
       map = mremap(pool->map, old_size, size, 0);
    if (map == MAP_FAILED) {
       /* Just leak the old map until we destroy the pool.  We can't munmap it
@@ -330,7 +422,8 @@ anv_block_pool_grow(struct anv_block_pool *pool, uint32_t old_size)
        * should try to get some numbers.
        */
       map = mmap(NULL, size, PROT_READ | PROT_WRITE,
-                 MAP_SHARED | MAP_POPULATE, pool->fd, 0);
+                 MAP_SHARED | MAP_POPULATE, pool->fd,
+                 BLOCK_POOL_MEMFD_CENTER - center_bo_offset);
       cleanup->map = map;
       cleanup->size = size;
    }
@@ -344,18 +437,30 @@ anv_block_pool_grow(struct anv_block_pool *pool, uint32_t old_size)
 
    /* Now that we successfully allocated everything, we can write the new
     * values back into pool. */
-   pool->map = map;
+   pool->map = map + center_bo_offset;
+   pool->center_bo_offset = center_bo_offset;
    pool->bo.gem_handle = gem_handle;
    pool->bo.size = size;
    pool->bo.map = map;
    pool->bo.index = 0;
 
 done:
    pthread_mutex_unlock(&pool->device->mutex);
 
-   return size;
+   /* Return the appropriate new size.  This function never actually
+    * updates state->next.  Instead, we let the caller do that because it
+    * needs to do so in order to maintain its concurrency model.
+    */
+   if (state == &pool->state) {
+      return pool->bo.size - pool->center_bo_offset;
+   } else {
+      assert(pool->center_bo_offset > 0);
+      return pool->center_bo_offset;
+   }
 
 fail:
    pthread_mutex_unlock(&pool->device->mutex);
 
    return 0;
 }
 
@@ -372,12 +477,12 @@ anv_block_pool_alloc_new(struct anv_block_pool *pool,
          return state.next;
       } else if (state.next == state.end) {
          /* We allocated the first block outside the pool, we have to grow it.
-          * pool->next_block acts a mutex: threads who try to allocate now will
+          * pool_state->next acts as a mutex: threads who try to allocate now will
           * get block indexes above the current limit and hit futex_wait
           * below. */
          new.next = state.next + pool->block_size;
-         new.end = anv_block_pool_grow(pool, state.end);
-         assert(new.end > 0);
+         new.end = anv_block_pool_grow(pool, pool_state);
+         assert(new.end >= new.next && new.end % pool->block_size == 0);
          old.u64 = __sync_lock_test_and_set(&pool_state->u64, new.u64);
          if (old.next != state.next)
            futex_wake(&pool_state->end, INT_MAX);
@@ -389,7 +494,7 @@ anv_block_pool_alloc_new(struct anv_block_pool *pool,
    }
 }
 
-uint32_t
+int32_t
 anv_block_pool_alloc(struct anv_block_pool *pool)
 {
    int32_t offset;
@@ -404,10 +509,46 @@ anv_block_pool_alloc(struct anv_block_pool *pool)
    return anv_block_pool_alloc_new(pool, &pool->state);
 }
 
-void
-anv_block_pool_free(struct anv_block_pool *pool, uint32_t offset)
+/* Allocates a block out of the back of the block pool.
+ *
+ * This will allocate a block earlier than the "start" of the block pool.
+ * The offsets returned from this function will be negative but will still
+ * be correct relative to the block pool's map pointer.
+ *
+ * If you ever use anv_block_pool_alloc_back, then you will have to do
+ * gymnastics with the block pool's BO when doing relocations.
+ */
+int32_t
+anv_block_pool_alloc_back(struct anv_block_pool *pool)
 {
-   anv_free_list_push(&pool->free_list, pool->map, offset);
+   int32_t offset;
+
+   /* Try the free list first. */
+   if (anv_free_list_pop(&pool->back_free_list, &pool->map, &offset)) {
+      assert(offset < 0);
+      assert(pool->map);
+      return offset;
+   }
+
+   offset = anv_block_pool_alloc_new(pool, &pool->back_state);
+
+   /* The offset we get out of anv_block_pool_alloc_new() is actually the
+    * number of bytes downwards from the middle to the end of the block.
+    * We need to turn it into a (negative) offset from the middle to the
+    * start of the block.
+    */
+   assert(offset >= 0);
+   return -(offset + pool->block_size);
+}
+
+void
+anv_block_pool_free(struct anv_block_pool *pool, int32_t offset)
+{
+   if (offset < 0) {
+      anv_free_list_push(&pool->back_free_list, pool->map, offset);
+   } else {
+      anv_free_list_push(&pool->free_list, pool->map, offset);
+   }
 }
 
 static void
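The sign flip at the end of anv_block_pool_alloc_back() above is easiest to see with concrete numbers. Here is a minimal sketch (hypothetical helper and values, assuming a 4 KB block size):

#include <assert.h>
#include <stdint.h>

/* Hypothetical helper mirroring the conversion in anv_block_pool_alloc_back():
 * "raw" is the non-negative value from anv_block_pool_alloc_new() on the back
 * state (0, block_size, 2*block_size, ...), counted downward from the center.
 */
static int32_t back_offset(int32_t raw, uint32_t block_size)
{
   assert(raw >= 0);
   return -(raw + (int32_t)block_size);
}

int main(void)
{
   /* With 4 KB blocks: the first back block spans [-4096, 0) relative to
    * pool->map, the second spans [-8192, -4096), and so on.
    */
   assert(back_offset(0, 4096) == -4096);
   assert(back_offset(4096, 4096) == -8192);
   return 0;
}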
@@ -253,6 +253,25 @@ struct anv_block_pool {
    struct anv_device *device;
 
    struct anv_bo bo;
+
+   /* Offset from the start of the memfd to the "center" of the block pool. */
+   uint32_t center_fd_offset;
+
+   /* The offset from the start of the bo to the "center" of the block
+    * pool.  Pointers to allocated blocks are given by
+    * bo.map + center_bo_offset + offsets.
+    */
+   uint32_t center_bo_offset;
+
+   /* Current memory map of the block pool.  This pointer may or may not
+    * point to the actual beginning of the block pool memory.  If
+    * anv_block_pool_alloc_back has ever been called, then this pointer
+    * will point to the "center" position of the buffer and all offsets
+    * (negative or positive) given out by the block pool alloc functions
+    * will be valid relative to this pointer.
+    *
+    * In particular, map == bo.map + center_bo_offset
+    */
    void *map;
    int fd;
 
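The comments in this hunk pin down the pointer relationship map == bo.map + center_bo_offset, with both positive and negative offsets resolved against map. The short sketch below (made-up sizes and offsets, not driver code) just restates that arithmetic:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch of the pointer bookkeeping described above, with example values.
 * "bo_map" stands in for bo.map, the start of the GPU buffer mapping.
 */
int main(void)
{
   char bo_map[64 * 1024];              /* pretend bo.map, bo.size = 64 KB */
   uint32_t center_bo_offset = 16 * 1024;

   /* pool->map always points at the center of the pool: */
   char *map = bo_map + center_bo_offset;

   int32_t front_offset = 4096;    /* as returned by anv_block_pool_alloc() */
   int32_t back_offset = -4096;    /* as returned by anv_block_pool_alloc_back() */

   /* A block handed out at offset off lives at byte center_bo_offset + off
    * of the bo, whether off is positive or negative. */
   ptrdiff_t front_in_bo = (map + front_offset) - bo_map;
   ptrdiff_t back_in_bo = (map + back_offset) - bo_map;
   assert(front_in_bo == (ptrdiff_t)center_bo_offset + front_offset);  /* 20480 */
   assert(back_in_bo == (ptrdiff_t)center_bo_offset + back_offset);    /* 12288 */
   return 0;
}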
@@ -266,15 +285,23 @@ struct anv_block_pool {
 
    union anv_free_list free_list;
    struct anv_block_state state;
+
+   union anv_free_list back_free_list;
+   struct anv_block_state back_state;
 };
 
+/* Block pools are backed by a fixed-size 4GB memfd */
+#define BLOCK_POOL_MEMFD_SIZE (1ull << 32)
+
+/* The center of the block pool is also the middle of the memfd.  This may
+ * change in the future if we decide differently for some reason.
+ */
+#define BLOCK_POOL_MEMFD_CENTER (BLOCK_POOL_MEMFD_SIZE / 2)
+
 static inline uint32_t
 anv_block_pool_size(struct anv_block_pool *pool)
 {
-   return pool->state.end;
+   return pool->state.end + pool->back_state.end;
 }
 
 struct anv_state {
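A quick numeric check of the layout these defines imply (a sketch with example numbers; only the two defines come from the diff): the memfd is 4 GB with its center at 2 GB, the front of the pool is mapped starting at the center and grows upward, the back grows downward from the same point, and anv_block_pool_size() is simply the sum of the two sides.

#include <assert.h>
#include <stdint.h>

#define BLOCK_POOL_MEMFD_SIZE   (1ull << 32)
#define BLOCK_POOL_MEMFD_CENTER (BLOCK_POOL_MEMFD_SIZE / 2)

int main(void)
{
   uint64_t front_end = 12 * 4096;   /* example value for pool->state.end */
   uint64_t back_end = 4 * 4096;     /* example value for pool->back_state.end */

   /* anv_block_pool_size() reports front capacity plus back capacity. */
   uint64_t pool_size = front_end + back_end;
   assert(pool_size == 16 * 4096);

   /* Both sides always fit inside the fixed-size memfd. */
   assert(BLOCK_POOL_MEMFD_CENTER + front_end <= BLOCK_POOL_MEMFD_SIZE);
   assert(back_end <= BLOCK_POOL_MEMFD_CENTER);
   return 0;
}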
@@ -309,8 +336,9 @@ struct anv_state_stream {
 void anv_block_pool_init(struct anv_block_pool *pool,
                          struct anv_device *device, uint32_t block_size);
 void anv_block_pool_finish(struct anv_block_pool *pool);
-uint32_t anv_block_pool_alloc(struct anv_block_pool *pool);
-void anv_block_pool_free(struct anv_block_pool *pool, uint32_t offset);
+int32_t anv_block_pool_alloc(struct anv_block_pool *pool);
+int32_t anv_block_pool_alloc_back(struct anv_block_pool *pool);
+void anv_block_pool_free(struct anv_block_pool *pool, int32_t offset);
 void anv_state_pool_init(struct anv_state_pool *pool,
                          struct anv_block_pool *block_pool);
 void anv_state_pool_finish(struct anv_state_pool *pool);
@@ -34,14 +34,18 @@ struct job {
    unsigned id;
    struct anv_block_pool *pool;
    uint32_t blocks[BLOCKS_PER_THREAD];
+   uint32_t back_blocks[BLOCKS_PER_THREAD];
 } jobs[NUM_THREADS];
 
 
 static void *alloc_blocks(void *_job)
 {
    struct job *job = _job;
 
-   for (unsigned i = 0; i < BLOCKS_PER_THREAD; i++)
+   for (unsigned i = 0; i < BLOCKS_PER_THREAD; i++) {
       job->blocks[i] = anv_block_pool_alloc(job->pool);
+      job->back_blocks[i] = -anv_block_pool_alloc_back(job->pool);
+   }
 
    return NULL;
 }
@@ -98,10 +102,15 @@ static void run_test()
    for (unsigned i = 0; i < NUM_THREADS; i++)
       pthread_join(jobs[i].thread, NULL);
 
    /* Validate that the block allocations were monotonic */
    uint32_t *block_ptrs[NUM_THREADS];
    for (unsigned i = 0; i < NUM_THREADS; i++)
       block_ptrs[i] = jobs[i].blocks;
    validate_monotonic(block_ptrs);
 
+   /* Validate that the back block allocations were monotonic */
+   for (unsigned i = 0; i < NUM_THREADS; i++)
+      block_ptrs[i] = jobs[i].back_blocks;
+   validate_monotonic(block_ptrs);
+
    anv_block_pool_finish(&pool);