anv/allocator: Take the device mutex when growing a block pool

We don't have any locking issues yet because we use the pool size itself as
a mutex in block_pool_alloc to guarantee that only one thread is resizing
at a time.  However, we are about to add support for growing the block pool
at both ends.  This introduces two potential races:

 1) You could have two block_pool_alloc() calls that both try to grow the
    block pool, one from each end.

 2) The relocation handling code will now have to think about not only the
    bo that we use for the block pool but also the offset from the start of
    that bo to the center of the block pool.  It's possible that the block
    pool growing code could race with the relocation handling code and get
    a bo and offset out of sync.

Grabbing the device mutex solves both of these problems.  Thanks to (2), we
can't really do anything more granular.
commit 74bf7aa07c
parent 222ddac810
Author: Jason Ekstrand
Date:   2015-09-15 14:52:26 -07:00

5 files changed, 23 insertions(+), 5 deletions(-)
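
To make the two races concrete, here is a minimal sketch of the locking scheme (hypothetical, simplified names such as center_offset; not the actual anv code): growth from either end is serialized by the device mutex, and the relocation path snapshots the bo and the center offset under the same mutex so the pair can never be observed out of sync.

   #include <pthread.h>
   #include <stdint.h>

   /* Simplified stand-ins for the real anv structures. */
   struct device {
      pthread_mutex_t mutex;
   };

   struct block_pool {
      struct device *device;
      void *map;                /* stands in for pool->bo.map */
      uint32_t size;
      uint32_t center_offset;   /* hypothetical: offset from bo start to pool center */
   };

   /* Race 1: two threads growing the pool, one from each end.  Taking
    * the device mutex serializes the grows. */
   static void pool_grow(struct block_pool *pool, uint32_t new_size)
   {
      pthread_mutex_lock(&pool->device->mutex);
      /* ... remap, then update map, size and center_offset together ... */
      pool->size = new_size;
      pthread_mutex_unlock(&pool->device->mutex);
   }

   /* Race 2: relocation handling needs a (bo, center offset) pair from
    * the same grow, so it reads both under the same mutex. */
   static void snapshot_for_relocs(struct block_pool *pool,
                                   void **map, uint32_t *center)
   {
      pthread_mutex_lock(&pool->device->mutex);
      *map = pool->map;
      *center = pool->center_offset;
      pthread_mutex_unlock(&pool->device->mutex);
   }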

@@ -287,6 +287,8 @@ anv_block_pool_grow(struct anv_block_pool *pool, uint32_t old_size)
    int gem_handle;
    struct anv_mmap_cleanup *cleanup;
 
+   pthread_mutex_lock(&pool->device->mutex);
+
    if (old_size == 0) {
       size = 32 * pool->block_size;
    } else {
@@ -295,17 +297,17 @@ anv_block_pool_grow(struct anv_block_pool *pool, uint32_t old_size)
    cleanup = anv_vector_add(&pool->mmap_cleanups);
    if (!cleanup)
-      return 0;
+      goto fail;
 
    *cleanup = ANV_MMAP_CLEANUP_INIT;
 
    if (old_size == 0)
       pool->fd = memfd_create("block pool", MFD_CLOEXEC);
    if (pool->fd == -1)
-      return 0;
+      goto fail;
 
    if (ftruncate(pool->fd, size) == -1)
-      return 0;
+      goto fail;
 
    /* First try to see if mremap can grow the map in place. */
    map = MAP_FAILED;
@@ -324,11 +326,11 @@ anv_block_pool_grow(struct anv_block_pool *pool, uint32_t old_size)
       cleanup->size = size;
    }
 
    if (map == MAP_FAILED)
-      return 0;
+      goto fail;
 
    gem_handle = anv_gem_userptr(pool->device, map, size);
    if (gem_handle == 0)
-      return 0;
+      goto fail;
 
    cleanup->gem_handle = gem_handle;
@@ -339,7 +341,13 @@ anv_block_pool_grow(struct anv_block_pool *pool, uint32_t old_size)
    /* Now that we successfully allocated everything, we can write the new
    pool->bo.map = map;
    pool->bo.index = 0;
 
+   pthread_mutex_unlock(&pool->device->mutex);
+
    return size;
+
+ fail:
+   pthread_mutex_unlock(&pool->device->mutex);
+   return 0;
 }
 
 uint32_t
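
Since the function now takes the mutex at the top, every exit path has to release it; that is why each "return 0" above becomes "goto fail", funneling all error paths through a single unlock.  Distilled into a sketch (placeholder names, not the real code):

   #include <pthread.h>
   #include <stdint.h>

   struct pool {
      pthread_mutex_t mutex;
      uint32_t size;
   };

   /* Placeholder for the memfd/ftruncate/mmap/userptr steps above. */
   static int step_that_may_fail(void) { return -1; }

   static uint32_t pool_grow(struct pool *pool)
   {
      pthread_mutex_lock(&pool->mutex);

      if (step_that_may_fail() == -1)
         goto fail;              /* no early return while holding the lock */

      pthread_mutex_unlock(&pool->mutex);
      return pool->size;

    fail:
      pthread_mutex_unlock(&pool->mutex);
      return 0;
   }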

@@ -51,6 +51,7 @@ static void run_test()
    struct anv_device device;
    struct anv_block_pool pool;
 
+   pthread_mutex_init(&device.mutex, NULL);
    anv_block_pool_init(&pool, &device, 16);
 
    for (unsigned i = 0; i < NUM_THREADS; i++) {
@@ -95,6 +96,7 @@ static void run_test()
    }
 
    anv_block_pool_finish(&pool);
+   pthread_mutex_destroy(&device.mutex);
 }
 
 int main(int argc, char **argv)

@@ -38,6 +38,8 @@ int main(int argc, char **argv)
    struct anv_block_pool block_pool;
    struct anv_state_pool state_pool;
 
+   pthread_mutex_init(&device.mutex, NULL);
+
    for (unsigned i = 0; i < NUM_RUNS; i++) {
       anv_block_pool_init(&block_pool, &device, 256);
       anv_state_pool_init(&state_pool, &block_pool);
@@ -50,4 +52,6 @@ int main(int argc, char **argv)
       anv_state_pool_finish(&state_pool);
       anv_block_pool_finish(&block_pool);
    }
+
+   pthread_mutex_destroy(&device.mutex);
 }

@@ -37,6 +37,7 @@ int main(int argc, char **argv)
    struct anv_block_pool block_pool;
    struct anv_state_pool state_pool;
 
+   pthread_mutex_init(&device.mutex, NULL);
    anv_block_pool_init(&block_pool, &device, 4096);
    anv_state_pool_init(&state_pool, &block_pool);
@@ -61,4 +62,5 @@ int main(int argc, char **argv)
    anv_state_pool_finish(&state_pool);
    anv_block_pool_finish(&block_pool);
+   pthread_mutex_destroy(&device.mutex);
 }

@@ -58,6 +58,7 @@ static void run_test()
    struct anv_block_pool block_pool;
    struct anv_state_pool state_pool;
 
+   pthread_mutex_init(&device.mutex, NULL);
    anv_block_pool_init(&block_pool, &device, 64);
    anv_state_pool_init(&state_pool, &block_pool);
@@ -106,6 +107,7 @@ static void run_test()
    anv_state_pool_finish(&state_pool);
    anv_block_pool_finish(&block_pool);
+   pthread_mutex_destroy(&device.mutex);
 }
 
 int main(int argc, char **argv)
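
All four unit tests get the same bracketing: the device mutex is initialized before the first pool is created and destroyed after the last pool is finished.  Distilled into one sketch (assumes the anv test headers; block size and the body are arbitrary):

   static void run_test(void)
   {
      struct anv_device device;
      struct anv_block_pool pool;

      pthread_mutex_init(&device.mutex, NULL);   /* before any pool init */

      anv_block_pool_init(&pool, &device, 64);
      /* ... exercise the pool ... */
      anv_block_pool_finish(&pool);

      pthread_mutex_destroy(&device.mutex);      /* after every pool is finished */
   }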