anv: Make anv_vma_alloc/free a lot dumber

All they do now is take a size, align, and flags and figure out which
heap to allocate in.  All of the actual code to deal with the BO is in
anv_allocator.c.  We want to leave anv_vma_alloc/free in anv_device.c
because they deal with API-exposed heaps, so it still makes sense to
have them there.

Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Jordan Justen <jordan.l.justen@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/merge_requests/3519>
commit b29cf7daf3 (parent fd0f9d1196)
Author:    Jason Ekstrand
Date:      2020-01-22 16:40:13 -06:00
Committed: Marge Bot

3 changed files with 80 additions and 69 deletions
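The shape of the new contract is worth stating before the hunks: callers pass anv_vma_alloc a size, alignment, and allocation flags; it picks one of the driver's VMA heaps (client-visible, high, or low) and returns an address, with 0 signalling failure. The sketch below is a self-contained toy model of that pattern, not mesa code; the toy_* names, heap ranges, and bump allocator are all invented for illustration.

/* Standalone sketch of the heap-selection pattern; toy_* names, heap
 * ranges, and the bump allocator are invented and are not mesa APIs. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_ALLOC_CLIENT_VISIBLE (1u << 0)
#define TOY_ALLOC_32BIT          (1u << 1)

struct toy_heap { uint64_t next, end; };

/* Trivial bump allocation standing in for util_vma_heap_alloc(). */
static uint64_t
toy_heap_alloc(struct toy_heap *h, uint64_t size, uint64_t align)
{
   uint64_t addr = (h->next + align - 1) & ~(align - 1);
   if (addr + size > h->end)
      return 0;
   h->next = addr + size;
   return addr;
}

static struct toy_heap cva = { 0x000100000000ull, 0x0001ffffffffull };
static struct toy_heap hi  = { 0x000200000000ull, 0x7fffffffffffull };
static struct toy_heap lo  = { 0x000000001000ull, 0x00000fffffffull };

/* Mirrors the new anv_vma_alloc contract: size/align/flags in,
 * address out, 0 on failure. */
static uint64_t
toy_vma_alloc(uint64_t size, uint64_t align, uint32_t flags)
{
   if (flags & TOY_ALLOC_CLIENT_VISIBLE)
      return toy_heap_alloc(&cva, size, align);  /* no fallback */

   uint64_t addr = 0;
   if (!(flags & TOY_ALLOC_32BIT))
      addr = toy_heap_alloc(&hi, size, align);
   if (addr == 0)
      addr = toy_heap_alloc(&lo, size, align);
   return addr;
}

int main(void)
{
   uint64_t a = toy_vma_alloc(64 * 1024, 64 * 1024, 0);
   uint64_t b = toy_vma_alloc(4096, 4096, TOY_ALLOC_32BIT);
   assert(a != 0 && b != 0);
   printf("hi: 0x%llx, lo: 0x%llx\n",
          (unsigned long long)a, (unsigned long long)b);
   return 0;
}

As in the real implementation below, the client-visible heap never falls back to the other heaps, while ordinary allocations try the high heap first and fall back to the low heap.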

src/intel/vulkan/anv_allocator.c

@@ -1521,6 +1521,18 @@ anv_bo_alloc_flags_to_bo_flags(struct anv_device *device,
    return bo_flags;
 }
 
+static uint32_t
+anv_device_get_bo_align(struct anv_device *device)
+{
+   /* Gen12 CCS surface addresses need to be 64K aligned. We have no way of
+    * telling what this allocation is for so pick the largest alignment.
+    */
+   if (device->info.gen >= 12)
+      return 64 * 1024;
+
+   return 4096;
+}
+
 VkResult
 anv_device_alloc_bo(struct anv_device *device,
                     uint64_t size,
@@ -1535,6 +1547,8 @@ anv_device_alloc_bo(struct anv_device *device,
    /* The kernel is going to give us whole pages anyway */
    size = align_u64(size, 4096);
 
+   const uint32_t align = anv_device_get_bo_align(device);
+
    uint32_t gem_handle = anv_gem_create(device, size);
    if (gem_handle == 0)
       return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
@@ -1581,14 +1595,18 @@ anv_device_alloc_bo(struct anv_device *device,
    if (alloc_flags & ANV_BO_ALLOC_FIXED_ADDRESS) {
       new_bo.has_fixed_address = true;
       new_bo.offset = explicit_address;
-   } else {
-      if (!anv_vma_alloc(device, &new_bo, explicit_address)) {
+   } else if (new_bo.flags & EXEC_OBJECT_PINNED) {
+      new_bo.offset = anv_vma_alloc(device, new_bo.size, align,
+                                    alloc_flags, explicit_address);
+      if (new_bo.offset == 0) {
          if (new_bo.map)
             anv_gem_munmap(new_bo.map, size);
          anv_gem_close(device, new_bo.gem_handle);
          return vk_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                           "failed to allocate virtual address for BO");
       }
+   } else {
+      assert(!new_bo.has_client_visible_address);
    }
 
    assert(new_bo.gem_handle);
@@ -1670,11 +1688,25 @@ anv_device_import_bo_from_host_ptr(struct anv_device *device,
    };
 
    assert(client_address == gen_48b_address(client_address));
-   if (!anv_vma_alloc(device, &new_bo, client_address)) {
-      anv_gem_close(device, new_bo.gem_handle);
-      pthread_mutex_unlock(&cache->mutex);
-      return vk_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
-                       "failed to allocate virtual address for BO");
+   if (new_bo.flags & EXEC_OBJECT_PINNED) {
+      /* Gen12 CCS surface addresses need to be 64K aligned. We have no way
+       * of telling what this allocation is for so pick the largest
+       * alignment.
+       */
+      const uint32_t align = device->info.gen >= 12 ? (64 * 1024) :
+                                                      (4 * 1024);
+
+      new_bo.offset = anv_vma_alloc(device, new_bo.size,
+                                    anv_device_get_bo_align(device),
+                                    alloc_flags, client_address);
+      if (new_bo.offset == 0) {
+         anv_gem_close(device, new_bo.gem_handle);
+         pthread_mutex_unlock(&cache->mutex);
+         return vk_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
+                          "failed to allocate virtual address for BO");
+      }
+   } else {
+      assert(!new_bo.has_client_visible_address);
    }
 
    *bo = new_bo;
@@ -1789,11 +1821,18 @@ anv_device_import_bo(struct anv_device *device,
    };
 
    assert(client_address == gen_48b_address(client_address));
-   if (!anv_vma_alloc(device, &new_bo, client_address)) {
-      anv_gem_close(device, new_bo.gem_handle);
-      pthread_mutex_unlock(&cache->mutex);
-      return vk_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
-                       "failed to allocate virtual address for BO");
+   if (new_bo.flags & EXEC_OBJECT_PINNED) {
+      new_bo.offset = anv_vma_alloc(device, new_bo.size,
+                                    anv_device_get_bo_align(device),
+                                    alloc_flags, client_address);
+      if (new_bo.offset == 0) {
+         anv_gem_close(device, new_bo.gem_handle);
+         pthread_mutex_unlock(&cache->mutex);
+         return vk_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
+                          "failed to allocate virtual address for BO");
+      }
+   } else {
+      assert(!new_bo.has_client_visible_address);
    }
 
    *bo = new_bo;
@@ -1875,8 +1914,8 @@ anv_device_release_bo(struct anv_device *device,
    if (bo->map && !bo->from_host_ptr)
       anv_gem_munmap(bo->map, bo->size);
 
-   if (!bo->has_fixed_address)
-      anv_vma_free(device, bo);
+   if ((bo->flags & EXEC_OBJECT_PINNED) && !bo->has_fixed_address)
+      anv_vma_free(device, bo->offset, bo->size);
 
    uint32_t gem_handle = bo->gem_handle;

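Two small conventions recur in the hunks above: the BO size is rounded up to whole 4 KiB pages before the GEM handle is created, and the VMA alignment is now computed once per device by anv_device_get_bo_align(). Here is a compilable sketch of both rules; align_u64() is re-implemented to match its usual power-of-two contract, and bo_align_for_gen() is an invented stand-in that takes device->info.gen as a plain int.

/* Illustrative reimplementation of the two helpers used above; the real
 * align_u64() lives in anv's headers, and bo_align_for_gen() stands in
 * for the new anv_device_get_bo_align(). */
#include <assert.h>
#include <stdint.h>

static uint64_t
align_u64(uint64_t v, uint64_t a)   /* a must be a power of two */
{
   return (v + a - 1) & ~(a - 1);
}

static uint32_t
bo_align_for_gen(int gen)
{
   /* Gen12 CCS surfaces need 64K-aligned addresses; with no way to tell
    * what a BO will be used for, the allocator picks the largest case. */
   return gen >= 12 ? 64 * 1024 : 4096;
}

int main(void)
{
   assert(align_u64(5000, 4096) == 8192);   /* whole pages only */
   assert(bo_align_for_gen(12) == 64 * 1024);
   assert(bo_align_for_gen(9) == 4096);
   return 0;
}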
src/intel/vulkan/anv_device.c

@@ -3201,40 +3201,24 @@ VkResult anv_DeviceWaitIdle(
    return anv_queue_submit_simple_batch(&device->queue, NULL);
 }
 
-bool
-anv_vma_alloc(struct anv_device *device, struct anv_bo *bo,
+uint64_t
+anv_vma_alloc(struct anv_device *device,
+              uint64_t size, uint64_t align,
+              enum anv_bo_alloc_flags alloc_flags,
               uint64_t client_address)
 {
-   const struct gen_device_info *devinfo = &device->info;
-   /* Gen12 CCS surface addresses need to be 64K aligned. We have no way of
-    * telling what this allocation is for so pick the largest alignment.
-    */
-   const uint32_t vma_alignment =
-      devinfo->gen >= 12 ? (64 * 1024) : (4 * 1024);
-
-   if (!(bo->flags & EXEC_OBJECT_PINNED)) {
-      assert(!(bo->has_client_visible_address));
-      return true;
-   }
-
    pthread_mutex_lock(&device->vma_mutex);
 
-   bo->offset = 0;
+   uint64_t addr = 0;
 
-   if (bo->has_client_visible_address) {
-      assert(bo->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS);
+   if (alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) {
       if (client_address) {
          if (util_vma_heap_alloc_addr(&device->vma_cva,
-                                      client_address, bo->size)) {
-            bo->offset = gen_canonical_address(client_address);
+                                      client_address, size)) {
+            addr = client_address;
          }
       } else {
-         uint64_t addr =
-            util_vma_heap_alloc(&device->vma_cva, bo->size, vma_alignment);
-         if (addr) {
-            bo->offset = gen_canonical_address(addr);
-            assert(addr == gen_48b_address(bo->offset));
-         }
+         addr = util_vma_heap_alloc(&device->vma_cva, size, align);
       }
       /* We don't want to fall back to other heaps */
       goto done;
@@ -3242,54 +3226,39 @@ anv_vma_alloc(struct anv_device *device, struct anv_bo *bo,
    assert(client_address == 0);
 
-   if (bo->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) {
-      uint64_t addr =
-         util_vma_heap_alloc(&device->vma_hi, bo->size, vma_alignment);
-      if (addr) {
-         bo->offset = gen_canonical_address(addr);
-         assert(addr == gen_48b_address(bo->offset));
-      }
-   }
+   if (!(alloc_flags & ANV_BO_ALLOC_32BIT_ADDRESS))
+      addr = util_vma_heap_alloc(&device->vma_hi, size, align);
 
-   if (bo->offset == 0) {
-      uint64_t addr =
-         util_vma_heap_alloc(&device->vma_lo, bo->size, vma_alignment);
-      if (addr) {
-         bo->offset = gen_canonical_address(addr);
-         assert(addr == gen_48b_address(bo->offset));
-      }
-   }
+   if (addr == 0)
+      addr = util_vma_heap_alloc(&device->vma_lo, size, align);
 
 done:
    pthread_mutex_unlock(&device->vma_mutex);
 
-   return bo->offset != 0;
+   assert(addr == gen_48b_address(addr));
+   return gen_canonical_address(addr);
 }
 
 void
-anv_vma_free(struct anv_device *device, struct anv_bo *bo)
+anv_vma_free(struct anv_device *device,
+             uint64_t address, uint64_t size)
 {
-   if (!(bo->flags & EXEC_OBJECT_PINNED))
-      return;
-
-   const uint64_t addr_48b = gen_48b_address(bo->offset);
+   const uint64_t addr_48b = gen_48b_address(address);
 
    pthread_mutex_lock(&device->vma_mutex);
 
    if (addr_48b >= LOW_HEAP_MIN_ADDRESS &&
       addr_48b <= LOW_HEAP_MAX_ADDRESS) {
-      util_vma_heap_free(&device->vma_lo, addr_48b, bo->size);
+      util_vma_heap_free(&device->vma_lo, addr_48b, size);
    } else if (addr_48b >= CLIENT_VISIBLE_HEAP_MIN_ADDRESS &&
              addr_48b <= CLIENT_VISIBLE_HEAP_MAX_ADDRESS) {
-      util_vma_heap_free(&device->vma_cva, addr_48b, bo->size);
+      util_vma_heap_free(&device->vma_cva, addr_48b, size);
    } else {
       assert(addr_48b >= HIGH_HEAP_MIN_ADDRESS);
-      util_vma_heap_free(&device->vma_hi, addr_48b, bo->size);
+      util_vma_heap_free(&device->vma_hi, addr_48b, size);
    }
 
    pthread_mutex_unlock(&device->vma_mutex);
-
-   bo->offset = 0;
 }
 
 VkResult anv_AllocateMemory(

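One behavioural detail of the new anv_vma_alloc() above: the heaps are managed in 48-bit address space, the result is asserted to fit in 48 bits, and canonicalization, which sign-extends bit 47 through the upper bits as the hardware expects, now happens exactly once at the return. The following is a self-contained model of the two helpers; the real gen_canonical_address()/gen_48b_address() live in Intel's common GEM header.

/* Standalone model of the 48-bit address helpers used above. Canonical
 * form sign-extends bit 47 into bits 48..63; the 48b form strips that
 * extension again. */
#include <assert.h>
#include <stdint.h>

static uint64_t
canonical_address(uint64_t v)
{
   const int shift = 63 - 47;
   return (uint64_t)((int64_t)(v << shift) >> shift);  /* arithmetic shift */
}

static uint64_t
address_48b(uint64_t v)
{
   const int shift = 63 - 47;
   return (v << shift) >> shift;                       /* logical shift */
}

int main(void)
{
   uint64_t addr = 0x0000900000000000ull;   /* bit 47 set */
   uint64_t canon = canonical_address(addr);
   assert(canon == 0xffff900000000000ull);  /* upper bits sign-extended */
   assert(address_48b(canon) == addr);      /* round-trips */
   /* The new anv_vma_alloc() asserts addr == gen_48b_address(addr)
    * before returning gen_canonical_address(addr). */
   return 0;
}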
src/intel/vulkan/anv_private.h

@@ -1467,9 +1467,12 @@ int anv_gem_syncobj_wait(struct anv_device *device,
                          uint32_t *handles, uint32_t num_handles,
                          int64_t abs_timeout_ns, bool wait_all);
 
-bool anv_vma_alloc(struct anv_device *device, struct anv_bo *bo,
-                   uint64_t client_address);
-void anv_vma_free(struct anv_device *device, struct anv_bo *bo);
+uint64_t anv_vma_alloc(struct anv_device *device,
+                       uint64_t size, uint64_t align,
+                       enum anv_bo_alloc_flags alloc_flags,
+                       uint64_t client_address);
+void anv_vma_free(struct anv_device *device,
+                  uint64_t address, uint64_t size);
 
 struct anv_reloc_list {
    uint32_t num_relocs;
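Since anv_vma_free() now receives only an address and a size, it recovers the owning heap by range-classifying the 48-bit address, as the anv_device.c hunk above shows. Below is a self-contained model of that classification; the heap bounds are placeholders, not the actual LOW_HEAP_*/CLIENT_VISIBLE_HEAP_*/HIGH_HEAP_* values from anv_private.h.

/* Standalone model of anv_vma_free()'s heap classification. The *_MIN/_MAX
 * values below are placeholders, not mesa's real heap layout. */
#include <assert.h>
#include <stdint.h>

#define LOW_HEAP_MIN_ADDRESS            0x000000001000ull
#define LOW_HEAP_MAX_ADDRESS            0x0000bfffffffull
#define CLIENT_VISIBLE_HEAP_MIN_ADDRESS 0x000100000000ull
#define CLIENT_VISIBLE_HEAP_MAX_ADDRESS 0x0001ffffffffull
#define HIGH_HEAP_MIN_ADDRESS           0x000200000000ull

enum heap { HEAP_LO, HEAP_CVA, HEAP_HI };

static enum heap
classify(uint64_t addr_48b)
{
   if (addr_48b >= LOW_HEAP_MIN_ADDRESS && addr_48b <= LOW_HEAP_MAX_ADDRESS)
      return HEAP_LO;
   if (addr_48b >= CLIENT_VISIBLE_HEAP_MIN_ADDRESS &&
       addr_48b <= CLIENT_VISIBLE_HEAP_MAX_ADDRESS)
      return HEAP_CVA;
   assert(addr_48b >= HIGH_HEAP_MIN_ADDRESS);
   return HEAP_HI;
}

int main(void)
{
   assert(classify(0x000000002000ull) == HEAP_LO);
   assert(classify(0x000150000000ull) == HEAP_CVA);
   assert(classify(0x000400000000ull) == HEAP_HI);
   return 0;
}

This is why the new interface can drop the struct anv_bo parameter entirely: the address alone tells the allocator which util_vma_heap the range came from.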