anv: Add helpers in anv_allocator for mapping BOs

Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/13610>
Author:    Jason Ekstrand
Date:      2021-10-30 17:02:41 -05:00
Committed: Marge Bot
Parent:    90ac06e502
Commit:    13fe43714c

3 changed files with 72 additions and 30 deletions
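For orientation, here is a minimal sketch of how the two helpers introduced below are meant to be used together. The caller is hypothetical and not part of the patch; only the anv_device_map_bo()/anv_device_unmap_bo() signatures come from the diff:

/* Hypothetical caller, for illustration only.  anv_device_map_bo() wraps
 * anv_gem_mmap(), records the mapping in bo->map / bo->map_size and returns
 * a VkResult; anv_device_unmap_bo() undoes it. */
static VkResult
upload_data_to_bo(struct anv_device *device, struct anv_bo *bo,
                  const void *data, size_t size)
{
   void *map;
   VkResult result = anv_device_map_bo(device, bo, 0 /* offset */, size,
                                       0 /* gem_flags */, &map);
   if (result != VK_SUCCESS)
      return result;

   memcpy(map, data, size);

   anv_device_unmap_bo(device, bo);
   return VK_SUCCESS;
}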

--- a/src/intel/vulkan/anv_allocator.c
+++ b/src/intel/vulkan/anv_allocator.c

@@ -1608,7 +1608,7 @@ anv_bo_finish(struct anv_device *device, struct anv_bo *bo)
    anv_vma_free(device, bo->offset, bo->size + bo->_ccs_size);
 
    if (bo->map && !bo->from_host_ptr)
-      anv_gem_munmap(device, bo->map, bo->size);
+      anv_device_unmap_bo(device, bo);
 
    assert(bo->gem_handle != 0);
    anv_gem_close(device, bo->gem_handle);
@@ -1719,11 +1719,10 @@ anv_device_alloc_bo(struct anv_device *device,
    };
 
    if (alloc_flags & ANV_BO_ALLOC_MAPPED) {
-      new_bo.map = anv_gem_mmap(device, new_bo.gem_handle, 0, size, 0);
-      if (new_bo.map == MAP_FAILED) {
+      VkResult result = anv_device_map_bo(device, &new_bo, 0, size, 0, NULL);
+      if (unlikely(result != VK_SUCCESS)) {
          anv_gem_close(device, new_bo.gem_handle);
-         return vk_errorf(device, VK_ERROR_OUT_OF_HOST_MEMORY,
-                          "mmap failed: %m");
+         return result;
       }
    }
 
@@ -1778,6 +1777,46 @@ anv_device_alloc_bo(struct anv_device *device,
    return VK_SUCCESS;
 }
 
+VkResult
+anv_device_map_bo(struct anv_device *device,
+                  struct anv_bo *bo,
+                  uint64_t offset,
+                  size_t size,
+                  uint32_t gem_flags,
+                  void **map_out)
+{
+   assert(!bo->is_wrapper && !bo->from_host_ptr);
+   assert(size > 0);
+   assert(bo->map == NULL && bo->map_size == 0);
+
+   void *map = anv_gem_mmap(device, bo->gem_handle, offset, size, gem_flags);
+   if (unlikely(map == MAP_FAILED))
+      return vk_errorf(device, VK_ERROR_MEMORY_MAP_FAILED,
+                       "mmap failed: %m");
+
+   assert(map != NULL);
+
+   bo->map = map;
+   bo->map_size = size;
+
+   if (map_out)
+      *map_out = map;
+
+   return VK_SUCCESS;
+}
+
+void
+anv_device_unmap_bo(struct anv_device *device,
+                    struct anv_bo *bo)
+{
+   assert(!bo->is_wrapper && !bo->from_host_ptr);
+   assert(bo->map != NULL && bo->map_size > 0);
+
+   anv_gem_munmap(device, bo->map, bo->map_size);
+   bo->map = NULL;
+   bo->map_size = 0;
+}
+
 VkResult
 anv_device_import_bo_from_host_ptr(struct anv_device *device,
                                    void *host_ptr, uint32_t size,
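The asserts in anv_device_map_bo() mean a BO carries at most one mapping at a time, so mapping a different range requires unmapping first. A sketch under that reading; the helper below is hypothetical and not part of the patch:

/* Illustration only: anv_device_map_bo() asserts bo->map == NULL, so any
 * existing mapping has to be torn down before a new range can be mapped. */
static VkResult
remap_bo_range(struct anv_device *device, struct anv_bo *bo,
               uint64_t offset, size_t size, void **map_out)
{
   if (bo->map != NULL)
      anv_device_unmap_bo(device, bo);   /* clears bo->map and bo->map_size */

   return anv_device_map_bo(device, bo, offset, size, 0 /* gem_flags */,
                            map_out);
}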

--- a/src/intel/vulkan/anv_device.c
+++ b/src/intel/vulkan/anv_device.c

@@ -22,6 +22,7 @@
  */
 
 #include <assert.h>
+#include <inttypes.h>
 #include <stdbool.h>
 #include <string.h>
 #ifdef MAJOR_IN_MKDEV
@@ -3678,8 +3679,6 @@ VkResult anv_AllocateMemory(
       return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
 
    mem->type = mem_type;
-   mem->map = NULL;
-   mem->map_size = 0;
    mem->ahw = NULL;
    mem->host_ptr = NULL;
@@ -3983,9 +3982,6 @@ void anv_FreeMemory(
    list_del(&mem->link);
    pthread_mutex_unlock(&device->mutex);
 
-   if (mem->map)
-      anv_UnmapMemory(_device, _mem);
-
    p_atomic_add(&device->physical->memory.heaps[mem->type->heapIndex].used,
                 -mem->bo->size);
@@ -4066,16 +4062,14 @@ VkResult anv_MapMemory(
    /* Let's map whole pages */
    map_size = align_u64(map_size, 4096);
 
-   void *map = anv_gem_mmap(device, mem->bo->gem_handle,
-                            map_offset, map_size, gem_flags);
-   if (map == MAP_FAILED)
-      return vk_error(device, VK_ERROR_MEMORY_MAP_FAILED);
+   void *map;
+   VkResult result = anv_device_map_bo(device, mem->bo, map_offset,
+                                       map_size, gem_flags, &map);
+   if (result != VK_SUCCESS)
+      return result;
 
-   mem->map = map;
-   mem->map_size = map_size;
    mem->map_delta = (offset - map_offset);
-   *ppData = mem->map + mem->map_delta;
+   *ppData = map + mem->map_delta;
 
    return VK_SUCCESS;
 }
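To make the offset handling above concrete: map_offset is computed earlier in anv_MapMemory() and is not shown in this hunk, so it is assumed here to be the requested offset rounded down to a 4 KiB page:

/* Illustration only, not part of the patch: how a user offset turns into a
 * page-aligned mapping plus a delta. */
static void
example_map_math(void)
{
   uint64_t offset     = 4097;                 /* offset requested by the app */
   uint64_t map_offset = offset & ~4095ull;    /* 4096: where the map starts  */
   uint64_t map_delta  = offset - map_offset;  /* 1: stored in mem->map_delta */

   /* map_size is rounded up to whole pages with align_u64(map_size, 4096)
    * and the pointer returned to the app is map + map_delta. */
   (void)map_delta;
}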
@@ -4090,10 +4084,8 @@ void anv_UnmapMemory(
    if (mem == NULL || mem->host_ptr)
       return;
 
-   anv_gem_munmap(device, mem->map, mem->map_size);
+   anv_device_unmap_bo(device, mem->bo);
 
-   mem->map = NULL;
-   mem->map_size = 0;
    mem->map_delta = 0;
 }
@@ -4105,14 +4097,14 @@ clflush_mapped_ranges(struct anv_device *device,
    for (uint32_t i = 0; i < count; i++) {
       ANV_FROM_HANDLE(anv_device_memory, mem, ranges[i].memory);
 
       uint64_t map_offset = ranges[i].offset + mem->map_delta;
-      if (map_offset >= mem->map_size)
+      if (map_offset >= mem->bo->map_size)
          continue;
 
       if (mem->type->propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)
         continue;
 
-      intel_clflush_range(mem->map + map_offset,
-                          MIN2(ranges[i].size, mem->map_size - map_offset));
+      intel_clflush_range(mem->bo->map + map_offset,
+                          MIN2(ranges[i].size, mem->bo->map_size - map_offset));
    }
 }
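A worked example of the clamping above, with made-up values:

/* Illustration only, not part of the patch: the flush range is clamped so it
 * never runs past the end of the BO mapping. */
static void
example_clflush_clamp(void)
{
   uint64_t map_delta    = 1;        /* from anv_MapMemory()            */
   uint64_t range_offset = 8192;     /* ranges[i].offset                */
   uint64_t range_size   = 16384;    /* ranges[i].size                  */
   uint64_t bo_map_size  = 12288;    /* hypothetical mem->bo->map_size  */

   uint64_t map_offset = range_offset + map_delta;              /* 8193 */
   uint64_t flush_len  = range_size < bo_map_size - map_offset ?
                         range_size : bo_map_size - map_offset; /* 4095 */
   /* intel_clflush_range(mem->bo->map + 8193, 4095) only touches bytes
    * that are actually mapped. */
   (void)flush_len;
}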

--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h

@@ -452,12 +452,17 @@ struct anv_bo {
    /** Size of the buffer not including implicit aux */
    uint64_t size;
 
-   /* Map for internally mapped BOs.
+   /* Map for mapped BOs.
     *
-    * If ANV_BO_WRAPPER is set in flags, map points to the wrapped BO.
+    * If ANV_BO_ALLOC_MAPPED is set in flags, this is the map for the whole
+    * BO. If ANV_BO_WRAPPER is set in flags, map points to the wrapped BO.
+    * Otherwise, this is the map for the currently mapped range mapped via
+    * vkMapMemory().
     */
    void *map;
 
+   size_t map_size;
+
    /** Size of the implicit CCS range at the end of the buffer
     *
     * On Gfx12, CCS data is always a direct 1/256 scale-down. A single 64K
@@ -1379,6 +1384,14 @@ VkResult anv_device_alloc_bo(struct anv_device *device,
                              enum anv_bo_alloc_flags alloc_flags,
                              uint64_t explicit_address,
                              struct anv_bo **bo);
+VkResult anv_device_map_bo(struct anv_device *device,
+                           struct anv_bo *bo,
+                           uint64_t offset,
+                           size_t size,
+                           uint32_t gem_flags,
+                           void **map_out);
+void anv_device_unmap_bo(struct anv_device *device,
+                         struct anv_bo *bo);
 VkResult anv_device_import_bo_from_host_ptr(struct anv_device *device,
                                             void *host_ptr, uint32_t size,
                                             enum anv_bo_alloc_flags alloc_flags,
@@ -1774,11 +1787,9 @@ struct anv_device_memory {
    struct anv_bo * bo;
    const struct anv_memory_type * type;
 
-   VkDeviceSize map_size;
-   void * map;
-
-   /* The map, from the user PoV is map + map_delta */
-   uint32_t map_delta;
+   /* The map, from the user PoV is bo->map + map_delta */
+   uint64_t map_delta;
 
    /* If set, we are holding reference to AHardwareBuffer
     * which we must release when memory is freed.