anv: Add support for userptr in Xe KMD
Xe KMD only requires userptrs to be bound to the VM, so the Xe version of gem_create_userptr() reuses the workaround_bo->gem_handle id for all userptr bos. The Xe version of gem_close() makes sure that workaround_bo->gem_handle is not closed when userptr bos are closed.

Because all userptr bos share the same gem_handle, it was also necessary to skip anv_device_lookup_bo() and manually allocate memory for the anv_bo in the host heap, which led to some small changes in anv_device_release_bo() as well.

The remaining changes are the support for VM-binding userptr bos and the gem_vm_bind() call in anv_device_import_bo_from_host_ptr().

Fixes: dEQP-VK.memory.external_memory_host*

Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/23787>
commit 6f88e3befb (parent 5c729cb1b8), committed by Marge Bot
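The core of the Xe userptr path in the diff below is that a bind of host memory carries no GEM handle: obj stays 0 and obj_offset carries the CPU address of the allocation. As a rough, hedged illustration only, the stand-alone sketch here shows what such a bind boils down to, assuming the pre-stabilization Xe uAPI names used at the time of this commit (XE_VM_BIND_OP_MAP_USERPTR, the bind.* fields, DRM_IOCTL_XE_VM_BIND) and an assumed header path; the syncobj that the real xe_gem_vm_bind_op() attaches, and fd/vm_id/gpu_addr setup, are left out.

#include <stdint.h>
#include <sys/ioctl.h>
#include "drm-uapi/xe_drm.h"   /* assumed include path for the Xe uAPI header */

/* Hypothetical helper, not part of the commit: map a host allocation into an
 * Xe VM at gpu_addr. A userptr bind passes no GEM handle (obj = 0); the host
 * pointer goes in obj_offset and the op is XE_VM_BIND_OP_MAP_USERPTR.
 */
static int
bind_host_ptr(int fd, uint32_t vm_id, void *host_ptr,
              uint64_t size, uint64_t gpu_addr)
{
   struct drm_xe_vm_bind args = {
      .vm_id = vm_id,
      .num_binds = 1,
      .bind.obj = 0,
      .bind.obj_offset = (uintptr_t)host_ptr,
      .bind.range = size,
      .bind.addr = gpu_addr,
      .bind.op = XE_VM_BIND_OP_MAP_USERPTR,
   };

   /* The real code (xe_gem_vm_bind_op() below) also passes a drm_xe_sync so
    * it can wait for the bind to complete; omitted here for brevity.
    */
   return ioctl(fd, DRM_IOCTL_XE_VM_BIND, &args);
}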
@@ -1640,7 +1640,18 @@ anv_device_import_bo_from_host_ptr(struct anv_device *device,
 
    pthread_mutex_lock(&cache->mutex);
 
-   struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
+   struct anv_bo *bo = NULL;
+   if (device->info->kmd_type == INTEL_KMD_TYPE_XE) {
+      bo = vk_zalloc(&device->vk.alloc, sizeof(*bo), 8,
+                     VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
+      if (!bo) {
+         pthread_mutex_unlock(&cache->mutex);
+         return VK_ERROR_OUT_OF_HOST_MEMORY;
+      }
+   } else {
+      bo = anv_device_lookup_bo(device, gem_handle);
+   }
+
    if (bo->refcount > 0) {
       /* VK_EXT_external_memory_host doesn't require handling importing the
        * same pointer twice at the same time, but we don't get in the way. If
@@ -1693,6 +1704,13 @@ anv_device_import_bo_from_host_ptr(struct anv_device *device,
          return result;
       }
 
+      if (device->kmd_backend->gem_vm_bind(device, &new_bo)) {
+         VkResult res = vk_errorf(device, VK_ERROR_UNKNOWN, "vm bind failed: %m");
+         anv_bo_vma_free(device, &new_bo);
+         pthread_mutex_unlock(&cache->mutex);
+         return res;
+      }
+
       *bo = new_bo;
    }
 
@@ -1909,7 +1927,10 @@ anv_device_release_bo(struct anv_device *device,
                       struct anv_bo *bo)
 {
    struct anv_bo_cache *cache = &device->bo_cache;
-   assert(anv_device_lookup_bo(device, bo->gem_handle) == bo);
+   const bool bo_is_xe_userptr = device->info->kmd_type == INTEL_KMD_TYPE_XE &&
+                                 bo->from_host_ptr;
+   assert(bo_is_xe_userptr ||
+          anv_device_lookup_bo(device, bo->gem_handle) == bo);
 
    /* Try to decrement the counter but don't go below one. If this succeeds
     * then the refcount has been decremented and we are not the last
@@ -1948,6 +1969,9 @@ anv_device_release_bo(struct anv_device *device,
     */
    struct anv_bo old_bo = *bo;
 
-   memset(bo, 0, sizeof(*bo));
+   if (bo_is_xe_userptr)
+      vk_free(&device->vk.alloc, bo);
+   else
+      memset(bo, 0, sizeof(*bo));
 
    anv_bo_finish(device, &old_bo);
@@ -404,6 +404,7 @@ struct anv_bo {
     */
    struct util_vma_heap *vma_heap;
 
+   /* All userptr bos in Xe KMD has gem_handle set to workaround_bo->gem_handle */
    uint32_t gem_handle;
 
    uint32_t refcount;
@@ -94,6 +94,15 @@ xe_gem_vm_bind_op(struct anv_device *device, struct anv_bo *bo, uint32_t op)
    if (ret)
       return ret;
 
+   uint32_t obj = op == XE_VM_BIND_OP_UNMAP ? 0 : bo->gem_handle;
+   uint64_t obj_offset = 0;
+   if (bo->from_host_ptr) {
+      obj = 0;
+      obj_offset = (uintptr_t)bo->map;
+      if (op == XE_VM_BIND_OP_MAP)
+         op = XE_VM_BIND_OP_MAP_USERPTR;
+   }
+
    struct drm_xe_sync sync = {
       .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL,
       .handle = syncobj_handle,
@@ -101,8 +110,8 @@ xe_gem_vm_bind_op(struct anv_device *device, struct anv_bo *bo, uint32_t op)
    struct drm_xe_vm_bind args = {
       .vm_id = device->vm_id,
       .num_binds = 1,
-      .bind.obj = op == XE_VM_BIND_OP_UNMAP ? 0 : bo->gem_handle,
-      .bind.obj_offset = 0,
+      .bind.obj = obj,
+      .bind.obj_offset = obj_offset,
       .bind.range = bo->actual_size,
       .bind.addr = intel_48b_address(bo->offset),
       .bind.op = op,
@@ -141,7 +150,11 @@ static int xe_gem_vm_unbind(struct anv_device *device, struct anv_bo *bo)
 static uint32_t
 xe_gem_create_userptr(struct anv_device *device, void *mem, uint64_t size)
 {
-   return 0;
+   /* We return the workaround BO gem_handle here, because Xe doesn't
+    * create handles for userptrs. But we still need to make it look
+    * to the rest of Anv that the operation succeeded.
+    */
+   return device->workaround_bo->gem_handle;
 }
 
 const struct anv_kmd_backend *