venus: make sure gem_handle and vn_renderer_bo are 1:1
When two vn_renderer_bo's share the same gem_handle, destroying one of them would invalidate the other.

Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
Reviewed-by: Yiwei Zhang <zzyiwei@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/10437>
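For context, a sketch of the failure mode the message describes, written against plain libdrm (the function below is illustrative, not part of the driver): the kernel deduplicates PRIME imports per DRM fd, so importing the same dma-buf twice yields the same GEM handle, and whichever bo closes the handle first invalidates the other.

#include <stdint.h>
#include <xf86drm.h>
#include <drm.h>

/* Illustrative only: reproduces the aliasing this commit guards against.
 * Assumes drm_fd is an open DRM device and dmabuf_fd an importable
 * dma-buf; error handling omitted.
 */
static void
aliasing_bug(int drm_fd, int dmabuf_fd)
{
   uint32_t handle_a, handle_b;

   drmPrimeFDToHandle(drm_fd, dmabuf_fd, &handle_a);
   drmPrimeFDToHandle(drm_fd, dmabuf_fd, &handle_b);
   /* handle_a == handle_b: one kernel object, two unsuspecting owners */

   struct drm_gem_close args = { .handle = handle_a };
   drmIoctl(drm_fd, DRM_IOCTL_GEM_CLOSE, &args);
   /* handle_b now dangles; any use of it fails or, worse, hits a
    * recycled handle
    */
}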
src/virtio/vulkan/vn_renderer.h

@@ -188,7 +188,7 @@ struct vn_renderer_bo_ops {
                     VkExternalMemoryHandleTypeFlags external_handles,
                     struct vn_renderer_bo **out_bo);
 
-   void (*destroy)(struct vn_renderer *renderer, struct vn_renderer_bo *bo);
+   bool (*destroy)(struct vn_renderer *renderer, struct vn_renderer_bo *bo);
 
    int (*export_dmabuf)(struct vn_renderer *renderer,
                         struct vn_renderer_bo *bo);
@@ -358,7 +358,7 @@ vn_renderer_bo_create_from_dmabuf(
    if (result != VK_SUCCESS)
       return result;
 
-   assert(atomic_load(&bo->refcount) == 1);
+   assert(atomic_load(&bo->refcount) >= 1);
    assert(bo->res_id);
    assert(!bo->mmap_size || bo->mmap_size >= size);
 
@@ -385,8 +385,7 @@ vn_renderer_bo_unref(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
 
    if (old == 1) {
       atomic_thread_fence(memory_order_acquire);
-      renderer->bo_ops.destroy(renderer, bo);
-      return true;
+      return renderer->bo_ops.destroy(renderer, bo);
    }
 
    return false;
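These hunks change the destroy hook's contract: it may now refuse. A condensed, compilable sketch of the release/acquire refcount drop and the new bool-returning destroy (simplified types; backend_destroy is a hypothetical stand-in for bo_ops.destroy):

#include <stdatomic.h>
#include <stdbool.h>

struct bo {
   atomic_int refcount;
};

/* stand-in for bo_ops.destroy: a real backend re-checks the refcount
 * under its import lock (see virtgpu_bo_destroy below) and may refuse
 */
static bool
backend_destroy(struct bo *bo)
{
   return atomic_load_explicit(&bo->refcount, memory_order_relaxed) <= 0;
}

/* mirrors vn_renderer_bo_unref: returns true only if the bo really died */
static bool
bo_unref(struct bo *bo)
{
   const int old =
      atomic_fetch_sub_explicit(&bo->refcount, 1, memory_order_release);

   if (old == 1) {
      /* pairs with the release above so prior writes are visible */
      atomic_thread_fence(memory_order_acquire);
      /* destroy may return false if an importer revived the bo */
      return backend_destroy(bo);
   }

   return false;
}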
src/virtio/vulkan/vn_renderer_virtgpu.c

@@ -108,6 +108,8 @@ struct virtgpu {
     */
    struct util_sparse_array shmem_array;
    struct util_sparse_array bo_array;
+
+   mtx_t dmabuf_import_mutex;
 };
 
 #ifdef SIMULATE_SYNCOBJ
@@ -1097,15 +1099,32 @@ virtgpu_bo_export_dmabuf(struct vn_renderer *renderer,
              : -1;
 }
 
-static void
+static bool
 virtgpu_bo_destroy(struct vn_renderer *renderer, struct vn_renderer_bo *_bo)
 {
    struct virtgpu *gpu = (struct virtgpu *)renderer;
    struct virtgpu_bo *bo = (struct virtgpu_bo *)_bo;
 
+   mtx_lock(&gpu->dmabuf_import_mutex);
+
+   /* Check the refcount again after the import lock is grabbed. Yes, we use
+    * the double-checked locking anti-pattern.
+    */
+   if (atomic_load_explicit(&bo->base.refcount, memory_order_relaxed) > 0) {
+      mtx_unlock(&gpu->dmabuf_import_mutex);
+      return false;
+   }
+
    if (bo->base.mmap_ptr)
       munmap(bo->base.mmap_ptr, bo->base.mmap_size);
    virtgpu_ioctl_gem_close(gpu, bo->gem_handle);
+
+   /* set gem_handle to 0 to indicate that the bo is invalid */
+   bo->gem_handle = 0;
+
+   mtx_unlock(&gpu->dmabuf_import_mutex);
+
+   return true;
 }
 
 static uint32_t
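Why the second check: between the unref path observing the refcount hit zero and destroy taking the import lock, a concurrent import can revive the bo, hence the re-check the in-tree comment calls double-checked locking. A self-contained sketch of the shape (hypothetical names; a real caller would mtx_init the mutex first):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <threads.h>

struct bo {
   atomic_int refcount;
   uint32_t gem_handle;
};

/* Race the re-check closes:
 *
 *   unref thread                      import thread
 *   ------------                      -------------
 *   refcount 1 -> 0                   mtx_lock(&import_mutex)
 *   call destroy                      refcount 0 -> 1 (bo revived)
 *     mtx_lock blocks...              mtx_unlock(&import_mutex)
 *     lock acquired
 *     re-check sees refcount > 0
 *     -> return false, bo survives
 */
static mtx_t import_mutex; /* initialized elsewhere with mtx_init() */

static bool
sketch_destroy(struct bo *bo)
{
   mtx_lock(&import_mutex);
   if (atomic_load_explicit(&bo->refcount, memory_order_relaxed) > 0) {
      mtx_unlock(&import_mutex);
      return false; /* revived; caller must not treat the bo as freed */
   }
   bo->gem_handle = 0; /* poison: slot no longer owns a kernel handle */
   mtx_unlock(&import_mutex);
   return true;
}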
@@ -1135,6 +1154,8 @@ virtgpu_bo_create_from_dmabuf(struct vn_renderer *renderer,
    struct drm_virtgpu_resource_info info;
    uint32_t gem_handle = 0;
 
+   mtx_lock(&gpu->dmabuf_import_mutex);
+
    gem_handle = virtgpu_ioctl_prime_fd_to_handle(gpu, fd);
    if (!gem_handle)
       goto fail;
@@ -1164,15 +1185,29 @@ virtgpu_bo_create_from_dmabuf(struct vn_renderer *renderer,
    }
 
    struct virtgpu_bo *bo = util_sparse_array_get(&gpu->bo_array, gem_handle);
-   *bo = (struct virtgpu_bo){
-      .base = {
-         .refcount = 1,
-         .res_id = info.res_handle,
-         .mmap_size = size,
-      },
-      .gem_handle = gem_handle,
-      .blob_flags = blob_flags,
-   };
+   if (bo->gem_handle == gem_handle) {
+      if (bo->base.mmap_size < size)
+         goto fail;
+      if (blob_flags & ~bo->blob_flags)
+         goto fail;
+
+      /* we can't use vn_renderer_bo_ref as the refcount may drop to 0
+       * temporarily before virtgpu_bo_destroy grabs the lock
+       */
+      atomic_fetch_add_explicit(&bo->base.refcount, 1, memory_order_relaxed);
+   } else {
+      *bo = (struct virtgpu_bo){
+         .base = {
+            .refcount = 1,
+            .res_id = info.res_handle,
+            .mmap_size = size,
+         },
+         .gem_handle = gem_handle,
+         .blob_flags = blob_flags,
+      };
+   }
+
+   mtx_unlock(&gpu->dmabuf_import_mutex);
 
    *out_bo = &bo->base;
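This is the hunk that makes gem_handle and vn_renderer_bo 1:1: the bo lives in a sparse array indexed by its gem handle, so a repeat import of the same dma-buf lands on the same slot and is revived instead of re-created. A simplified sketch of the lookup-or-revive step (the flat table is a toy stand-in for Mesa's util_sparse_array; assume the import mutex is held, as above):

#include <stdatomic.h>
#include <stdint.h>

struct bo {
   atomic_int refcount;
   uint32_t gem_handle;
};

/* toy stand-in for util_sparse_array_get(): returns a stable,
 * zero-initialized slot per handle (ignores collisions)
 */
static struct bo bo_table[1024];

static struct bo *
bo_slot(uint32_t gem_handle)
{
   return &bo_table[gem_handle % 1024];
}

static struct bo *
import_bo(uint32_t gem_handle)
{
   struct bo *bo = bo_slot(gem_handle);

   if (bo->gem_handle == gem_handle) {
      /* handle already wrapped: revive it with a plain fetch_add, since
       * the count may be 0 while a racing destroy waits on the mutex
       */
      atomic_fetch_add_explicit(&bo->refcount, 1, memory_order_relaxed);
   } else {
      /* first import of this handle: initialize the slot */
      *bo = (struct bo){ .refcount = 1, .gem_handle = gem_handle };
   }

   return bo;
}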
@@ -1181,6 +1216,7 @@ virtgpu_bo_create_from_dmabuf(struct vn_renderer *renderer,
 fail:
    if (gem_handle)
       virtgpu_ioctl_gem_close(gpu, gem_handle);
+   mtx_unlock(&gpu->dmabuf_import_mutex);
    return VK_ERROR_INVALID_EXTERNAL_HANDLE;
 }
 
@@ -1331,6 +1367,8 @@ virtgpu_destroy(struct vn_renderer *renderer,
    if (gpu->fd >= 0)
       close(gpu->fd);
 
+   mtx_destroy(&gpu->dmabuf_import_mutex);
+
    util_sparse_array_finish(&gpu->shmem_array);
    util_sparse_array_finish(&gpu->bo_array);
 
@@ -1493,6 +1531,8 @@ virtgpu_init(struct virtgpu *gpu)
                           1024);
    util_sparse_array_init(&gpu->bo_array, sizeof(struct virtgpu_bo), 1024);
 
+   mtx_init(&gpu->dmabuf_import_mutex, mtx_plain);
+
    VkResult result = virtgpu_open(gpu);
    if (result == VK_SUCCESS)
       result = virtgpu_init_params(gpu);
src/virtio/vulkan/vn_renderer_vtest.c

@@ -729,7 +729,7 @@ vtest_bo_export_dmabuf(struct vn_renderer *renderer,
    return shareable ? os_dupfd_cloexec(bo->res_fd) : -1;
 }
 
-static void
+static bool
 vtest_bo_destroy(struct vn_renderer *renderer, struct vn_renderer_bo *_bo)
 {
    struct vtest *vtest = (struct vtest *)renderer;
@@ -743,6 +743,8 @@ vtest_bo_destroy(struct vn_renderer *renderer, struct vn_renderer_bo *_bo)
    mtx_lock(&vtest->sock_mutex);
    vtest_vcmd_resource_unref(vtest, bo->base.res_id);
    mtx_unlock(&vtest->sock_mutex);
+
+   return true;
 }
 
 static uint32_t
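Note on the vtest hunks: vtest_bo_destroy can return true unconditionally because, as far as this diff shows, the vtest backend has no dma-buf import path that could revive a bo whose refcount reached zero; only the virtgpu backend needs the double-checked refcount dance.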