vulkan,anv: Auto-detect syncobj features
Instead of having a bunch of const vk_sync_types, one for each permutation of vk_drm_syncobj capabilities, provide a vk_drm_syncobj_get_type() helper that auto-detects the features supported by the kernel. If a driver can't support a feature for some reason (i915 got timeline support very late, for instance), it can always mask off the feature bits it doesn't want.

Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Acked-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/13427>
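The anv hunks below show the intended usage pattern. As a minimal sketch of the same idea outside anv (the example_physical_device struct and has_timeline_submit flag are hypothetical stand-ins, not part of the common runtime), a driver adopting the helper might do:

/* Sketch only: probe syncobj features once, then mask off what the driver
 * cannot use.  Mirrors the anv change in this commit. */
#include <stdbool.h>
#include <stddef.h>

#include "vk_drm_syncobj.h"

struct example_physical_device {
   struct vk_sync_type sync_syncobj_type;
   const struct vk_sync_type *sync_types[2];
};

static void
example_init_sync_types(struct example_physical_device *pdevice,
                        int drm_fd, bool has_timeline_submit)
{
   /* Probe the DRM device once; only the feature bits the kernel actually
    * supports are set in the returned vk_sync_type. */
   pdevice->sync_syncobj_type = vk_drm_syncobj_get_type(drm_fd);

   /* A driver that cannot use a detected feature (e.g. no timeline
    * submission path) simply masks the bit off its local copy. */
   if (!has_timeline_submit)
      pdevice->sync_syncobj_type.features &= ~VK_SYNC_FEATURE_TIMELINE;

   pdevice->sync_types[0] = &pdevice->sync_syncobj_type;
   pdevice->sync_types[1] = NULL;
}

Since vk_drm_syncobj_get_type() returns the vk_sync_type by value, masking bits only changes the driver's private copy, never a shared const table.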
@@ -172,6 +172,9 @@ static void
 get_device_extensions(const struct anv_physical_device *device,
                       struct vk_device_extension_table *ext)
 {
+   const bool has_syncobj_wait =
+      (device->sync_syncobj_type.features & VK_SYNC_FEATURE_CPU_WAIT) != 0;
+
    *ext = (struct vk_device_extension_table) {
       .KHR_8bit_storage = device->info.ver >= 8,
       .KHR_16bit_storage = device->info.ver >= 8,
@@ -186,8 +189,8 @@ get_device_extensions(const struct anv_physical_device *device,
       .KHR_device_group = true,
       .KHR_draw_indirect_count = true,
       .KHR_driver_properties = true,
-      .KHR_external_fence = device->has_syncobj_wait,
-      .KHR_external_fence_fd = device->has_syncobj_wait,
+      .KHR_external_fence = has_syncobj_wait,
+      .KHR_external_fence_fd = has_syncobj_wait,
       .KHR_external_memory = true,
       .KHR_external_memory_fd = true,
       .KHR_external_semaphore = true,
@@ -871,9 +874,6 @@ anv_physical_device_try_create(struct anv_instance *instance,

    device->has_exec_async = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_ASYNC);
    device->has_exec_capture = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_CAPTURE);
-   device->has_syncobj_wait = intel_gem_supports_syncobj_wait(fd);
-   device->has_syncobj_wait_available =
-      anv_gem_get_drm_cap(fd, DRM_CAP_SYNCOBJ_TIMELINE) != 0;

    /* Start with medium; sorted low to high */
    const int priorities[] = {
@@ -908,19 +908,20 @@ anv_physical_device_try_create(struct anv_instance *instance,
    device->has_exec_timeline = false;

    unsigned st_idx = 0;
-   if (device->has_syncobj_wait) {
-      device->sync_types[st_idx++] = &vk_drm_binary_syncobj_type;
-   } else {
-      device->sync_types[st_idx++] = &vk_drm_binary_syncobj_no_wait_type;
-      device->sync_types[st_idx++] = &anv_bo_sync_type;
-   }
+   device->sync_syncobj_type = vk_drm_syncobj_get_type(fd);
+   if (!device->has_exec_timeline)
+      device->sync_syncobj_type.features &= ~VK_SYNC_FEATURE_TIMELINE;
+   device->sync_types[st_idx++] = &device->sync_syncobj_type;
+
+   if (!(device->sync_syncobj_type.features & VK_SYNC_FEATURE_CPU_WAIT))
+      device->sync_types[st_idx++] = &anv_bo_sync_type;

-   if (device->has_syncobj_wait_available && device->has_exec_timeline) {
-      device->sync_types[st_idx++] = &vk_drm_timeline_syncobj_type;
-   } else {
+   if (!(device->sync_syncobj_type.features & VK_SYNC_FEATURE_TIMELINE)) {
       device->sync_timeline_type = vk_sync_timeline_get_type(&anv_bo_sync_type);
       device->sync_types[st_idx++] = &device->sync_timeline_type.sync;
    }

    device->sync_types[st_idx++] = NULL;
    assert(st_idx <= ARRAY_SIZE(device->sync_types));
    device->vk.supported_sync_types = device->sync_types;
@@ -318,17 +318,6 @@ anv_gem_get_param(int fd, uint32_t param)
    return 0;
 }

-uint64_t
-anv_gem_get_drm_cap(int fd, uint32_t capability)
-{
-   struct drm_get_cap cap = {
-      .capability = capability,
-   };
-
-   intel_ioctl(fd, DRM_IOCTL_GET_CAP, &cap);
-   return cap.value;
-}
-
 bool
 anv_gem_has_context_priority(int fd, int priority)
 {
@@ -137,12 +137,6 @@ anv_gem_get_param(int fd, uint32_t param)
    unreachable("Unused");
 }

-uint64_t
-anv_gem_get_drm_cap(int fd, uint32_t capability)
-{
-   return 0;
-}
-
 int
 anv_gem_create_context(struct anv_device *device)
 {
@@ -918,8 +918,6 @@ struct anv_physical_device {
    int cmd_parser_version;
    bool has_exec_async;
    bool has_exec_capture;
-   bool has_syncobj_wait;
-   bool has_syncobj_wait_available;
    int max_context_priority;
    bool has_context_isolation;
    bool has_mmap_offset;
@@ -976,6 +974,7 @@ struct anv_physical_device {
    uint8_t driver_uuid[VK_UUID_SIZE];
    uint8_t device_uuid[VK_UUID_SIZE];

+   struct vk_sync_type sync_syncobj_type;
    struct vk_sync_timeline_type sync_timeline_type;
    const struct vk_sync_type * sync_types[4];

@@ -1377,7 +1376,6 @@ int anv_gem_set_context_param(int fd, int context, uint32_t param,
 int anv_gem_get_context_param(int fd, int context, uint32_t param,
                               uint64_t *value);
 int anv_gem_get_param(int fd, uint32_t param);
-uint64_t anv_gem_get_drm_cap(int fd, uint32_t capability);
 int anv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle);
 int anv_gem_context_get_reset_stats(int fd, int context,
                                     uint32_t *active, uint32_t *pending);
@@ -39,14 +39,14 @@ to_drm_syncobj(struct vk_sync *sync)
 }

 static VkResult
-vk_drm_binary_syncobj_init(struct vk_device *device,
-                           struct vk_sync *sync,
-                           uint64_t initial_value)
+vk_drm_syncobj_init(struct vk_device *device,
+                    struct vk_sync *sync,
+                    uint64_t initial_value)
 {
    struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);

    uint32_t flags = 0;
-   if (initial_value)
+   if (!(sync->flags & VK_SYNC_IS_TIMELINE) && initial_value)
       flags |= DRM_SYNCOBJ_CREATE_SIGNALED;

    assert(device->drm_fd >= 0);
@@ -56,35 +56,7 @@ vk_drm_binary_syncobj_init(struct vk_device *device,
                        "DRM_IOCTL_SYNCOBJ_CREATE failed: %m");
    }

-   return VK_SUCCESS;
-}
-
-static void
-vk_drm_syncobj_finish(struct vk_device *device,
-                      struct vk_sync *sync)
-{
-   struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
-
-   assert(device->drm_fd >= 0);
-   ASSERTED int err = drmSyncobjDestroy(device->drm_fd, sobj->syncobj);
-   assert(err == 0);
-}
-
-static VkResult
-vk_drm_timeline_syncobj_init(struct vk_device *device,
-                             struct vk_sync *sync,
-                             uint64_t initial_value)
-{
-   struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
-
-   assert(device->drm_fd >= 0);
-   int err = drmSyncobjCreate(device->drm_fd, 0, &sobj->syncobj);
-   if (err < 0) {
-      return vk_errorf(device, VK_ERROR_OUT_OF_HOST_MEMORY,
-                       "DRM_IOCTL_SYNCOBJ_CREATE failed: %m");
-   }
-
-   if (initial_value) {
+   if ((sync->flags & VK_SYNC_IS_TIMELINE) && initial_value) {
       err = drmSyncobjTimelineSignal(device->drm_fd, &sobj->syncobj,
                                      &initial_value, 1);
       if (err < 0) {
@@ -97,6 +69,17 @@ vk_drm_timeline_syncobj_init(struct vk_device *device,
    return VK_SUCCESS;
 }

+void
+vk_drm_syncobj_finish(struct vk_device *device,
+                      struct vk_sync *sync)
+{
+   struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
+
+   assert(device->drm_fd >= 0);
+   ASSERTED int err = drmSyncobjDestroy(device->drm_fd, sobj->syncobj);
+   assert(err == 0);
+}
+
 static VkResult
 vk_drm_syncobj_signal(struct vk_device *device,
                       struct vk_sync *sync,
@@ -337,57 +320,50 @@ vk_drm_syncobj_move(struct vk_device *device,
    }
 }

-const struct vk_sync_type vk_drm_binary_syncobj_no_wait_type = {
-   .size = sizeof(struct vk_drm_syncobj),
-   .features = VK_SYNC_FEATURE_BINARY |
-               VK_SYNC_FEATURE_GPU_WAIT |
-               VK_SYNC_FEATURE_CPU_RESET |
-               VK_SYNC_FEATURE_CPU_SIGNAL,
-   .init = vk_drm_binary_syncobj_init,
-   .finish = vk_drm_syncobj_finish,
-   .signal = vk_drm_syncobj_signal,
-   .reset = vk_drm_syncobj_reset,
-   .move = vk_drm_syncobj_move,
-   .import_opaque_fd = vk_drm_syncobj_import_opaque_fd,
-   .export_opaque_fd = vk_drm_syncobj_export_opaque_fd,
-   .import_sync_file = vk_drm_syncobj_import_sync_file,
-   .export_sync_file = vk_drm_syncobj_export_sync_file,
-};
-
-const struct vk_sync_type vk_drm_binary_syncobj_type = {
-   .size = sizeof(struct vk_drm_syncobj),
-   .features = VK_SYNC_FEATURE_BINARY |
-               VK_SYNC_FEATURE_GPU_WAIT |
-               VK_SYNC_FEATURE_CPU_WAIT |
-               VK_SYNC_FEATURE_CPU_RESET |
-               VK_SYNC_FEATURE_CPU_SIGNAL |
-               VK_SYNC_FEATURE_WAIT_ANY |
-               VK_SYNC_FEATURE_WAIT_PENDING,
-   .init = vk_drm_binary_syncobj_init,
-   .finish = vk_drm_syncobj_finish,
-   .signal = vk_drm_syncobj_signal,
-   .reset = vk_drm_syncobj_reset,
-   .move = vk_drm_syncobj_move,
-   .wait_many = vk_drm_syncobj_wait_many,
-   .import_opaque_fd = vk_drm_syncobj_import_opaque_fd,
-   .export_opaque_fd = vk_drm_syncobj_export_opaque_fd,
-   .import_sync_file = vk_drm_syncobj_import_sync_file,
-   .export_sync_file = vk_drm_syncobj_export_sync_file,
-};
-
-const struct vk_sync_type vk_drm_timeline_syncobj_type = {
-   .size = sizeof(struct vk_drm_syncobj),
-   .features = VK_SYNC_FEATURE_TIMELINE |
-               VK_SYNC_FEATURE_GPU_WAIT |
-               VK_SYNC_FEATURE_CPU_WAIT |
-               VK_SYNC_FEATURE_CPU_SIGNAL |
-               VK_SYNC_FEATURE_WAIT_ANY |
-               VK_SYNC_FEATURE_WAIT_PENDING,
-   .init = vk_drm_timeline_syncobj_init,
-   .finish = vk_drm_syncobj_finish,
-   .signal = vk_drm_syncobj_signal,
-   .get_value = vk_drm_syncobj_get_value,
-   .wait_many = vk_drm_syncobj_wait_many,
-   .import_opaque_fd = vk_drm_syncobj_import_opaque_fd,
-   .export_opaque_fd = vk_drm_syncobj_export_opaque_fd,
-};
+struct vk_sync_type
+vk_drm_syncobj_get_type(int drm_fd)
+{
+   uint32_t syncobj = 0;
+   int err = drmSyncobjCreate(drm_fd, DRM_SYNCOBJ_CREATE_SIGNALED, &syncobj);
+   if (err < 0)
+      return (struct vk_sync_type) { .features = 0 };
+
+   struct vk_sync_type type = {
+      .size = sizeof(struct vk_drm_syncobj),
+      .features = VK_SYNC_FEATURE_BINARY |
+                  VK_SYNC_FEATURE_GPU_WAIT |
+                  VK_SYNC_FEATURE_CPU_RESET |
+                  VK_SYNC_FEATURE_CPU_SIGNAL,
+      .init = vk_drm_syncobj_init,
+      .finish = vk_drm_syncobj_finish,
+      .signal = vk_drm_syncobj_signal,
+      .reset = vk_drm_syncobj_reset,
+      .move = vk_drm_syncobj_move,
+      .import_opaque_fd = vk_drm_syncobj_import_opaque_fd,
+      .export_opaque_fd = vk_drm_syncobj_export_opaque_fd,
+      .import_sync_file = vk_drm_syncobj_import_sync_file,
+      .export_sync_file = vk_drm_syncobj_export_sync_file,
+   };
+
+   err = drmSyncobjWait(drm_fd, &syncobj, 1, 0,
+                        DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
+                        NULL /* first_signaled */);
+   if (err == 0) {
+      type.wait_many = vk_drm_syncobj_wait_many;
+      type.features |= VK_SYNC_FEATURE_CPU_WAIT |
+                       VK_SYNC_FEATURE_WAIT_ANY;
+   }
+
+   uint64_t cap;
+   err = drmGetCap(drm_fd, DRM_CAP_SYNCOBJ_TIMELINE, &cap);
+   if (err == 0 && cap != 0) {
+      type.get_value = vk_drm_syncobj_get_value;
+      type.features |= VK_SYNC_FEATURE_TIMELINE |
+                       VK_SYNC_FEATURE_WAIT_PENDING;
+   }
+
+   err = drmSyncobjDestroy(drm_fd, syncobj);
+   assert(err == 0);
+
+   return type;
+}
@@ -31,21 +31,18 @@
 extern "C" {
 #endif

-extern const struct vk_sync_type vk_drm_binary_syncobj_no_wait_type;
-extern const struct vk_sync_type vk_drm_binary_syncobj_type;
-extern const struct vk_sync_type vk_drm_timeline_syncobj_type;
-
 struct vk_drm_syncobj {
    struct vk_sync base;
    uint32_t syncobj;
 };

+void vk_drm_syncobj_finish(struct vk_device *device,
+                           struct vk_sync *sync);
+
 static inline bool
 vk_sync_type_is_drm_syncobj(const struct vk_sync_type *type)
 {
-   return type == &vk_drm_binary_syncobj_no_wait_type ||
-          type == &vk_drm_binary_syncobj_type ||
-          type == &vk_drm_timeline_syncobj_type;
+   return type->finish == vk_drm_syncobj_finish;
 }

 static inline struct vk_drm_syncobj *
@@ -57,6 +54,8 @@ vk_sync_as_drm_syncobj(struct vk_sync *sync)
    return container_of(sync, struct vk_drm_syncobj, base);
 }

+struct vk_sync_type vk_drm_syncobj_get_type(int drm_fd);
+
 #ifdef __cplusplus
 }
 #endif