zink: extract function allocate_bo from resource_object_create

v2: move reworking the loop to a new commit (Mike)

Signed-off-by: Gert Wollny <gert.wollny@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/27155>
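
The extraction bundles the allocation parameters that resource_object_create previously tracked in scattered locals (flags, aflags, need_dedicated, shared, user_mem, external, export_types, plus the winsys handle) into the new struct mem_alloc_info, and moves the VkMemoryAllocateInfo setup, pNext-chain assembly, heap selection, and the allocation retry loop into the helper. The caller consumes the helper's result as an error code:

   int retval = allocate_bo(screen, templ, &reqs, obj, &alloc_info);
   switch (retval) {
   case -1: goto fail1;
   case -2: goto fail2;
   default:
      assert(obj->bo);
   };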
@@ -975,6 +975,152 @@ get_export_flags(struct zink_screen *screen, const struct pipe_resource *templ,
    return true;
 }
 
+struct mem_alloc_info {
+   struct winsys_handle *whandle;
+   VkMemoryPropertyFlags flags;
+   enum zink_alloc_flag aflags;
+   bool need_dedicated;
+   bool shared;
+   const void *user_mem;
+   VkExternalMemoryHandleTypeFlags external;
+   VkExternalMemoryHandleTypeFlags export_types;
+};
+
+static inline int
+allocate_bo(struct zink_screen *screen, const struct pipe_resource *templ,
+            VkMemoryRequirements *reqs, struct zink_resource_object *obj,
+            struct mem_alloc_info *alloc_info)
+{
+   VkMemoryAllocateInfo mai;
+   mai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+   mai.pNext = NULL;
+   mai.allocationSize = reqs->size;
+   enum zink_heap heap = zink_heap_from_domain_flags(alloc_info->flags, alloc_info->aflags);
+
+   VkMemoryDedicatedAllocateInfo ded_alloc_info = {
+      .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
+      .pNext = mai.pNext,
+      .image = obj->image,
+      .buffer = VK_NULL_HANDLE,
+   };
+
+   if (screen->info.have_KHR_dedicated_allocation && alloc_info->need_dedicated) {
+      ded_alloc_info.pNext = mai.pNext;
+      mai.pNext = &ded_alloc_info;
+   }
+
+   VkExportMemoryAllocateInfo emai;
+   if ((templ->bind & ZINK_BIND_VIDEO) || ((templ->bind & PIPE_BIND_SHARED) && alloc_info->shared) || (templ->bind & ZINK_BIND_DMABUF)) {
+      emai.sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO;
+      emai.handleTypes = alloc_info->export_types;
+
+      emai.pNext = mai.pNext;
+      mai.pNext = &emai;
+      obj->exportable = true;
+   }
+
+#ifdef ZINK_USE_DMABUF
+
+#if !defined(_WIN32)
+   VkImportMemoryFdInfoKHR imfi = {
+      VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,
+      NULL,
+   };
+
+   if (alloc_info->whandle) {
+      imfi.pNext = NULL;
+      imfi.handleType = alloc_info->external;
+      imfi.fd = os_dupfd_cloexec(alloc_info->whandle->handle);
+      if (imfi.fd < 0) {
+         mesa_loge("ZINK: failed to dup dmabuf fd: %s\n", strerror(errno));
+         return -2;
+      }
+
+      imfi.pNext = mai.pNext;
+      mai.pNext = &imfi;
+   }
+#else
+   VkImportMemoryWin32HandleInfoKHR imfi = {
+      VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR,
+      NULL,
+   };
+
+   if (alloc_info->whandle) {
+      HANDLE source_target = GetCurrentProcess();
+      HANDLE out_handle;
+
+      bool result = DuplicateHandle(source_target, alloc_info->whandle->handle, source_target, &out_handle, 0, false, DUPLICATE_SAME_ACCESS);
+
+      if (!result || !out_handle) {
+         mesa_loge("ZINK: failed to DuplicateHandle with winerr: %08x\n", (int)GetLastError());
+         return -2;
+      }
+
+      imfi.pNext = NULL;
+      imfi.handleType = alloc_info->external;
+      imfi.handle = out_handle;
+
+      imfi.pNext = mai.pNext;
+      mai.pNext = &imfi;
+   }
+#endif
+
+#endif
+
+   VkImportMemoryHostPointerInfoEXT imhpi = {
+      VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT,
+      NULL,
+   };
+   if (alloc_info->user_mem) {
+      imhpi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
+      imhpi.pHostPointer = (void*)alloc_info->user_mem;
+      imhpi.pNext = mai.pNext;
+      mai.pNext = &imhpi;
+   }
+
+   unsigned alignment = MAX2(reqs->alignment, 256);
+   if (templ->usage == PIPE_USAGE_STAGING && obj->is_buffer)
+      alignment = MAX2(alignment, screen->info.props.limits.minMemoryMapAlignment);
+   obj->alignment = alignment;
+
+   if (zink_mem_type_idx_from_bits(screen, heap, reqs->memoryTypeBits) == UINT32_MAX) {
+      /* not valid based on reqs; demote to more compatible type */
+      switch (heap) {
+      case ZINK_HEAP_DEVICE_LOCAL_VISIBLE:
+         heap = ZINK_HEAP_DEVICE_LOCAL;
+         break;
+      case ZINK_HEAP_HOST_VISIBLE_COHERENT_CACHED:
+         heap = ZINK_HEAP_HOST_VISIBLE_COHERENT;
+         break;
+      default:
+         break;
+      }
+      assert(zink_mem_type_idx_from_bits(screen, heap, reqs->memoryTypeBits) != UINT32_MAX);
+   }
+
+retry:
+   /* iterate over all available memory types to reduce chance of oom */
+   for (unsigned i = 0; !obj->bo && i < screen->heap_count[heap]; i++) {
+      if (!(reqs->memoryTypeBits & BITFIELD_BIT(screen->heap_map[heap][i])))
+         continue;
+
+      mai.memoryTypeIndex = screen->heap_map[heap][i];
+      obj->bo = zink_bo(zink_bo_create(screen, reqs->size, alignment, heap, mai.pNext ? ZINK_ALLOC_NO_SUBALLOC : 0, mai.memoryTypeIndex, mai.pNext));
+      if (!obj->bo) {
+         if (heap == ZINK_HEAP_DEVICE_LOCAL_VISIBLE) {
+            /* demote BAR allocations to a different heap on failure to avoid oom */
+            if (templ->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT || templ->usage == PIPE_USAGE_DYNAMIC)
+               heap = ZINK_HEAP_HOST_VISIBLE_COHERENT;
+            else
+               heap = ZINK_HEAP_DEVICE_LOCAL;
+            goto retry;
+         }
+      }
+   }
+
+   return obj->bo ? 0 : -2;
+}
+
 static struct zink_resource_object *
 resource_object_create(struct zink_screen *screen, const struct pipe_resource *templ, struct winsys_handle *whandle, bool *linear,
                        uint64_t *modifiers, int modifiers_count, const void *loader_private, const void *user_mem)
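
Note: each optional Vk*Info in allocate_bo is linked into the allocation's pNext chain with the same two-step prepend. A minimal sketch of the pattern, outside of any zink specifics:

   /* prepend an extension struct onto the chain headed by mai.pNext */
   VkExportMemoryAllocateInfo emai;
   emai.sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO;
   emai.handleTypes = export_types;
   emai.pNext = mai.pNext;   /* new node points at the current chain head */
   mai.pNext = &emai;        /* head now points at the new node */

The retry label at the bottom implements heap demotion: when a ZINK_HEAP_DEVICE_LOCAL_VISIBLE (BAR) allocation fails, the heap is demoted to a host-visible or plain device-local heap and the memory-type scan restarts from the beginning.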
@@ -992,7 +1138,15 @@ resource_object_create(struct zink_screen *screen, const struct pipe_resource *templ,
    obj->last_dt_idx = obj->dt_idx = UINT32_MAX; //TODO: unionize
 
    VkMemoryRequirements reqs = {0};
-   VkMemoryPropertyFlags flags;
+
+   struct mem_alloc_info alloc_info = {
+      .whandle = whandle,
+      .need_dedicated = false,
+      .external = 0,
+      .export_types = ZINK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_BIT,
+      .shared = templ->bind & PIPE_BIND_SHARED,
+      .user_mem = user_mem
+   };
 
    /* figure out aux plane count */
    if (whandle && whandle->plane >= util_format_get_num_planes(whandle->format))
@@ -1004,14 +1158,9 @@ resource_object_create(struct zink_screen *screen, const struct pipe_resource *templ,
       break;
    }
 
-   bool need_dedicated = false;
-   bool shared = templ->bind & PIPE_BIND_SHARED;
-
-   VkExternalMemoryHandleTypeFlags external = 0;
-   VkExternalMemoryHandleTypeFlags export_types = ZINK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_BIT;
    unsigned num_planes = util_format_get_num_planes(templ->format);
 
-   if (!get_export_flags(screen, templ, whandle, &external, &export_types)) {
+   if (!get_export_flags(screen, templ, whandle, &alloc_info.external, &alloc_info.export_types)) {
       /* can't export anything, fail early */
       return NULL;
    }
@@ -1028,12 +1177,12 @@ resource_object_create(struct zink_screen *screen, const struct pipe_resource *templ,
       return obj;
    } else if (templ->target == PIPE_BUFFER) {
       if (!create_buffer(screen, obj, templ, modifiers, modifiers_count, user_mem,
-                         &flags, &reqs))
+                         &alloc_info.flags, &reqs))
         goto fail1;
      max_level = 1;
   } else {
      max_level = templ->last_level + 1;
-     bool winsys_modifier = (export_types & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT) && whandle && whandle->modifier != DRM_FORMAT_MOD_INVALID;
+     bool winsys_modifier = (alloc_info.export_types & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT) && whandle && whandle->modifier != DRM_FORMAT_MOD_INVALID;
      uint64_t *ici_modifiers = winsys_modifier ? &whandle->modifier : modifiers;
      unsigned ici_modifier_count = winsys_modifier ? 1 : modifiers_count;
      VkImageCreateInfo ici;
@@ -1097,10 +1246,10 @@ resource_object_create(struct zink_screen *screen, const struct pipe_resource *templ,
 
      obj->render_target = (ici.usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) != 0;
 
-     if (shared || external) {
+     if (alloc_info.shared || alloc_info.external) {
         emici.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
        emici.pNext = ici.pNext;
-        emici.handleTypes = export_types;
+        emici.handleTypes = alloc_info.export_types;
        ici.pNext = &emici;
 
        assert(ici.tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT || mod != DRM_FORMAT_MOD_INVALID);
@@ -1131,7 +1280,7 @@ resource_object_create(struct zink_screen *screen, const struct pipe_resource *templ,
           idfmlci.pDrmFormatModifiers = modifiers;
           ici.pNext = &idfmlci;
        } else if (ici.tiling == VK_IMAGE_TILING_OPTIMAL) {
-           shared = false;
+           alloc_info.shared = false;
        }
     } else if (user_mem) {
        emici.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
@@ -1195,11 +1344,11 @@ resource_object_create(struct zink_screen *screen, const struct pipe_resource *templ,
        assert(num_dmabuf_planes <= 4);
     }
 
-     need_dedicated = get_image_memory_requirement(screen, obj, num_planes, &reqs);
+     alloc_info.need_dedicated = get_image_memory_requirement(screen, obj, num_planes, &reqs);
      if (templ->usage == PIPE_USAGE_STAGING && ici.tiling == VK_IMAGE_TILING_LINEAR)
-        flags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+        alloc_info.flags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
      else
-        flags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+        alloc_info.flags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
 
      obj->vkflags = ici.flags;
      obj->vkusage = ici.usage;
@@ -1207,13 +1356,13 @@ resource_object_create(struct zink_screen *screen, const struct pipe_resource *templ,
    obj->alignment = reqs.alignment;
 
    if (templ->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT || templ->usage == PIPE_USAGE_DYNAMIC)
-      flags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+      alloc_info.flags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
-   else if (!(flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) &&
+   else if (!(alloc_info.flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) &&
             templ->usage == PIPE_USAGE_STAGING)
-      flags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+      alloc_info.flags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
 
    if (templ->bind & ZINK_BIND_TRANSIENT)
-      flags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
+      alloc_info.flags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
 
    if (user_mem) {
       VkExternalMemoryHandleTypeFlagBits handle_type = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
@@ -1226,139 +1375,20 @@ resource_object_create(struct zink_screen *screen, const struct pipe_resource *templ,
         goto fail1;
      }
      reqs.memoryTypeBits &= memory_host_pointer_properties.memoryTypeBits;
-     flags &= ~VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+     alloc_info.flags &= ~VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
   }
 
-  VkMemoryAllocateInfo mai;
-  enum zink_alloc_flag aflags = templ->flags & PIPE_RESOURCE_FLAG_SPARSE ? ZINK_ALLOC_SPARSE : 0;
-  mai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
-  mai.pNext = NULL;
-  mai.allocationSize = reqs.size;
-  enum zink_heap heap = zink_heap_from_domain_flags(flags, aflags);
+  alloc_info.aflags = templ->flags & PIPE_RESOURCE_FLAG_SPARSE ? ZINK_ALLOC_SPARSE : 0;
 
-  VkMemoryDedicatedAllocateInfo ded_alloc_info = {
-     .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
-     .pNext = mai.pNext,
-     .image = obj->image,
-     .buffer = VK_NULL_HANDLE,
+  int retval = allocate_bo(screen, templ, &reqs, obj, &alloc_info);
+  switch (retval) {
+  case -1: goto fail1;
+  case -2: goto fail2;
+  default:
+     assert(obj->bo);
   };
 
-  if (screen->info.have_KHR_dedicated_allocation && need_dedicated) {
-     ded_alloc_info.pNext = mai.pNext;
-     mai.pNext = &ded_alloc_info;
-  }
-
-  VkExportMemoryAllocateInfo emai;
-  if ((templ->bind & ZINK_BIND_VIDEO) || ((templ->bind & PIPE_BIND_SHARED) && shared) || (templ->bind & ZINK_BIND_DMABUF)) {
-     emai.sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO;
-     emai.handleTypes = export_types;
-
-     emai.pNext = mai.pNext;
-     mai.pNext = &emai;
-     obj->exportable = true;
-  }
-
-#ifdef ZINK_USE_DMABUF
-
-#if !defined(_WIN32)
-  VkImportMemoryFdInfoKHR imfi = {
-     VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,
-     NULL,
-  };
-
-  if (whandle) {
-     imfi.pNext = NULL;
-     imfi.handleType = external;
-     imfi.fd = os_dupfd_cloexec(whandle->handle);
-     if (imfi.fd < 0) {
-        mesa_loge("ZINK: failed to dup dmabuf fd: %s\n", strerror(errno));
-        goto fail1;
-     }
-
-     imfi.pNext = mai.pNext;
-     mai.pNext = &imfi;
-  }
-#else
-  VkImportMemoryWin32HandleInfoKHR imfi = {
-     VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR,
-     NULL,
-  };
-
-  if (whandle) {
-     HANDLE source_target = GetCurrentProcess();
-     HANDLE out_handle;
-
-     bool result = DuplicateHandle(source_target, whandle->handle, source_target, &out_handle, 0, false, DUPLICATE_SAME_ACCESS);
-
-     if (!result || !out_handle) {
-        mesa_loge("ZINK: failed to DuplicateHandle with winerr: %08x\n", (int)GetLastError());
-        goto fail1;
-     }
-
-     imfi.pNext = NULL;
-     imfi.handleType = external;
-     imfi.handle = out_handle;
-
-     imfi.pNext = mai.pNext;
-     mai.pNext = &imfi;
-  }
-#endif
-
-#endif
-
-  VkImportMemoryHostPointerInfoEXT imhpi = {
-     VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT,
-     NULL,
-  };
-  if (user_mem) {
-     imhpi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
-     imhpi.pHostPointer = (void*)user_mem;
-     imhpi.pNext = mai.pNext;
-     mai.pNext = &imhpi;
-  }
-
-  unsigned alignment = MAX2(reqs.alignment, 256);
-  if (templ->usage == PIPE_USAGE_STAGING && obj->is_buffer)
-     alignment = MAX2(alignment, screen->info.props.limits.minMemoryMapAlignment);
-  obj->alignment = alignment;
-
-  if (zink_mem_type_idx_from_bits(screen, heap, reqs.memoryTypeBits) == UINT32_MAX) {
-     /* not valid based on reqs; demote to more compatible type */
-     switch (heap) {
-     case ZINK_HEAP_DEVICE_LOCAL_VISIBLE:
-        heap = ZINK_HEAP_DEVICE_LOCAL;
-        break;
-     case ZINK_HEAP_HOST_VISIBLE_COHERENT_CACHED:
-        heap = ZINK_HEAP_HOST_VISIBLE_COHERENT;
-        break;
-     default:
-        break;
-     }
-     assert(zink_mem_type_idx_from_bits(screen, heap, reqs.memoryTypeBits) != UINT32_MAX);
-  }
-
-retry:
-  /* iterate over all available memory types to reduce chance of oom */
-  for (unsigned i = 0; !obj->bo && i < screen->heap_count[heap]; i++) {
-     if (!(reqs.memoryTypeBits & BITFIELD_BIT(screen->heap_map[heap][i])))
-        continue;
-
-     mai.memoryTypeIndex = screen->heap_map[heap][i];
-     obj->bo = zink_bo(zink_bo_create(screen, reqs.size, alignment, heap, mai.pNext ? ZINK_ALLOC_NO_SUBALLOC : 0, mai.memoryTypeIndex, mai.pNext));
-     if (!obj->bo) {
-        if (heap == ZINK_HEAP_DEVICE_LOCAL_VISIBLE) {
-           /* demote BAR allocations to a different heap on failure to avoid oom */
-           if (templ->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT || templ->usage == PIPE_USAGE_DYNAMIC)
-              heap = ZINK_HEAP_HOST_VISIBLE_COHERENT;
-           else
-              heap = ZINK_HEAP_DEVICE_LOCAL;
-           goto retry;
-        }
-     }
-  }
-  if (!obj->bo)
-     goto fail2;
-  if (aflags == ZINK_ALLOC_SPARSE) {
+  if (alloc_info.aflags == ZINK_ALLOC_SPARSE) {
      obj->size = templ->width0;
   } else {
      obj->offset = zink_bo_get_offset(obj->bo);
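
Note that every failure path inside allocate_bo returns -2, so the case -1 arm above is not reachable from this version of the helper; it appears to be reserved for future pre-allocation failures. This also means the dmabuf fd-dup and DuplicateHandle failures, which previously jumped to fail1, now unwind through fail2. The remaining hunks of the commit are in zink's barrier/synchronization code, in a second file.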
@@ -162,10 +162,6 @@ zink_resource_access_is_write(VkAccessFlags flags)
 bool
 zink_resource_image_needs_barrier(struct zink_resource *res, VkImageLayout new_layout, VkAccessFlags flags, VkPipelineStageFlags pipeline)
 {
-   if (!pipeline)
-      pipeline = pipeline_dst_stage(new_layout);
-   if (!flags)
-      flags = access_dst_flags(new_layout);
    return res->layout != new_layout || (res->obj->access_stage & pipeline) != pipeline ||
          (res->obj->access & flags) != flags ||
          zink_resource_access_is_write(res->obj->access) ||
@@ -262,29 +258,36 @@ unordered_res_exec(const struct zink_context *ctx, const struct zink_resource *res,
    return res->obj->unordered_write || !zink_batch_usage_matches(res->obj->bo->writes.u, ctx->batch.state);
 }
 
+static inline bool
+check_unordered_exec(struct zink_context *ctx, struct zink_resource *res, bool is_write)
+{
+   if (res) {
+      if (!res->obj->is_buffer) {
+         /* TODO: figure out how to link up unordered layout -> ordered layout and delete this conditionals */
+         if (zink_resource_usage_is_unflushed(res) && !res->obj->unordered_read && !res->obj->unordered_write)
+            return false;
+      }
+      return unordered_res_exec(ctx, res, is_write);
+   }
+   return true;
+}
+
 VkCommandBuffer
 zink_get_cmdbuf(struct zink_context *ctx, struct zink_resource *src, struct zink_resource *dst)
 {
    bool unordered_exec = (zink_debug & ZINK_DEBUG_NOREORDER) == 0;
-   /* TODO: figure out how to link up unordered layout -> ordered layout and delete these two conditionals */
-   if (src && !src->obj->is_buffer) {
-      if (zink_resource_usage_is_unflushed(src) && !src->obj->unordered_read && !src->obj->unordered_write)
-         unordered_exec = false;
-   }
-   if (dst && !dst->obj->is_buffer) {
-      if (zink_resource_usage_is_unflushed(dst) && !dst->obj->unordered_read && !dst->obj->unordered_write)
-         unordered_exec = false;
-   }
-   if (src && unordered_exec)
-      unordered_exec &= unordered_res_exec(ctx, src, false);
-   if (dst && unordered_exec)
-      unordered_exec &= unordered_res_exec(ctx, dst, true);
+
+   unordered_exec &= check_unordered_exec(ctx, src, false) &&
+                     check_unordered_exec(ctx, dst, true);
+
    if (src)
       src->obj->unordered_read = unordered_exec;
    if (dst)
       dst->obj->unordered_write = unordered_exec;
 
    if (!unordered_exec || ctx->unordered_blitting)
       zink_batch_no_rp(ctx);
 
    if (unordered_exec) {
       ctx->batch.state->has_barriers = true;
       ctx->batch.has_work = true;
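
check_unordered_exec folds the four separate conditionals of the old zink_get_cmdbuf into one helper. Since it returns true for res == NULL, an absent src or dst never vetoes reordering, matching the old behavior. One minor difference: both calls are now evaluated even when ZINK_DEBUG_NOREORDER has already cleared unordered_exec; the helper only reads resource state, so this appears to be harmless.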
@@ -320,7 +323,201 @@ resource_check_defer_image_barrier(struct zink_context *ctx, struct zink_resource *res,
    _mesa_set_add(ctx->need_barriers[is_compute], res);
 }
 
-template <bool HAS_SYNC2, bool UNSYNCHRONIZED>
+enum barrier_type {
+   barrier_default,
+   barrier_KHR_synchronzation2
+};
+
+template <barrier_type BARRIER_API>
+struct emit_memory_barrier {
+   static void for_image(struct zink_context *ctx, struct zink_resource *res, VkImageLayout new_layout,
+                         VkAccessFlags flags, VkPipelineStageFlags pipeline, bool completed, VkCommandBuffer cmdbuf,
+                         bool *queue_import) {
+      VkImageMemoryBarrier imb;
+      zink_resource_image_barrier_init(&imb, res, new_layout, flags, pipeline);
+      if (!res->obj->access_stage || completed)
+         imb.srcAccessMask = 0;
+      if (res->obj->needs_zs_evaluate)
+         imb.pNext = &res->obj->zs_evaluate;
+      res->obj->needs_zs_evaluate = false;
+      if (res->queue != zink_screen(ctx->base.screen)->gfx_queue && res->queue != VK_QUEUE_FAMILY_IGNORED) {
+         imb.srcQueueFamilyIndex = res->queue;
+         imb.dstQueueFamilyIndex = zink_screen(ctx->base.screen)->gfx_queue;
+         res->queue = VK_QUEUE_FAMILY_IGNORED;
+         *queue_import = true;
+      }
+      VKCTX(CmdPipelineBarrier)(
+         cmdbuf,
+         res->obj->access_stage ? res->obj->access_stage : VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
+         pipeline,
+         0,
+         0, NULL,
+         0, NULL,
+         1, &imb
+      );
+   }
+
+   static void for_buffer(struct zink_context *ctx, struct zink_resource *res,
+                          VkPipelineStageFlags pipeline,
+                          VkAccessFlags flags,
+                          bool unordered,
+                          bool usage_matches,
+                          VkPipelineStageFlags stages,
+                          VkCommandBuffer cmdbuf) {
+      VkMemoryBarrier bmb;
+      bmb.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
+      bmb.pNext = NULL;
+      if (unordered) {
+         stages = usage_matches ? res->obj->unordered_access_stage : stages;
+         bmb.srcAccessMask = usage_matches ? res->obj->unordered_access : res->obj->access;
+      } else {
+         bmb.srcAccessMask = res->obj->access;
+      }
+      VKCTX(CmdPipelineBarrier)(
+         cmdbuf,
+         stages,
+         pipeline,
+         0,
+         1, &bmb,
+         0, NULL,
+         0, NULL);
+   }
+};
+
+template <>
+struct emit_memory_barrier<barrier_KHR_synchronzation2> {
+   static void for_image(struct zink_context *ctx, struct zink_resource *res, VkImageLayout new_layout,
+                         VkAccessFlags flags, VkPipelineStageFlags pipeline, bool completed, VkCommandBuffer cmdbuf,
+                         bool *queue_import) {
+      VkImageMemoryBarrier2 imb;
+      zink_resource_image_barrier2_init(&imb, res, new_layout, flags, pipeline);
+      if (!res->obj->access_stage || completed)
+         imb.srcAccessMask = 0;
+      if (res->obj->needs_zs_evaluate)
+         imb.pNext = &res->obj->zs_evaluate;
+      res->obj->needs_zs_evaluate = false;
+      if (res->queue != zink_screen(ctx->base.screen)->gfx_queue && res->queue != VK_QUEUE_FAMILY_IGNORED) {
+         imb.srcQueueFamilyIndex = res->queue;
+         imb.dstQueueFamilyIndex = zink_screen(ctx->base.screen)->gfx_queue;
+         res->queue = VK_QUEUE_FAMILY_IGNORED;
+         *queue_import = true;
+      }
+      VkDependencyInfo dep = {
+         VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
+         NULL,
+         0,
+         0,
+         NULL,
+         0,
+         NULL,
+         1,
+         &imb
+      };
+      VKCTX(CmdPipelineBarrier2)(cmdbuf, &dep);
+   }
+
+   static void for_buffer(struct zink_context *ctx, struct zink_resource *res,
+                          VkPipelineStageFlags pipeline,
+                          VkAccessFlags flags,
+                          bool unordered,
+                          bool usage_matches,
+                          VkPipelineStageFlags stages,
+                          VkCommandBuffer cmdbuf) {
+      VkMemoryBarrier2 bmb;
+      bmb.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER_2;
+      bmb.pNext = NULL;
+      if (unordered) {
+         bmb.srcStageMask = usage_matches ? res->obj->unordered_access_stage : stages;
+         bmb.srcAccessMask = usage_matches ? res->obj->unordered_access : res->obj->access;
+      } else {
+         bmb.srcStageMask = stages;
+         bmb.srcAccessMask = res->obj->access;
+      }
+      bmb.dstStageMask = pipeline;
+      bmb.dstAccessMask = flags;
+      VkDependencyInfo dep = {
+         VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
+         NULL,
+         0,
+         1,
+         &bmb,
+         0,
+         NULL,
+         0,
+         NULL
+      };
+      VKCTX(CmdPipelineBarrier2)(cmdbuf, &dep);
+   }
+};
+
+template <bool UNSYNCHRONIZED>
+struct update_unordered_access_and_get_cmdbuf
+{
+   /* use base template to make the cases for true and false more explicite below */
+};
+
+template <>
+struct update_unordered_access_and_get_cmdbuf<true> {
+   static VkCommandBuffer apply(struct zink_context *ctx, struct zink_resource *res, bool usage_matches, bool is_write) {
+      assert(!usage_matches);
+      res->obj->unordered_write = true;
+      res->obj->unordered_read = true;
+      ctx->batch.state->has_unsync = true;
+      return ctx->batch.state->unsynchronized_cmdbuf;
+   }
+};
+
+template <>
+struct update_unordered_access_and_get_cmdbuf<false> {
+   static VkCommandBuffer apply(struct zink_context *ctx, struct zink_resource *res, bool usage_matches, bool is_write) {
+      VkCommandBuffer cmdbuf;
+      if (!usage_matches) {
+         res->obj->unordered_write = true;
+         if (is_write || zink_resource_usage_check_completion_fast(zink_screen(ctx->base.screen), res, ZINK_RESOURCE_ACCESS_RW))
+            res->obj->unordered_read = true;
+      }
+      if (zink_resource_usage_matches(res, ctx->batch.state) && !ctx->unordered_blitting &&
+          /* if current batch usage exists with ordered non-transfer access, never promote
+           * this avoids layout dsync
+           */
+          (!res->obj->unordered_read || !res->obj->unordered_write)) {
+         cmdbuf = ctx->batch.state->cmdbuf;
+         res->obj->unordered_write = false;
+         res->obj->unordered_read = false;
+         /* it's impossible to detect this from the caller
+          * there should be no valid case where this barrier can occur inside a renderpass
+          */
+         zink_batch_no_rp(ctx);
+      } else {
+         cmdbuf = is_write ? zink_get_cmdbuf(ctx, NULL, res) : zink_get_cmdbuf(ctx, res, NULL);
+         /* force subsequent barriers to be ordered to avoid layout desync */
+         if (cmdbuf != ctx->batch.state->reordered_cmdbuf) {
+            res->obj->unordered_write = false;
+            res->obj->unordered_read = false;
+         }
+      }
+      return cmdbuf;
+   }
+};
+
+template <bool UNSYNCHRONIZED>
+struct check_defer_image_barrier {
+   static void apply(UNUSED struct zink_context *ctx, UNUSED struct zink_resource *res, UNUSED VkImageLayout new_layout,
+                     UNUSED VkPipelineStageFlags pipeline) {
+   }
+};
+
+template <>
+struct check_defer_image_barrier<false> {
+   static void apply(struct zink_context *ctx, struct zink_resource *res, VkImageLayout new_layout,
+                     VkPipelineStageFlags pipeline) {
+      resource_check_defer_image_barrier(ctx, res, new_layout, pipeline);
+   }
+};
+
+template <barrier_type BARRIER_API, bool UNSYNCHRONIZED>
 void
 zink_resource_image_barrier(struct zink_context *ctx, struct zink_resource *res, VkImageLayout new_layout, VkAccessFlags flags, VkPipelineStageFlags pipeline)
 {
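
These templates replace the old runtime HAS_SYNC2 branches: the primary emit_memory_barrier emits legacy VkImageMemoryBarrier/VkMemoryBarrier through vkCmdPipelineBarrier, while the barrier_KHR_synchronzation2 specialization emits the *2 structs through vkCmdPipelineBarrier2. update_unordered_access_and_get_cmdbuf likewise splits the unsynchronized command-buffer path out of the old if/else ladder, and check_defer_image_barrier compiles the deferred-barrier bookkeeping away entirely for the unsynchronized instantiation.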
@@ -336,92 +533,12 @@ zink_resource_image_barrier(struct zink_context *ctx, struct zink_resource *res,
    enum zink_resource_access rw = is_write ? ZINK_RESOURCE_ACCESS_RW : ZINK_RESOURCE_ACCESS_WRITE;
    bool completed = zink_resource_usage_check_completion_fast(zink_screen(ctx->base.screen), res, rw);
    bool usage_matches = !completed && zink_resource_usage_matches(res, ctx->batch.state);
-   VkCommandBuffer cmdbuf;
-   if (!usage_matches) {
-      res->obj->unordered_write = true;
-      if (is_write || zink_resource_usage_check_completion_fast(zink_screen(ctx->base.screen), res, ZINK_RESOURCE_ACCESS_RW))
-         res->obj->unordered_read = true;
-   } else {
-      assert(!UNSYNCHRONIZED);
-   }
-   if (UNSYNCHRONIZED) {
-      cmdbuf = ctx->batch.state->unsynchronized_cmdbuf;
-      res->obj->unordered_write = true;
-      res->obj->unordered_read = true;
-      ctx->batch.state->has_unsync = true;
-   } else if (zink_resource_usage_matches(res, ctx->batch.state) && !ctx->unordered_blitting &&
-              /* if current batch usage exists with ordered non-transfer access, never promote
-               * this avoids layout dsync
-               */
-              (!res->obj->unordered_read || !res->obj->unordered_write)) {
-      cmdbuf = ctx->batch.state->cmdbuf;
-      res->obj->unordered_write = false;
-      res->obj->unordered_read = false;
-      /* it's impossible to detect this from the caller
-       * there should be no valid case where this barrier can occur inside a renderpass
-       */
-      zink_batch_no_rp(ctx);
-   } else {
-      cmdbuf = is_write ? zink_get_cmdbuf(ctx, NULL, res) : zink_get_cmdbuf(ctx, res, NULL);
-      /* force subsequent barriers to be ordered to avoid layout desync */
-      if (cmdbuf != ctx->batch.state->reordered_cmdbuf) {
-         res->obj->unordered_write = false;
-         res->obj->unordered_read = false;
-      }
-   }
+   VkCommandBuffer cmdbuf = update_unordered_access_and_get_cmdbuf<UNSYNCHRONIZED>::apply(ctx, res, usage_matches, is_write);
    assert(new_layout);
    bool marker = zink_cmd_debug_marker_begin(ctx, cmdbuf, "image_barrier(%s->%s)", vk_ImageLayout_to_str(res->layout), vk_ImageLayout_to_str(new_layout));
    bool queue_import = false;
-   if (HAS_SYNC2) {
-      VkImageMemoryBarrier2 imb;
-      zink_resource_image_barrier2_init(&imb, res, new_layout, flags, pipeline);
-      if (!res->obj->access_stage || completed)
-         imb.srcAccessMask = 0;
-      if (res->obj->needs_zs_evaluate)
-         imb.pNext = &res->obj->zs_evaluate;
-      res->obj->needs_zs_evaluate = false;
-      if (res->queue != zink_screen(ctx->base.screen)->gfx_queue && res->queue != VK_QUEUE_FAMILY_IGNORED) {
-         imb.srcQueueFamilyIndex = res->queue;
-         imb.dstQueueFamilyIndex = zink_screen(ctx->base.screen)->gfx_queue;
-         res->queue = VK_QUEUE_FAMILY_IGNORED;
-         queue_import = true;
-      }
-      VkDependencyInfo dep = {
-         VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
-         NULL,
-         0,
-         0,
-         NULL,
-         0,
-         NULL,
-         1,
-         &imb
-      };
-      VKCTX(CmdPipelineBarrier2)(cmdbuf, &dep);
-   } else {
-      VkImageMemoryBarrier imb;
-      zink_resource_image_barrier_init(&imb, res, new_layout, flags, pipeline);
-      if (!res->obj->access_stage || completed)
-         imb.srcAccessMask = 0;
-      if (res->obj->needs_zs_evaluate)
-         imb.pNext = &res->obj->zs_evaluate;
-      res->obj->needs_zs_evaluate = false;
-      if (res->queue != zink_screen(ctx->base.screen)->gfx_queue && res->queue != VK_QUEUE_FAMILY_IGNORED) {
-         imb.srcQueueFamilyIndex = res->queue;
-         imb.dstQueueFamilyIndex = zink_screen(ctx->base.screen)->gfx_queue;
-         res->queue = VK_QUEUE_FAMILY_IGNORED;
-         queue_import = true;
-      }
-      VKCTX(CmdPipelineBarrier)(
-         cmdbuf,
-         res->obj->access_stage ? res->obj->access_stage : VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
-         pipeline,
-         0,
-         0, NULL,
-         0, NULL,
-         1, &imb
-      );
-   }
+   emit_memory_barrier<BARRIER_API>::for_image(ctx, res, new_layout, flags, pipeline, completed, cmdbuf, &queue_import);
    zink_cmd_debug_marker_end(ctx, cmdbuf, marker);
 
    if (!UNSYNCHRONIZED)
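
With the helpers in place, the body of zink_resource_image_barrier reduces to one call that picks the command buffer and one call that emits the barrier, replacing the duplicated sync2/legacy emission paths in this function.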
@@ -433,6 +550,10 @@ zink_resource_image_barrier(struct zink_context *ctx, struct zink_resource *res,
    res->obj->access = flags;
    res->obj->access_stage = pipeline;
    res->layout = new_layout;
+
+   if (new_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
+      zink_resource_copies_reset(res);
+
    if (res->obj->exportable)
       simple_mtx_lock(&ctx->batch.state->exportable_lock);
    if (res->obj->dt) {
@@ -448,8 +569,6 @@ zink_resource_image_barrier(struct zink_context *ctx, struct zink_resource *res,
          pipe_resource_reference(&pres, &res->base.b);
       }
    }
-   if (new_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
-      zink_resource_copies_reset(res);
    if (res->obj->exportable && queue_import) {
       for (struct zink_resource *r = res; r; r = zink_resource(r->base.b.next)) {
         VkSemaphore sem = zink_screen_export_dmabuf_semaphore(zink_screen(ctx->base.screen), r);
@@ -575,7 +694,9 @@ buffer_needs_barrier(struct zink_resource *res, VkAccessFlags flags, VkPipelineStageFlags pipeline,
       ((unordered ? res->obj->unordered_access : res->obj->access) & flags) != flags;
 }
 
-template <bool HAS_SYNC2>
+
+
+template <barrier_type BARRIER_API>
 void
 zink_resource_buffer_barrier(struct zink_context *ctx, struct zink_resource *res, VkAccessFlags flags, VkPipelineStageFlags pipeline)
 {
@@ -638,51 +759,7 @@ zink_resource_buffer_barrier(struct zink_context *ctx, struct zink_resource *res,
    }
 
    VkPipelineStageFlags stages = res->obj->access_stage ? res->obj->access_stage : pipeline_access_stage(res->obj->access);;
-   if (HAS_SYNC2) {
-      VkMemoryBarrier2 bmb;
-      bmb.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER_2;
-      bmb.pNext = NULL;
-      if (unordered) {
-         bmb.srcStageMask = usage_matches ? res->obj->unordered_access_stage : stages;
-         bmb.srcAccessMask = usage_matches ? res->obj->unordered_access : res->obj->access;
-      } else {
-         bmb.srcStageMask = stages;
-         bmb.srcAccessMask = res->obj->access;
-      }
-      bmb.dstStageMask = pipeline;
-      bmb.dstAccessMask = flags;
-      VkDependencyInfo dep = {
-         VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
-         NULL,
-         0,
-         1,
-         &bmb,
-         0,
-         NULL,
-         0,
-         NULL
-      };
-      VKCTX(CmdPipelineBarrier2)(cmdbuf, &dep);
-   } else {
-      VkMemoryBarrier bmb;
-      bmb.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
-      bmb.pNext = NULL;
-      if (unordered) {
-         stages = usage_matches ? res->obj->unordered_access_stage : stages;
-         bmb.srcAccessMask = usage_matches ? res->obj->unordered_access : res->obj->access;
-      } else {
-         bmb.srcAccessMask = res->obj->access;
-      }
-      VKCTX(CmdPipelineBarrier)(
-         cmdbuf,
-         stages,
-         pipeline,
-         0,
-         1, &bmb,
-         0, NULL,
-         0, NULL
-      );
-   }
-
+   emit_memory_barrier<BARRIER_API>::for_buffer(ctx, res, pipeline, flags, unordered, usage_matches, stages, cmdbuf);
 
    zink_cmd_debug_marker_end(ctx, cmdbuf, marker);
 }
@@ -713,12 +790,12 @@ void
 zink_synchronization_init(struct zink_screen *screen)
 {
    if (screen->info.have_vulkan13 || screen->info.have_KHR_synchronization2) {
-      screen->buffer_barrier = zink_resource_buffer_barrier<true>;
-      screen->image_barrier = zink_resource_image_barrier<true, false>;
-      screen->image_barrier_unsync = zink_resource_image_barrier<true, true>;
+      screen->buffer_barrier = zink_resource_buffer_barrier<barrier_KHR_synchronzation2>;
+      screen->image_barrier = zink_resource_image_barrier<barrier_KHR_synchronzation2, false>;
+      screen->image_barrier_unsync = zink_resource_image_barrier<barrier_KHR_synchronzation2, true>;
   } else {
-      screen->buffer_barrier = zink_resource_buffer_barrier<false>;
-      screen->image_barrier = zink_resource_image_barrier<false, false>;
-      screen->image_barrier_unsync = zink_resource_image_barrier<false, true>;
+      screen->buffer_barrier = zink_resource_buffer_barrier<barrier_default>;
+      screen->image_barrier = zink_resource_image_barrier<barrier_default, false>;
+      screen->image_barrier_unsync = zink_resource_image_barrier<barrier_default, true>;
   }
 }
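
zink_synchronization_init resolves each template instantiation once per screen, so the sync2 capability check is paid at init time rather than on every barrier. A self-contained sketch of this dispatch pattern (names here are illustrative, not zink's):

   #include <cstdio>

   enum barrier_type { barrier_default, barrier_KHR_synchronzation2 };

   // Each instantiation compiles to a distinct function with a fixed API choice.
   template <barrier_type API>
   static void emit_barrier(int res)
   {
      std::printf("barrier for res %d via %s\n", res,
                  API == barrier_KHR_synchronzation2 ? "vkCmdPipelineBarrier2"
                                                     : "vkCmdPipelineBarrier");
   }

   struct screen {
      void (*barrier)(int res);  // plain function pointer, selected once
   };

   static void synchronization_init(screen *s, bool has_sync2)
   {
      s->barrier = has_sync2 ? emit_barrier<barrier_KHR_synchronzation2>
                             : emit_barrier<barrier_default>;
   }

   int main()
   {
      screen s;
      synchronization_init(&s, true);
      s.barrier(42);  // calls the sync2 instantiation with no runtime branch
   }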