/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_buffer.h"

#include "venus-protocol/vn_protocol_driver_buffer.h"
#include "venus-protocol/vn_protocol_driver_buffer_view.h"

#include "vn_android.h"
#include "vn_device.h"
#include "vn_device_memory.h"
#include "vn_physical_device.h"

/* buffer commands */

static inline bool
vn_buffer_create_info_can_be_cached(const VkBufferCreateInfo *create_info,
                                    struct vn_buffer_cache *cache)
{
   /* cache only VK_SHARING_MODE_EXCLUSIVE and without pNext for simplicity */
   return (create_info->size <= cache->max_buffer_size) &&
          (create_info->pNext == NULL) &&
          (create_info->sharingMode == VK_SHARING_MODE_EXCLUSIVE);
}

static VkResult
vn_buffer_get_max_buffer_size(struct vn_device *dev,
                              uint64_t *out_max_buffer_size)
{
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
   struct vn_physical_device *pdev = dev->physical_device;
   VkDevice dev_handle = vn_device_to_handle(dev);
   VkBuffer buf_handle;
   VkBufferCreateInfo create_info = {
      .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
      .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
      .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
   };
   uint64_t max_buffer_size = 0;
   uint8_t begin = 0;
   uint8_t end = 64;

   if (pdev->features.vulkan_1_3.maintenance4) {
      *out_max_buffer_size = pdev->properties.vulkan_1_3.maxBufferSize;
      return VK_SUCCESS;
   }

   /* For drivers that don't support VK_KHR_maintenance4, we try to estimate
    * the maxBufferSize using binary search.
    * TODO remove all the search code after VK_KHR_maintenance4 becomes
    * a requirement.
    */
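   /* The search probes power-of-two sizes 2^mid for exponents in [0, 64):
    * begin is the smallest untested exponent and end is the smallest known
    * failing one, so each probe either raises the lower bound or lowers the
    * upper bound until they meet.  The result is the largest power-of-two
    * size that can be created, a conservative lower bound on the true limit.
    */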
   while (begin < end) {
      uint8_t mid = (begin + end) >> 1;
      create_info.size = 1ull << mid;
      if (vn_CreateBuffer(dev_handle, &create_info, alloc, &buf_handle) ==
          VK_SUCCESS) {
         vn_DestroyBuffer(dev_handle, buf_handle, alloc);
         max_buffer_size = create_info.size;
         begin = mid + 1;
      } else {
         end = mid;
      }
   }

   *out_max_buffer_size = max_buffer_size;
   return VK_SUCCESS;
}

VkResult
vn_buffer_cache_init(struct vn_device *dev)
{
   uint32_t ahb_mem_type_bits = 0;
   uint64_t max_buffer_size = 0;
   VkResult result;

   if (dev->base.base.enabled_extensions
          .ANDROID_external_memory_android_hardware_buffer) {
      result =
         vn_android_get_ahb_buffer_memory_type_bits(dev, &ahb_mem_type_bits);
      if (result != VK_SUCCESS)
         return result;
   }

   if (!VN_PERF(NO_ASYNC_BUFFER_CREATE)) {
      result = vn_buffer_get_max_buffer_size(dev, &max_buffer_size);
      if (result != VK_SUCCESS)
         return result;
   }

   dev->buffer_cache.ahb_mem_type_bits = ahb_mem_type_bits;
   dev->buffer_cache.max_buffer_size = max_buffer_size;

   simple_mtx_init(&dev->buffer_cache.mutex, mtx_plain);
   util_sparse_array_init(&dev->buffer_cache.entries,
                          sizeof(struct vn_buffer_cache_entry), 64);
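   /* Note: 64 is the per-node element count of the sparse array, not a
    * capacity limit; nodes are allocated on demand as new (flags, usage)
    * combinations are seen.
    */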

   return VK_SUCCESS;
}

void
vn_buffer_cache_fini(struct vn_device *dev)
{
   util_sparse_array_finish(&dev->buffer_cache.entries);
   simple_mtx_destroy(&dev->buffer_cache.mutex);
}

static struct vn_buffer_cache_entry *
vn_buffer_get_cached_memory_requirements(
   struct vn_buffer_cache *cache,
   const VkBufferCreateInfo *create_info,
   struct vn_buffer_memory_requirements *out)
{
   if (VN_PERF(NO_ASYNC_BUFFER_CREATE))
      return NULL;

   /* 12.7. Resource Memory Association
    *
    * The memoryTypeBits member is identical for all VkBuffer objects created
    * with the same value for the flags and usage members in the
    * VkBufferCreateInfo structure and the handleTypes member of the
    * VkExternalMemoryBufferCreateInfo structure passed to vkCreateBuffer.
    */
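   /* Because of the above guarantee, and because cacheable create infos
    * carry no pNext (hence no VkExternalMemoryBufferCreateInfo), an entry
    * keyed on (flags, usage) stays valid for any cacheable size; only
    * req.size needs to be recomputed per buffer below.
    */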
   if (vn_buffer_create_info_can_be_cached(create_info, cache)) {
      /* Combine flags and usage bits to form a unique index. */
      const uint64_t idx =
         (uint64_t)create_info->flags << 32 | create_info->usage;
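      /* VkBufferCreateFlags and VkBufferUsageFlags are both 32-bit, so
       * packing them into the upper and lower halves of a 64-bit key is
       * collision-free.
       */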

      struct vn_buffer_cache_entry *entry =
         util_sparse_array_get(&cache->entries, idx);

      if (entry->valid) {
         *out = entry->requirements;

         /* TODO remove comment after mandating VK_KHR_maintenance4
          *
          * This is based on the following implementation-defined behavior:
          * req.size <= align64(info.size, req.alignment)
          */
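         /* E.g. with a cached alignment of 256, a 1000-byte buffer gets
          * req.size = align64(1000, 256) = 1024.
          */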
         out->memory.memoryRequirements.size = align64(
            create_info->size, out->memory.memoryRequirements.alignment);
      }

      return entry;
   }

   return NULL;
}

static void
vn_buffer_cache_entry_init(struct vn_buffer_cache *cache,
                           struct vn_buffer_cache_entry *entry,
                           VkMemoryRequirements2 *req)
{
   simple_mtx_lock(&cache->mutex);

   /* Entry might have already been initialized by another thread
    * before the lock
    */
   if (entry->valid)
      goto unlock;

   entry->requirements.memory = *req;

   const VkMemoryDedicatedRequirements *dedicated_req =
      vk_find_struct_const(req->pNext, MEMORY_DEDICATED_REQUIREMENTS);
   if (dedicated_req)
      entry->requirements.dedicated = *dedicated_req;

   entry->valid = true;
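   /* valid is written only after the requirements so that unlocked readers
    * that observe valid also see fully initialized data, assuming the store
    * order above is preserved.
    */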

unlock:
   simple_mtx_unlock(&cache->mutex);
}

static void
vn_copy_cached_memory_requirements(
   const struct vn_buffer_memory_requirements *cached,
   VkMemoryRequirements2 *out_mem_req)
{
   union {
      VkBaseOutStructure *pnext;
      VkMemoryRequirements2 *two;
      VkMemoryDedicatedRequirements *dedicated;
   } u = { .two = out_mem_req };
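
   /* Walk the caller-provided pNext chain through the generic
    * VkBaseOutStructure view and fill in the structs we recognize from the
    * cached copy; unknown sTypes are left untouched.
    */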
   while (u.pnext) {
      switch (u.pnext->sType) {
      case VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2:
         u.two->memoryRequirements = cached->memory.memoryRequirements;
         break;
      case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS:
         u.dedicated->prefersDedicatedAllocation =
            cached->dedicated.prefersDedicatedAllocation;
         u.dedicated->requiresDedicatedAllocation =
            cached->dedicated.requiresDedicatedAllocation;
         break;
      default:
         break;
      }
      u.pnext = u.pnext->pNext;
   }
}

static VkResult
vn_buffer_init(struct vn_device *dev,
               const VkBufferCreateInfo *create_info,
               struct vn_buffer *buf)
{
   VkDevice dev_handle = vn_device_to_handle(dev);
   VkBuffer buf_handle = vn_buffer_to_handle(buf);
   struct vn_buffer_cache *cache = &dev->buffer_cache;
   VkResult result;

   /* If cacheable and mem requirements found in cache, make async call */
   struct vn_buffer_cache_entry *entry =
      vn_buffer_get_cached_memory_requirements(cache, create_info,
                                               &buf->requirements);

   /* Check size instead of entry->valid to be lock free */
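   /* This works because buf was zero-allocated, a cache hit overwrites
    * buf->requirements wholesale, and a valid entry never has a zero size
    * (Vulkan buffers cannot be created with size 0).
    */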
   if (buf->requirements.memory.memoryRequirements.size) {
      vn_async_vkCreateBuffer(dev->instance, dev_handle, create_info, NULL,
                              &buf_handle);
      return VK_SUCCESS;
   }

   /* If cache miss or not cacheable, make synchronous call */
   result = vn_call_vkCreateBuffer(dev->instance, dev_handle, create_info,
                                   NULL, &buf_handle);
   if (result != VK_SUCCESS)
      return result;

   buf->requirements.memory.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2;
   buf->requirements.memory.pNext = &buf->requirements.dedicated;
   buf->requirements.dedicated.sType =
      VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS;
   buf->requirements.dedicated.pNext = NULL;

   vn_call_vkGetBufferMemoryRequirements2(
      dev->instance, dev_handle,
      &(VkBufferMemoryRequirementsInfo2){
         .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2,
         .buffer = buf_handle,
      },
      &buf->requirements.memory);

   /* If cacheable, store mem requirements from the synchronous call */
   if (entry)
      vn_buffer_cache_entry_init(cache, entry, &buf->requirements.memory);

   return VK_SUCCESS;
}

VkResult
vn_buffer_create(struct vn_device *dev,
                 const VkBufferCreateInfo *create_info,
                 const VkAllocationCallbacks *alloc,
                 struct vn_buffer **out_buf)
{
   struct vn_buffer *buf = NULL;
   VkResult result;

   buf = vk_zalloc(alloc, sizeof(*buf), VN_DEFAULT_ALIGN,
                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!buf)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   vn_object_base_init(&buf->base, VK_OBJECT_TYPE_BUFFER, &dev->base);

   result = vn_buffer_init(dev, create_info, buf);
   if (result != VK_SUCCESS) {
      vn_object_base_fini(&buf->base);
      vk_free(alloc, buf);
      return result;
   }

   *out_buf = buf;

   return VK_SUCCESS;
}

VkResult
vn_CreateBuffer(VkDevice device,
                const VkBufferCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator,
                VkBuffer *pBuffer)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;
   struct vn_buffer *buf = NULL;
   VkResult result;

   const VkExternalMemoryBufferCreateInfo *external_info =
      vk_find_struct_const(pCreateInfo->pNext,
                           EXTERNAL_MEMORY_BUFFER_CREATE_INFO);
   const bool ahb_info =
      external_info &&
      external_info->handleTypes ==
         VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
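   /* handleTypes is a bitmask; the equality check takes the Android path
    * only when the AHB bit is the sole handle type requested.
    */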

   if (ahb_info)
      result = vn_android_buffer_from_ahb(dev, pCreateInfo, alloc, &buf);
   else
      result = vn_buffer_create(dev, pCreateInfo, alloc, &buf);

   if (result != VK_SUCCESS)
      return vn_error(dev->instance, result);

   *pBuffer = vn_buffer_to_handle(buf);

   return VK_SUCCESS;
}

void
vn_DestroyBuffer(VkDevice device,
                 VkBuffer buffer,
                 const VkAllocationCallbacks *pAllocator)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_buffer *buf = vn_buffer_from_handle(buffer);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!buf)
      return;

   vn_async_vkDestroyBuffer(dev->instance, device, buffer, NULL);

   vn_object_base_fini(&buf->base);
   vk_free(alloc, buf);
}

VkDeviceAddress
vn_GetBufferDeviceAddress(VkDevice device,
                          const VkBufferDeviceAddressInfo *pInfo)
{
   struct vn_device *dev = vn_device_from_handle(device);

   return vn_call_vkGetBufferDeviceAddress(dev->instance, device, pInfo);
}

uint64_t
vn_GetBufferOpaqueCaptureAddress(VkDevice device,
                                 const VkBufferDeviceAddressInfo *pInfo)
{
   struct vn_device *dev = vn_device_from_handle(device);

   return vn_call_vkGetBufferOpaqueCaptureAddress(dev->instance, device,
                                                  pInfo);
}

void
vn_GetBufferMemoryRequirements2(VkDevice device,
                                const VkBufferMemoryRequirementsInfo2 *pInfo,
                                VkMemoryRequirements2 *pMemoryRequirements)
{
   const struct vn_buffer *buf = vn_buffer_from_handle(pInfo->buffer);

   vn_copy_cached_memory_requirements(&buf->requirements,
                                      pMemoryRequirements);
}

VkResult
vn_BindBufferMemory2(VkDevice device,
                     uint32_t bindInfoCount,
                     const VkBindBufferMemoryInfo *pBindInfos)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
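
   /* A memory object with base_memory set was suballocated from a larger
    * base allocation that the host knows about, so each such bind must be
    * rewritten to target the base handle at the suballocation offset.
    */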
   VkBindBufferMemoryInfo *local_infos = NULL;
   for (uint32_t i = 0; i < bindInfoCount; i++) {
      const VkBindBufferMemoryInfo *info = &pBindInfos[i];
      struct vn_device_memory *mem =
         vn_device_memory_from_handle(info->memory);
      if (!mem->base_memory)
         continue;

      if (!local_infos) {
         const size_t size = sizeof(*local_infos) * bindInfoCount;
         local_infos = vk_alloc(alloc, size, VN_DEFAULT_ALIGN,
                                VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
         if (!local_infos)
            return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

         memcpy(local_infos, pBindInfos, size);
      }

      local_infos[i].memory = vn_device_memory_to_handle(mem->base_memory);
      local_infos[i].memoryOffset += mem->base_offset;
   }
   if (local_infos)
      pBindInfos = local_infos;

   vn_async_vkBindBufferMemory2(dev->instance, device, bindInfoCount,
                                pBindInfos);

   vk_free(alloc, local_infos);

   return VK_SUCCESS;
}

/* buffer view commands */

VkResult
vn_CreateBufferView(VkDevice device,
                    const VkBufferViewCreateInfo *pCreateInfo,
                    const VkAllocationCallbacks *pAllocator,
                    VkBufferView *pView)
{
   struct vn_device *dev = vn_device_from_handle(device);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   struct vn_buffer_view *view =
      vk_zalloc(alloc, sizeof(*view), VN_DEFAULT_ALIGN,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!view)
      return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vn_object_base_init(&view->base, VK_OBJECT_TYPE_BUFFER_VIEW, &dev->base);

   VkBufferView view_handle = vn_buffer_view_to_handle(view);
   vn_async_vkCreateBufferView(dev->instance, device, pCreateInfo, NULL,
                               &view_handle);

   *pView = view_handle;

   return VK_SUCCESS;
}

void
vn_DestroyBufferView(VkDevice device,
                     VkBufferView bufferView,
                     const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_buffer_view *view = vn_buffer_view_from_handle(bufferView);
   const VkAllocationCallbacks *alloc =
      pAllocator ? pAllocator : &dev->base.base.alloc;

   if (!view)
      return;

   vn_async_vkDestroyBufferView(dev->instance, device, bufferView, NULL);

   vn_object_base_fini(&view->base);
   vk_free(alloc, view);
}

void
vn_GetDeviceBufferMemoryRequirements(
   VkDevice device,
   const VkDeviceBufferMemoryRequirements *pInfo,
   VkMemoryRequirements2 *pMemoryRequirements)
{
   struct vn_device *dev = vn_device_from_handle(device);
   struct vn_buffer_cache *cache = &dev->buffer_cache;
   struct vn_buffer_memory_requirements reqs = { 0 };
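   /* reqs is zero-initialized so the size check below can distinguish a
    * cache hit from a miss without taking the cache mutex.
    */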

   /* If cacheable and mem requirements found in cache, skip host call */
   struct vn_buffer_cache_entry *entry =
      vn_buffer_get_cached_memory_requirements(cache, pInfo->pCreateInfo,
                                               &reqs);

   /* Check size instead of entry->valid to be lock free */
   if (reqs.memory.memoryRequirements.size) {
      vn_copy_cached_memory_requirements(&reqs, pMemoryRequirements);
      return;
   }

   /* Make the host call if not found in cache or not cacheable */
   vn_call_vkGetDeviceBufferMemoryRequirements(dev->instance, device, pInfo,
                                               pMemoryRequirements);

   /* If cacheable, store mem requirements from the host call */
   if (entry)
      vn_buffer_cache_entry_init(cache, entry, pMemoryRequirements);
}