radv/winsys: Return vulkan errors for buffer creation.

Reviewed-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/10570>
Author:    Bas Nieuwenhuizen
Date:      2021-04-28 02:10:57 +02:00
Committer: Marge Bot
Parent:    c88d1bace3
Commit:    8025b4120f

14 changed files with 199 additions and 147 deletions
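
The mechanical change repeated across all 14 files: the winsys buffer-allocation hooks (buffer_create, buffer_from_ptr, buffer_from_fd) used to return a struct radeon_winsys_bo * and signal failure with NULL, which forced every caller to substitute a made-up error code (usually VK_ERROR_OUT_OF_DEVICE_MEMORY). They now return a VkResult and hand the BO back through an out-parameter, so the real failure reason reaches the Vulkan API boundary. A hedged sketch of the calling convention before and after (the size, domain, and flags are illustrative, not taken from any one hunk):

    /* Before: failure collapses to NULL, so the caller has to guess an error. */
    mem->bo = device->ws->buffer_create(device->ws, alloc_size, 4096, RADEON_DOMAIN_VRAM,
                                        RADEON_FLAG_NO_INTERPROCESS_SHARING,
                                        RADV_BO_PRIORITY_SCRATCH);
    if (!mem->bo)
       return VK_ERROR_OUT_OF_DEVICE_MEMORY; /* may really have been a host allocation failure */

    /* After: the winsys reports the exact VkResult; on failure the out-parameter
     * is set to NULL, so destroy-on-NULL cleanup paths keep working. */
    VkResult result = device->ws->buffer_create(device->ws, alloc_size, 4096, RADEON_DOMAIN_VRAM,
                                                RADEON_FLAG_NO_INTERPROCESS_SHARING,
                                                RADV_BO_PRIORITY_SCRATCH, &mem->bo);
    if (result != VK_SUCCESS)
       return result;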


@@ -747,9 +747,10 @@ radv_import_ahb_memory(struct radv_device *device, struct radv_device_memory *me
       return VK_ERROR_INVALID_EXTERNAL_HANDLE;

    uint64_t alloc_size = 0;
-   mem->bo = device->ws->buffer_from_fd(device->ws, dma_buf, priority, &alloc_size);
-   if (!mem->bo)
-      return VK_ERROR_OUT_OF_HOST_MEMORY;
+   VkResult result =
+      device->ws->buffer_from_fd(device->ws, dma_buf, priority, &mem->bo, &alloc_size);
+   if (result != VK_SUCCESS)
+      return result;

    if (mem->image) {
       struct radeon_bo_metadata metadata;


@@ -511,20 +511,21 @@ static bool
 radv_cmd_buffer_resize_upload_buf(struct radv_cmd_buffer *cmd_buffer, uint64_t min_needed)
 {
    uint64_t new_size;
-   struct radeon_winsys_bo *bo;
+   struct radeon_winsys_bo *bo = NULL;
    struct radv_cmd_buffer_upload *upload;
    struct radv_device *device = cmd_buffer->device;

    new_size = MAX2(min_needed, 16 * 1024);
    new_size = MAX2(new_size, 2 * cmd_buffer->upload.size);

-   bo = device->ws->buffer_create(device->ws, new_size, 4096, device->ws->cs_domain(device->ws),
-                                  RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING |
-                                     RADEON_FLAG_32BIT | RADEON_FLAG_GTT_WC,
-                                  RADV_BO_PRIORITY_UPLOAD_BUFFER);
+   VkResult result =
+      device->ws->buffer_create(device->ws, new_size, 4096, device->ws->cs_domain(device->ws),
+                                RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING |
+                                   RADEON_FLAG_32BIT | RADEON_FLAG_GTT_WC,
+                                RADV_BO_PRIORITY_UPLOAD_BUFFER, &bo);

-   if (!bo) {
-      cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
+   if (result != VK_SUCCESS) {
+      cmd_buffer->record_result = result;
       return false;
    }


@@ -67,11 +67,11 @@ radv_init_trace(struct radv_device *device)
    struct radeon_winsys *ws = device->ws;
    VkResult result;

-   device->trace_bo = ws->buffer_create(
+   result = ws->buffer_create(
       ws, TRACE_BO_SIZE, 8, RADEON_DOMAIN_VRAM,
       RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_ZERO_VRAM,
-      RADV_BO_PRIORITY_UPLOAD_BUFFER);
-   if (!device->trace_bo)
+      RADV_BO_PRIORITY_UPLOAD_BUFFER, &device->trace_bo);
+   if (result != VK_SUCCESS)
       return false;

    result = ws->buffer_make_resident(ws, device->trace_bo, true);
@@ -836,11 +836,11 @@ radv_trap_handler_init(struct radv_device *device)
    if (result != VK_SUCCESS)
       return false;

-   device->tma_bo = ws->buffer_create(ws, TMA_BO_SIZE, 256, RADEON_DOMAIN_VRAM,
-                                      RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING |
-                                         RADEON_FLAG_ZERO_VRAM | RADEON_FLAG_32BIT,
-                                      RADV_BO_PRIORITY_SCRATCH);
-   if (!device->tma_bo)
+   result = ws->buffer_create(ws, TMA_BO_SIZE, 256, RADEON_DOMAIN_VRAM,
+                              RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING |
+                                 RADEON_FLAG_ZERO_VRAM | RADEON_FLAG_32BIT,
+                              RADV_BO_PRIORITY_SCRATCH, &device->tma_bo);
+   if (result != VK_SUCCESS)
       return false;

    result = ws->buffer_make_resident(ws, device->tma_bo, true);


@@ -801,13 +801,13 @@ radv_CreateDescriptorPool(VkDevice _device, const VkDescriptorPoolCreateInfo *pC
    if (bo_size) {
       if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_VALVE)) {
-         pool->bo = device->ws->buffer_create(
+         VkResult result = device->ws->buffer_create(
             device->ws, bo_size, 32, RADEON_DOMAIN_VRAM,
             RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT,
-            RADV_BO_PRIORITY_DESCRIPTOR);
-         if (!pool->bo) {
+            RADV_BO_PRIORITY_DESCRIPTOR, &pool->bo);
+         if (result != VK_SUCCESS) {
            radv_destroy_descriptor_pool(device, pAllocator, pool);
-            return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+            return vk_error(device->instance, result);
         }
         pool->mapped_ptr = (uint8_t *)device->ws->buffer_map(pool->bo);
         if (!pool->mapped_ptr) {


@@ -2758,13 +2758,13 @@ radv_device_init_border_color(struct radv_device *device)
 {
    VkResult result;

-   device->border_color_data.bo = device->ws->buffer_create(
+   result = device->ws->buffer_create(
       device->ws, RADV_BORDER_COLOR_BUFFER_SIZE, 4096, RADEON_DOMAIN_VRAM,
       RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_READ_ONLY | RADEON_FLAG_NO_INTERPROCESS_SHARING,
-      RADV_BO_PRIORITY_SHADER);
+      RADV_BO_PRIORITY_SHADER, &device->border_color_data.bo);

-   if (device->border_color_data.bo == NULL)
-      return vk_error(device->physical_device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+   if (result != VK_SUCCESS)
+      return vk_error(device->physical_device->instance, result);

    result = device->ws->buffer_make_resident(device->ws, device->border_color_data.bo, true);
    if (result != VK_SUCCESS)
@@ -3766,6 +3766,7 @@ radv_get_preamble_cs(struct radv_queue *queue, uint32_t scratch_size_per_wave,
    unsigned hs_offchip_param = 0;
    unsigned tess_offchip_ring_offset;
    uint32_t ring_bo_flags = RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING;
+   VkResult result = VK_SUCCESS;
    if (!queue->has_tess_rings) {
       if (needs_tess_rings)
          add_tess_rings = true;
@@ -3821,10 +3822,10 @@ radv_get_preamble_cs(struct radv_queue *queue, uint32_t scratch_size_per_wave,
    uint32_t scratch_size = scratch_size_per_wave * scratch_waves;
    uint32_t queue_scratch_size = queue->scratch_size_per_wave * queue->scratch_waves;
    if (scratch_size > queue_scratch_size) {
-      scratch_bo =
-         queue->device->ws->buffer_create(queue->device->ws, scratch_size, 4096, RADEON_DOMAIN_VRAM,
-                                          ring_bo_flags, RADV_BO_PRIORITY_SCRATCH);
-      if (!scratch_bo)
+      result =
+         queue->device->ws->buffer_create(queue->device->ws, scratch_size, 4096, RADEON_DOMAIN_VRAM,
+                                          ring_bo_flags, RADV_BO_PRIORITY_SCRATCH, &scratch_bo);
+      if (result != VK_SUCCESS)
          goto fail;
    } else
       scratch_bo = queue->scratch_bo;
@@ -3833,20 +3834,20 @@ radv_get_preamble_cs(struct radv_queue *queue, uint32_t scratch_size_per_wave,
    uint32_t compute_queue_scratch_size =
       queue->compute_scratch_size_per_wave * queue->compute_scratch_waves;
    if (compute_scratch_size > compute_queue_scratch_size) {
-      compute_scratch_bo = queue->device->ws->buffer_create(queue->device->ws, compute_scratch_size,
-                                                            4096, RADEON_DOMAIN_VRAM, ring_bo_flags,
-                                                            RADV_BO_PRIORITY_SCRATCH);
-      if (!compute_scratch_bo)
+      result = queue->device->ws->buffer_create(queue->device->ws, compute_scratch_size, 4096,
+                                                RADEON_DOMAIN_VRAM, ring_bo_flags,
+                                                RADV_BO_PRIORITY_SCRATCH, &compute_scratch_bo);
+      if (result != VK_SUCCESS)
         goto fail;
    } else
       compute_scratch_bo = queue->compute_scratch_bo;

    if (esgs_ring_size > queue->esgs_ring_size) {
-      esgs_ring_bo = queue->device->ws->buffer_create(queue->device->ws, esgs_ring_size, 4096,
-                                                      RADEON_DOMAIN_VRAM, ring_bo_flags,
-                                                      RADV_BO_PRIORITY_SCRATCH);
-      if (!esgs_ring_bo)
+      result = queue->device->ws->buffer_create(queue->device->ws, esgs_ring_size, 4096,
+                                                RADEON_DOMAIN_VRAM, ring_bo_flags,
+                                                RADV_BO_PRIORITY_SCRATCH, &esgs_ring_bo);
+      if (result != VK_SUCCESS)
         goto fail;
    } else {
       esgs_ring_bo = queue->esgs_ring_bo;
@@ -3854,10 +3855,10 @@ radv_get_preamble_cs(struct radv_queue *queue, uint32_t scratch_size_per_wave,
    }

    if (gsvs_ring_size > queue->gsvs_ring_size) {
-      gsvs_ring_bo = queue->device->ws->buffer_create(queue->device->ws, gsvs_ring_size, 4096,
-                                                      RADEON_DOMAIN_VRAM, ring_bo_flags,
-                                                      RADV_BO_PRIORITY_SCRATCH);
-      if (!gsvs_ring_bo)
+      result = queue->device->ws->buffer_create(queue->device->ws, gsvs_ring_size, 4096,
+                                                RADEON_DOMAIN_VRAM, ring_bo_flags,
+                                                RADV_BO_PRIORITY_SCRATCH, &gsvs_ring_bo);
+      if (result != VK_SUCCESS)
         goto fail;
    } else {
       gsvs_ring_bo = queue->gsvs_ring_bo;
@@ -3865,10 +3866,10 @@ radv_get_preamble_cs(struct radv_queue *queue, uint32_t scratch_size_per_wave,
    }

    if (add_tess_rings) {
-      tess_rings_bo = queue->device->ws->buffer_create(
+      result = queue->device->ws->buffer_create(
          queue->device->ws, tess_offchip_ring_offset + tess_offchip_ring_size, 256,
-         RADEON_DOMAIN_VRAM, ring_bo_flags, RADV_BO_PRIORITY_SCRATCH);
-      if (!tess_rings_bo)
+         RADEON_DOMAIN_VRAM, ring_bo_flags, RADV_BO_PRIORITY_SCRATCH, &tess_rings_bo);
+      if (result != VK_SUCCESS)
         goto fail;
    } else {
       tess_rings_bo = queue->tess_rings_bo;
@@ -3880,9 +3881,9 @@ radv_get_preamble_cs(struct radv_queue *queue, uint32_t scratch_size_per_wave,
       /* 4 streamout GDS counters.
        * We need 256B (64 dw) of GDS, otherwise streamout hangs.
        */
-      gds_bo = queue->device->ws->buffer_create(queue->device->ws, 256, 4, RADEON_DOMAIN_GDS,
-                                                ring_bo_flags, RADV_BO_PRIORITY_SCRATCH);
-      if (!gds_bo)
+      result = queue->device->ws->buffer_create(queue->device->ws, 256, 4, RADEON_DOMAIN_GDS,
+                                                ring_bo_flags, RADV_BO_PRIORITY_SCRATCH, &gds_bo);
+      if (result != VK_SUCCESS)
         goto fail;
    } else {
       gds_bo = queue->gds_bo;
@@ -3891,9 +3892,10 @@ radv_get_preamble_cs(struct radv_queue *queue, uint32_t scratch_size_per_wave,
    if (add_gds_oa) {
       assert(queue->device->physical_device->rad_info.chip_class >= GFX10);

-      gds_oa_bo = queue->device->ws->buffer_create(queue->device->ws, 4, 1, RADEON_DOMAIN_OA,
-                                                   ring_bo_flags, RADV_BO_PRIORITY_SCRATCH);
-      if (!gds_oa_bo)
+      result =
+         queue->device->ws->buffer_create(queue->device->ws, 4, 1, RADEON_DOMAIN_OA, ring_bo_flags,
+                                          RADV_BO_PRIORITY_SCRATCH, &gds_oa_bo);
+      if (result != VK_SUCCESS)
         goto fail;
    } else {
       gds_oa_bo = queue->gds_oa_bo;
@@ -3910,11 +3912,11 @@ radv_get_preamble_cs(struct radv_queue *queue, uint32_t scratch_size_per_wave,
       } else if (scratch_bo)
          size = 8; /* 2 dword */

-      descriptor_bo = queue->device->ws->buffer_create(
+      result = queue->device->ws->buffer_create(
          queue->device->ws, size, 4096, RADEON_DOMAIN_VRAM,
          RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_READ_ONLY,
-         RADV_BO_PRIORITY_DESCRIPTOR);
-      if (!descriptor_bo)
+         RADV_BO_PRIORITY_DESCRIPTOR, &descriptor_bo);
+      if (result != VK_SUCCESS)
         goto fail;
    } else
       descriptor_bo = queue->descriptor_bo;
@@ -3944,8 +3946,10 @@ radv_get_preamble_cs(struct radv_queue *queue, uint32_t scratch_size_per_wave,
       struct radeon_cmdbuf *cs = NULL;
       cs = queue->device->ws->cs_create(queue->device->ws,
                                         queue->queue_family_index ? RING_COMPUTE : RING_GFX);
-      if (!cs)
+      if (!cs) {
+         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail;
+      }

       dest_cs[i] = cs;
@@ -4006,7 +4010,8 @@ radv_get_preamble_cs(struct radv_queue *queue, uint32_t scratch_size_per_wave,
                               &sqtt_flush_bits, 0);
       }

-      if (queue->device->ws->cs_finalize(cs) != VK_SUCCESS)
+      result = queue->device->ws->cs_finalize(cs);
+      if (result != VK_SUCCESS)
          goto fail;
    }
@@ -4105,7 +4110,7 @@ fail:
    if (gds_oa_bo && gds_oa_bo != queue->gds_oa_bo)
       queue->device->ws->buffer_destroy(queue->device->ws, gds_oa_bo);

-   return vk_error(queue->device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+   return vk_error(queue->device->instance, result);
 }

 static VkResult
@@ -5312,9 +5317,8 @@ radv_alloc_memory(struct radv_device *device, const VkMemoryAllocateInfo *pAlloc
    } else if (import_info) {
       assert(import_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
              import_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
-      mem->bo = device->ws->buffer_from_fd(device->ws, import_info->fd, priority, NULL);
-      if (!mem->bo) {
-         result = VK_ERROR_INVALID_EXTERNAL_HANDLE;
+      result = device->ws->buffer_from_fd(device->ws, import_info->fd, priority, &mem->bo, NULL);
+      if (result != VK_SUCCESS) {
          goto fail;
       } else {
          close(import_info->fd);
@@ -5340,10 +5344,9 @@ radv_alloc_memory(struct radv_device *device, const VkMemoryAllocateInfo *pAlloc
       }
    } else if (host_ptr_info) {
       assert(host_ptr_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT);
-      mem->bo = device->ws->buffer_from_ptr(device->ws, host_ptr_info->pHostPointer,
-                                            pAllocateInfo->allocationSize, priority);
-      if (!mem->bo) {
-         result = VK_ERROR_INVALID_EXTERNAL_HANDLE;
+      result = device->ws->buffer_from_ptr(device->ws, host_ptr_info->pHostPointer,
+                                           pAllocateInfo->allocationSize, priority, &mem->bo);
+      if (result != VK_SUCCESS) {
          goto fail;
       } else {
          mem->user_ptr = host_ptr_info->pHostPointer;
@@ -5379,17 +5382,16 @@ radv_alloc_memory(struct radv_device *device, const VkMemoryAllocateInfo *pAlloc
          mtx_unlock(&device->overallocation_mutex);
       }

-      mem->bo = device->ws->buffer_create(device->ws, alloc_size,
-                                          device->physical_device->rad_info.max_alignment, domain,
-                                          flags, priority);
-      if (!mem->bo) {
+      result = device->ws->buffer_create(device->ws, alloc_size,
+                                         device->physical_device->rad_info.max_alignment, domain,
+                                         flags, priority, &mem->bo);
+      if (result != VK_SUCCESS) {
          if (device->overallocation_disallowed) {
             mtx_lock(&device->overallocation_mutex);
             device->allocated_memory_size[heap_index] -= alloc_size;
             mtx_unlock(&device->overallocation_mutex);
          }
-         result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
          goto fail;
       }
@@ -6278,13 +6280,13 @@ radv_CreateEvent(VkDevice _device, const VkEventCreateInfo *pCreateInfo,
    vk_object_base_init(&device->vk, &event->base, VK_OBJECT_TYPE_EVENT);

-   event->bo = device->ws->buffer_create(
+   VkResult result = device->ws->buffer_create(
       device->ws, 8, 8, RADEON_DOMAIN_GTT,
       RADEON_FLAG_VA_UNCACHED | RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING,
-      RADV_BO_PRIORITY_FENCE);
-   if (!event->bo) {
+      RADV_BO_PRIORITY_FENCE, &event->bo);
+   if (result != VK_SUCCESS) {
       radv_destroy_event(device, pAllocator, event);
-      return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+      return vk_error(device->instance, result);
    }

    event->map = (uint64_t *)device->ws->buffer_map(event->bo);
@@ -6382,11 +6384,12 @@ radv_CreateBuffer(VkDevice _device, const VkBufferCreateInfo *pCreateInfo,
       vk_find_struct_const(pCreateInfo->pNext, EXTERNAL_MEMORY_BUFFER_CREATE_INFO) != NULL;

    if (pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) {
-      buffer->bo = device->ws->buffer_create(device->ws, align64(buffer->size, 4096), 4096, 0,
-                                             RADEON_FLAG_VIRTUAL, RADV_BO_PRIORITY_VIRTUAL);
-      if (!buffer->bo) {
+      VkResult result =
+         device->ws->buffer_create(device->ws, align64(buffer->size, 4096), 4096, 0,
+                                   RADEON_FLAG_VIRTUAL, RADV_BO_PRIORITY_VIRTUAL, &buffer->bo);
+      if (result != VK_SUCCESS) {
          radv_destroy_buffer(device, pAllocator, buffer);
-         return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+         return vk_error(device->instance, result);
       }
    }


@@ -1700,11 +1700,11 @@ radv_image_create(VkDevice _device, const struct radv_image_create_info *create_
       image->size = align64(image->size, image->alignment);
       image->offset = 0;

-      image->bo = device->ws->buffer_create(device->ws, image->size, image->alignment, 0,
-                                            RADEON_FLAG_VIRTUAL, RADV_BO_PRIORITY_VIRTUAL);
-      if (!image->bo) {
+      result = device->ws->buffer_create(device->ws, image->size, image->alignment, 0,
+                                         RADEON_FLAG_VIRTUAL, RADV_BO_PRIORITY_VIRTUAL, &image->bo);
+      if (result != VK_SUCCESS) {
          radv_destroy_image(device, alloc, image);
-         return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+         return vk_error(device->instance, result);
       }
    }

    image->l2_coherent = radv_image_is_l2_coherent(device, image);


@@ -976,12 +976,12 @@ radv_CreateQueryPool(VkDevice _device, const VkQueryPoolCreateInfo *pCreateInfo,
    if (pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS)
       pool->size += 4 * pCreateInfo->queryCount;

-   pool->bo =
-      device->ws->buffer_create(device->ws, pool->size, 64, RADEON_DOMAIN_GTT,
-                                RADEON_FLAG_NO_INTERPROCESS_SHARING, RADV_BO_PRIORITY_QUERY_POOL);
-   if (!pool->bo) {
+   VkResult result = device->ws->buffer_create(device->ws, pool->size, 64, RADEON_DOMAIN_GTT,
+                                               RADEON_FLAG_NO_INTERPROCESS_SHARING,
+                                               RADV_BO_PRIORITY_QUERY_POOL, &pool->bo);
+   if (result != VK_SUCCESS) {
       radv_destroy_query_pool(device, pAllocator, pool);
-      return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+      return vk_error(device->instance, result);
    }

    pool->ptr = device->ws->buffer_map(pool->bo);


@@ -222,18 +222,18 @@ struct radeon_winsys {
    const char *(*get_chip_name)(struct radeon_winsys *ws);

-   struct radeon_winsys_bo *(*buffer_create)(struct radeon_winsys *ws, uint64_t size,
-                                             unsigned alignment, enum radeon_bo_domain domain,
-                                             enum radeon_bo_flag flags, unsigned priority);
+   VkResult (*buffer_create)(struct radeon_winsys *ws, uint64_t size, unsigned alignment,
+                             enum radeon_bo_domain domain, enum radeon_bo_flag flags,
+                             unsigned priority, struct radeon_winsys_bo **out_bo);

    void (*buffer_destroy)(struct radeon_winsys *ws, struct radeon_winsys_bo *bo);

    void *(*buffer_map)(struct radeon_winsys_bo *bo);

-   struct radeon_winsys_bo *(*buffer_from_ptr)(struct radeon_winsys *ws, void *pointer,
-                                               uint64_t size, unsigned priority);
+   VkResult (*buffer_from_ptr)(struct radeon_winsys *ws, void *pointer, uint64_t size,
+                               unsigned priority, struct radeon_winsys_bo **out_bo);

-   struct radeon_winsys_bo *(*buffer_from_fd)(struct radeon_winsys *ws, int fd, unsigned priority,
-                                              uint64_t *alloc_size);
+   VkResult (*buffer_from_fd)(struct radeon_winsys *ws, int fd, unsigned priority,
+                              struct radeon_winsys_bo **out_bo, uint64_t *alloc_size);

    bool (*buffer_get_fd)(struct radeon_winsys *ws, struct radeon_winsys_bo *bo, int *fd);
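
Note the contract these prototypes imply, which the backend implementations below uphold: on every failure path *out_bo is set to NULL before the error is returned, and in buffer_from_fd the new out_bo parameter is ordered before the optional alloc_size output. A hedged caller-side sketch (variable names are illustrative):

    struct radeon_winsys_bo *bo = NULL;
    uint64_t alloc_size = 0;
    VkResult result = ws->buffer_from_fd(ws, fd, RADV_BO_PRIORITY_CS, &bo, &alloc_size);
    if (result != VK_SUCCESS)
       return result; /* bo is guaranteed NULL here, so no BO cleanup is needed */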


@@ -918,13 +918,13 @@ radv_alloc_shader_memory(struct radv_device *device, struct radv_shader_variant
    struct radv_shader_slab *slab = calloc(1, sizeof(struct radv_shader_slab));

    slab->size = MAX2(256 * 1024, shader->code_size);
-   slab->bo = device->ws->buffer_create(
+   VkResult result = device->ws->buffer_create(
       device->ws, slab->size, 256, RADEON_DOMAIN_VRAM,
       RADEON_FLAG_NO_INTERPROCESS_SHARING |
          (device->physical_device->rad_info.cpdma_prefetch_writes_memory ? 0
                                                                          : RADEON_FLAG_READ_ONLY),
-      RADV_BO_PRIORITY_SHADER);
-   if (!slab->bo) {
+      RADV_BO_PRIORITY_SHADER, &slab->bo);
+   if (result != VK_SUCCESS) {
       free(slab);
       return NULL;
    }


@@ -381,11 +381,13 @@ radv_thread_trace_init_bo(struct radv_device *device)
    size = align64(sizeof(struct ac_thread_trace_info) * max_se, 1 << SQTT_BUFFER_ALIGN_SHIFT);
    size += device->thread_trace.buffer_size * (uint64_t)max_se;

-   device->thread_trace.bo = ws->buffer_create(
+   struct radeon_winsys_bo *bo = NULL;
+   VkResult result = ws->buffer_create(
       ws, size, 4096, RADEON_DOMAIN_VRAM,
       RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_ZERO_VRAM,
-      RADV_BO_PRIORITY_SCRATCH);
-   if (!device->thread_trace.bo)
+      RADV_BO_PRIORITY_SCRATCH, &bo);
+   device->thread_trace.bo = bo;
+   if (result != VK_SUCCESS)
       return false;

    device->thread_trace.ptr = ws->buffer_map(device->thread_trace.bo);


@@ -628,12 +628,12 @@ cik_create_gfx_config(struct radv_device *device)
       radeon_emit(cs, PKT3_NOP_PAD);
    }

-   device->gfx_init =
+   VkResult result =
       device->ws->buffer_create(device->ws, cs->cdw * 4, 4096, device->ws->cs_domain(device->ws),
                                 RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING |
                                    RADEON_FLAG_READ_ONLY | RADEON_FLAG_GTT_WC,
-                                 RADV_BO_PRIORITY_CS);
-   if (!device->gfx_init)
+                                 RADV_BO_PRIORITY_CS, &device->gfx_init);
+   if (result != VK_SUCCESS)
       goto fail;

    void *map = device->ws->buffer_map(device->gfx_init);


@@ -391,10 +391,10 @@ radv_amdgpu_winsys_bo_destroy(struct radeon_winsys *_ws, struct radeon_winsys_bo
    FREE(bo);
 }

-static struct radeon_winsys_bo *
+static VkResult
 radv_amdgpu_winsys_bo_create(struct radeon_winsys *_ws, uint64_t size, unsigned alignment,
                              enum radeon_bo_domain initial_domain, enum radeon_bo_flag flags,
-                             unsigned priority)
+                             unsigned priority, struct radeon_winsys_bo **out_bo)
 {
    struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
    struct radv_amdgpu_winsys_bo *bo;
@@ -404,9 +404,15 @@ radv_amdgpu_winsys_bo_create(struct radeon_winsys *_ws, uint64_t size, unsigned
    uint64_t va = 0;
    amdgpu_va_handle va_handle;
    int r;
+   VkResult result = VK_SUCCESS;
+
+   /* Just be robust for callers that might use NULL-ness for determining if things should be freed.
+    */
+   *out_bo = NULL;
+
    bo = CALLOC_STRUCT(radv_amdgpu_winsys_bo);
    if (!bo) {
-      return NULL;
+      return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    unsigned virt_alignment = alignment;
@@ -416,8 +422,10 @@ radv_amdgpu_winsys_bo_create(struct radeon_winsys *_ws, uint64_t size, unsigned
    r = amdgpu_va_range_alloc(
       ws->dev, amdgpu_gpu_va_range_general, size, virt_alignment, 0, &va, &va_handle,
       (flags & RADEON_FLAG_32BIT ? AMDGPU_VA_RANGE_32_BIT : 0) | AMDGPU_VA_RANGE_HIGH);
-   if (r)
+   if (r) {
+      result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
       goto error_va_alloc;
+   }

    bo->base.va = va;
    bo->va_handle = va_handle;
@@ -427,8 +435,10 @@ radv_amdgpu_winsys_bo_create(struct radeon_winsys *_ws, uint64_t size, unsigned
    if (flags & RADEON_FLAG_VIRTUAL) {
       ranges = realloc(NULL, sizeof(struct radv_amdgpu_map_range));
-      if (!ranges)
+      if (!ranges) {
+         result = VK_ERROR_OUT_OF_HOST_MEMORY;
          goto error_ranges_alloc;
+      }

       bo->ranges = ranges;
       bo->range_count = 1;
@@ -442,7 +452,8 @@ radv_amdgpu_winsys_bo_create(struct radeon_winsys *_ws, uint64_t size, unsigned
       radv_amdgpu_winsys_virtual_map(ws, bo, bo->ranges);
       radv_amdgpu_log_bo(ws, bo, false);

-      return (struct radeon_winsys_bo *)bo;
+      *out_bo = (struct radeon_winsys_bo *)bo;
+      return VK_SUCCESS;
    }

    request.alloc_size = size;
@@ -504,12 +515,15 @@ radv_amdgpu_winsys_bo_create(struct radeon_winsys *_ws, uint64_t size, unsigned
       fprintf(stderr, "amdgpu: size      : %" PRIu64 " bytes\n", size);
       fprintf(stderr, "amdgpu: alignment : %u bytes\n", alignment);
       fprintf(stderr, "amdgpu: domains   : %u\n", initial_domain);
+      result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
       goto error_bo_alloc;
    }

    r = radv_amdgpu_bo_va_op(ws, buf_handle, 0, size, va, flags, 0, AMDGPU_VA_OP_MAP);
-   if (r)
+   if (r) {
+      result = VK_ERROR_UNKNOWN;
       goto error_va_map;
+   }

    bo->bo = buf_handle;
    bo->base.initial_domain = initial_domain;
@@ -543,7 +557,8 @@ radv_amdgpu_winsys_bo_create(struct radeon_winsys *_ws, uint64_t size, unsigned
    radv_amdgpu_global_bo_list_add(ws, bo);
    radv_amdgpu_log_bo(ws, bo, false);

-   return (struct radeon_winsys_bo *)bo;
+   *out_bo = (struct radeon_winsys_bo *)bo;
+   return VK_SUCCESS;

 error_va_map:
    amdgpu_bo_free(buf_handle);
@@ -555,7 +570,7 @@ error_ranges_alloc:

 error_va_alloc:
    FREE(bo);
-   return NULL;
+   return result;
 }

 static void *
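
The amdgpu paths above all follow the same goto-unwind shape: each failing libdrm call picks an appropriate VkResult (VA exhaustion maps to VK_ERROR_OUT_OF_DEVICE_MEMORY, unexpected kernel failures to VK_ERROR_UNKNOWN), jumps to the label that frees everything allocated so far, and the error returns with *out_bo still NULL. A condensed, hedged sketch of that shape (sketch_bo_create is a made-up name; only the VA-allocation step is shown):

    static VkResult
    sketch_bo_create(struct radv_amdgpu_winsys *ws, uint64_t size,
                     struct radeon_winsys_bo **out_bo)
    {
       struct radv_amdgpu_winsys_bo *bo;
       amdgpu_va_handle va_handle;
       uint64_t va = 0;
       VkResult result = VK_SUCCESS;

       *out_bo = NULL; /* callers may use NULL-ness to decide what to free */

       bo = CALLOC_STRUCT(radv_amdgpu_winsys_bo);
       if (!bo)
          return VK_ERROR_OUT_OF_HOST_MEMORY;

       if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general, size, 4096, 0,
                                 &va, &va_handle, AMDGPU_VA_RANGE_HIGH)) {
          result = VK_ERROR_OUT_OF_DEVICE_MEMORY; /* VA space exhausted */
          goto error_va_alloc;
       }

       bo->base.va = va;
       bo->va_handle = va_handle;
       *out_bo = (struct radeon_winsys_bo *)bo;
       return VK_SUCCESS;

    error_va_alloc:
       FREE(bo);
       return result;
    }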
@@ -599,9 +614,9 @@ radv_amdgpu_get_optimal_vm_alignment(struct radv_amdgpu_winsys *ws, uint64_t siz
    return vm_alignment;
 }

-static struct radeon_winsys_bo *
+static VkResult
 radv_amdgpu_winsys_bo_from_ptr(struct radeon_winsys *_ws, void *pointer, uint64_t size,
-                               unsigned priority)
+                               unsigned priority, struct radeon_winsys_bo **out_bo)
 {
    struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
    amdgpu_bo_handle buf_handle;
@@ -609,13 +624,20 @@ radv_amdgpu_winsys_bo_from_ptr(struct radeon_winsys *_ws, void *pointer, uint64_
    uint64_t va;
    amdgpu_va_handle va_handle;
    uint64_t vm_alignment;
+   VkResult result = VK_SUCCESS;
+
+   /* Just be robust for callers that might use NULL-ness for determining if things should be freed.
+    */
+   *out_bo = NULL;

    bo = CALLOC_STRUCT(radv_amdgpu_winsys_bo);
    if (!bo)
-      return NULL;
+      return VK_ERROR_OUT_OF_HOST_MEMORY;

-   if (amdgpu_create_bo_from_user_mem(ws->dev, pointer, size, &buf_handle))
+   if (amdgpu_create_bo_from_user_mem(ws->dev, pointer, size, &buf_handle)) {
+      result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
       goto error;
+   }

    /* Using the optimal VM alignment also fixes GPU hangs for buffers that
     * are imported.
@@ -623,11 +645,15 @@ radv_amdgpu_winsys_bo_from_ptr(struct radeon_winsys *_ws, void *pointer, uint64_
    vm_alignment = radv_amdgpu_get_optimal_vm_alignment(ws, size, ws->info.gart_page_size);

    if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general, size, vm_alignment, 0, &va,
-                             &va_handle, AMDGPU_VA_RANGE_HIGH))
+                             &va_handle, AMDGPU_VA_RANGE_HIGH)) {
+      result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
       goto error_va_alloc;
+   }

-   if (amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP))
+   if (amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP)) {
+      result = VK_ERROR_UNKNOWN;
       goto error_va_map;
+   }

    /* Initialize it */
    bo->base.va = va;
@@ -648,7 +674,8 @@ radv_amdgpu_winsys_bo_from_ptr(struct radeon_winsys *_ws, void *pointer, uint64_
    radv_amdgpu_global_bo_list_add(ws, bo);
    radv_amdgpu_log_bo(ws, bo, false);

-   return (struct radeon_winsys_bo *)bo;
+   *out_bo = (struct radeon_winsys_bo *)bo;
+   return VK_SUCCESS;

 error_va_map:
    amdgpu_va_range_free(va_handle);
@@ -658,12 +685,12 @@ error_va_alloc:

 error:
    FREE(bo);
-   return NULL;
+   return result;
 }

-static struct radeon_winsys_bo *
+static VkResult
 radv_amdgpu_winsys_bo_from_fd(struct radeon_winsys *_ws, int fd, unsigned priority,
-                              uint64_t *alloc_size)
+                              struct radeon_winsys_bo **out_bo, uint64_t *alloc_size)
 {
    struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
    struct radv_amdgpu_winsys_bo *bo;
@@ -674,17 +701,27 @@ radv_amdgpu_winsys_bo_from_fd(struct radeon_winsys *_ws, int fd, unsigned priori
    struct amdgpu_bo_info info = {0};
    enum radeon_bo_domain initial = 0;
    int r;
+   VkResult vk_result = VK_SUCCESS;
+
+   /* Just be robust for callers that might use NULL-ness for determining if things should be freed.
+    */
+   *out_bo = NULL;

    bo = CALLOC_STRUCT(radv_amdgpu_winsys_bo);
    if (!bo)
-      return NULL;
+      return VK_ERROR_OUT_OF_HOST_MEMORY;

    r = amdgpu_bo_import(ws->dev, type, fd, &result);
-   if (r)
+   if (r) {
+      vk_result = VK_ERROR_INVALID_EXTERNAL_HANDLE;
       goto error;
+   }

    r = amdgpu_bo_query_info(result.buf_handle, &info);
-   if (r)
+   if (r) {
+      vk_result = VK_ERROR_UNKNOWN;
       goto error_query;
+   }

    if (alloc_size) {
       *alloc_size = info.alloc_size;
@@ -692,13 +729,17 @@ radv_amdgpu_winsys_bo_from_fd(struct radeon_winsys *_ws, int fd, unsigned priori
    r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general, result.alloc_size, 1 << 20, 0,
                              &va, &va_handle, AMDGPU_VA_RANGE_HIGH);
-   if (r)
+   if (r) {
+      vk_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
       goto error_query;
+   }

    r =
       radv_amdgpu_bo_va_op(ws, result.buf_handle, 0, result.alloc_size, va, 0, 0, AMDGPU_VA_OP_MAP);
-   if (r)
+   if (r) {
+      vk_result = VK_ERROR_UNKNOWN;
       goto error_va_map;
+   }

    if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
       initial |= RADEON_DOMAIN_VRAM;
@@ -727,7 +768,8 @@ radv_amdgpu_winsys_bo_from_fd(struct radeon_winsys *_ws, int fd, unsigned priori
    radv_amdgpu_global_bo_list_add(ws, bo);
    radv_amdgpu_log_bo(ws, bo, false);

-   return (struct radeon_winsys_bo *)bo;
+   *out_bo = (struct radeon_winsys_bo *)bo;
+   return VK_SUCCESS;

 error_va_map:
    amdgpu_va_range_free(va_handle);
@@ -736,7 +778,7 @@ error_query:

 error:
    FREE(bo);
-   return NULL;
+   return vk_result;
 }

 static bool


@@ -209,12 +209,12 @@ radv_amdgpu_cs_create(struct radeon_winsys *ws, enum ring_type ring_type)
    radv_amdgpu_init_cs(cs, ring_type);

    if (cs->ws->use_ib_bos) {
-      cs->ib_buffer =
+      VkResult result =
          ws->buffer_create(ws, ib_size, 0, radv_amdgpu_cs_domain(ws),
                            RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING |
                               RADEON_FLAG_READ_ONLY | RADEON_FLAG_GTT_WC,
-                           RADV_BO_PRIORITY_CS);
-      if (!cs->ib_buffer) {
+                           RADV_BO_PRIORITY_CS, &cs->ib_buffer);
+      if (result != VK_SUCCESS) {
          free(cs);
          return NULL;
       }
@@ -330,13 +330,13 @@ radv_amdgpu_cs_grow(struct radeon_cmdbuf *_cs, size_t min_size)
    cs->old_ib_buffers[cs->num_old_ib_buffers].bo = cs->ib_buffer;
    cs->old_ib_buffers[cs->num_old_ib_buffers++].cdw = cs->base.cdw;

-   cs->ib_buffer =
+   VkResult result =
       cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0, radv_amdgpu_cs_domain(&cs->ws->base),
                                  RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING |
                                     RADEON_FLAG_READ_ONLY | RADEON_FLAG_GTT_WC,
-                                 RADV_BO_PRIORITY_CS);
+                                 RADV_BO_PRIORITY_CS, &cs->ib_buffer);

-   if (!cs->ib_buffer) {
+   if (result != VK_SUCCESS) {
       cs->base.cdw = 0;
       cs->status = VK_ERROR_OUT_OF_DEVICE_MEMORY;
       cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers].bo;
@@ -1033,10 +1033,10 @@ radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx, int queue_id
             pad_words++;
          }

-         bos[j] = ws->buffer_create(
+         ws->buffer_create(
             ws, 4 * size, 4096, radv_amdgpu_cs_domain(ws),
             RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_READ_ONLY,
-            RADV_BO_PRIORITY_CS);
+            RADV_BO_PRIORITY_CS, &bos[j]);
          ptr = ws->buffer_map(bos[j]);

          if (needs_preamble) {
@@ -1076,10 +1076,10 @@ radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx, int queue_id
       }

       assert(cnt);

-      bos[0] = ws->buffer_create(
+      ws->buffer_create(
          ws, 4 * size, 4096, radv_amdgpu_cs_domain(ws),
          RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_READ_ONLY,
-         RADV_BO_PRIORITY_CS);
+         RADV_BO_PRIORITY_CS, &bos[0]);
       ptr = ws->buffer_map(bos[0]);

       if (preamble_cs) {
@@ -1262,11 +1262,10 @@ radv_amdgpu_ctx_create(struct radeon_winsys *_ws, enum radeon_ctx_priority prior
    ctx->ws = ws;

    assert(AMDGPU_HW_IP_NUM * MAX_RINGS_PER_TYPE * sizeof(uint64_t) <= 4096);
-   ctx->fence_bo = ws->base.buffer_create(
-      &ws->base, 4096, 8, RADEON_DOMAIN_GTT,
-      RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING, RADV_BO_PRIORITY_CS);
-   if (!ctx->fence_bo) {
-      result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
+   result = ws->base.buffer_create(&ws->base, 4096, 8, RADEON_DOMAIN_GTT,
+                                   RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING,
+                                   RADV_BO_PRIORITY_CS, &ctx->fence_bo);
+   if (result != VK_SUCCESS) {
       goto fail_alloc;
    }


@@ -28,25 +28,29 @@
 #include "radv_null_bo.h"

 #include "util/u_memory.h"

-static struct radeon_winsys_bo *
+static VkResult
 radv_null_winsys_bo_create(struct radeon_winsys *_ws, uint64_t size, unsigned alignment,
                            enum radeon_bo_domain initial_domain, enum radeon_bo_flag flags,
-                           unsigned priority)
+                           unsigned priority, struct radeon_winsys_bo **out_bo)
 {
    struct radv_null_winsys_bo *bo;

+   /* Courtesy for users using NULL to check if they need to destroy the BO. */
+   *out_bo = NULL;
+
    bo = CALLOC_STRUCT(radv_null_winsys_bo);
    if (!bo)
-      return NULL;
+      return VK_ERROR_OUT_OF_HOST_MEMORY;

    bo->ptr = malloc(size);
    if (!bo->ptr)
       goto error_ptr_alloc;

-   return (struct radeon_winsys_bo *)bo;
+   *out_bo = (struct radeon_winsys_bo *)bo;
+   return VK_SUCCESS;

 error_ptr_alloc:
    FREE(bo);
-   return NULL;
+   return VK_ERROR_OUT_OF_HOST_MEMORY;
 }

 static void *