anv: Implement VK_EXT_device_memory_report

Report device memory events for:
  - command buffers
  - pipelines
  - descriptor sets and descriptor pools
  - device memory

Co-authored-by: shenghualin <shenghua.lin@intel.com>
Co-authored-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/33767>
Author:     Lucas Fryzek
Date:       2025-02-17 10:07:04 +00:00
Committed:  Marge Bot
Parent:     cfcc522bf8
Commit:     f01ad7c34c
12 changed files with 191 additions and 32 deletions
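For context, applications consume these events by registering a callback when creating the device. Below is a minimal consumer sketch using the standard VK_EXT_device_memory_report API from the Vulkan spec (not part of this patch; the function and callback names are hypothetical):

   #include <inttypes.h>
   #include <stdio.h>
   #include <vulkan/vulkan.h>

   static VKAPI_ATTR void VKAPI_CALL
   memory_report_cb(const VkDeviceMemoryReportCallbackDataEXT *data,
                    void *user_data)
   {
      /* With this patch, anv also reports events for driver-internal
       * allocations backing command buffers, pipelines and descriptor
       * pools, not just VkDeviceMemory. */
      printf("event=%d objType=%d handle=0x%" PRIx64 " size=%" PRIu64 " heap=%u\n",
             data->type, data->objectType, data->objectHandle,
             (uint64_t)data->size, data->heapIndex);
   }

   static VkResult
   create_device_with_memory_report(VkPhysicalDevice pdev,
                                    const VkDeviceQueueCreateInfo *queue_info,
                                    VkDevice *out_device)
   {
      /* The deviceMemoryReport feature must be enabled along with the
       * extension; the callback is chained into VkDeviceCreateInfo::pNext. */
      VkPhysicalDeviceDeviceMemoryReportFeaturesEXT report_features = {
         .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT,
         .deviceMemoryReport = VK_TRUE,
      };
      VkDeviceDeviceMemoryReportCreateInfoEXT report_info = {
         .sType = VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT,
         .pNext = &report_features,
         .pfnUserCallback = memory_report_cb,
         .pUserData = NULL,
      };
      const char *exts[] = { VK_EXT_DEVICE_MEMORY_REPORT_EXTENSION_NAME };
      VkDeviceCreateInfo info = {
         .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
         .pNext = &report_info,
         .queueCreateInfoCount = 1,
         .pQueueCreateInfos = queue_info,
         .enabledExtensionCount = 1,
         .ppEnabledExtensionNames = exts,
      };
      return vkCreateDevice(pdev, &info, NULL, out_device);
   }

Callbacks fire during the Vulkan command that triggers the event, so handlers should be quick and avoid calling back into Vulkan.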


@@ -261,6 +261,7 @@ anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
    result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
                               size, &bbo->bo);
+   ANV_DMR_BO_ALLOC(&cmd_buffer->vk.base, bbo->bo, result);
    if (result != VK_SUCCESS)
       goto fail_alloc;
@@ -274,6 +275,7 @@ anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
    return VK_SUCCESS;
 fail_bo_alloc:
+   ANV_DMR_BO_FREE(&cmd_buffer->vk.base, bbo->bo);
    anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
 fail_alloc:
    vk_free(&cmd_buffer->vk.pool->alloc, bbo);
@@ -295,6 +297,7 @@ anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
    result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
                               other_bbo->bo->size, &bbo->bo);
+   ANV_DMR_BO_ALLOC(&cmd_buffer->vk.base, bbo->bo, result);
    if (result != VK_SUCCESS)
       goto fail_alloc;
@@ -309,7 +312,9 @@ anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
    return VK_SUCCESS;
 fail_bo_alloc:
+   ANV_DMR_BO_FREE(&cmd_buffer->vk.base, bbo->bo);
    anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
 fail_alloc:
    vk_free(&cmd_buffer->vk.pool->alloc, bbo);
@@ -374,6 +379,7 @@ anv_batch_bo_destroy(struct anv_batch_bo *bbo,
                      struct anv_cmd_buffer *cmd_buffer)
 {
    anv_reloc_list_finish(&bbo->relocs);
+   ANV_DMR_BO_FREE(&cmd_buffer->vk.base, bbo->bo);
    anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
    vk_free(&cmd_buffer->vk.pool->alloc, bbo);
 }
@@ -812,6 +818,7 @@ anv_cmd_buffer_alloc_space(struct anv_cmd_buffer *cmd_buffer,
                               &cmd_buffer->device->batch_bo_pool :
                               &cmd_buffer->device->bvh_bo_pool,
                               align(size, 4096), &bo);
+   ANV_DMR_BO_ALLOC(&cmd_buffer->vk.base, bo, result);
    if (result != VK_SUCCESS) {
       anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_DEVICE_MEMORY);
       return ANV_EMPTY_ALLOC;
@@ -821,9 +828,11 @@ anv_cmd_buffer_alloc_space(struct anv_cmd_buffer *cmd_buffer,
       u_vector_add(&cmd_buffer->dynamic_bos);
    if (bo_entry == NULL) {
       anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
+      ANV_DMR_BO_FREE(&cmd_buffer->vk.base, bo);
       anv_bo_pool_free(bo->map != NULL ?
                        &cmd_buffer->device->batch_bo_pool :
                        &cmd_buffer->device->bvh_bo_pool, bo);
       return ANV_EMPTY_ALLOC;
    }
    *bo_entry = bo;
@@ -952,6 +961,7 @@ anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
    }
    if (cmd_buffer->generation.ring_bo) {
+      ANV_DMR_BO_FREE(&cmd_buffer->vk.base, cmd_buffer->generation.ring_bo);
       anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool,
                        cmd_buffer->generation.ring_bo);
    }
@@ -1006,6 +1016,7 @@ anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
    cmd_buffer->generation.batch.next = NULL;
    if (cmd_buffer->generation.ring_bo) {
+      ANV_DMR_BO_FREE(&cmd_buffer->vk.base, cmd_buffer->generation.ring_bo);
       anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool,
                        cmd_buffer->generation.ring_bo);
       cmd_buffer->generation.ring_bo = NULL;
@@ -1671,6 +1682,7 @@ anv_async_submit_extend_batch(struct anv_batch *batch, uint32_t size,
                               void *user_data)
 {
    struct anv_async_submit *submit = user_data;
+   struct anv_queue *queue = submit->queue;
    uint32_t alloc_size = 0;
    util_dynarray_foreach(&submit->batch_bos, struct anv_bo *, bo)
@@ -1681,6 +1693,7 @@ anv_async_submit_extend_batch(struct anv_batch *batch, uint32_t size,
    VkResult result = anv_bo_pool_alloc(submit->bo_pool,
                                        align(alloc_size, 4096),
                                        &bo);
+   ANV_DMR_BO_ALLOC(&queue->vk.base, bo, result);
    if (result != VK_SUCCESS)
       return result;
@@ -1753,12 +1766,16 @@ void
 anv_async_submit_fini(struct anv_async_submit *submit)
 {
    struct anv_device *device = submit->queue->device;
+   struct anv_queue *queue = submit->queue;
    if (submit->owns_sync)
       vk_sync_destroy(&device->vk, submit->signal.sync);
-   util_dynarray_foreach(&submit->batch_bos, struct anv_bo *, bo)
+   util_dynarray_foreach(&submit->batch_bos, struct anv_bo *, bo) {
+      ANV_DMR_BO_FREE(&queue->vk.base, *bo);
       anv_bo_pool_free(submit->bo_pool, *bo);
+   }
    util_dynarray_fini(&submit->batch_bos);
    anv_reloc_list_finish(&submit->relocs);
 }


@@ -210,6 +210,7 @@ destroy_cmd_buffer(struct anv_cmd_buffer *cmd_buffer)
    while (u_vector_length(&cmd_buffer->dynamic_bos) > 0) {
       struct anv_bo **bo = u_vector_remove(&cmd_buffer->dynamic_bos);
+      ANV_DMR_BO_FREE(&cmd_buffer->vk.base, *bo);
       anv_bo_pool_free((*bo)->map != NULL ?
                        &cmd_buffer->device->batch_bo_pool :
                        &cmd_buffer->device->bvh_bo_pool, *bo);
@@ -458,6 +459,7 @@ anv_cmd_buffer_set_ray_query_buffer(struct anv_cmd_buffer *cmd_buffer,
                                    ANV_BO_ALLOC_INTERNAL, /* alloc_flags */
                                    0, /* explicit_address */
                                    &new_bo);
+      ANV_DMR_BO_ALLOC(&cmd_buffer->vk.base, new_bo, result);
       if (result != VK_SUCCESS) {
          anv_batch_set_error(&cmd_buffer->batch, result);
          return;
@@ -465,6 +467,7 @@ anv_cmd_buffer_set_ray_query_buffer(struct anv_cmd_buffer *cmd_buffer,
       bo = p_atomic_cmpxchg(&device->ray_query_shadow_bos[idx][bucket], NULL, new_bo);
       if (bo != NULL) {
+         ANV_DMR_BO_FREE(&device->vk.base, new_bo);
          anv_device_release_bo(device, new_bo);
       } else {
          bo = new_bo;
@@ -1445,6 +1448,7 @@ void anv_CmdSetRayTracingPipelineStackSizeKHR(
                                    ANV_BO_ALLOC_INTERNAL, /* alloc_flags */
                                    0, /* explicit_address */
                                    &new_bo);
+      ANV_DMR_BO_ALLOC(&device->vk.base, new_bo, result);
       if (result != VK_SUCCESS) {
          rt->scratch.layout.total_size = 0;
          anv_batch_set_error(&cmd_buffer->batch, result);
@@ -1453,6 +1457,7 @@ void anv_CmdSetRayTracingPipelineStackSizeKHR(
       bo = p_atomic_cmpxchg(&device->rt_scratch_bos[bucket], NULL, new_bo);
       if (bo != NULL) {
+         ANV_DMR_BO_FREE(&device->vk.base, new_bo);
          anv_device_release_bo(device, new_bo);
       } else {
          bo = new_bo;


@@ -1291,15 +1291,15 @@ void anv_DestroyPipelineLayout(
 static VkResult
 anv_descriptor_pool_heap_init(struct anv_device *device,
+                              struct anv_descriptor_pool *pool,
                               struct anv_descriptor_pool_heap *heap,
                               uint32_t size,
-                              bool host_only,
                               bool samplers)
 {
    if (size == 0)
       return VK_SUCCESS;
-   if (host_only) {
+   if (pool->host_only) {
       heap->size = size;
       heap->host_mem = vk_zalloc(&device->vk.alloc, size, 8,
                                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
@@ -1322,6 +1322,7 @@ anv_descriptor_pool_heap_init(struct anv_device *device,
                                    ANV_BO_ALLOC_DESCRIPTOR_POOL),
                                 0 /* explicit_address */,
                                 &heap->bo);
+      ANV_DMR_BO_ALLOC(&pool->base, heap->bo, result);
       if (result != VK_SUCCESS)
          return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
    }
@@ -1332,7 +1333,7 @@ anv_descriptor_pool_heap_init(struct anv_device *device,
 }
 static void
-anv_descriptor_pool_heap_fini(struct anv_device *device,
+anv_descriptor_pool_heap_fini(struct anv_device *device, struct anv_descriptor_pool *pool,
                               struct anv_descriptor_pool_heap *heap)
 {
    if (heap->size == 0)
@@ -1340,8 +1341,10 @@ anv_descriptor_pool_heap_fini(struct anv_device *device,
    util_vma_heap_finish(&heap->heap);
-   if (heap->bo)
+   if (heap->bo) {
+      ANV_DMR_BO_FREE(&pool->base, heap->bo);
       anv_device_release_bo(device, heap->bo);
+   }
    if (heap->host_mem)
       vk_free(&device->vk.alloc, heap->host_mem);
@@ -1359,8 +1362,10 @@ anv_descriptor_pool_heap_reset(struct anv_device *device,
 }
 static VkResult
-anv_descriptor_pool_heap_alloc(struct anv_descriptor_pool *pool,
+anv_descriptor_pool_heap_alloc(struct anv_device *device,
+                               struct anv_descriptor_pool *pool,
                                struct anv_descriptor_pool_heap *heap,
+                               struct anv_descriptor_set *set,
                                uint32_t size, uint32_t alignment,
                                struct anv_state *state)
 {
@@ -1388,7 +1393,10 @@ anv_descriptor_pool_heap_alloc(struct anv_descriptor_pool *pool,
 }
 static void
-anv_descriptor_pool_heap_free(struct anv_descriptor_pool_heap *heap,
+anv_descriptor_pool_heap_free(struct anv_device *device,
+                              struct anv_descriptor_pool *pool,
+                              struct anv_descriptor_pool_heap *heap,
+                              struct anv_descriptor_set *set,
                               struct anv_state state)
 {
    heap->alloc_size -= state.alloc_size;
@@ -1532,9 +1540,9 @@ VkResult anv_CreateDescriptorPool(
    pool->host_only = host_only;
    VkResult result = anv_descriptor_pool_heap_init(device,
+                                                   pool,
                                                    &pool->surfaces,
                                                    descriptor_bo_surface_size,
-                                                   pool->host_only,
                                                    false /* samplers */);
    if (result != VK_SUCCESS) {
       vk_object_free(&device->vk, pAllocator, pool);
@@ -1542,12 +1550,12 @@ VkResult anv_CreateDescriptorPool(
    }
    result = anv_descriptor_pool_heap_init(device,
+                                          pool,
                                           &pool->samplers,
                                           descriptor_bo_sampler_size,
-                                          pool->host_only,
                                           true /* samplers */);
    if (result != VK_SUCCESS) {
-      anv_descriptor_pool_heap_fini(device, &pool->surfaces);
+      anv_descriptor_pool_heap_fini(device, pool, &pool->surfaces);
       vk_object_free(&device->vk, pAllocator, pool);
       return result;
    }
@@ -1591,8 +1599,8 @@ void anv_DestroyDescriptorPool(
    anv_state_stream_finish(&pool->surface_state_stream);
-   anv_descriptor_pool_heap_fini(device, &pool->surfaces);
-   anv_descriptor_pool_heap_fini(device, &pool->samplers);
+   anv_descriptor_pool_heap_fini(device, pool, &pool->surfaces);
+   anv_descriptor_pool_heap_fini(device, pool, &pool->samplers);
    vk_object_free(&device->vk, pAllocator, pool);
 }
@@ -1727,6 +1735,9 @@ anv_descriptor_set_create(struct anv_device *device,
    if (result != VK_SUCCESS)
       return result;
+   vk_object_base_init(&device->vk, &set->base,
+                       VK_OBJECT_TYPE_DESCRIPTOR_SET);
    uint32_t descriptor_buffer_surface_size, descriptor_buffer_sampler_size;
    anv_descriptor_set_layout_descriptor_buffer_size(layout, var_desc_count,
                                                     &descriptor_buffer_surface_size,
@@ -1736,7 +1747,7 @@ anv_descriptor_set_create(struct anv_device *device,
    set->is_push = false;
    if (descriptor_buffer_surface_size) {
-      result = anv_descriptor_pool_heap_alloc(pool, &pool->surfaces,
+      result = anv_descriptor_pool_heap_alloc(device, pool, &pool->surfaces, set,
                                               descriptor_buffer_surface_size,
                                               ANV_UBO_ALIGNMENT,
                                               &set->desc_surface_mem);
@@ -1775,7 +1786,8 @@ anv_descriptor_set_create(struct anv_device *device,
    }
    if (descriptor_buffer_sampler_size) {
-      result = anv_descriptor_pool_heap_alloc(pool, &pool->samplers,
+      result = anv_descriptor_pool_heap_alloc(device, pool, &pool->samplers,
+                                              set,
                                               descriptor_buffer_sampler_size,
                                               ANV_SAMPLER_STATE_SIZE,
                                               &set->desc_sampler_mem);
@@ -1793,8 +1805,6 @@ anv_descriptor_set_create(struct anv_device *device,
       set->desc_sampler_addr = ANV_NULL_ADDRESS;
    }
-   vk_object_base_init(&device->vk, &set->base,
-                       VK_OBJECT_TYPE_DESCRIPTOR_SET);
    set->pool = pool;
    set->layout = layout;
    anv_descriptor_set_layout_ref(layout);
@@ -1873,13 +1883,14 @@ anv_descriptor_set_destroy(struct anv_device *device,
    anv_descriptor_set_layout_unref(device, set->layout);
    if (set->desc_surface_mem.alloc_size) {
-      anv_descriptor_pool_heap_free(&pool->surfaces, set->desc_surface_mem);
+      anv_descriptor_pool_heap_free(device, pool, &pool->surfaces, set, set->desc_surface_mem);
       if (set->desc_surface_state.alloc_size)
          anv_descriptor_pool_free_state(pool, set->desc_surface_state);
    }
-   if (set->desc_sampler_mem.alloc_size)
-      anv_descriptor_pool_heap_free(&pool->samplers, set->desc_sampler_mem);
+   if (set->desc_sampler_mem.alloc_size) {
+      anv_descriptor_pool_heap_free(device, pool, &pool->samplers, set, set->desc_sampler_mem);
+   }
    if (device->physical->indirect_descriptors) {
       if (!pool->host_only) {


@@ -80,6 +80,7 @@ anv_device_init_trivial_batch(struct anv_device *device)
                                 ANV_BO_ALLOC_CAPTURE,
                                 0 /* explicit_address */,
                                 &device->trivial_batch_bo);
+   ANV_DMR_BO_ALLOC(&device->vk.base, device->trivial_batch_bo, result);
    if (result != VK_SUCCESS)
       return result;
@@ -298,8 +299,11 @@ anv_device_finish_trtt(struct anv_device *device)
    vk_free(&device->vk.alloc, trtt->l3_mirror);
    vk_free(&device->vk.alloc, trtt->l2_mirror);
-   for (int i = 0; i < trtt->num_page_table_bos; i++)
+   for (int i = 0; i < trtt->num_page_table_bos; i++) {
+      struct anv_bo *bo = trtt->page_table_bos[i];
+      ANV_DMR_BO_FREE(&device->vk.base, bo);
       anv_device_release_bo(device, trtt->page_table_bos[i]);
+   }
    vk_free(&device->vk.alloc, trtt->page_table_bos);
 }
}
@@ -699,6 +703,7 @@ VkResult anv_CreateDevice(
                                 ANV_BO_ALLOC_INTERNAL,
                                 0 /* explicit_address */,
                                 &device->workaround_bo);
+   ANV_DMR_BO_ALLOC(&device->vk.base, device->workaround_bo, result);
    if (result != VK_SUCCESS)
       goto fail_surface_aux_map_pool;
@@ -707,6 +712,7 @@ VkResult anv_CreateDevice(
                                 0 /* alloc_flags */,
                                 0 /* explicit_address */,
                                 &device->dummy_aux_bo);
+   ANV_DMR_BO_ALLOC(&device->vk.base, device->dummy_aux_bo, result);
    if (result != VK_SUCCESS)
       goto fail_alloc_device_bo;
@@ -724,6 +730,7 @@ VkResult anv_CreateDevice(
result = anv_device_alloc_bo(device, "mem_fence", 4096,
ANV_BO_ALLOC_NO_LOCAL_MEM, 0,
&device->mem_fence_bo);
ANV_DMR_BO_ALLOC(&device->vk.base, device->mem_fence_bo, result);
if (result != VK_SUCCESS)
goto fail_alloc_device_bo;
}
@@ -776,6 +783,7 @@ VkResult anv_CreateDevice(
                                 ANV_BO_ALLOC_INTERNAL,
                                 0 /* explicit_address */,
                                 &device->ray_query_bo[0]);
+      ANV_DMR_BO_ALLOC(&device->vk.base, device->ray_query_bo[0], result);
       if (result != VK_SUCCESS)
          goto fail_alloc_device_bo;
@@ -787,6 +795,7 @@ VkResult anv_CreateDevice(
                                    ANV_BO_ALLOC_INTERNAL,
                                    0 /* explicit_address */,
                                    &device->ray_query_bo[1]);
+         ANV_DMR_BO_ALLOC(&device->vk.base, device->ray_query_bo[1], result);
          if (result != VK_SUCCESS)
             goto fail_ray_query_bo;
       }
@@ -865,6 +874,7 @@ VkResult anv_CreateDevice(
                                 ANV_BO_ALLOC_INTERNAL,
                                 0 /* explicit_address */,
                                 &device->btd_fifo_bo);
+      ANV_DMR_BO_ALLOC(&device->vk.base, device->btd_fifo_bo, result);
       if (result != VK_SUCCESS)
          goto fail_trivial_batch_bo_and_scratch_pool;
    }
@@ -1047,23 +1057,33 @@ VkResult anv_CreateDevice(
 fail_default_pipeline_cache:
    vk_pipeline_cache_destroy(device->vk.mem_cache, NULL);
 fail_btd_fifo_bo:
-   if (ANV_SUPPORT_RT && device->info->has_ray_tracing)
+   if (ANV_SUPPORT_RT && device->info->has_ray_tracing) {
+      ANV_DMR_BO_FREE(&device->vk.base, device->btd_fifo_bo);
       anv_device_release_bo(device, device->btd_fifo_bo);
+   }
 fail_trivial_batch_bo_and_scratch_pool:
    anv_scratch_pool_finish(device, &device->scratch_pool);
    anv_scratch_pool_finish(device, &device->protected_scratch_pool);
 fail_trivial_batch:
+   ANV_DMR_BO_FREE(&device->vk.base, device->trivial_batch_bo);
    anv_device_release_bo(device, device->trivial_batch_bo);
 fail_ray_query_bo:
    for (unsigned i = 0; i < ARRAY_SIZE(device->ray_query_bo); i++) {
-      if (device->ray_query_bo[i])
+      if (device->ray_query_bo[i]) {
+         ANV_DMR_BO_FREE(&device->vk.base, device->ray_query_bo[i]);
          anv_device_release_bo(device, device->ray_query_bo[i]);
+      }
    }
 fail_alloc_device_bo:
-   if (device->mem_fence_bo)
+   if (device->mem_fence_bo) {
+      ANV_DMR_BO_FREE(&device->vk.base, device->mem_fence_bo);
       anv_device_release_bo(device, device->mem_fence_bo);
-   if (device->dummy_aux_bo)
+   }
+   if (device->dummy_aux_bo) {
+      ANV_DMR_BO_FREE(&device->vk.base, device->dummy_aux_bo);
       anv_device_release_bo(device, device->dummy_aux_bo);
+   }
+   ANV_DMR_BO_FREE(&device->vk.base, device->workaround_bo);
    anv_device_release_bo(device, device->workaround_bo);
 fail_surface_aux_map_pool:
    if (device->info->has_aux_map) {
@@ -1176,8 +1196,10 @@ void anv_DestroyDevice(
    anv_device_finish_embedded_samplers(device);
-   if (ANV_SUPPORT_RT && device->info->has_ray_tracing)
+   if (ANV_SUPPORT_RT && device->info->has_ray_tracing) {
+      ANV_DMR_BO_FREE(&device->vk.base, device->btd_fifo_bo);
       anv_device_release_bo(device, device->btd_fifo_bo);
+   }
    if (device->info->verx10 >= 125) {
       vk_common_DestroyCommandPool(anv_device_to_handle(device),
@@ -1196,8 +1218,11 @@ void anv_DestroyDevice(
 #endif
    for (unsigned i = 0; i < ARRAY_SIZE(device->rt_scratch_bos); i++) {
-      if (device->rt_scratch_bos[i] != NULL)
-         anv_device_release_bo(device, device->rt_scratch_bos[i]);
+      if (device->rt_scratch_bos[i] != NULL) {
+         struct anv_bo *bo = device->rt_scratch_bos[i];
+         ANV_DMR_BO_FREE(&device->vk.base, bo);
+         anv_device_release_bo(device, bo);
+      }
    }
    anv_scratch_pool_finish(device, &device->scratch_pool);
@@ -1206,18 +1231,28 @@ void anv_DestroyDevice(
    if (device->vk.enabled_extensions.KHR_ray_query) {
       for (unsigned i = 0; i < ARRAY_SIZE(device->ray_query_bo); i++) {
          for (unsigned j = 0; j < ARRAY_SIZE(device->ray_query_shadow_bos[0]); j++) {
-            if (device->ray_query_shadow_bos[i][j] != NULL)
+            if (device->ray_query_shadow_bos[i][j] != NULL) {
+               ANV_DMR_BO_FREE(&device->vk.base, device->ray_query_shadow_bos[i][j]);
                anv_device_release_bo(device, device->ray_query_shadow_bos[i][j]);
+            }
          }
-         if (device->ray_query_bo[i])
+         if (device->ray_query_bo[i]) {
+            ANV_DMR_BO_FREE(&device->vk.base, device->ray_query_bo[i]);
             anv_device_release_bo(device, device->ray_query_bo[i]);
+         }
       }
    }
+   ANV_DMR_BO_FREE(&device->vk.base, device->workaround_bo);
    anv_device_release_bo(device, device->workaround_bo);
-   if (device->dummy_aux_bo)
+   if (device->dummy_aux_bo) {
+      ANV_DMR_BO_FREE(&device->vk.base, device->dummy_aux_bo);
       anv_device_release_bo(device, device->dummy_aux_bo);
-   if (device->mem_fence_bo)
+   }
+   if (device->mem_fence_bo) {
+      ANV_DMR_BO_FREE(&device->vk.base, device->mem_fence_bo);
       anv_device_release_bo(device, device->mem_fence_bo);
+   }
+   ANV_DMR_BO_FREE(&device->vk.base, device->trivial_batch_bo);
    anv_device_release_bo(device, device->trivial_batch_bo);
    if (device->info->has_aux_map) {
@@ -1688,12 +1723,16 @@ VkResult anv_AllocateMemory(
    pthread_mutex_unlock(&device->mutex);
    ANV_RMV(heap_create, device, mem, false, 0);
+   ANV_DMR_BO_ALLOC_IMPORT(&mem->vk.base, mem->bo, result,
+                           mem->vk.import_handle_type);
    *pMem = anv_device_memory_to_handle(mem);
    return VK_SUCCESS;
 fail:
+   ANV_DMR_BO_ALLOC_IMPORT(&mem->vk.base, mem->bo, result,
+                           mem->vk.import_handle_type);
    vk_device_memory_destroy(&device->vk, pAllocator, &mem->vk);
    return result;
@@ -1792,6 +1831,9 @@ void anv_FreeMemory(
    p_atomic_add(&device->physical->memory.heaps[mem->type->heapIndex].used,
                 -mem->bo->size);
+   ANV_DMR_BO_FREE_IMPORT(&mem->vk.base, mem->bo,
+                          mem->vk.import_handle_type);
    anv_device_release_bo(device, mem->bo);
    ANV_RMV(resource_destroy, device, mem);


@@ -1473,6 +1473,7 @@ alloc_private_binding(struct anv_device *device,
VkResult result = anv_device_alloc_bo(device, "image-binding-private",
binding->memory_range.size, 0, 0,
&binding->address.bo);
ANV_DMR_BO_ALLOC(&image->vk.base, binding->address.bo, result);
if (result == VK_SUCCESS) {
pthread_mutex_lock(&device->mutex);
list_addtail(&image->link, &device->image_private_objects);
@@ -1958,6 +1959,7 @@ anv_image_finish(struct anv_image *image)
       pthread_mutex_lock(&device->mutex);
       list_del(&image->link);
       pthread_mutex_unlock(&device->mutex);
+      ANV_DMR_BO_FREE(&image->vk.base, private_bo);
       anv_device_release_bo(device, private_bo);
    }


@@ -578,6 +578,9 @@ get_features(const struct anv_physical_device *pdevice,
       /* VK_EXT_depth_clip_enable */
       .depthClipEnable = true,
+      /* VK_EXT_device_memory_report */
+      .deviceMemoryReport = true,
       /* VK_EXT_fragment_shader_interlock */
       .fragmentShaderSampleInterlock = true,
       .fragmentShaderPixelInterlock = true,


@@ -165,6 +165,7 @@ anv_shader_bin_destroy(struct vk_device *_device,
    for (uint32_t i = 0; i < shader->bind_map.embedded_sampler_count; i++)
       anv_embedded_sampler_unref(device, shader->embedded_samplers[i]);
+   ANV_DMR_SP_FREE(&device->vk.base, &device->instruction_state_pool, shader->kernel);
    anv_state_pool_free(&device->instruction_state_pool, shader->kernel);
    vk_pipeline_cache_object_finish(&shader->base);
    vk_free(&device->vk.alloc, shader);
@@ -252,12 +253,14 @@ anv_shader_bin_create(struct anv_device *device,
    shader->kernel =
       anv_state_pool_alloc(&device->instruction_state_pool, kernel_size, 64);
+   ANV_DMR_SP_ALLOC(&device->vk.base, &device->instruction_state_pool, shader->kernel);
    memcpy(shader->kernel.map, kernel_data, kernel_size);
    shader->kernel_size = kernel_size;
    if (bind_map->embedded_sampler_count > 0) {
       shader->embedded_samplers = embedded_samplers;
       if (anv_shader_bin_get_embedded_samplers(device, shader, bind_map) != VK_SUCCESS) {
+         ANV_DMR_SP_FREE(&device->vk.base, &device->instruction_state_pool, shader->kernel);
          anv_state_pool_free(&device->instruction_state_pool, shader->kernel);
          vk_free(&device->vk.alloc, shader);
          return NULL;


@@ -79,6 +79,7 @@
#include "vk_command_buffer.h"
#include "vk_command_pool.h"
#include "vk_debug_report.h"
#include "vk_debug_utils.h"
#include "vk_descriptor_update_template.h"
#include "vk_device.h"
#include "vk_device_memory.h"
@@ -6668,6 +6669,69 @@ VK_DEFINE_NONDISP_HANDLE_CASTS(anv_video_session_params, vk.base,
 # undef genX
 #endif
+
+static inline void
+anv_emit_device_memory_report(struct vk_device* device,
+                              VkDeviceMemoryReportEventTypeEXT type,
+                              uint64_t mem_obj_id,
+                              VkDeviceSize size,
+                              VkObjectType obj_type,
+                              uint64_t obj_handle,
+                              uint32_t heap_index)
+{
+   if (likely(!device->memory_reports))
+      return;
+
+   vk_emit_device_memory_report(device, type, mem_obj_id, size,
+                                obj_type, obj_handle, heap_index);
+}
+
+/* VK_EXT_device_memory_report specific reporting macros */
+#define ANV_DMR_BO_REPORT(_obj, _bo, _type) \
+   anv_emit_device_memory_report( \
+      (_obj)->device, _type, \
+      (_type) == VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT ? \
+         0 : (_bo)->offset, \
+      (_type) == VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT ? \
+         0 : (_bo)->actual_size, \
+      (_obj)->type, vk_object_to_u64_handle(_obj), 0)
+
+#define ANV_DMR_BO_ALLOC(_obj, _bo, _result) \
+   ANV_DMR_BO_REPORT(_obj, _bo, \
+                     (_result) == VK_SUCCESS ? \
+                     VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT : \
+                     VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT)
+
+#define ANV_DMR_BO_FREE(_obj, _bo) \
+   ANV_DMR_BO_REPORT(_obj, _bo, \
+                     VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT)
+
+#define ANV_DMR_BO_ALLOC_IMPORT(_obj, _bo, _result, _import) \
+   ANV_DMR_BO_REPORT(_obj, _bo, \
+                     (_result) == VK_SUCCESS ? \
+                     ((_import) ? \
+                      VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT : \
+                      VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT) : \
+                     VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT)
+
+#define ANV_DMR_BO_FREE_IMPORT(_obj, _bo, _import) \
+   ANV_DMR_BO_REPORT(_obj, _bo, \
+                     (_import) ? \
+                     VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT : \
+                     VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT)
+
+#define ANV_DMR_SP_REPORT(_obj, _pool, _state, _type) \
+   anv_emit_device_memory_report( \
+      (_obj)->device, _type, \
+      (_type) == VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT ? \
+         0 : \
+         anv_address_physical(anv_state_pool_state_address((_pool), (_state))), \
+      (_type) == VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT ? \
+         0 : (_state).alloc_size, \
+      (_obj)->type, vk_object_to_u64_handle(_obj), 0)
+
+#define ANV_DMR_SP_ALLOC(_obj, _pool, _state) \
+   ANV_DMR_SP_REPORT(_obj, _pool, _state, \
+                     (_state).alloc_size == 0 ? \
+                     VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT : \
+                     VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT)
+
+#define ANV_DMR_SP_FREE(_obj, _pool, _state) \
+   ANV_DMR_SP_REPORT(_obj, _pool, _state, VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT)
+
 #ifdef __cplusplus
 }
 #endif
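For reference, a successful ANV_DMR_BO_ALLOC(&cmd_buffer->vk.base, bbo->bo, result) call, such as the one added to anv_batch_bo_create above, hand-expands to roughly the following. This is an illustration of the macros, not literal patch content:

   /* ANV_DMR_BO_ALLOC(&cmd_buffer->vk.base, bbo->bo, result)
    * with result == VK_SUCCESS reduces to: */
   anv_emit_device_memory_report(cmd_buffer->vk.base.device,
                                 VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT,
                                 bbo->bo->offset,          /* memoryObjectId */
                                 bbo->bo->actual_size,     /* size */
                                 cmd_buffer->vk.base.type, /* VK_OBJECT_TYPE_COMMAND_BUFFER */
                                 vk_object_to_u64_handle(&cmd_buffer->vk.base),
                                 0 /* heapIndex */);

When result is not VK_SUCCESS, the same macro instead reports VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT with memoryObjectId and size forced to 0, as the conditional branches in ANV_DMR_BO_REPORT show.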


@@ -347,6 +347,7 @@ trtt_make_page_table_bo(struct anv_device *device, struct anv_bo **bo)
                                 ANV_TRTT_PAGE_TABLE_BO_SIZE,
                                 ANV_BO_ALLOC_INTERNAL,
                                 0 /* explicit_address */, bo);
+   ANV_DMR_BO_ALLOC(&device->vk.base, *bo, result);
    if (result != VK_SUCCESS)
       return result;
@@ -360,6 +361,7 @@ trtt_make_page_table_bo(struct anv_device *device, struct anv_bo **bo)
                    new_capacity * sizeof(*trtt->page_table_bos), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
       if (!new_page_table_bos) {
+         ANV_DMR_BO_FREE(&device->vk.base, *bo);
          anv_device_release_bo(device, *bo);
          return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
       }


@@ -425,6 +425,9 @@ genX(cmd_buffer_emit_indirect_generated_draws_inring)(struct anv_cmd_buffer *cmd
                               4096);
    VkResult result = anv_bo_pool_alloc(&device->batch_bo_pool, bo_size,
                                        &cmd_buffer->generation.ring_bo);
+   ANV_DMR_BO_ALLOC(&cmd_buffer->vk.base,
+                    cmd_buffer->generation.ring_bo,
+                    result);
    if (result != VK_SUCCESS) {
       anv_batch_set_error(&cmd_buffer->batch, result);
       return;


@@ -256,6 +256,7 @@ VkResult genX(CreateQueryPool)(
                                 ANV_BO_ALLOC_CAPTURE,
                                 0 /* explicit_address */,
                                 &pool->bo);
+   ANV_DMR_BO_ALLOC(&pool->vk.base, pool->bo, result);
    if (result != VK_SUCCESS)
       goto fail;
@@ -299,7 +300,7 @@ void genX(DestroyQueryPool)(
       return;
    ANV_RMV(resource_destroy, device, pool);
+   ANV_DMR_BO_FREE(&pool->vk.base, pool->bo);
    anv_device_release_bo(device, pool->bo);
    vk_object_free(&device->vk, pAllocator, pool);
 }


@@ -128,6 +128,12 @@ vk_object_base_from_u64_handle(uint64_t handle, VkObjectType obj_type)
    return base;
 }
+
+static inline uint64_t
+vk_object_to_u64_handle(struct vk_object_base *obj)
+{
+   return (uintptr_t)obj;
+}
+
 /** Define handle cast macros for the given dispatchable handle type
  *
  * For a given `driver_struct`, this defines `driver_struct_to_handle()` and