anv: initial RMV support
Launch with:

    $ MESA_VK_TRACE=rmv MESA_VK_TRACE_TRIGGER=/tmp/trig ./my_app

In another terminal, trigger a capture:

    $ touch /tmp/trig

The application will create a snapshot and print out:

    RMV capture saved to '/tmp/my_app_2024.01.19_10.56.33.rmv'

Then just open it with RMV:

    ./RadeonMemoryVisualizer /tmp/my_app_2024.01.19_10.56.33.rmv

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/26843>
committed by Marge Bot
parent: 6d53fcd54e
commit: e1b9a6e4f3
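
Reader orientation (not part of the original commit message): every hook in the diff below funnels through a single ANV_RMV() macro, defined in the new anv_rmv.h, so the logging costs one predictable branch when tracing is disabled. A sketch of the pattern, using names taken directly from this patch:

    /* From anv_rmv.h below: only call the logger when tracing is on. */
    #define ANV_RMV(func, device, ...) do {                        \
       if (unlikely((device)->vk.memory_trace_data.is_enabled))    \
          anv_rmv_log_##func(device, __VA_ARGS__);                 \
    } while (0)

    /* So the call site added to anv_CreateBuffer() expands, conceptually, to: */
    if (unlikely(device->vk.memory_trace_data.is_enabled))
       anv_rmv_log_buffer_create(device, false /* is_internal */, buffer);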
@@ -1567,6 +1567,8 @@ anv_device_alloc_bo(struct anv_device *device,
 
    *bo_out = bo;
 
+   ANV_RMV(bo_allocate, device, bo);
+
    return VK_SUCCESS;
 }
@@ -1698,6 +1700,8 @@ anv_device_import_bo_from_host_ptr(struct anv_device *device,
       }
 
       *bo = new_bo;
+
+      ANV_RMV(bo_allocate, device, bo);
    }
 
    pthread_mutex_unlock(&cache->mutex);
@@ -1791,6 +1795,8 @@ anv_device_import_bo(struct anv_device *device,
       }
 
       *bo = new_bo;
+
+      ANV_RMV(bo_allocate, device, bo);
    }
 
    bo->flags = bo_flags;
@@ -1889,6 +1895,8 @@ anv_device_release_bo(struct anv_device *device,
    if (atomic_dec_not_one(&bo->refcount))
       return;
 
+   ANV_RMV(bo_destroy, device, bo);
+
    pthread_mutex_lock(&cache->mutex);
 
    /* We are probably the last reference since our attempt to decrement above
@@ -239,6 +239,8 @@ anv_cmd_buffer_destroy(struct vk_command_buffer *vk_cmd_buffer)
       cmd_buffer->companion_rcs_cmd_buffer = NULL;
    }
 
+   ANV_RMV(cmd_buffer_destroy, cmd_buffer->device, cmd_buffer);
+
    destroy_cmd_buffer(cmd_buffer);
    pthread_mutex_unlock(&device->mutex);
 }
@@ -302,6 +304,8 @@ anv_cmd_buffer_reset(struct vk_command_buffer *vk_cmd_buffer,
       cmd_buffer->companion_rcs_cmd_buffer = NULL;
    }
 
+   ANV_RMV(cmd_buffer_destroy, cmd_buffer->device, cmd_buffer);
+
    reset_cmd_buffer(cmd_buffer, flags);
 }
@@ -1396,6 +1396,8 @@ VkResult anv_CreateDescriptorPool(
 
    list_inithead(&pool->desc_sets);
 
+   ANV_RMV(descriptor_pool_create, device, pCreateInfo, pool, false);
+
    *pDescriptorPool = anv_descriptor_pool_to_handle(pool);
 
    return VK_SUCCESS;
@@ -1412,6 +1414,8 @@ void anv_DestroyDescriptorPool(
    if (!pool)
       return;
 
+   ANV_RMV(resource_destroy, device, pool);
+
    list_for_each_entry_safe(struct anv_descriptor_set, set,
                             &pool->desc_sets, pool_link) {
       anv_descriptor_set_layout_unref(device, set->layout);
@@ -3103,6 +3103,12 @@ VkResult anv_CreateDevice(
                                              true);
    override_initial_entrypoints = false;
 #endif
+   if (physical_device->instance->vk.trace_mode & VK_TRACE_MODE_RMV) {
+      vk_device_dispatch_table_from_entrypoints(&dispatch_table,
+                                                &anv_rmv_device_entrypoints,
+                                                true);
+      override_initial_entrypoints = false;
+   }
    vk_device_dispatch_table_from_entrypoints(&dispatch_table,
       anv_genX(&physical_device->info, device_entrypoints),
       override_initial_entrypoints);
@@ -3111,6 +3117,7 @@ VkResult anv_CreateDevice(
    vk_device_dispatch_table_from_entrypoints(&dispatch_table,
                                              &wsi_device_entrypoints, false);
 
+
    result = vk_device_init(&device->vk, &physical_device->vk,
                            &dispatch_table, pCreateInfo, pAllocator);
    if (result != VK_SUCCESS)
@@ -3250,6 +3257,9 @@ VkResult anv_CreateDevice(
    }
    pthread_condattr_destroy(&condattr);
 
+   if (physical_device->instance->vk.trace_mode & VK_TRACE_MODE_RMV)
+      anv_memory_trace_init(device);
+
    result = anv_bo_cache_init(&device->bo_cache, device);
    if (result != VK_SUCCESS)
      goto fail_queue_cond;
@@ -3723,6 +3733,8 @@ void anv_DestroyDevice(
    u_gralloc_destroy(&device->u_gralloc);
 #endif
 
+   anv_memory_trace_finish(device);
+
    struct anv_physical_device *pdevice = device->physical;
 
    for (uint32_t i = 0; i < device->queue_count; i++)
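
The two anv_CreateDevice hunks above are the whole enablement story: when MESA_VK_TRACE=rmv sets VK_TRACE_MODE_RMV, the generated anv_rmv_* entry points are merged into the device dispatch table ahead of the defaults, and anv_memory_trace_init() registers the capture hook. A minimal sketch of the override mechanics (a restatement of the hunk above, not a separate implementation):

    /* Entries merged first with overwrite enabled win; the later merge
     * only fills slots that are still empty. */
    if (physical_device->instance->vk.trace_mode & VK_TRACE_MODE_RMV) {
       vk_device_dispatch_table_from_entrypoints(&dispatch_table,
                                                 &anv_rmv_device_entrypoints,
                                                 true /* overwrite */);
       override_initial_entrypoints = false;
    }
    vk_device_dispatch_table_from_entrypoints(&dispatch_table,
       anv_genX(&physical_device->info, device_entrypoints),
       override_initial_entrypoints);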
@@ -4184,6 +4196,8 @@ VkResult anv_AllocateMemory(
    list_addtail(&mem->link, &device->memory_objects);
    pthread_mutex_unlock(&device->mutex);
 
+   ANV_RMV(heap_create, device, mem, false, 0);
+
    *pMem = anv_device_memory_to_handle(mem);
 
    return VK_SUCCESS;
@@ -4289,6 +4303,8 @@ void anv_FreeMemory(
 
    anv_device_release_bo(device, mem->bo);
 
+   ANV_RMV(resource_destroy, device, mem);
+
    vk_device_memory_destroy(&device->vk, pAllocator, &mem->vk);
 }
@@ -4456,7 +4472,8 @@ void anv_GetDeviceMemoryCommitment(
 }
 
 static void
-anv_bind_buffer_memory(const VkBindBufferMemoryInfo *pBindInfo)
+anv_bind_buffer_memory(struct anv_device *device,
+                       const VkBindBufferMemoryInfo *pBindInfo)
 {
    ANV_FROM_HANDLE(anv_device_memory, mem, pBindInfo->memory);
    ANV_FROM_HANDLE(anv_buffer, buffer, pBindInfo->buffer);
@@ -4478,17 +4495,21 @@ anv_bind_buffer_memory(const VkBindBufferMemoryInfo *pBindInfo)
       buffer->address = ANV_NULL_ADDRESS;
    }
 
+   ANV_RMV(buffer_bind, device, buffer);
+
    if (bind_status)
       *bind_status->pResult = VK_SUCCESS;
 }
 
 VkResult anv_BindBufferMemory2(
-    VkDevice device,
+    VkDevice _device,
     uint32_t bindInfoCount,
     const VkBindBufferMemoryInfo* pBindInfos)
 {
+   ANV_FROM_HANDLE(anv_device, device, _device);
+
    for (uint32_t i = 0; i < bindInfoCount; i++)
-      anv_bind_buffer_memory(&pBindInfos[i]);
+      anv_bind_buffer_memory(device, &pBindInfos[i]);
 
    return VK_SUCCESS;
 }
@@ -4515,6 +4536,8 @@ VkResult anv_CreateEvent(
                                sizeof(uint64_t), 8);
    *(uint64_t *)event->state.map = VK_EVENT_RESET;
 
+   ANV_RMV(event_create, device, event, pCreateInfo->flags, false);
+
    *pEvent = anv_event_to_handle(event);
 
    return VK_SUCCESS;
@@ -4531,6 +4554,8 @@ void anv_DestroyEvent(
    if (!event)
       return;
 
+   ANV_RMV(resource_destroy, device, event);
+
    anv_state_pool_free(&device->dynamic_state_pool, event->state);
 
    vk_object_free(&device->vk, pAllocator, event);
@@ -4728,6 +4753,8 @@ VkResult anv_CreateBuffer(
       }
    }
 
+   ANV_RMV(buffer_create, device, false, buffer);
+
    *pBuffer = anv_buffer_to_handle(buffer);
 
    return VK_SUCCESS;
@@ -4744,6 +4771,8 @@ void anv_DestroyBuffer(
    if (!buffer)
      return;
 
+   ANV_RMV(buffer_destroy, device, buffer);
+
    if (anv_buffer_is_sparse(buffer)) {
      assert(buffer->address.offset == buffer->sparse_data.address);
      anv_free_sparse_bindings(device, &buffer->sparse_data);
@@ -1898,6 +1898,8 @@ VkResult anv_CreateImage(
       return result;
    }
 
+   ANV_RMV(image_create, device, false, image);
+
    *pImage = anv_image_to_handle(image);
 
    return result;
@@ -1913,6 +1915,8 @@ anv_DestroyImage(VkDevice _device, VkImage _image,
    if (!image)
       return;
 
+   ANV_RMV(image_destroy, device, image);
+
    assert(&device->vk == image->vk.base.device);
    anv_image_finish(image);
@@ -2290,6 +2294,9 @@ anv_bind_image_memory(struct anv_device *device,
             .offset = bind_info->memoryOffset,
          };
 
+         ANV_RMV(image_bind, device, image,
+                 binding - image->bindings);
+
          did_bind = true;
          break;
       }
@@ -2353,6 +2360,9 @@ anv_bind_image_memory(struct anv_device *device,
          .offset = bind_info->memoryOffset,
       };
 
+      ANV_RMV(image_bind, device, image,
+              ANV_IMAGE_MEMORY_BINDING_MAIN);
+
      did_bind = true;
   }
@@ -327,6 +327,8 @@ void anv_DestroyPipeline(
    if (!pipeline)
       return;
 
+   ANV_RMV(resource_destroy, device, pipeline);
+
    switch (pipeline->type) {
    case ANV_PIPELINE_GRAPHICS_LIB: {
       struct anv_graphics_lib_pipeline *gfx_pipeline =
@@ -2765,6 +2767,8 @@ anv_compute_pipeline_create(struct anv_device *device,
 
    anv_genX(device->info, compute_pipeline_emit)(pipeline);
 
+   ANV_RMV(compute_pipeline_create, device, pipeline, false);
+
    *pPipeline = anv_pipeline_to_handle(&pipeline->base);
 
    return pipeline->base.batch.status;
@@ -3290,6 +3294,8 @@ anv_graphics_pipeline_create(struct anv_device *device,
    anv_fill_pipeline_creation_feedback(&pipeline->base, &pipeline_feedback,
                                        pCreateInfo, stages);
 
+   ANV_RMV(graphics_pipeline_create, device, pipeline, false);
+
    *pPipeline = anv_pipeline_to_handle(&pipeline->base.base);
 
    return pipeline->base.base.batch.status;
@@ -4146,6 +4152,8 @@ anv_ray_tracing_pipeline_create(
 
    ralloc_free(tmp_ctx);
 
+   ANV_RMV(rt_pipeline_create, device, pipeline, false);
+
    *pPipeline = anv_pipeline_to_handle(&pipeline->base);
 
    return pipeline->base.batch.status;
@@ -129,6 +129,7 @@ struct intel_perf_query_result;
 #include "anv_android.h"
 #include "anv_entrypoints.h"
 #include "anv_kmd_backend.h"
+#include "anv_rmv.h"
 #include "isl/isl.h"
 
 #include "dev/intel_debug.h"
@@ -500,6 +501,9 @@ struct anv_bo {
 
    /** True if this BO wraps a host pointer */
    bool from_host_ptr:1;
+
+   /** True if this BO is mapped in the GTT (only used for RMV) */
+   bool gtt_mapped:1;
 };
 
 static inline bool
src/intel/vulkan/anv_rmv.c  (new file, 864 lines)
@@ -0,0 +1,864 @@
/*
 * Copyright © 2023 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include "anv_private.h"

static VkResult
capture_trace(VkQueue _queue)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);

   simple_mtx_lock(&queue->device->vk.memory_trace_data.token_mtx);
   vk_dump_rmv_capture(&queue->device->vk.memory_trace_data);
   simple_mtx_unlock(&queue->device->vk.memory_trace_data.token_mtx);

   return VK_SUCCESS;
}

void
anv_memory_trace_init(struct anv_device *device)
{
   struct vk_rmv_device_info info;
   memset(&info, 0, sizeof(info));
   anv_rmv_fill_device_info(device->physical, &info);
   vk_memory_trace_init(&device->vk, &info);

   if (!device->vk.memory_trace_data.is_enabled)
      return;

   device->vk.capture_trace = capture_trace;
}

static void
fill_memory_info(const struct anv_physical_device *device,
                 struct vk_rmv_memory_info *out_info,
                 int32_t index)
{
   switch (index) {
   case VK_RMV_MEMORY_LOCATION_DEVICE:
      out_info->physical_base_address = 0;
      out_info->size = device->memory.heaps[0].size;
      break;
   case VK_RMV_MEMORY_LOCATION_DEVICE_INVISIBLE:
      out_info->physical_base_address = device->memory.heaps[0].size;
      out_info->size = device->vram_non_mappable.size;
      break;
   case VK_RMV_MEMORY_LOCATION_HOST:
      out_info->physical_base_address = 0;
      out_info->size = device->memory.heaps[1].size;
      break;
   default:
      unreachable("invalid memory index");
   }
}

void
anv_rmv_fill_device_info(const struct anv_physical_device *device,
                         struct vk_rmv_device_info *info)
{
   for (int32_t i = 0; i < VK_RMV_MEMORY_LOCATION_COUNT; ++i)
      fill_memory_info(device, &info->memory_infos[i], i);

   strncpy(info->device_name, device->info.name, sizeof(info->device_name) - 1);
   info->pcie_revision_id = device->info.pci_revision_id;
   info->pcie_device_id = device->info.pci_device_id;
   /* TODO: */
   info->pcie_family_id = 0;
   info->minimum_shader_clock = 0;
   info->maximum_shader_clock = 1 * 1024 * 1024 * 1024;
   info->vram_type = VK_RMV_MEMORY_TYPE_DDR4;
   info->vram_bus_width = 256;
   info->vram_operations_per_clock = 1;
   info->minimum_memory_clock = 0;
   info->maximum_memory_clock = 1;
   info->vram_bandwidth = 256;
}

void
anv_memory_trace_finish(struct anv_device *device)
{
}
static uint32_t
resource_id_locked(struct anv_device *device, const void *obj)
{
   return vk_rmv_get_resource_id_locked(&device->vk, (uint64_t)(uintptr_t)obj);
}

static void
resource_destroy_locked(struct anv_device *device, const void *obj)
{
   vk_rmv_destroy_resource_id_locked(&device->vk, (uint64_t)(uintptr_t)obj);
}

/* The token lock must be held when entering _locked functions */
static void
log_resource_bind_locked(struct anv_device *device, uint64_t resource_id,
                         struct anv_bo *bo, uint64_t offset,
                         uint64_t size)
{
   struct vk_rmv_resource_bind_token token = {
      .resource_id = resource_id,
      .is_system_memory = bo ? (bo->alloc_flags & ANV_BO_ALLOC_NO_LOCAL_MEM) : 0,
      .address = (bo ? bo->offset : 0) + offset,
      .size = size,
   };

   vk_rmv_emit_token(&device->vk.memory_trace_data, VK_RMV_TOKEN_TYPE_RESOURCE_BIND, &token);
}

static void
log_state_pool_bind_locked(struct anv_device *device, uint64_t resource_id,
                           struct anv_state_pool *pool, struct anv_state *state)
{
   struct vk_rmv_resource_bind_token token = {
      .resource_id = resource_id,
      .is_system_memory = (pool->block_pool.bo_alloc_flags &
                           ANV_BO_ALLOC_NO_LOCAL_MEM) != 0,
      .address = anv_address_physical(
         anv_state_pool_state_address(pool, *state)),
      .size = state->alloc_size,
   };

   vk_rmv_emit_token(&device->vk.memory_trace_data, VK_RMV_TOKEN_TYPE_RESOURCE_BIND, &token);
}

static enum vk_rmv_memory_location
anv_heap_index_to_memory_location(struct anv_device *device,
                                  unsigned heap_index)
{
   if (heap_index == 0)
      return device->physical->vram_non_mappable.size != 0 ?
             VK_RMV_MEMORY_LOCATION_DEVICE_INVISIBLE :
             VK_RMV_MEMORY_LOCATION_DEVICE;
   else if (heap_index == 1)
      return VK_RMV_MEMORY_LOCATION_HOST;
   else
      return VK_RMV_MEMORY_LOCATION_DEVICE;
}
static void
anv_rmv_log_bo_gtt_unmap_locked(struct anv_device *device,
                                struct anv_bo *bo)
{
   if (!bo->gtt_mapped)
      return;

   struct vk_rmv_token token = {
      .type = VK_RMV_TOKEN_TYPE_PAGE_TABLE_UPDATE,
      .timestamp = (uint64_t)os_time_get_nano(),
      .data = {
         .page_table_update = {
            .type = VK_RMV_PAGE_TABLE_UPDATE_TYPE_UPDATE,
            .page_size = device->info->mem_alignment,
            .page_count = DIV_ROUND_UP(bo->size,
                                       device->info->mem_alignment),
            .pid = getpid(),
            .virtual_address = bo->offset,
            .physical_address = bo->offset,
            .is_unmap = true,
         },
      },
   };
   util_dynarray_append(&device->vk.memory_trace_data.tokens,
                        struct vk_rmv_token, token);

   bo->gtt_mapped = false;
}

void
anv_rmv_log_bo_gtt_unmap(struct anv_device *device,
                         struct anv_bo *bo)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
   anv_rmv_log_bo_gtt_unmap_locked(device, bo);
   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
anv_rmv_log_bo_gtt_map(struct anv_device *device,
                       struct anv_bo *bo)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
   struct vk_rmv_token token = {
      .type = VK_RMV_TOKEN_TYPE_PAGE_TABLE_UPDATE,
      .timestamp = (uint64_t)os_time_get_nano(),
      .data = {
         .page_table_update = {
            .type = VK_RMV_PAGE_TABLE_UPDATE_TYPE_UPDATE,
            .page_size = device->info->mem_alignment,
            .page_count = DIV_ROUND_UP(bo->size,
                                       device->info->mem_alignment),
            .pid = getpid(),
            .virtual_address = bo->offset,
            .physical_address = bo->offset,
            .is_unmap = false,
         },
      },
   };
   util_dynarray_append(&device->vk.memory_trace_data.tokens,
                        struct vk_rmv_token, token);

   bo->gtt_mapped = true;

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}
void
anv_rmv_log_bos_gtt_map(struct anv_device *device,
                        struct anv_bo **bos,
                        uint32_t bo_count)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
   for (uint32_t i = 0; i < bo_count; i++) {
      struct anv_bo *bo = bos[i];

      if (bo->gtt_mapped)
         continue;

      struct vk_rmv_token token = {
         .type = VK_RMV_TOKEN_TYPE_PAGE_TABLE_UPDATE,
         .timestamp = (uint64_t)os_time_get_nano(),
         .data = {
            .page_table_update = {
               .type = VK_RMV_PAGE_TABLE_UPDATE_TYPE_UPDATE,
               .page_size = device->info->mem_alignment,
               .page_count = DIV_ROUND_UP(bo->size,
                                          device->info->mem_alignment),
               .pid = getpid(),
               .virtual_address = bo->offset,
               .physical_address = bo->offset,
               .is_unmap = false,
            },
         },
      };
      util_dynarray_append(&device->vk.memory_trace_data.tokens,
                           struct vk_rmv_token, token);

      bo->gtt_mapped = true;
   }
   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
anv_rmv_log_vm_binds(struct anv_device *device,
                     struct anv_vm_bind *binds,
                     uint32_t bind_count)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
   for (uint32_t i = 0; i < bind_count; i++) {

      struct vk_rmv_token token = {
         .type = VK_RMV_TOKEN_TYPE_PAGE_TABLE_UPDATE,
         .timestamp = (uint64_t)os_time_get_nano(),
         .data = {
            .page_table_update = {
               .type = VK_RMV_PAGE_TABLE_UPDATE_TYPE_UPDATE,
               .page_size = device->info->mem_alignment,
               .page_count = DIV_ROUND_UP(binds[i].size,
                                          device->info->mem_alignment),
               .pid = getpid(),
               .virtual_address = binds[i].address,
               .physical_address = binds[i].bo_offset,
               .is_unmap = binds[i].op == ANV_VM_UNBIND,
            },
         },
      };
      util_dynarray_append(&device->vk.memory_trace_data.tokens,
                           struct vk_rmv_token, token);
   }
   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
anv_rmv_log_heap_create(struct anv_device *device,
                        struct anv_device_memory *memory,
                        bool is_internal,
                        VkMemoryAllocateFlags alloc_flags)
{
   /* Do not log zero-sized device memory objects. */
   if (!memory->vk.size)
      return;

   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   struct vk_rmv_resource_create_token token = {
      .type = VK_RMV_RESOURCE_TYPE_HEAP,
      .resource_id = resource_id_locked(device, memory),
      .is_driver_internal = is_internal,
      .heap = {
         .alignment = device->info->mem_alignment,
         .size = memory->vk.size,
         .heap_index = anv_heap_index_to_memory_location(device,
                                                         memory->type->heapIndex),
         .alloc_flags = alloc_flags,
      },
   };

   vk_rmv_emit_token(&device->vk.memory_trace_data, VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &token);
   log_resource_bind_locked(device, token.resource_id, memory->bo, 0, memory->vk.size);
   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

static void
anv_rmv_log_vma_locked(struct anv_device *device, uint64_t address, uint64_t size,
                       bool internal, bool vram, bool in_invisible_vram)
{
   struct vk_rmv_virtual_allocate_token token = {
      .address = address,
      /* If all VRAM is visible, no bo will be in invisible memory. */
      .is_in_invisible_vram = in_invisible_vram,
      .preferred_domains = (vram ?
                            VK_RMV_KERNEL_MEMORY_DOMAIN_VRAM :
                            VK_RMV_KERNEL_MEMORY_DOMAIN_GTT),
      .is_driver_internal = internal,
      .page_count = DIV_ROUND_UP(size, 4096),
   };

   vk_rmv_emit_token(&device->vk.memory_trace_data, VK_RMV_TOKEN_TYPE_VIRTUAL_ALLOCATE, &token);
}

void
anv_rmv_log_bo_allocate(struct anv_device *device,
                        struct anv_bo *bo)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
   anv_rmv_log_vma_locked(device, bo->offset, bo->size,
                          bo->alloc_flags & ANV_BO_ALLOC_INTERNAL,
                          (bo->alloc_flags & ANV_BO_ALLOC_NO_LOCAL_MEM) == 0,
                          device->physical->vram_non_mappable.size != 0 &&
                          (bo->alloc_flags & (ANV_BO_ALLOC_MAPPED |
                                              ANV_BO_ALLOC_HOST_CACHED_COHERENT |
                                              ANV_BO_ALLOC_LOCAL_MEM_CPU_VISIBLE |
                                              ANV_BO_ALLOC_NO_LOCAL_MEM)) == 0);
   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);

   if (bo->alloc_flags & ANV_BO_ALLOC_MAPPED)
      vk_rmv_log_cpu_map(&device->vk, bo->offset, false);
}

void
anv_rmv_log_bo_destroy(struct anv_device *device, struct anv_bo *bo)
{
   struct vk_rmv_virtual_free_token token = {
      .address = bo->offset,
   };

   if (bo->alloc_flags & ANV_BO_ALLOC_MAPPED)
      vk_rmv_log_cpu_map(&device->vk, bo->offset, true);
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
   anv_rmv_log_bo_gtt_unmap_locked(device, bo);
   vk_rmv_emit_token(&device->vk.memory_trace_data, VK_RMV_TOKEN_TYPE_VIRTUAL_FREE, &token);
   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}
void
anv_rmv_log_buffer_create(struct anv_device *device,
                          bool is_internal,
                          struct anv_buffer *buffer)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
   struct vk_rmv_resource_create_token token = {
      .type = VK_RMV_RESOURCE_TYPE_BUFFER,
      .is_driver_internal = is_internal,
      .resource_id = resource_id_locked(device, buffer),
      .buffer = {
         .create_flags = buffer->vk.create_flags,
         .size = buffer->vk.size,
         .usage_flags = buffer->vk.usage,
      },
   };

   vk_rmv_emit_token(&device->vk.memory_trace_data, VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &token);
   if (buffer->vk.create_flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) {
      assert(buffer->sparse_data.size != 0);
      anv_rmv_log_vma_locked(device,
                             buffer->sparse_data.address,
                             buffer->sparse_data.size,
                             false /* internal */, true /* TODO: vram */,
                             true /* in_invisible_vram */);
      log_resource_bind_locked(device,
                               resource_id_locked(device, buffer),
                               NULL,
                               buffer->sparse_data.address,
                               buffer->sparse_data.size);
   }
   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
anv_rmv_log_buffer_destroy(struct anv_device *device,
                           struct anv_buffer *buffer)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
   if (buffer->vk.create_flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) {
      struct vk_rmv_virtual_free_token token = {
         .address = buffer->sparse_data.address,
      };
      vk_rmv_emit_token(&device->vk.memory_trace_data, VK_RMV_TOKEN_TYPE_VIRTUAL_FREE, &token);
   }
   resource_destroy_locked(device, buffer);
   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
anv_rmv_log_buffer_bind(struct anv_device *device, struct anv_buffer *buffer)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
   log_resource_bind_locked(device,
                            resource_id_locked(device, buffer),
                            buffer->address.bo,
                            buffer->address.offset, buffer->vk.size);
   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
anv_rmv_log_image_create(struct anv_device *device,
                         bool is_internal,
                         struct anv_image *image)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
   struct vk_rmv_resource_create_token token = {
      .type = VK_RMV_RESOURCE_TYPE_IMAGE,
      .resource_id = resource_id_locked(device, image),
      .is_driver_internal = is_internal,
      .image = {
         .create_flags = image->vk.create_flags,
         .usage_flags = image->vk.usage,
         .type = image->vk.image_type,
         .extent = image->vk.extent,
         .format = image->vk.format,
         .num_mips = image->vk.mip_levels,
         .num_slices = image->vk.array_layers,
         .tiling = image->vk.tiling,
         .alignment_log2 = util_logbase2(
            image->bindings[ANV_IMAGE_MEMORY_BINDING_MAIN].memory_range.alignment),
         .log2_samples = util_logbase2(image->vk.samples),
         .metadata_alignment_log2 = util_logbase2(
            image->planes[0].aux_surface.isl.alignment_B),
         .image_alignment_log2 = util_logbase2(
            image->planes[0].primary_surface.isl.alignment_B),
         .size = image->planes[0].primary_surface.memory_range.size,
         .metadata_size = image->planes[0].aux_surface.memory_range.size,
         .metadata_header_size = 0,
         .metadata_offset = image->planes[0].aux_surface.memory_range.offset,
         .metadata_header_offset = image->planes[0].aux_surface.memory_range.offset,
         .presentable = (image->planes[0].primary_surface.isl.usage &
                         ISL_SURF_USAGE_DISPLAY_BIT) != 0,
      },
   };

   vk_rmv_emit_token(&device->vk.memory_trace_data, VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &token);
   if (image->vk.create_flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) {
      for (uint32_t b = 0; b < ARRAY_SIZE(image->bindings); b++) {
         if (image->bindings[b].sparse_data.size != 0) {
            anv_rmv_log_vma_locked(device,
                                   image->bindings[b].sparse_data.address,
                                   image->bindings[b].sparse_data.size,
                                   false /* internal */, true /* TODO: vram */,
                                   true /* in_invisible_vram */);
            log_resource_bind_locked(device,
                                     resource_id_locked(device, image),
                                     NULL,
                                     image->bindings[b].sparse_data.address,
                                     image->bindings[b].sparse_data.size);
         }
      }
   }
   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
anv_rmv_log_image_destroy(struct anv_device *device,
                          struct anv_image *image)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
   if (image->vk.create_flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) {
      for (uint32_t b = 0; b < ARRAY_SIZE(image->bindings); b++) {
         if (image->bindings[b].sparse_data.size != 0) {
            struct vk_rmv_virtual_free_token token = {
               .address = image->bindings[b].sparse_data.address,
            };

            vk_rmv_emit_token(&device->vk.memory_trace_data, VK_RMV_TOKEN_TYPE_VIRTUAL_FREE, &token);
         }
      }
   }
   resource_destroy_locked(device, image);
   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
anv_rmv_log_image_bind(struct anv_device *device,
                       struct anv_image *image,
                       enum anv_image_memory_binding binding)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
   log_resource_bind_locked(device,
                            resource_id_locked(device, image),
                            image->bindings[binding].address.bo,
                            image->bindings[binding].address.offset,
                            image->bindings[binding].memory_range.size);
   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}
void
anv_rmv_log_query_pool_create(struct anv_device *device,
                              struct anv_query_pool *pool,
                              bool is_internal)
{
   if (pool->vk.query_type != VK_QUERY_TYPE_OCCLUSION &&
       pool->vk.query_type != VK_QUERY_TYPE_PIPELINE_STATISTICS &&
       pool->vk.query_type != VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT)
      return;

   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
   struct vk_rmv_resource_create_token create_token = {
      .type = VK_RMV_RESOURCE_TYPE_QUERY_HEAP,
      .resource_id = resource_id_locked(device, pool),
      .is_driver_internal = is_internal,
      .query_pool = {
         .type = pool->vk.query_type,
         .has_cpu_access = true,
      },
   };

   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &create_token);
   log_resource_bind_locked(device, create_token.resource_id,
                            pool->bo, 0, pool->bo->size);
   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

static void
bind_cmd_buffer_state_stream_locked(struct anv_device *device,
                                    uint64_t resource_id,
                                    struct anv_state_stream *stream)
{
   util_dynarray_foreach(&stream->all_blocks, struct anv_state, block)
      log_state_pool_bind_locked(device, resource_id, stream->state_pool, block);
}

void
anv_rmv_log_cmd_buffer_create(struct anv_device *device,
                              struct anv_cmd_buffer *cmd_buffer)
{
   uint64_t data_size =
      cmd_buffer->surface_state_stream.total_size +
      cmd_buffer->dynamic_state_stream.total_size +
      cmd_buffer->general_state_stream.total_size +
      cmd_buffer->indirect_push_descriptor_stream.total_size;

   uint64_t executable_size = 0;
   list_for_each_entry(struct anv_batch_bo, bbo, &cmd_buffer->batch_bos, link)
      executable_size += bbo->length;

   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
   struct vk_rmv_resource_create_token create_token = {
      .type = VK_RMV_RESOURCE_TYPE_COMMAND_ALLOCATOR,
      .resource_id = resource_id_locked(device, cmd_buffer),
      .is_driver_internal = true,
      .command_buffer = {
         .preferred_domain = VK_RMV_KERNEL_MEMORY_DOMAIN_GTT /* TODO */,
         .executable_size = executable_size,
         .app_available_executable_size = executable_size,
         .embedded_data_size = data_size,
         .app_available_embedded_data_size = data_size,
         .scratch_size = 0,
         .app_available_scratch_size = 0,
      },
   };

   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_CREATE,
                     &create_token);
   list_for_each_entry(struct anv_batch_bo, bbo, &cmd_buffer->batch_bos, link) {
      log_resource_bind_locked(device, create_token.resource_id,
                               bbo->bo, 0, bbo->length);
   }
   bind_cmd_buffer_state_stream_locked(device, create_token.resource_id,
                                       &cmd_buffer->surface_state_stream);
   bind_cmd_buffer_state_stream_locked(device, create_token.resource_id,
                                       &cmd_buffer->dynamic_state_stream);
   bind_cmd_buffer_state_stream_locked(device, create_token.resource_id,
                                       &cmd_buffer->general_state_stream);
   bind_cmd_buffer_state_stream_locked(device, create_token.resource_id,
                                       &cmd_buffer->indirect_push_descriptor_stream);
   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
anv_rmv_log_cmd_buffer_destroy(struct anv_device *device,
                               struct anv_cmd_buffer *cmd_buffer)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
   struct vk_rmv_resource_destroy_token destroy_token = {
      .resource_id = resource_id_locked(device, cmd_buffer),
   };

   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_DESTROY, &destroy_token);
   resource_destroy_locked(device, cmd_buffer);
   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
anv_rmv_log_sparse_add_residency(struct anv_device *device,
                                 struct anv_bo *src_bo,
                                 uint64_t offset)
{
   struct vk_rmv_resource_reference_token token = {
      .virtual_address = src_bo->offset + offset,
      .residency_removed = false,
   };

   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_REFERENCE, &token);
   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
anv_rmv_log_sparse_remove_residency(struct anv_device *device,
                                    struct anv_bo *src_bo,
                                    uint64_t offset)
{
   struct vk_rmv_resource_reference_token token = {
      .virtual_address = src_bo->offset + offset,
      .residency_removed = true,
   };

   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_REFERENCE, &token);
   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
anv_rmv_log_descriptor_pool_create(struct anv_device *device,
                                   const VkDescriptorPoolCreateInfo *create_info,
                                   struct anv_descriptor_pool *pool,
                                   bool is_internal)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
   struct vk_rmv_resource_create_token create_token = {
      .type = VK_RMV_RESOURCE_TYPE_DESCRIPTOR_POOL,
      .resource_id = resource_id_locked(device, pool),
      .is_driver_internal = false,
      .descriptor_pool = {
         .max_sets = create_info->maxSets,
         .pool_size_count = create_info->poolSizeCount,
         /* Using vk_rmv_token_pool_alloc frees the allocation automatically
          * when the trace is done. */
         .pool_sizes = malloc(create_info->poolSizeCount *
                              sizeof(VkDescriptorPoolSize)),
      },
   };

   if (!create_token.descriptor_pool.pool_sizes) {
      simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
      return;
   }

   memcpy(create_token.descriptor_pool.pool_sizes, create_info->pPoolSizes,
          create_info->poolSizeCount * sizeof(VkDescriptorPoolSize));

   vk_rmv_emit_token(&device->vk.memory_trace_data,
                     VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &create_token);
   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);

   if (pool->surfaces.bo) {
      struct vk_rmv_resource_bind_token bind_token = {
         .resource_id = create_token.resource_id,
         .is_system_memory = false,
         .address = pool->surfaces.bo->offset,
         .size = pool->surfaces.bo->size,
      };

      simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
      vk_rmv_emit_token(&device->vk.memory_trace_data, VK_RMV_TOKEN_TYPE_RESOURCE_BIND, &bind_token);
      simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
   }
   if (pool->samplers.bo) {
      struct vk_rmv_resource_bind_token bind_token = {
         .resource_id = create_token.resource_id,
         .is_system_memory = false,
         .address = pool->samplers.bo->offset,
         .size = pool->samplers.bo->size,
      };

      simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
      vk_rmv_emit_token(&device->vk.memory_trace_data, VK_RMV_TOKEN_TYPE_RESOURCE_BIND, &bind_token);
      simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
   }
}
void
anv_rmv_log_graphics_pipeline_create(struct anv_device *device,
                                     struct anv_graphics_pipeline *pipeline,
                                     bool is_internal)
{
   struct vk_rmv_resource_create_token create_token = {
      .type = VK_RMV_RESOURCE_TYPE_PIPELINE,
      .resource_id = resource_id_locked(device, pipeline),
      .is_driver_internal = is_internal,
      .pipeline = {
         .is_internal = is_internal,
         .hash_lo = 0, /* TODO pipeline->pipeline_hash; */
         .shader_stages = pipeline->base.base.active_stages,
      },
   };

   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
   vk_rmv_emit_token(&device->vk.memory_trace_data, VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &create_token);
   for (unsigned s = 0; s < ARRAY_SIZE(pipeline->base.shaders); s++) {
      struct anv_shader_bin *shader = pipeline->base.shaders[s];

      if (!shader)
         continue;

      log_state_pool_bind_locked(device, create_token.resource_id,
                                 &device->instruction_state_pool,
                                 &shader->kernel);
   }
   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
anv_rmv_log_compute_pipeline_create(struct anv_device *device,
                                    struct anv_compute_pipeline *pipeline,
                                    bool is_internal)
{
   VkShaderStageFlagBits active_stages =
      pipeline->base.type == ANV_PIPELINE_COMPUTE ?
      VK_SHADER_STAGE_COMPUTE_BIT : VK_SHADER_STAGE_RAYGEN_BIT_KHR;

   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
   struct vk_rmv_resource_create_token create_token = {
      .type = VK_RMV_RESOURCE_TYPE_PIPELINE,
      .resource_id = resource_id_locked(device, pipeline),
      .is_driver_internal = is_internal,
      .pipeline = {
         .is_internal = is_internal,
         .hash_lo = 0, /* TODO pipeline->pipeline_hash; */
         .shader_stages = active_stages,
      },
   };

   vk_rmv_emit_token(&device->vk.memory_trace_data, VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &create_token);
   struct anv_shader_bin *shader = pipeline->cs;
   log_state_pool_bind_locked(device, create_token.resource_id,
                              &device->instruction_state_pool,
                              &shader->kernel);
   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
anv_rmv_log_rt_pipeline_create(struct anv_device *device,
                               struct anv_ray_tracing_pipeline *pipeline,
                               bool is_internal)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);

   struct vk_rmv_resource_create_token create_token = {
      .resource_id = resource_id_locked(device, pipeline),
      .type = VK_RMV_RESOURCE_TYPE_PIPELINE,
      .is_driver_internal = is_internal,
      .pipeline = {
         .is_internal = is_internal,
         .hash_lo = 0, /* TODO */
         .shader_stages = pipeline->base.active_stages,
      },
   };
   vk_rmv_emit_token(&device->vk.memory_trace_data, VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &create_token);

   struct anv_state_pool *state_pool = &device->instruction_state_pool;
   for (uint32_t i = 0; i < pipeline->group_count; i++) {
      struct anv_rt_shader_group *group = &pipeline->groups[i];

      if (group->imported)
         continue;

      if (group->general) {
         log_state_pool_bind_locked(device, create_token.resource_id, state_pool,
                                    &group->general->kernel);
      }
      if (group->closest_hit) {
         log_state_pool_bind_locked(device, create_token.resource_id, state_pool,
                                    &group->closest_hit->kernel);
      }
      if (group->any_hit) {
         log_state_pool_bind_locked(device, create_token.resource_id, state_pool,
                                    &group->any_hit->kernel);
      }
      if (group->intersection) {
         log_state_pool_bind_locked(device, create_token.resource_id, state_pool,
                                    &group->intersection->kernel);
      }
   }

   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
anv_rmv_log_event_create(struct anv_device *device,
                         struct anv_event *event,
                         VkEventCreateFlags flags,
                         bool is_internal)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
   struct vk_rmv_resource_create_token create_token = {
      .type = VK_RMV_RESOURCE_TYPE_GPU_EVENT,
      .resource_id = resource_id_locked(device, event),
      .is_driver_internal = is_internal,
      .event = {
         .flags = flags,
      },
   };

   vk_rmv_emit_token(&device->vk.memory_trace_data, VK_RMV_TOKEN_TYPE_RESOURCE_CREATE, &create_token);
   log_state_pool_bind_locked(device, create_token.resource_id,
                              &device->dynamic_state_pool,
                              &event->state);
   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}

void
anv_rmv_log_resource_destroy(struct anv_device *device, const void *obj)
{
   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
   struct vk_rmv_resource_destroy_token token = {
      .resource_id = resource_id_locked(device, obj),
   };

   vk_rmv_emit_token(&device->vk.memory_trace_data, VK_RMV_TOKEN_TYPE_RESOURCE_DESTROY, &token);
   resource_destroy_locked(device, obj);
   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
}
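
A note on the conventions in anv_rmv.c above: public loggers take vk.memory_trace_data.token_mtx themselves, while the _locked helpers assume it is already held, so any new hook should follow the same shape. A hypothetical logger (illustrative only, not part of this patch) built from the file's own helpers:

    /* Hypothetical example: acquire the token lock, build a token using the
     * _locked ID helpers, emit it, release the lock. */
    void
    anv_rmv_log_example_destroy(struct anv_device *device, const void *obj)
    {
       simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
       struct vk_rmv_resource_destroy_token token = {
          .resource_id = resource_id_locked(device, obj),
       };
       vk_rmv_emit_token(&device->vk.memory_trace_data,
                         VK_RMV_TOKEN_TYPE_RESOURCE_DESTROY, &token);
       simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);
    }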
src/intel/vulkan/anv_rmv.h  (new file, 118 lines)
@@ -0,0 +1,118 @@
/*
 * Copyright © 2024 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef ANV_RMV_H
#define ANV_RMV_H

#include <stdbool.h>
#include <stdint.h>

#include "vulkan/vulkan_core.h"

struct anv_device;
struct anv_device_memory;
struct anv_physical_device;
struct anv_descriptor_pool;
struct anv_buffer;
struct anv_image;
struct anv_bo;
struct anv_event;
struct anv_graphics_pipeline;
struct anv_compute_pipeline;
struct anv_ray_tracing_pipeline;

enum anv_image_memory_binding;

#define ANV_RMV(func, device, ...) do { \
   if (unlikely((device)->vk.memory_trace_data.is_enabled)) \
      anv_rmv_log_##func(device, __VA_ARGS__); \
} while (0)

void anv_memory_trace_init(struct anv_device *device);
void anv_rmv_fill_device_info(const struct anv_physical_device *device,
                              struct vk_rmv_device_info *info);
void anv_memory_trace_finish(struct anv_device *device);

void anv_rmv_log_heap_create(struct anv_device *device,
                             struct anv_device_memory *memory,
                             bool is_internal,
                             VkMemoryAllocateFlags alloc_flags);
void anv_rmv_log_bo_gtt_map(struct anv_device *device,
                            struct anv_bo *bo);
void anv_rmv_log_bo_gtt_unmap(struct anv_device *device,
                              struct anv_bo *bo);
void anv_rmv_log_bos_gtt_map(struct anv_device *device,
                             struct anv_bo **bos,
                             uint32_t bo_count);
void anv_rmv_log_vm_binds(struct anv_device *device,
                          struct anv_vm_bind *binds,
                          uint32_t bind_count);
void anv_rmv_log_bo_allocate(struct anv_device *device,
                             struct anv_bo *bo);
void anv_rmv_log_bo_destroy(struct anv_device *device, struct anv_bo *bo);
void anv_rmv_log_buffer_create(struct anv_device *device,
                               bool is_internal,
                               struct anv_buffer *buffer);
void anv_rmv_log_buffer_destroy(struct anv_device *device,
                                struct anv_buffer *buffer);
void anv_rmv_log_buffer_bind(struct anv_device *device, struct anv_buffer *buffer);
void anv_rmv_log_image_create(struct anv_device *device,
                              bool is_internal,
                              struct anv_image *image);
void anv_rmv_log_image_destroy(struct anv_device *device,
                               struct anv_image *image);
void anv_rmv_log_image_bind(struct anv_device *device,
                            struct anv_image *image,
                            enum anv_image_memory_binding binding);
void anv_rmv_log_query_pool_create(struct anv_device *device,
                                   struct anv_query_pool *pool,
                                   bool is_internal);
void anv_rmv_log_cmd_buffer_create(struct anv_device *device,
                                   struct anv_cmd_buffer *cmd_buffer);
void anv_rmv_log_cmd_buffer_destroy(struct anv_device *device,
                                    struct anv_cmd_buffer *cmd_buffer);
void anv_rmv_log_sparse_add_residency(struct anv_device *device,
                                      struct anv_bo *src_bo,
                                      uint64_t offset);
void anv_rmv_log_sparse_remove_residency(struct anv_device *device,
                                         struct anv_bo *src_bo,
                                         uint64_t offset);
void anv_rmv_log_descriptor_pool_create(struct anv_device *device,
                                        const VkDescriptorPoolCreateInfo *create_info,
                                        struct anv_descriptor_pool *pool,
                                        bool is_internal);
void anv_rmv_log_graphics_pipeline_create(struct anv_device *device,
                                          struct anv_graphics_pipeline *pipeline,
                                          bool is_internal);
void anv_rmv_log_compute_pipeline_create(struct anv_device *device,
                                         struct anv_compute_pipeline *pipeline,
                                         bool is_internal);
void anv_rmv_log_rt_pipeline_create(struct anv_device *device,
                                    struct anv_ray_tracing_pipeline *pipeline,
                                    bool is_internal);
void anv_rmv_log_event_create(struct anv_device *device,
                              struct anv_event *event,
                              VkEventCreateFlags flags, bool is_internal);
void anv_rmv_log_resource_destroy(struct anv_device *device, const void *obj);

#endif /* ANV_RMV_H */
@@ -594,6 +594,9 @@ anv_sparse_bind_trtt(struct anv_device *device,
    if (trtt_submit.l3l2_binds_len || trtt_submit.l1_binds_len)
       result = anv_genX(device->info, write_trtt_entries)(&trtt_submit);
 
+   if (result == VK_SUCCESS)
+      ANV_RMV(vm_binds, device, sparse_submit->binds, sparse_submit->binds_len);
+
 out:
    pthread_mutex_unlock(&trtt->mutex);
    STACK_ARRAY_FINISH(l1_binds);
@@ -2815,6 +2815,8 @@ genX(EndCommandBuffer)(
       status = end_command_buffer(cmd_buffer->companion_rcs_cmd_buffer);
    }
 
+   ANV_RMV(cmd_buffer_create, cmd_buffer->device, cmd_buffer);
+
    return status;
 }
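
Note the placement here: the command buffer is logged as created at the end of vkEndCommandBuffer rather than at allocation time, presumably because the batch BO list and state-stream sizes that anv_rmv_log_cmd_buffer_create() records (see anv_rmv.c above) are only known once recording has finished.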
@@ -258,6 +258,8 @@ VkResult genX(CreateQueryPool)(
       }
    }
 
+   ANV_RMV(query_pool_create, device, pool, false);
+
    *pQueryPool = anv_query_pool_to_handle(pool);
 
    return VK_SUCCESS;
@@ -279,6 +281,8 @@ void genX(DestroyQueryPool)(
    if (!pool)
       return;
 
+   ANV_RMV(resource_destroy, device, pool);
+
    anv_device_release_bo(device, pool->bo);
    vk_object_free(&device->vk, pAllocator, pool);
 }
@@ -643,6 +643,8 @@ anv_queue_exec_utrace_locked(struct anv_queue *queue,
    if (result != VK_SUCCESS)
       goto error;
 
+   ANV_RMV(bos_gtt_map, device, execbuf.bos, execbuf.bo_count);
+
    int ret = queue->device->info->no_hw ? 0 :
       anv_gem_execbuffer(queue->device, &execbuf.execbuf);
    if (ret)
@@ -746,6 +748,8 @@ i915_companion_rcs_queue_exec_locked(struct anv_queue *queue,
 
    setup_execbuf_fence_params(&execbuf);
 
+   ANV_RMV(bos_gtt_map, device, execbuf.bos, execbuf.bo_count);
+
    int ret = queue->device->info->no_hw ? 0 :
       anv_gem_execbuffer(queue->device, &execbuf.execbuf);
    if (ret) {
@@ -895,6 +899,8 @@ i915_queue_exec_locked(struct anv_queue *queue,
       result = vk_queue_set_lost(&queue->vk, "execbuf2 failed: %m");
    }
 
+   ANV_RMV(bos_gtt_map, device, execbuf.bos, execbuf.bo_count);
+
    int ret = queue->device->info->no_hw ? 0 :
      anv_gem_execbuffer(queue->device, &execbuf.execbuf);
    if (ret) {
@@ -958,6 +964,8 @@ i915_execute_simple_batch(struct anv_queue *queue, struct anv_bo *batch_bo,
       .rsvd2 = 0,
    };
 
+   ANV_RMV(bos_gtt_map, device, execbuf.bos, execbuf.bo_count);
+
    if (anv_gem_execbuffer(device, &execbuf.execbuf)) {
       result = vk_device_set_lost(&device->vk, "anv_gem_execbuffer failed: %m");
       goto fail;
@@ -1051,6 +1059,8 @@ i915_execute_trtt_batch(struct anv_sparse_submission *submit,
    };
    setup_execbuf_fence_params(&execbuf);
 
+   ANV_RMV(bos_gtt_map, device, execbuf.bos, execbuf.bo_count);
+
    int ret = queue->device->info->no_hw ? 0 :
      anv_gem_execbuffer(device, &execbuf.execbuf);
    if (ret) {
src/intel/vulkan/layers/anv_rmv_layer.c  (new file, 136 lines)
@@ -0,0 +1,136 @@
/*
 * Copyright © 2023 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "rmv/vk_rmv_common.h"
#include "rmv/vk_rmv_tokens.h"
#include "anv_private.h"
#include "vk_common_entrypoints.h"
#include "wsi_common_entrypoints.h"

VkResult anv_rmv_QueuePresentKHR(
    VkQueue _queue,
    const VkPresentInfoKHR* pPresentInfo)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   struct anv_device *device = queue->device;

   VkResult res = anv_QueuePresentKHR(_queue, pPresentInfo);
   if ((res != VK_SUCCESS && res != VK_SUBOPTIMAL_KHR) ||
       !device->vk.memory_trace_data.is_enabled)
      return res;

   vk_rmv_log_misc_token(&device->vk, VK_RMV_MISC_EVENT_TYPE_PRESENT);

   return VK_SUCCESS;
}

VkResult anv_rmv_FlushMappedMemoryRanges(
    VkDevice _device,
    uint32_t memoryRangeCount,
    const VkMappedMemoryRange* pMemoryRanges)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   VkResult res = anv_FlushMappedMemoryRanges(_device, memoryRangeCount, pMemoryRanges);
   if (res != VK_SUCCESS || !device->vk.memory_trace_data.is_enabled)
      return res;

   vk_rmv_log_misc_token(&device->vk, VK_RMV_MISC_EVENT_TYPE_FLUSH_MAPPED_RANGE);

   return VK_SUCCESS;
}

VkResult anv_rmv_InvalidateMappedMemoryRanges(
    VkDevice _device,
    uint32_t memoryRangeCount,
    const VkMappedMemoryRange* pMemoryRanges)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   VkResult res = anv_InvalidateMappedMemoryRanges(_device, memoryRangeCount, pMemoryRanges);
   if (res != VK_SUCCESS || !device->vk.memory_trace_data.is_enabled)
      return res;

   vk_rmv_log_misc_token(&device->vk, VK_RMV_MISC_EVENT_TYPE_INVALIDATE_RANGES);

   return VK_SUCCESS;
}

VkResult anv_rmv_DebugMarkerSetObjectNameEXT(
    VkDevice device,
    const VkDebugMarkerObjectNameInfoEXT* pNameInfo)
{
   assert(pNameInfo->sType == VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT);
   VkDebugUtilsObjectNameInfoEXT name_info;
   name_info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
   name_info.objectType = pNameInfo->objectType;
   name_info.objectHandle = pNameInfo->object;
   name_info.pObjectName = pNameInfo->pObjectName;
   return anv_rmv_SetDebugUtilsObjectNameEXT(device, &name_info);
}

VkResult anv_rmv_SetDebugUtilsObjectNameEXT(
    VkDevice _device,
    const VkDebugUtilsObjectNameInfoEXT* pNameInfo)
{
   assert(pNameInfo->sType == VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT);
   ANV_FROM_HANDLE(anv_device, device, _device);

   VkResult result = vk_common_SetDebugUtilsObjectNameEXT(_device, pNameInfo);
   if (result != VK_SUCCESS || !device->vk.memory_trace_data.is_enabled)
      return result;

   switch (pNameInfo->objectType) {
   /* only name object types we care about */
   case VK_OBJECT_TYPE_BUFFER:
   case VK_OBJECT_TYPE_DEVICE_MEMORY:
   case VK_OBJECT_TYPE_IMAGE:
   case VK_OBJECT_TYPE_EVENT:
   case VK_OBJECT_TYPE_QUERY_POOL:
   case VK_OBJECT_TYPE_DESCRIPTOR_POOL:
   case VK_OBJECT_TYPE_PIPELINE:
      break;
   default:
      return VK_SUCCESS;
   }

   size_t name_len = strlen(pNameInfo->pObjectName);
   char *name_buf = malloc(name_len + 1);
   if (!name_buf) {
      /* Silently fail, so that applications may still continue if possible. */
      return VK_SUCCESS;
   }
   strcpy(name_buf, pNameInfo->pObjectName);

   simple_mtx_lock(&device->vk.memory_trace_data.token_mtx);
   struct vk_rmv_userdata_token token;
   token.name = name_buf;
   token.resource_id = vk_rmv_get_resource_id_locked(&device->vk, pNameInfo->objectHandle);

   vk_rmv_emit_token(&device->vk.memory_trace_data, VK_RMV_TOKEN_TYPE_USERDATA, &token);
   simple_mtx_unlock(&device->vk.memory_trace_data.token_mtx);

   return VK_SUCCESS;
}
@@ -45,6 +45,7 @@ anv_entrypoints = custom_target(
     '--device-prefix', 'anv_doom64',
     '--device-prefix', 'anv_hitman3',
     '--device-prefix', 'anv_android',
+    '--device-prefix', 'anv_rmv',
   ],
   depend_files : vk_entrypoints_gen_depend_files,
 )
@@ -148,6 +149,7 @@ libanv_files = files(
   'layers/anv_android_layer.c',
   'layers/anv_doom64.c',
   'layers/anv_hitman3.c',
+  'layers/anv_rmv_layer.c',
   'xe/anv_batch_chain.c',
   'xe/anv_batch_chain.h',
   'xe/anv_kmd_backend.c',
@@ -187,6 +189,8 @@ libanv_files = files(
   'anv_pipeline_cache.c',
   'anv_private.h',
   'anv_queue.c',
+  'anv_rmv.c',
+  'anv_rmv.h',
   'anv_sparse.c',
   'anv_util.c',
   'anv_utrace.c',
@@ -192,6 +192,8 @@ xe_vm_bind_op(struct anv_device *device,
    if (ret)
       goto out_destroy_syncobj;
 
+   ANV_RMV(vm_binds, device, submit->binds, submit->binds_len);
+
    syncobj_wait.handles = (uintptr_t)&xe_sync.handle;
    ret = intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_WAIT, &syncobj_wait);