anv: Add an anv_physical_device field to anv_device
Having to always pull the physical device from the instance has been
annoying for almost as long as the driver has existed. It also won't work
in a world where we ever have more than one physical device. This commit
adds a new field called "physical" to anv_device and switches every
location where we use device->instance->physicalDevice to use the new
field instead.

Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/merge_requests/3461>
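To make the mechanical change easier to follow, here is a minimal,
self-contained sketch of the before/after access pattern. The struct
definitions are deliberately simplified stand-ins for the real ones in
anv_private.h, and the two helper functions are hypothetical, added only
for illustration:

#include <stdbool.h>

/* Simplified stand-ins; the real structs carry many more fields. */
struct anv_physical_device {
   bool use_softpin;
};

struct anv_instance {
   /* Old model: the instance embeds exactly one physical device. */
   struct anv_physical_device physicalDevice;
};

struct anv_device {
   struct anv_instance *instance;
   /* New field added by this commit: set once at device creation and
    * meaningful even when an instance enumerates several GPUs. */
   struct anv_physical_device *physical;
};

/* Before: every call site chased two pointers through the instance. */
static bool use_softpin_old(const struct anv_device *device)
{
   return device->instance->physicalDevice.use_softpin;
}

/* After: a single direct dereference. */
static bool use_softpin_new(const struct anv_device *device)
{
   return device->physical->use_softpin;
}

Besides reading better, the pointer form is what makes a multi-GPU world
possible: an instance can own several anv_physical_device objects, and
each logical device simply points at the one it was created from.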
commit 70e8064e13
parent 735a3ba007
committed by Marge Bot
@@ -369,7 +369,7 @@ anv_block_pool_init(struct anv_block_pool *pool,
    VkResult result;

    pool->device = device;
-   pool->use_softpin = device->instance->physicalDevice.use_softpin;
+   pool->use_softpin = device->physical->use_softpin;
    pool->nbos = 0;
    pool->size = 0;
    pool->center_bo_offset = 0;

@@ -1376,11 +1376,9 @@ anv_scratch_pool_alloc(struct anv_device *device, struct anv_scratch_pool *pool,
    if (bo != NULL)
       return bo;

-   const struct anv_physical_device *physical_device =
-      &device->instance->physicalDevice;
-   const struct gen_device_info *devinfo = &physical_device->info;
+   const struct gen_device_info *devinfo = &device->info;

-   const unsigned subslices = MAX2(physical_device->subslice_total, 1);
+   const unsigned subslices = MAX2(device->physical->subslice_total, 1);

    unsigned scratch_ids_per_subslice;
    if (devinfo->gen >= 11) {

@@ -1499,7 +1497,7 @@ static uint32_t
 anv_bo_alloc_flags_to_bo_flags(struct anv_device *device,
                                enum anv_bo_alloc_flags alloc_flags)
 {
-   struct anv_physical_device *pdevice = &device->instance->physicalDevice;
+   struct anv_physical_device *pdevice = device->physical;

    uint64_t bo_flags = 0;
    if (!(alloc_flags & ANV_BO_ALLOC_32BIT_ADDRESS) &&
@@ -192,7 +192,6 @@ anv_GetAndroidHardwareBufferPropertiesANDROID(
     VkAndroidHardwareBufferPropertiesANDROID *pProperties)
 {
    ANV_FROM_HANDLE(anv_device, dev, device_h);
-   struct anv_physical_device *pdevice = &dev->instance->physicalDevice;

    VkAndroidHardwareBufferFormatPropertiesANDROID *format_prop =
       vk_find_struct(pProperties->pNext,

@@ -214,7 +213,7 @@ anv_GetAndroidHardwareBufferPropertiesANDROID(
       return VK_ERROR_INVALID_EXTERNAL_HANDLE;

    /* All memory types. */
-   uint32_t memory_types = (1ull << pdevice->memory.type_count) - 1;
+   uint32_t memory_types = (1ull << dev->physical->memory.type_count) - 1;

    pProperties->allocationSize = lseek(dma_buf, 0, SEEK_END);
    pProperties->memoryTypeBits = memory_types;

@@ -550,8 +549,7 @@ format_supported_with_usage(VkDevice device_h, VkFormat format,
                             VkImageUsageFlags imageUsage)
 {
    ANV_FROM_HANDLE(anv_device, device, device_h);
-   struct anv_physical_device *phys_dev = &device->instance->physicalDevice;
-   VkPhysicalDevice phys_dev_h = anv_physical_device_to_handle(phys_dev);
+   VkPhysicalDevice phys_dev_h = anv_physical_device_to_handle(device->physical);
    VkResult result;

    const VkPhysicalDeviceImageFormatInfo2 image_format_info = {

@@ -679,9 +677,6 @@ VkResult anv_GetSwapchainGrallocUsageANDROID(
     VkImageUsageFlags imageUsage,
     int* grallocUsage)
 {
-   ANV_FROM_HANDLE(anv_device, device, device_h);
-   struct anv_physical_device *phys_dev = &device->instance->physicalDevice;
-   VkPhysicalDevice phys_dev_h = anv_physical_device_to_handle(phys_dev);
    VkResult result;

    *grallocUsage = 0;
@@ -465,7 +465,7 @@ anv_batch_bo_link(struct anv_cmd_buffer *cmd_buffer,
    assert(((*bb_start >> 29) & 0x07) == 0);
    assert(((*bb_start >> 23) & 0x3f) == 49);

-   if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
+   if (cmd_buffer->device->physical->use_softpin) {
       assert(prev_bbo->bo->flags & EXEC_OBJECT_PINNED);
       assert(next_bbo->bo->flags & EXEC_OBJECT_PINNED);

@@ -722,7 +722,7 @@ anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
    cmd_buffer->bt_next.map += bt_size;
    cmd_buffer->bt_next.alloc_size -= bt_size;

-   if (device->instance->physicalDevice.use_softpin) {
+   if (device->physical->use_softpin) {
       assert(bt_block->offset >= 0);
       *state_offset = device->surface_state_pool.block_pool.start_address -
          device->binding_table_pool.block_pool.start_address - bt_block->offset;

@@ -1383,7 +1383,7 @@ setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
    adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
                                       cmd_buffer->last_ss_pool_center);
    VkResult result;
-   if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
+   if (cmd_buffer->device->physical->use_softpin) {
       anv_block_pool_foreach_bo(bo, &ss_pool->block_pool) {
          result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
                                      bo, NULL, 0);

@@ -1486,7 +1486,7 @@ setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
    }

    /* If we are pinning our BOs, we shouldn't have to relocate anything */
-   if (cmd_buffer->device->instance->physicalDevice.use_softpin)
+   if (cmd_buffer->device->physical->use_softpin)
       assert(!execbuf->has_relocs);

    /* Now we go through and fixup all of the relocation lists to point to

@@ -1676,7 +1676,7 @@ anv_queue_execbuf_locked(struct anv_queue *queue,
    }

    if (submit->fence_count > 0) {
-      assert(device->instance->physicalDevice.has_syncobj);
+      assert(device->physical->has_syncobj);
       execbuf.execbuf.flags |= I915_EXEC_FENCE_ARRAY;
       execbuf.execbuf.num_cliprects = submit->fence_count;
       execbuf.execbuf.cliprects_ptr = (uintptr_t)submit->fences;
@@ -94,7 +94,7 @@ void
 anv_device_init_blorp(struct anv_device *device)
 {
    blorp_init(&device->blorp, device, &device->isl_dev);
-   device->blorp.compiler = device->instance->physicalDevice.compiler;
+   device->blorp.compiler = device->physical->compiler;
    device->blorp.lookup_shader = lookup_blorp_shader;
    device->blorp.upload_shader = upload_blorp_shader;
    switch (device->info.gen) {
@@ -245,8 +245,7 @@ void anv_GetDescriptorSetLayoutSupport(
     VkDescriptorSetLayoutSupport* pSupport)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
-   const struct anv_physical_device *pdevice =
-      &device->instance->physicalDevice;
+   const struct anv_physical_device *pdevice = device->physical;

    uint32_t surface_count[MESA_SHADER_STAGES] = { 0, };
    bool needs_descriptor_buffer = false;

@@ -427,7 +426,7 @@ VkResult anv_CreateDescriptorSetLayout(
       }

       set_layout->binding[b].data =
-         anv_descriptor_data_for_type(&device->instance->physicalDevice,
+         anv_descriptor_data_for_type(device->physical,
                                       binding->descriptorType);
       set_layout->binding[b].array_size = binding->descriptorCount;
       set_layout->binding[b].descriptor_index = set_layout->size;

@@ -683,7 +682,7 @@ VkResult anv_CreateDescriptorPool(
    uint32_t descriptor_bo_size = 0;
    for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; i++) {
       enum anv_descriptor_data desc_data =
-         anv_descriptor_data_for_type(&device->instance->physicalDevice,
+         anv_descriptor_data_for_type(device->physical,
                                       pCreateInfo->pPoolSizes[i].type);

       if (desc_data & ANV_DESCRIPTOR_BUFFER_VIEW)
@@ -2588,8 +2588,8 @@ gen_aux_map_buffer_alloc(void *driver_ctx, uint32_t size)
       return NULL;

    struct anv_device *device = (struct anv_device*)driver_ctx;
-   assert(device->instance->physicalDevice.supports_48bit_addresses &&
-          device->instance->physicalDevice.use_softpin);
+   assert(device->physical->supports_48bit_addresses &&
+          device->physical->use_softpin);

    struct anv_state_pool *pool = &device->dynamic_state_pool;
    buf->state = anv_state_pool_alloc(pool, size, size);

@@ -2698,6 +2698,7 @@ VkResult anv_CreateDevice(

    device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
    device->instance = physical_device->instance;
+   device->physical = physical_device;
    device->chipset_id = physical_device->chipset_id;
    device->no_hw = physical_device->no_hw;
    device->_lost = false;

@@ -2947,13 +2948,10 @@ void anv_DestroyDevice(
     const VkAllocationCallbacks* pAllocator)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
-   struct anv_physical_device *physical_device;

    if (!device)
       return;

-   physical_device = &device->instance->physicalDevice;
-
    anv_device_finish_blorp(device);

    anv_pipeline_cache_finish(&device->default_pipeline_cache);

@@ -2980,7 +2978,7 @@ void anv_DestroyDevice(
       device->aux_map_ctx = NULL;
    }

-   if (physical_device->use_softpin)
+   if (device->physical->use_softpin)
       anv_state_pool_finish(&device->binding_table_pool);
    anv_state_pool_finish(&device->surface_state_pool);
    anv_state_pool_finish(&device->instruction_state_pool);

@@ -2990,7 +2988,7 @@ void anv_DestroyDevice(

    anv_bo_cache_finish(&device->bo_cache);

-   if (physical_device->use_softpin) {
+   if (device->physical->use_softpin) {
       util_vma_heap_finish(&device->vma_hi);
       util_vma_heap_finish(&device->vma_cva);
       util_vma_heap_finish(&device->vma_lo);

@@ -3197,8 +3195,7 @@ bool
 anv_vma_alloc(struct anv_device *device, struct anv_bo *bo,
               uint64_t client_address)
 {
-   const struct anv_physical_device *pdevice = &device->instance->physicalDevice;
-   const struct gen_device_info *devinfo = &pdevice->info;
+   const struct gen_device_info *devinfo = &device->info;
    /* Gen12 CCS surface addresses need to be 64K aligned. We have no way of
     * telling what this allocation is for so pick the largest alignment.
     */

@@ -3292,7 +3289,7 @@ VkResult anv_AllocateMemory(
     VkDeviceMemory* pMem)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
-   struct anv_physical_device *pdevice = &device->instance->physicalDevice;
+   struct anv_physical_device *pdevice = device->physical;
    struct anv_device_memory *mem;
    VkResult result = VK_SUCCESS;

@@ -3566,13 +3563,12 @@ VkResult anv_GetMemoryFdPropertiesKHR(
     VkMemoryFdPropertiesKHR* pMemoryFdProperties)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
-   struct anv_physical_device *pdevice = &device->instance->physicalDevice;

    switch (handleType) {
    case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
       /* dma-buf can be imported as any memory type */
       pMemoryFdProperties->memoryTypeBits =
-         (1 << pdevice->memory.type_count) - 1;
+         (1 << device->physical->memory.type_count) - 1;
       return VK_SUCCESS;

    default:

@@ -3599,15 +3595,13 @@ VkResult anv_GetMemoryHostPointerPropertiesEXT(
                     VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT);

    switch (handleType) {
-   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT: {
-      struct anv_physical_device *pdevice = &device->instance->physicalDevice;
+   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT:

       /* Host memory can be imported as any memory type. */
       pMemoryHostPointerProperties->memoryTypeBits =
-         (1ull << pdevice->memory.type_count) - 1;
+         (1ull << device->physical->memory.type_count) - 1;

       return VK_SUCCESS;
-   }
    default:
       return VK_ERROR_INVALID_EXTERNAL_HANDLE;
    }

@@ -3620,7 +3614,6 @@ void anv_FreeMemory(
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
    ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
-   struct anv_physical_device *pdevice = &device->instance->physicalDevice;

    if (mem == NULL)
       return;

@@ -3632,7 +3625,7 @@ void anv_FreeMemory(
    if (mem->map)
       anv_UnmapMemory(_device, _mem);

-   p_atomic_add(&pdevice->memory.heaps[mem->type->heapIndex].used,
+   p_atomic_add(&device->physical->memory.heaps[mem->type->heapIndex].used,
                 -mem->bo->size);

    anv_device_release_bo(device, mem->bo);

@@ -3785,7 +3778,6 @@ void anv_GetBufferMemoryRequirements(
 {
    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
    ANV_FROM_HANDLE(anv_device, device, _device);
-   struct anv_physical_device *pdevice = &device->instance->physicalDevice;

    /* The Vulkan spec (git aaed022) says:
     *

@@ -3794,7 +3786,7 @@ void anv_GetBufferMemoryRequirements(
     * only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
     * structure for the physical device is supported.
     */
-   uint32_t memory_types = (1ull << pdevice->memory.type_count) - 1;
+   uint32_t memory_types = (1ull << device->physical->memory.type_count) - 1;

    /* Base alignment requirement of a cache line */
    uint32_t alignment = 16;

@@ -3850,7 +3842,6 @@ void anv_GetImageMemoryRequirements(
 {
    ANV_FROM_HANDLE(anv_image, image, _image);
    ANV_FROM_HANDLE(anv_device, device, _device);
-   struct anv_physical_device *pdevice = &device->instance->physicalDevice;

    /* The Vulkan spec (git aaed022) says:
     *

@@ -3861,7 +3852,7 @@ void anv_GetImageMemoryRequirements(
     *
     * All types are currently supported for images.
     */
-   uint32_t memory_types = (1ull << pdevice->memory.type_count) - 1;
+   uint32_t memory_types = (1ull << device->physical->memory.type_count) - 1;

    /* We must have image allocated or imported at this point. According to the
     * specification, external images must have been bound to memory before

@@ -3888,7 +3879,6 @@ void anv_GetImageMemoryRequirements2(
    vk_foreach_struct_const(ext, pInfo->pNext) {
       switch (ext->sType) {
       case VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO: {
-         struct anv_physical_device *pdevice = &device->instance->physicalDevice;
         const VkImagePlaneMemoryRequirementsInfo *plane_reqs =
            (const VkImagePlaneMemoryRequirementsInfo *) ext;
         uint32_t plane = anv_image_aspect_to_plane(image->aspects,

@@ -3907,7 +3897,7 @@ void anv_GetImageMemoryRequirements2(
          * All types are currently supported for images.
          */
         pMemoryRequirements->memoryRequirements.memoryTypeBits =
-           (1ull << pdevice->memory.type_count) - 1;
+           (1ull << device->physical->memory.type_count) - 1;

        /* We must have image allocated or imported at this point. According to the
         * specification, external images must have been bound to memory before

@@ -4148,7 +4138,6 @@ VkResult anv_CreateBuffer(
     VkBuffer* pBuffer)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
-   struct anv_physical_device *pdevice = &device->instance->physicalDevice;
    struct anv_buffer *buffer;

    /* Don't allow creating buffers bigger than our address space. The real

@@ -4156,7 +4145,7 @@ VkResult anv_CreateBuffer(
     * doing so to cause roll-over. However, no one has any business
     * allocating a buffer larger than our GTT size.
     */
-   if (pCreateInfo->size > pdevice->gtt_size)
+   if (pCreateInfo->size > device->physical->gtt_size)
       return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);

    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
@@ -594,7 +594,7 @@ anv_image_create(VkDevice _device,
       const VkImageDrmFormatModifierListCreateInfoEXT *mod_info =
          vk_find_struct_const(pCreateInfo->pNext,
                               IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT);
-      isl_mod_info = choose_drm_format_mod(&device->instance->physicalDevice,
+      isl_mod_info = choose_drm_format_mod(device->physical,
                                           mod_info->drmFormatModifierCount,
                                           mod_info->pDrmFormatModifiers);
       assert(isl_mod_info);
@@ -98,9 +98,8 @@ VkResult anv_InitializePerformanceApiINTEL(
     const VkInitializePerformanceApiInfoINTEL* pInitializeInfo)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
-   const struct anv_physical_device *pdevice = &device->instance->physicalDevice;

-   if (!pdevice->perf)
+   if (!device->physical->perf)
       return VK_ERROR_EXTENSION_NOT_PRESENT;

    /* Not much to do here */

@@ -113,9 +112,8 @@ VkResult anv_GetPerformanceParameterINTEL(
     VkPerformanceValueINTEL* pValue)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
-   const struct anv_physical_device *pdevice = &device->instance->physicalDevice;

-   if (!pdevice->perf)
+   if (!device->physical->perf)
       return VK_ERROR_EXTENSION_NOT_PRESENT;

    VkResult result = VK_SUCCESS;

@@ -155,15 +153,14 @@ VkResult anv_AcquirePerformanceConfigurationINTEL(
     VkPerformanceConfigurationINTEL* pConfiguration)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
-   const struct anv_physical_device *pdevice = &device->instance->physicalDevice;

    struct gen_perf_registers *perf_config =
-      gen_perf_load_configuration(pdevice->perf, device->fd,
+      gen_perf_load_configuration(device->physical->perf, device->fd,
                                   GEN_PERF_QUERY_GUID_MDAPI);
    if (!perf_config)
       return VK_INCOMPLETE;

-   int ret = gen_perf_store_configuration(pdevice->perf, device->fd,
+   int ret = gen_perf_store_configuration(device->physical->perf, device->fd,
                                           perf_config, NULL /* guid */);
    if (ret < 0) {
       ralloc_free(perf_config);
@@ -134,8 +134,7 @@ anv_shader_compile_to_nir(struct anv_device *device,
                           gl_shader_stage stage,
                           const VkSpecializationInfo *spec_info)
 {
-   const struct anv_physical_device *pdevice =
-      &device->instance->physicalDevice;
+   const struct anv_physical_device *pdevice = device->physical;
    const struct brw_compiler *compiler = pdevice->compiler;
    const nir_shader_compiler_options *nir_options =
       compiler->glsl_compiler_options[stage].NirOptions;

@@ -624,7 +623,7 @@ anv_pipeline_stage_get_nir(struct anv_pipeline *pipeline,
                            struct anv_pipeline_stage *stage)
 {
    const struct brw_compiler *compiler =
-      pipeline->device->instance->physicalDevice.compiler;
+      pipeline->device->physical->compiler;
    const nir_shader_compiler_options *nir_options =
       compiler->glsl_compiler_options[stage->stage].NirOptions;
    nir_shader *nir;

@@ -658,8 +657,7 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
                        struct anv_pipeline_stage *stage,
                        struct anv_pipeline_layout *layout)
 {
-   const struct anv_physical_device *pdevice =
-      &pipeline->device->instance->physicalDevice;
+   const struct anv_physical_device *pdevice = pipeline->device->physical;
    const struct brw_compiler *compiler = pdevice->compiler;

    struct brw_stage_prog_data *prog_data = &stage->prog_data.base;

@@ -1114,8 +1112,7 @@ anv_pipeline_compile_graphics(struct anv_pipeline *pipeline,
    };
    int64_t pipeline_start = os_time_get_nano();

-   const struct brw_compiler *compiler =
-      pipeline->device->instance->physicalDevice.compiler;
+   const struct brw_compiler *compiler = pipeline->device->physical->compiler;
    struct anv_pipeline_stage stages[MESA_SHADER_STAGES] = {};

    pipeline->active_stages = 0;

@@ -1466,8 +1463,7 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
    };
    int64_t pipeline_start = os_time_get_nano();

-   const struct brw_compiler *compiler =
-      pipeline->device->instance->physicalDevice.compiler;
+   const struct brw_compiler *compiler = pipeline->device->physical->compiler;

    struct anv_pipeline_stage stage = {
       .stage = MESA_SHADER_COMPUTE,
@@ -465,7 +465,7 @@ anv_pipeline_cache_load(struct anv_pipeline_cache *cache,
                         const void *data, size_t size)
 {
    struct anv_device *device = cache->device;
-   struct anv_physical_device *pdevice = &device->instance->physicalDevice;
+   struct anv_physical_device *pdevice = device->physical;

    if (cache->cache == NULL)
       return;

@@ -554,7 +554,6 @@ VkResult anv_GetPipelineCacheData(
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
    ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
-   struct anv_physical_device *pdevice = &device->instance->physicalDevice;

    struct blob blob;
    if (pData) {

@@ -569,7 +568,7 @@ VkResult anv_GetPipelineCacheData(
       .vendor_id = 0x8086,
       .device_id = device->chipset_id,
    };
-   memcpy(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE);
+   memcpy(header.uuid, device->physical->pipeline_cache_uuid, VK_UUID_SIZE);
    blob_write_bytes(&blob, &header, sizeof(header));

    uint32_t count = 0;

@@ -656,7 +655,7 @@ anv_device_search_for_kernel(struct anv_device *device,
    }

 #ifdef ENABLE_SHADER_CACHE
-   struct disk_cache *disk_cache = device->instance->physicalDevice.disk_cache;
+   struct disk_cache *disk_cache = device->physical->disk_cache;
    if (disk_cache && device->instance->pipeline_cache_enabled) {
       cache_key cache_key;
       disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);

@@ -717,7 +716,7 @@ anv_device_upload_kernel(struct anv_device *device,
       return NULL;

 #ifdef ENABLE_SHADER_CACHE
-   struct disk_cache *disk_cache = device->instance->physicalDevice.disk_cache;
+   struct disk_cache *disk_cache = device->physical->disk_cache;
    if (disk_cache) {
       struct blob binary;
       blob_init(&binary);
@@ -1209,6 +1209,7 @@ struct anv_device {
     VkAllocationCallbacks                       alloc;

     struct anv_instance *                       instance;
+    struct anv_physical_device *                physical;
     uint32_t                                    chipset_id;
     bool                                        no_hw;
     struct gen_device_info                      info;

@@ -1272,7 +1273,7 @@ struct anv_device {
 static inline struct anv_state_pool *
 anv_binding_table_pool(struct anv_device *device)
 {
-   if (device->instance->physicalDevice.use_softpin)
+   if (device->physical->use_softpin)
       return &device->binding_table_pool;
    else
       return &device->surface_state_pool;

@@ -1280,7 +1281,7 @@ anv_binding_table_pool(struct anv_device *device)

 static inline struct anv_state
 anv_binding_table_pool_alloc(struct anv_device *device) {
-   if (device->instance->physicalDevice.use_softpin)
+   if (device->physical->use_softpin)
       return anv_state_pool_alloc(&device->binding_table_pool,
                                   device->binding_table_pool.block_size, 0);
    else
@@ -569,7 +569,7 @@ anv_queue_submit_simple_batch(struct anv_queue *queue,
    if (!submit)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

-   bool has_syncobj_wait = device->instance->physicalDevice.has_syncobj_wait;
+   bool has_syncobj_wait = device->physical->has_syncobj_wait;
    VkResult result;
    uint32_t syncobj;
    struct anv_bo *batch_bo, *sync_bo;

@@ -720,7 +720,7 @@ anv_queue_submit(struct anv_queue *queue,
 {
    ANV_FROM_HANDLE(anv_fence, fence, _fence);
    struct anv_device *device = queue->device;
-   UNUSED struct anv_physical_device *pdevice = &device->instance->physicalDevice;
+   UNUSED struct anv_physical_device *pdevice = device->physical;
    struct anv_queue_submit *submit = anv_queue_submit_alloc(device);
    if (!submit)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

@@ -1099,7 +1099,7 @@ VkResult anv_CreateFence(
    if (fence == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

-   if (device->instance->physicalDevice.has_syncobj_wait) {
+   if (device->physical->has_syncobj_wait) {
       fence->permanent.type = ANV_FENCE_TYPE_SYNCOBJ;

       uint32_t create_flags = 0;

@@ -1728,7 +1728,7 @@ binary_semaphore_create(struct anv_device *device,
                         struct anv_semaphore_impl *impl,
                         bool exportable)
 {
-   if (device->instance->physicalDevice.has_syncobj) {
+   if (device->physical->has_syncobj) {
       impl->type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
       impl->syncobj = anv_gem_syncobj_create(device, 0);
      if (!impl->syncobj)

@@ -1807,7 +1807,7 @@ VkResult anv_CreateSemaphore(
    } else if (handleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
       assert(handleTypes == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT);
       assert(sem_type == VK_SEMAPHORE_TYPE_BINARY_KHR);
-      if (device->instance->physicalDevice.has_syncobj) {
+      if (device->physical->has_syncobj) {
         semaphore->permanent.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
         semaphore->permanent.syncobj = anv_gem_syncobj_create(device, 0);
         if (!semaphore->permanent.syncobj) {

@@ -1970,7 +1970,7 @@ VkResult anv_ImportSemaphoreFdKHR(

    switch (pImportSemaphoreFdInfo->handleType) {
    case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
-      if (device->instance->physicalDevice.has_syncobj) {
+      if (device->physical->has_syncobj) {
         new_impl.type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;

         new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);

@@ -2011,7 +2011,7 @@ VkResult anv_ImportSemaphoreFdKHR(
       break;

    case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
-      if (device->instance->physicalDevice.has_syncobj) {
+      if (device->physical->has_syncobj) {
         new_impl = (struct anv_semaphore_impl) {
            .type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ,
            .syncobj = anv_gem_syncobj_create(device, 0),
@@ -213,7 +213,7 @@ VkResult anv_CreateSwapchainKHR(
     VkSwapchainKHR* pSwapchain)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
-   struct wsi_device *wsi_device = &device->instance->physicalDevice.wsi_device;
+   struct wsi_device *wsi_device = &device->physical->wsi_device;
    const VkAllocationCallbacks *alloc;

    if (pAllocator)

@@ -278,10 +278,9 @@ VkResult anv_AcquireNextImage2KHR(
     uint32_t* pImageIndex)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
-   struct anv_physical_device *pdevice = &device->instance->physicalDevice;

-   return wsi_common_acquire_next_image2(&pdevice->wsi_device, _device,
-                                         pAcquireInfo, pImageIndex);
+   return wsi_common_acquire_next_image2(&device->physical->wsi_device,
+                                         _device, pAcquireInfo, pImageIndex);
 }

 VkResult anv_QueuePresentKHR(

@@ -289,10 +288,8 @@ VkResult anv_QueuePresentKHR(
     const VkPresentInfoKHR* pPresentInfo)
 {
    ANV_FROM_HANDLE(anv_queue, queue, _queue);
-   struct anv_physical_device *pdevice =
-      &queue->device->instance->physicalDevice;

-   return wsi_common_queue_present(&pdevice->wsi_device,
+   return wsi_common_queue_present(&queue->device->physical->wsi_device,
                                    anv_device_to_handle(queue->device),
                                    _queue, 0,
                                    pPresentInfo);
@@ -241,7 +241,7 @@ anv_DisplayPowerControlEXT(VkDevice _device,
    ANV_FROM_HANDLE(anv_device, device, _device);

    return wsi_display_power_control(
-      _device, &device->instance->physicalDevice.wsi_device,
+      _device, &device->physical->wsi_device,
       display, display_power_info);
 }

@@ -263,7 +263,7 @@ anv_RegisterDeviceEventEXT(VkDevice _device,
    fence->permanent.type = ANV_FENCE_TYPE_WSI;

    ret = wsi_register_device_event(_device,
-                                   &device->instance->physicalDevice.wsi_device,
+                                   &device->physical->wsi_device,
                                    device_event_info,
                                    allocator,
                                    &fence->permanent.fence_wsi);

@@ -293,7 +293,7 @@ anv_RegisterDisplayEventEXT(VkDevice _device,
    fence->permanent.type = ANV_FENCE_TYPE_WSI;

    ret = wsi_register_display_event(
-      _device, &device->instance->physicalDevice.wsi_device,
+      _device, &device->physical->wsi_device,
       display, display_event_info, allocator, &(fence->permanent.fence_wsi));

    if (ret == VK_SUCCESS)

@@ -312,6 +312,6 @@ anv_GetSwapchainCounterEXT(VkDevice _device,
    ANV_FROM_HANDLE(anv_device, device, _device);

    return wsi_get_swapchain_counter(
-      _device, &device->instance->physicalDevice.wsi_device,
+      _device, &device->physical->wsi_device,
       swapchain, flag_bits, value);
 }
@@ -159,7 +159,7 @@ genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
       sba.InstructionAccessUpperBoundModifyEnable = true;
 # endif
 # if (GEN_GEN >= 9)
-      if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
+      if (cmd_buffer->device->physical->use_softpin) {
          sba.BindlessSurfaceStateBaseAddress = (struct anv_address) {
            .bo = device->surface_state_pool.block_pool.bo,
            .offset = 0,

@@ -1820,7 +1820,7 @@ genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer,
    emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG3_num), l3cr3);

 #if GEN_IS_HASWELL
-   if (cmd_buffer->device->instance->physicalDevice.cmd_parser_version >= 4) {
+   if (cmd_buffer->device->physical->cmd_parser_version >= 4) {
       /* Enable L3 atomics on HSW if we have a DC partition, otherwise keep
        * them disabled to avoid crashing the system hard.
        */

@@ -1845,7 +1845,7 @@ genX(cmd_buffer_apply_pipe_flushes)(struct anv_cmd_buffer *cmd_buffer)
 {
    enum anv_pipe_bits bits = cmd_buffer->state.pending_pipe_bits;

-   if (cmd_buffer->device->instance->physicalDevice.always_flush_cache)
+   if (cmd_buffer->device->physical->always_flush_cache)
       bits |= ANV_PIPE_FLUSH_BITS | ANV_PIPE_INVALIDATE_BITS;

    /* Flushes are pipelined while invalidations are handled immediately.

@@ -2209,7 +2209,7 @@ emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
     * softpin then we always keep all user-allocated memory objects resident.
     */
    const bool need_client_mem_relocs =
-      !cmd_buffer->device->instance->physicalDevice.use_softpin;
+      !cmd_buffer->device->physical->use_softpin;

    for (uint32_t s = 0; s < map->surface_count; s++) {
       struct anv_pipeline_binding *binding = &map->surface_to_descriptor[s];

@@ -3837,7 +3837,7 @@ verify_cmd_parser(const struct anv_device *device,
                   int required_version,
                   const char *function)
 {
-   if (device->instance->physicalDevice.cmd_parser_version < required_version) {
+   if (device->physical->cmd_parser_version < required_version) {
      return vk_errorf(device->instance, device->instance,
                       VK_ERROR_FEATURE_NOT_PRESENT,
                       "cmd parser version %d is required for %s",

@@ -4081,7 +4081,7 @@ genX(flush_pipeline_select)(struct anv_cmd_buffer *cmd_buffer,
     * really know why.
     */
    const uint32_t subslices =
-      MAX2(cmd_buffer->device->instance->physicalDevice.subslice_total, 1);
+      MAX2(cmd_buffer->device->physical->subslice_total, 1);
    anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_VFE_STATE), vfe) {
       vfe.MaximumNumberofThreads =
          devinfo->max_cs_threads * subslices - 1;

@@ -4242,7 +4242,7 @@ genX(cmd_buffer_set_binding_for_gen8_vb_flush)(struct anv_cmd_buffer *cmd_buffer
     uint32_t vb_size)
 {
    if (GEN_GEN < 8 || GEN_GEN > 9 ||
-       !cmd_buffer->device->instance->physicalDevice.use_softpin)
+       !cmd_buffer->device->physical->use_softpin)
       return;

    struct anv_vb_cache_range *bound, *dirty;

@@ -4290,7 +4290,7 @@ genX(cmd_buffer_update_dirty_vbs_for_gen8_vb_flush)(struct anv_cmd_buffer *cmd_b
     uint64_t vb_used)
 {
    if (GEN_GEN < 8 || GEN_GEN > 9 ||
-       !cmd_buffer->device->instance->physicalDevice.use_softpin)
+       !cmd_buffer->device->physical->use_softpin)
       return;

    if (access_type == RANDOM) {
@@ -2192,9 +2192,7 @@ compute_pipeline_create(
     VkPipeline* pPipeline)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
-   const struct anv_physical_device *physical_device =
-      &device->instance->physicalDevice;
-   const struct gen_device_info *devinfo = &physical_device->info;
+   const struct gen_device_info *devinfo = &device->info;
    struct anv_pipeline *pipeline;
    VkResult result;

@@ -2267,7 +2265,7 @@ compute_pipeline_create(
       ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads +
             cs_prog_data->push.cross_thread.regs, 2);

-   const uint32_t subslices = MAX2(physical_device->subslice_total, 1);
+   const uint32_t subslices = MAX2(device->physical->subslice_total, 1);

    const struct anv_shader_bin *cs_bin =
       pipeline->shaders[MESA_SHADER_COMPUTE];
@@ -49,7 +49,7 @@ VkResult genX(CreateQueryPool)(
     VkQueryPool* pQueryPool)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
-   const struct anv_physical_device *pdevice = &device->instance->physicalDevice;
+   const struct anv_physical_device *pdevice = device->physical;
    struct anv_query_pool *pool;
    VkResult result;

@@ -304,8 +304,7 @@ genX(init_device_state)(struct anv_device *device)
     *
     * This is only safe on kernels with context isolation support.
     */
-   if (GEN_GEN >= 8 &&
-       device->instance->physicalDevice.has_context_isolation) {
+   if (GEN_GEN >= 8 && device->physical->has_context_isolation) {
       UNUSED uint32_t tmp_reg;
 #if GEN_GEN >= 9
       anv_pack_struct(&tmp_reg, GENX(CS_DEBUG_MODE2),

@@ -402,8 +401,6 @@ VkResult genX(CreateSampler)(
     VkSampler* pSampler)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
-   const struct anv_physical_device *pdevice =
-      &device->instance->physicalDevice;
    struct anv_sampler *sampler;

    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);

@@ -461,7 +458,7 @@ VkResult genX(CreateSampler)(
       }
    }

-   if (pdevice->has_bindless_samplers) {
+   if (device->physical->has_bindless_samplers) {
       /* If we have bindless, allocate enough samplers. We allocate 32 bytes
        * for each sampler instead of 16 bytes because we want all bindless
        * samplers to be 32-byte aligned so we don't have to use indirect
@@ -27,13 +27,11 @@

 int main(int argc, char **argv)
 {
-   struct anv_instance instance = {
-      .physicalDevice = {
-         .use_softpin = true,
-      },
+   struct anv_physical_device physical_device = {
+      .use_softpin = true,
    };
    struct anv_device device = {
-      .instance = &instance,
+      .physical = &physical_device,
    };
    struct anv_block_pool pool;

@@ -111,9 +111,9 @@ static void validate_monotonic(int32_t **blocks)

 static void run_test()
 {
-   struct anv_instance instance = { };
+   struct anv_physical_device physical_device = { };
    struct anv_device device = {
-      .instance = &instance,
+      .physical = &physical_device,
    };
    struct anv_block_pool pool;

@@ -36,9 +36,9 @@

 int main(int argc, char **argv)
 {
-   struct anv_instance instance = { };
+   struct anv_physical_device physical_device = { };
    struct anv_device device = {
-      .instance = &instance,
+      .physical = &physical_device,
    };
    struct anv_state_pool state_pool;

@@ -35,9 +35,9 @@

 int main(int argc, char **argv)
 {
-   struct anv_instance instance = { };
+   struct anv_physical_device physical_device = { };
    struct anv_device device = {
-      .instance = &instance,
+      .physical = &physical_device,
    };
    struct anv_state_pool state_pool;

@@ -56,9 +56,9 @@ static void *alloc_states(void *_job)

 static void run_test()
 {
-   struct anv_instance instance = { };
+   struct anv_physical_device physical_device = { };
    struct anv_device device = {
-      .instance = &instance,
+      .physical = &physical_device,
    };
    struct anv_state_pool state_pool;

@@ -27,13 +27,11 @@

 int main(int argc, char **argv)
 {
-   struct anv_instance instance = {
-      .physicalDevice = {
-         .use_softpin = true,
-      },
+   struct anv_physical_device physical_device = {
+      .use_softpin = true,
    };
    struct anv_device device = {
-      .instance = &instance,
+      .physical = &physical_device,
    };
    struct anv_state_pool state_pool;