hasvk: Support building on non-Intel
Should help Eric build test releases on their MacBook :-)
Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Cc: mesa-stable
Reviewed-by: Eric Engestrom <eric@igalia.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/25882>
(cherry picked from commit c8192c1c93)
This commit is contained in:

committed by
Eric Engestrom

parent
4a30434a0c
commit
eb6e3a2a89
@@ -1204,7 +1204,7 @@
|
||||
"description": "hasvk: Support builiding on non-Intel",
|
||||
"nominated": true,
|
||||
"nomination_type": 0,
|
||||
"resolution": 0,
|
||||
"resolution": 1,
|
||||
"main_sha": null,
|
||||
"because_sha": null,
|
||||
"notes": null
|
||||
|
@@ -1459,7 +1459,6 @@ elif with_intel_vk or with_intel_hasvk
|
||||
error('Intel "Anvil" Vulkan driver requires the dl_iterate_phdr function')
|
||||
endif
|
||||
|
||||
# only used in Iris and ANV
|
||||
if with_any_intel and ['x86', 'x86_64'].contains(host_machine.cpu_family())
|
||||
pre_args += '-DSUPPORT_INTEL_INTEGRATED_GPUS'
|
||||
endif
|
||||
@@ -1468,9 +1467,6 @@ if get_option('intel-xe-kmd').enabled()
|
||||
pre_args += '-DINTEL_XE_KMD_SUPPORTED'
|
||||
endif
|
||||
|
||||
if with_intel_hasvk and host_machine.cpu_family().startswith('x86') == false
|
||||
error('Intel "hasvk" Vulkan driver requires x86 or x86_64 CPU family')
|
||||
endif
|
||||
|
||||
if with_gallium_crocus and host_machine.cpu_family().startswith('x86') == false
|
||||
error('Intel "crocus" Gallium driver requires x86 or x86_64 CPU family')
|
||||
|
@@ -1899,6 +1899,7 @@ setup_execbuf_for_cmd_buffers(struct anv_execbuf *execbuf,
|
||||
anv_cmd_buffer_process_relocs(cmd_buffers[0], &cmd_buffers[0]->surface_relocs);
|
||||
}
|
||||
|
||||
#ifdef SUPPORT_INTEL_INTEGRATED_GPUS
|
||||
if (device->physical->memory.need_flush) {
|
||||
__builtin_ia32_mfence();
|
||||
for (uint32_t i = 0; i < num_cmd_buffers; i++) {
|
||||
@@ -1908,6 +1909,7 @@ setup_execbuf_for_cmd_buffers(struct anv_execbuf *execbuf,
|
||||
}
|
||||
__builtin_ia32_mfence();
|
||||
}
|
||||
#endif
|
||||
|
||||
struct anv_batch *batch = &cmd_buffers[0]->batch;
|
||||
execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
|
||||
@@ -1986,8 +1988,10 @@ setup_utrace_execbuf(struct anv_execbuf *execbuf, struct anv_queue *queue,
|
||||
flush->batch_bo->exec_obj_index = last_idx;
|
||||
}
|
||||
|
||||
#ifdef SUPPORT_INTEL_INTEGRATED_GPUS
|
||||
if (device->physical->memory.need_flush)
|
||||
intel_flush_range(flush->batch_bo->map, flush->batch_bo->size);
|
||||
#endif
|
||||
|
||||
execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
|
||||
.buffers_ptr = (uintptr_t) execbuf->objects,
|
||||
@@ -2421,8 +2425,10 @@ anv_queue_submit_simple_batch(struct anv_queue *queue,
|
||||
return result;
|
||||
|
||||
memcpy(batch_bo->map, batch->start, batch_size);
|
||||
#ifdef SUPPORT_INTEL_INTEGRATED_GPUS
|
||||
if (device->physical->memory.need_flush)
|
||||
intel_flush_range(batch_bo->map, batch_size);
|
||||
#endif
|
||||
|
||||
struct anv_execbuf execbuf = {
|
||||
.alloc = &queue->device->vk.alloc,
|
||||
|
@@ -2333,8 +2333,10 @@ anv_device_init_trivial_batch(struct anv_device *device)
|
||||
anv_batch_emit(&batch, GFX7_MI_BATCH_BUFFER_END, bbe);
|
||||
anv_batch_emit(&batch, GFX7_MI_NOOP, noop);
|
||||
|
||||
#ifdef SUPPORT_INTEL_INTEGRATED_GPUS
|
||||
if (device->physical->memory.need_flush)
|
||||
intel_flush_range(batch.start, batch.next - batch.start);
|
||||
#endif
|
||||
|
||||
return VK_SUCCESS;
|
||||
}
|
||||
@@ -3480,8 +3482,10 @@ VkResult anv_FlushMappedMemoryRanges(
|
||||
if (!device->physical->memory.need_flush)
|
||||
return VK_SUCCESS;
|
||||
|
||||
#ifdef SUPPORT_INTEL_INTEGRATED_GPUS
|
||||
/* Make sure the writes we're flushing have landed. */
|
||||
__builtin_ia32_mfence();
|
||||
#endif
|
||||
|
||||
for (uint32_t i = 0; i < memoryRangeCount; i++) {
|
||||
ANV_FROM_HANDLE(anv_device_memory, mem, pMemoryRanges[i].memory);
|
||||
@@ -3492,9 +3496,11 @@ VkResult anv_FlushMappedMemoryRanges(
|
||||
if (map_offset >= mem->map_size)
|
||||
continue;
|
||||
|
||||
#ifdef SUPPORT_INTEL_INTEGRATED_GPUS
|
||||
intel_flush_range(mem->map + map_offset,
|
||||
MIN2(pMemoryRanges[i].size,
|
||||
mem->map_size - map_offset));
|
||||
#endif
|
||||
}
|
||||
|
||||
return VK_SUCCESS;
|
||||
@@ -3519,13 +3525,17 @@ VkResult anv_InvalidateMappedMemoryRanges(
|
||||
if (map_offset >= mem->map_size)
|
||||
continue;
|
||||
|
||||
#ifdef SUPPORT_INTEL_INTEGRATED_GPUS
|
||||
intel_invalidate_range(mem->map + map_offset,
|
||||
MIN2(pMemoryRanges[i].size,
|
||||
mem->map_size - map_offset));
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef SUPPORT_INTEL_INTEGRATED_GPUS
|
||||
/* Make sure no reads get moved up above the invalidate. */
|
||||
__builtin_ia32_mfence();
|
||||
#endif
|
||||
|
||||
return VK_SUCCESS;
|
||||
}
|
||||
|
@@ -1416,7 +1416,7 @@ anv_batch_emit_reloc(struct anv_batch *batch,
|
||||
static inline void
|
||||
write_reloc(const struct anv_device *device, void *p, uint64_t v, bool flush)
|
||||
{
|
||||
unsigned reloc_size = 0;
|
||||
UNUSED unsigned reloc_size = 0;
|
||||
if (device->info->ver >= 8) {
|
||||
reloc_size = sizeof(uint64_t);
|
||||
*(uint64_t *)p = intel_canonical_address(v);
|
||||
@@ -1425,8 +1425,10 @@ write_reloc(const struct anv_device *device, void *p, uint64_t v, bool flush)
|
||||
*(uint32_t *)p = v;
|
||||
}
|
||||
|
||||
#ifdef SUPPORT_INTEL_INTEGRATED_GPUS
|
||||
if (flush && device->physical->memory.need_flush)
|
||||
intel_flush_range(p, reloc_size);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline uint64_t
|
||||
|
@@ -97,10 +97,12 @@ VkResult anv_QueuePresentKHR(
|
||||
|
||||
if (device->debug_frame_desc) {
|
||||
device->debug_frame_desc->frame_id++;
|
||||
#ifdef SUPPORT_INTEL_INTEGRATED_GPUS
|
||||
if (device->physical->memory.need_flush) {
|
||||
intel_flush_range(device->debug_frame_desc,
|
||||
sizeof(*device->debug_frame_desc));
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
result = vk_queue_wait_before_present(&queue->vk, pPresentInfo);
|
||||
|
Reference in New Issue
Block a user