anv: move internal RT shaders to the internal cache

Those shaders are just like the blorp ones.

v2: Use a single internal cache for blorp/RT (Jason)

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Fixes: 7f1e82306c ("anv: Switch to the new common pipeline cache")
Reviewed-by: Jason Ekstrand <jason.ekstrand@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/16741>
This commit is contained in:
Lionel Landwerlin
2022-05-27 11:27:55 +03:00
committed by Marge Bot
parent 0eee071038
commit 09caa8902c
4 changed files with 41 additions and 38 deletions

View File

@@ -32,7 +32,7 @@ lookup_blorp_shader(struct blorp_batch *batch,
    struct anv_device *device = blorp->driver_ctx;
    struct anv_shader_bin *bin =
-      anv_device_search_for_kernel(device, device->blorp_cache,
+      anv_device_search_for_kernel(device, device->internal_cache,
                                    key, key_size, NULL);
    if (!bin)
       return false;
@@ -65,7 +65,7 @@ upload_blorp_shader(struct blorp_batch *batch, uint32_t stage,
    };
    struct anv_shader_bin *bin =
-      anv_device_upload_kernel(device, device->blorp_cache, stage,
+      anv_device_upload_kernel(device, device->internal_cache, stage,
                                key, key_size, kernel, kernel_size,
                                prog_data, prog_data_size,
                                NULL, 0, NULL, &bind_map);
@@ -84,23 +84,9 @@ upload_blorp_shader(struct blorp_batch *batch, uint32_t stage,
    return true;
 }
-bool
+void
 anv_device_init_blorp(struct anv_device *device)
 {
-   /* BLORP needs its own pipeline cache because, unlike the rest of ANV, it
-    * won't work at all without the cache. It depends on it for shaders to
-    * remain resident while it runs. Therefore, we need a special cache just
-    * for BLORP that's forced to always be enabled.
-    */
-   struct vk_pipeline_cache_create_info pcc_info = {
-      .force_enable = true,
-   };
-   device->blorp_cache =
-      vk_pipeline_cache_create(&device->vk, &pcc_info, NULL);
-   if (device->blorp_cache == NULL)
-      return false;
    const struct blorp_config config = {
       .use_mesh_shading = device->physical->vk.supported_extensions.NV_mesh_shader,
    };
@@ -134,13 +120,11 @@ anv_device_init_blorp(struct anv_device *device)
    default:
       unreachable("Unknown hardware generation");
    }
-   return true;
 }
 void
 anv_device_finish_blorp(struct anv_device *device)
 {
-   vk_pipeline_cache_destroy(device->blorp_cache, NULL);
    blorp_finish(&device->blorp);
 }

View File

@@ -3462,15 +3462,27 @@ VkResult anv_CreateDevice(
       goto fail_trivial_batch_bo_and_scratch_pool;
    }
-   result = anv_device_init_rt_shaders(device);
-   if (result != VK_SUCCESS)
-      goto fail_default_pipeline_cache;
-
-   if (!anv_device_init_blorp(device)) {
+   /* Internal shaders need their own pipeline cache because, unlike the rest
+    * of ANV, it won't work at all without the cache. It depends on it for
+    * shaders to remain resident while it runs. Therefore, we need a special
+    * cache just for BLORP/RT that's forced to always be enabled.
+    */
+   pcc_info.force_enable = true;
+   device->internal_cache =
+      vk_pipeline_cache_create(&device->vk, &pcc_info, NULL);
+   if (device->internal_cache == NULL) {
       result = vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
-      goto fail_rt_shaders;
+      goto fail_default_pipeline_cache;
    }
+   result = anv_device_init_rt_shaders(device);
+   if (result != VK_SUCCESS) {
+      result = vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+      goto fail_internal_cache;
+   }
+
+   anv_device_init_blorp(device);
    anv_device_init_border_colors(device);
    anv_device_perf_init(device);
@@ -3481,8 +3493,8 @@ VkResult anv_CreateDevice(
    return VK_SUCCESS;
-fail_rt_shaders:
-   anv_device_finish_rt_shaders(device);
+fail_internal_cache:
+   vk_pipeline_cache_destroy(device->internal_cache, NULL);
 fail_default_pipeline_cache:
    vk_pipeline_cache_destroy(device->default_pipeline_cache, NULL);
 fail_trivial_batch_bo_and_scratch_pool:
@@ -3556,6 +3568,7 @@ void anv_DestroyDevice(
    anv_device_finish_rt_shaders(device);
+   vk_pipeline_cache_destroy(device->internal_cache, NULL);
    vk_pipeline_cache_destroy(device->default_pipeline_cache, NULL);
 #ifdef HAVE_VALGRIND

View File

@@ -3062,7 +3062,7 @@ anv_device_init_rt_shaders(struct anv_device *device)
       },
    };
    device->rt_trampoline =
-      anv_device_search_for_kernel(device, device->default_pipeline_cache,
+      anv_device_search_for_kernel(device, device->internal_cache,
                                    &trampoline_key, sizeof(trampoline_key),
                                    &cache_hit);
    if (device->rt_trampoline == NULL) {
@@ -3092,7 +3092,7 @@ anv_device_init_rt_shaders(struct anv_device *device)
       brw_compile_cs(device->physical->compiler, tmp_ctx, &params);
    device->rt_trampoline =
-      anv_device_upload_kernel(device, device->default_pipeline_cache,
+      anv_device_upload_kernel(device, device->internal_cache,
                                MESA_SHADER_COMPUTE,
                                &trampoline_key, sizeof(trampoline_key),
                                tramp_data,
@@ -3107,6 +3107,11 @@ anv_device_init_rt_shaders(struct anv_device *device)
       return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
    }
+   /* The cache already has a reference and it's not going anywhere so there
+    * is no need to hold a second reference.
+    */
+   anv_shader_bin_unref(device, device->rt_trampoline);
    struct brw_rt_trivial_return {
       char name[16];
       struct brw_bs_prog_key key;
@@ -3114,7 +3119,7 @@ anv_device_init_rt_shaders(struct anv_device *device)
       .name = "rt-trivial-ret",
    };
    device->rt_trivial_return =
-      anv_device_search_for_kernel(device, device->default_pipeline_cache,
+      anv_device_search_for_kernel(device, device->internal_cache,
                                    &return_key, sizeof(return_key),
                                    &cache_hit);
    if (device->rt_trivial_return == NULL) {
@@ -3140,7 +3145,7 @@ anv_device_init_rt_shaders(struct anv_device *device)
       brw_compile_bs(device->physical->compiler, tmp_ctx, &params);
    device->rt_trivial_return =
-      anv_device_upload_kernel(device, device->default_pipeline_cache,
+      anv_device_upload_kernel(device, device->internal_cache,
                                MESA_SHADER_CALLABLE,
                                &return_key, sizeof(return_key),
                                return_data, return_prog_data.base.program_size,
@@ -3149,12 +3154,15 @@ anv_device_init_rt_shaders(struct anv_device *device)
       ralloc_free(tmp_ctx);
-      if (device->rt_trivial_return == NULL) {
-         anv_shader_bin_unref(device, device->rt_trampoline);
+      if (device->rt_trivial_return == NULL)
          return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
-      }
    }
+
+   /* The cache already has a reference and it's not going anywhere so there
+    * is no need to hold a second reference.
+    */
+   anv_shader_bin_unref(device, device->rt_trivial_return);
+
    return VK_SUCCESS;
 }
@@ -3163,8 +3171,6 @@ anv_device_finish_rt_shaders(struct anv_device *device)
 {
    if (!device->vk.enabled_extensions.KHR_ray_tracing_pipeline)
       return;
-
-   anv_shader_bin_unref(device, device->rt_trampoline);
 }
VkResult VkResult

View File

@@ -1197,7 +1197,7 @@ struct anv_device {
    struct anv_state null_surface_state;
    struct vk_pipeline_cache * default_pipeline_cache;
-   struct vk_pipeline_cache * blorp_cache;
+   struct vk_pipeline_cache * internal_cache;
    struct blorp_context blorp;
    struct anv_state border_colors;
@@ -1318,7 +1318,7 @@ anv_mocs(const struct anv_device *device,
    return isl_mocs(&device->isl_dev, usage, bo && bo->is_external);
 }
-bool anv_device_init_blorp(struct anv_device *device);
+void anv_device_init_blorp(struct anv_device *device);
 void anv_device_finish_blorp(struct anv_device *device);
 enum anv_bo_alloc_flags {