vulkan/pipeline_cache: remove vk_device from vk_pipeline_cache_object

It is not necessary to store the extra pointer.

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21967>
This commit is contained in:
Daniel Schürmann
2023-03-16 15:35:48 +01:00
committed by Marge Bot
parent 4ac56e3e5a
commit 5daff41e27
8 changed files with 67 additions and 54 deletions

View File

@@ -2874,7 +2874,8 @@ tu_shaders_deserialize(struct vk_device *device,
struct blob_reader *blob);
static void
-tu_shaders_destroy(struct vk_pipeline_cache_object *object)
+tu_shaders_destroy(struct vk_device *device,
+                   struct vk_pipeline_cache_object *object)
{
struct tu_compiled_shaders *shaders =
container_of(object, struct tu_compiled_shaders, base);
@@ -2886,7 +2887,7 @@ tu_shaders_destroy(struct vk_pipeline_cache_object *object)
ralloc_free(shaders->safe_const_variants[i]);
vk_pipeline_cache_object_finish(&shaders->base);
-   vk_free(&object->device->alloc, shaders);
+   vk_free(&device->alloc, shaders);
}
const struct vk_pipeline_cache_object_ops tu_shaders_ops = {
@@ -3003,7 +3004,8 @@ tu_nir_shaders_deserialize(struct vk_device *device,
struct blob_reader *blob);
static void
-tu_nir_shaders_destroy(struct vk_pipeline_cache_object *object)
+tu_nir_shaders_destroy(struct vk_device *device,
+                       struct vk_pipeline_cache_object *object)
{
struct tu_nir_shaders *shaders =
container_of(object, struct tu_nir_shaders, base);
@@ -3012,7 +3014,7 @@ tu_nir_shaders_destroy(struct vk_pipeline_cache_object *object)
ralloc_free(shaders->nir[i]);
vk_pipeline_cache_object_finish(&shaders->base);
-   vk_free(&object->device->alloc, shaders);
+   vk_free(&device->alloc, shaders);
}
const struct vk_pipeline_cache_object_ops tu_nir_shaders_ops = {
@@ -3540,7 +3542,8 @@ done:;
* when compiling all stages, but make sure we don't leak.
*/
if (nir_shaders)
-      vk_pipeline_cache_object_unref(&nir_shaders->base);
+      vk_pipeline_cache_object_unref(&builder->device->vk,
+                                     &nir_shaders->base);
} else {
pipeline->compiled_shaders = compiled_shaders;
pipeline->nir_shaders = nir_shaders;
@@ -3579,10 +3582,12 @@ fail:
}
if (compiled_shaders)
-      vk_pipeline_cache_object_unref(&compiled_shaders->base);
+      vk_pipeline_cache_object_unref(&builder->device->vk,
+                                     &compiled_shaders->base);
if (nir_shaders)
-      vk_pipeline_cache_object_unref(&nir_shaders->base);
+      vk_pipeline_cache_object_unref(&builder->device->vk,
+                                     &nir_shaders->base);
return result;
}
@@ -4738,10 +4743,11 @@ tu_pipeline_finish(struct tu_pipeline *pipeline,
tu_bo_finish(dev, pipeline->pvtmem_bo);
if (pipeline->compiled_shaders)
-      vk_pipeline_cache_object_unref(&pipeline->compiled_shaders->base);
+      vk_pipeline_cache_object_unref(&dev->vk,
+                                     &pipeline->compiled_shaders->base);
if (pipeline->nir_shaders)
-      vk_pipeline_cache_object_unref(&pipeline->nir_shaders->base);
+      vk_pipeline_cache_object_unref(&dev->vk, &pipeline->nir_shaders->base);
for (unsigned i = 0; i < pipeline->num_sets; i++) {
if (pipeline->layouts[i])
@@ -4893,7 +4899,8 @@ static void
tu_pipeline_builder_finish(struct tu_pipeline_builder *builder)
{
if (builder->compiled_shaders)
-      vk_pipeline_cache_object_unref(&builder->compiled_shaders->base);
+      vk_pipeline_cache_object_unref(&builder->device->vk,
+                                     &builder->compiled_shaders->base);
ralloc_free(builder->mem_ctx);
}
@@ -5305,7 +5312,7 @@ tu_compute_pipeline_create(VkDevice device,
pipeline->program.cs_instrlen = v->instrlen;
-   vk_pipeline_cache_object_unref(&compiled->base);
+   vk_pipeline_cache_object_unref(&dev->vk, &compiled->base);
ralloc_free(pipeline_mem_ctx);
*pPipeline = tu_pipeline_to_handle(pipeline);
@@ -5314,7 +5321,7 @@ tu_compute_pipeline_create(VkDevice device,
fail:
if (compiled)
-      vk_pipeline_cache_object_unref(&compiled->base);
+      vk_pipeline_cache_object_unref(&dev->vk, &compiled->base);
ralloc_free(pipeline_mem_ctx);

View File

@@ -43,10 +43,12 @@ anv_shader_bin_deserialize(struct vk_device *device,
struct blob_reader *blob);
static void
-anv_shader_bin_destroy(struct vk_pipeline_cache_object *object)
+anv_shader_bin_destroy(struct vk_device *_device,
+                       struct vk_pipeline_cache_object *object)
{
struct anv_device *device =
-      container_of(object->device, struct anv_device, vk);
+      container_of(_device, struct anv_device, vk);
struct anv_shader_bin *shader =
container_of(object, struct anv_shader_bin, base);

View File

@@ -3063,7 +3063,7 @@ anv_shader_bin_ref(struct anv_shader_bin *shader)
static inline void
anv_shader_bin_unref(struct anv_device *device, struct anv_shader_bin *shader)
{
-   vk_pipeline_cache_object_unref(&shader->base);
+   vk_pipeline_cache_object_unref(&device->vk, &shader->base);
}
struct anv_pipeline_executable {

View File

@@ -41,10 +41,11 @@ anv_shader_bin_deserialize(struct vk_device *device,
struct blob_reader *blob);
static void
-anv_shader_bin_destroy(struct vk_pipeline_cache_object *object)
+anv_shader_bin_destroy(struct vk_device *_device,
+                       struct vk_pipeline_cache_object *object)
{
struct anv_device *device =
-      container_of(object->device, struct anv_device, vk);
+      container_of(_device, struct anv_device, vk);
struct anv_shader_bin *shader =
container_of(object, struct anv_shader_bin, base);

View File

@@ -2817,7 +2817,7 @@ anv_shader_bin_ref(struct anv_shader_bin *shader)
static inline void
anv_shader_bin_unref(struct anv_device *device, struct anv_shader_bin *shader)
{
-   vk_pipeline_cache_object_unref(&shader->base);
+   vk_pipeline_cache_object_unref(&device->vk, &shader->base);
}
struct anv_pipeline_executable {

View File

@@ -93,12 +93,13 @@ dzn_cached_blob_serialize(struct vk_pipeline_cache_object *object,
}
static void
-dzn_cached_blob_destroy(struct vk_pipeline_cache_object *object)
+dzn_cached_blob_destroy(struct vk_device *device,
+                        struct vk_pipeline_cache_object *object)
{
struct dzn_cached_blob *shader =
container_of(object, struct dzn_cached_blob, base);
-   vk_free(&shader->base.device->alloc, shader);
+   vk_free(&device->alloc, shader);
}
static struct vk_pipeline_cache_object *
@@ -544,7 +545,7 @@ dzn_pipeline_cache_lookup_dxil_shader(struct vk_pipeline_cache *cache,
*stage = info->stage;
out:
-   vk_pipeline_cache_object_unref(cache_obj);
+   vk_pipeline_cache_object_unref(cache->base.device, cache_obj);
return ret;
}
@@ -571,7 +572,7 @@ dzn_pipeline_cache_add_dxil_shader(struct vk_pipeline_cache *cache,
memcpy(info->data, bc->pShaderBytecode, bc->BytecodeLength);
cache_obj = vk_pipeline_cache_add_object(cache, cache_obj);
-   vk_pipeline_cache_object_unref(cache_obj);
+   vk_pipeline_cache_object_unref(cache->base.device, cache_obj);
}
struct dzn_cached_gfx_pipeline_header {
@@ -647,7 +648,7 @@ dzn_pipeline_cache_lookup_gfx_pipeline(struct dzn_graphics_pipeline *pipeline,
*cache_hit = true;
-   vk_pipeline_cache_object_unref(cache_obj);
+   vk_pipeline_cache_object_unref(cache->base.device, cache_obj);
return VK_SUCCESS;
}
@@ -702,7 +703,7 @@ dzn_pipeline_cache_add_gfx_pipeline(struct dzn_graphics_pipeline *pipeline,
}
cache_obj = vk_pipeline_cache_add_object(cache, cache_obj);
-   vk_pipeline_cache_object_unref(cache_obj);
+   vk_pipeline_cache_object_unref(cache->base.device, cache_obj);
}
static void
@@ -2333,7 +2334,7 @@ dzn_pipeline_cache_lookup_compute_pipeline(struct vk_pipeline_cache *cache,
*cache_hit = true;
out:
-   vk_pipeline_cache_object_unref(cache_obj);
+   vk_pipeline_cache_object_unref(cache->base.device, cache_obj);
return ret;
}
@@ -2353,7 +2354,7 @@ dzn_pipeline_cache_add_compute_pipeline(struct vk_pipeline_cache *cache,
memcpy((void *)cached_blob->data, dxil_hash, SHA1_DIGEST_LENGTH);
cache_obj = vk_pipeline_cache_add_object(cache, cache_obj);
-   vk_pipeline_cache_object_unref(cache_obj);
+   vk_pipeline_cache_object_unref(cache->base.device, cache_obj);
}
static VkResult

View File

@@ -83,12 +83,13 @@ raw_data_object_deserialize(struct vk_device *device,
}
static void
-raw_data_object_destroy(struct vk_pipeline_cache_object *object)
+raw_data_object_destroy(struct vk_device *device,
+                        struct vk_pipeline_cache_object *object)
{
struct raw_data_object *data_obj =
container_of(object, struct raw_data_object, base);
-   vk_free(&data_obj->base.device->alloc, data_obj);
+   vk_free(&device->alloc, data_obj);
}
static const struct vk_pipeline_cache_object_ops raw_data_object_ops = {
@@ -165,14 +166,14 @@ vk_pipeline_cache_remove_object(struct vk_pipeline_cache *cache,
_mesa_set_search_pre_hashed(cache->object_cache, hash, object);
if (entry && entry->key == (const void *)object) {
/* Drop the reference owned by the cache */
-      vk_pipeline_cache_object_unref(object);
+      vk_pipeline_cache_object_unref(cache->base.device, object);
_mesa_set_remove(cache->object_cache, entry);
}
vk_pipeline_cache_unlock(cache);
/* Drop our reference */
-   vk_pipeline_cache_object_unref(object);
+   vk_pipeline_cache_object_unref(cache->base.device, object);
}
/* Consumes references to both search and replace and produces a reference */
@@ -192,7 +193,7 @@ vk_pipeline_cache_replace_object(struct vk_pipeline_cache *cache,
if (entry) {
if (entry->key == (const void *)search) {
/* Drop the reference owned by the cache */
-         vk_pipeline_cache_object_unref(search);
+         vk_pipeline_cache_object_unref(cache->base.device, search);
entry->key = vk_pipeline_cache_object_ref(replace);
} else {
@@ -205,10 +206,10 @@ vk_pipeline_cache_replace_object(struct vk_pipeline_cache *cache,
}
vk_pipeline_cache_unlock(cache);
-   vk_pipeline_cache_object_unref(search);
+   vk_pipeline_cache_object_unref(cache->base.device, search);
if (found) {
-      vk_pipeline_cache_object_unref(replace);
+      vk_pipeline_cache_object_unref(cache->base.device, replace);
return found;
} else {
return replace;
@@ -293,7 +294,6 @@ vk_pipeline_cache_object_deserialize(struct vk_pipeline_cache *cache,
}
assert(reader.current == reader.end && !reader.overrun);
-   assert(object->device == cache->base.device);
assert(object->ops == ops);
assert(object->ref_cnt == 1);
assert(object->key_size == key_size);
@@ -413,7 +413,7 @@ vk_pipeline_cache_add_object(struct vk_pipeline_cache *cache,
vk_pipeline_cache_unlock(cache);
if (found) {
-      vk_pipeline_cache_object_unref(object);
+      vk_pipeline_cache_object_unref(cache->base.device, object);
return found_object;
} else {
/* If it wasn't in the object cache, it might not be in the disk cache
@@ -461,7 +461,7 @@ vk_pipeline_cache_lookup_nir(struct vk_pipeline_cache *cache,
blob_reader_init(&blob, data_obj->data, data_obj->data_size);
nir_shader *nir = nir_deserialize(mem_ctx, nir_options, &blob);
-   vk_pipeline_cache_object_unref(object);
+   vk_pipeline_cache_object_unref(cache->base.device, object);
if (blob.overrun) {
ralloc_free(nir);
@@ -494,7 +494,7 @@ vk_pipeline_cache_add_nir(struct vk_pipeline_cache *cache,
struct vk_pipeline_cache_object *cached =
vk_pipeline_cache_add_object(cache, &data_obj->base);
-   vk_pipeline_cache_object_unref(cached);
+   vk_pipeline_cache_object_unref(cache->base.device, cached);
}
static int32_t
@@ -565,7 +565,7 @@ vk_pipeline_cache_load(struct vk_pipeline_cache *cache,
continue;
object = vk_pipeline_cache_add_object(cache, object);
-      vk_pipeline_cache_object_unref(object);
+      vk_pipeline_cache_object_unref(cache->base.device, object);
}
}
@@ -619,18 +619,17 @@ vk_pipeline_cache_create(struct vk_device *device,
return cache;
}
-static void
-object_unref_cb(struct set_entry *entry)
-{
-   vk_pipeline_cache_object_unref((void *)entry->key);
-}
 void
 vk_pipeline_cache_destroy(struct vk_pipeline_cache *cache,
                           const VkAllocationCallbacks *pAllocator)
 {
-   if (cache->object_cache)
-      _mesa_set_destroy(cache->object_cache, object_unref_cb);
+   if (cache->object_cache) {
+      set_foreach_remove(cache->object_cache, entry) {
+         vk_pipeline_cache_object_unref(cache->base.device,
+                                        (void *)entry->key);
+      }
+      _mesa_set_destroy(cache->object_cache, NULL);
+   }
simple_mtx_destroy(&cache->lock);
vk_object_free(cache->base.device, pAllocator, cache);
}
@@ -749,12 +748,14 @@ vk_common_GetPipelineCacheData(VkDevice _device,
}
VKAPI_ATTR VkResult VKAPI_CALL
-vk_common_MergePipelineCaches(VkDevice device,
+vk_common_MergePipelineCaches(VkDevice _device,
VkPipelineCache dstCache,
uint32_t srcCacheCount,
const VkPipelineCache *pSrcCaches)
{
VK_FROM_HANDLE(vk_pipeline_cache, dst, dstCache);
+   VK_FROM_HANDLE(vk_device, device, _device);
+   assert(dst->base.device == device);
if (!dst->object_cache)
return VK_SUCCESS;
@@ -763,6 +764,7 @@ vk_common_MergePipelineCaches(VkDevice device,
for (uint32_t i = 0; i < srcCacheCount; i++) {
VK_FROM_HANDLE(vk_pipeline_cache, src, pSrcCaches[i]);
+      assert(src->base.device == device);
if (!src->object_cache)
continue;
@@ -788,7 +790,7 @@ vk_common_MergePipelineCaches(VkDevice device,
/* Even though dst has the object, it only has the blob version
* which isn't as useful. Replace it with the real object.
*/
-         vk_pipeline_cache_object_unref(dst_object);
+         vk_pipeline_cache_object_unref(device, dst_object);
dst_entry->key = vk_pipeline_cache_object_ref(src_object);
}
} else {

View File

@@ -86,7 +86,8 @@ struct vk_pipeline_cache_object_ops {
*
* Called when vk_pipeline_cache_object.ref_cnt hits 0.
*/
-   void (*destroy)(struct vk_pipeline_cache_object *object);
+   void (*destroy)(struct vk_device *device,
+                   struct vk_pipeline_cache_object *object);
};
/** Base struct for cached objects
@@ -105,7 +106,6 @@ struct vk_pipeline_cache_object_ops {
* it never has two objects of different types with the same key.
*/
struct vk_pipeline_cache_object {
-   struct vk_device *device;
const struct vk_pipeline_cache_object_ops *ops;
uint32_t ref_cnt;
@@ -121,7 +121,6 @@ vk_pipeline_cache_object_init(struct vk_device *device,
const void *key_data, uint32_t key_size)
{
memset(object, 0, sizeof(*object));
-   object->device = device;
object->ops = ops;
p_atomic_set(&object->ref_cnt, 1);
object->data_size = 0; /* Unknown */
@@ -144,11 +143,12 @@ vk_pipeline_cache_object_ref(struct vk_pipeline_cache_object *object)
}
static inline void
-vk_pipeline_cache_object_unref(struct vk_pipeline_cache_object *object)
+vk_pipeline_cache_object_unref(struct vk_device *device,
+                               struct vk_pipeline_cache_object *object)
{
assert(object && p_atomic_read(&object->ref_cnt) >= 1);
if (p_atomic_dec_zero(&object->ref_cnt))
-      object->ops->destroy(object);
+      object->ops->destroy(device, object);
}
/** A generic implementation of VkPipelineCache */