radv: reuse the multiple shader store & load functions for gs copy variant
Reviewed-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
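In rough outline, the change routes the lone GS copy shader through the same per-stage array helpers already used for the other stages. A condensed sketch of the calling pattern adopted in the hunks below (assuming radv's internal types and the radv_create_shader_variants_from_pipeline_cache / radv_pipeline_cache_insert_shaders helpers declared in radv_private.h; this is not the verbatim driver code):

	/* Load: query the cache for all stages at once; for the GS copy
	 * hash only the geometry slot ends up populated. */
	struct radv_shader_variant *variants[MESA_SHADER_STAGES] = {0};
	radv_create_shader_variants_from_pipeline_cache(device, cache, gs_copy_hash, variants);
	pipeline->gs_copy_shader = variants[MESA_SHADER_GEOMETRY];

	/* Store: publish the freshly built copy shader the same way, with
	 * its code blob and size placed in the matching per-stage slots. */
	void *code[MESA_SHADER_STAGES] = {0};
	unsigned code_size[MESA_SHADER_STAGES] = {0};
	code[MESA_SHADER_GEOMETRY] = gs_copy_code;
	code_size[MESA_SHADER_GEOMETRY] = gs_copy_code_size;
	variants[MESA_SHADER_GEOMETRY] = pipeline->gs_copy_shader;
	radv_pipeline_cache_insert_shaders(device, cache, gs_copy_hash,
					   variants, (const void **)code, code_size);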
@@ -1551,11 +1551,9 @@ void radv_create_shaders(struct radv_pipeline *pipeline,
 	gs_copy_hash[0] ^= 1;
 
 	if (modules[MESA_SHADER_GEOMETRY]) {
-		pipeline->gs_copy_shader =
-			radv_create_shader_variant_from_pipeline_cache(
-				pipeline->device,
-				cache,
-				gs_copy_hash);
+		struct radv_shader_variant *variants[MESA_SHADER_STAGES] = {0};
+		radv_create_shader_variants_from_pipeline_cache(device, cache, gs_copy_hash, variants);
+		pipeline->gs_copy_shader = variants[MESA_SHADER_GEOMETRY];
 	}
 
 	if (radv_create_shader_variants_from_pipeline_cache(device, cache, hash, pipeline->shaders) &&
@@ -1629,12 +1627,19 @@ void radv_create_shaders(struct radv_pipeline *pipeline,
 	}
 
 	if (pipeline->gs_copy_shader) {
-		pipeline->gs_copy_shader =
-			radv_pipeline_cache_insert_shader(device, cache,
-							  gs_copy_hash,
-							  pipeline->gs_copy_shader,
-							  gs_copy_code,
-							  gs_copy_code_size);
+		void *code[MESA_SHADER_STAGES] = {0};
+		unsigned code_size[MESA_SHADER_STAGES] = {0};
+		struct radv_shader_variant *variants[MESA_SHADER_STAGES] = {0};
+
+		code[MESA_SHADER_GEOMETRY] = gs_copy_code;
+		code_size[MESA_SHADER_GEOMETRY] = gs_copy_code_size;
+		variants[MESA_SHADER_GEOMETRY] = pipeline->gs_copy_shader;
+
+		radv_pipeline_cache_insert_shaders(device, cache,
+						   gs_copy_hash,
+						   variants,
+						   (const void**)code,
+						   code_size);
 	}
 	free(gs_copy_code);
 }
@@ -170,60 +170,6 @@ radv_pipeline_cache_search(struct radv_pipeline_cache *cache,
 	return entry;
 }
 
-struct radv_shader_variant *
-radv_create_shader_variant_from_pipeline_cache(struct radv_device *device,
-					       struct radv_pipeline_cache *cache,
-					       const unsigned char *sha1)
-{
-	struct cache_entry *entry = NULL;
-
-	if (cache)
-		entry = radv_pipeline_cache_search(cache, sha1);
-	else
-		entry = radv_pipeline_cache_search(device->mem_cache, sha1);
-
-	if (!entry) {
-		if (!device->physical_device->disk_cache)
-			return NULL;
-		uint8_t disk_sha1[20];
-		disk_cache_compute_key(device->physical_device->disk_cache,
-				       sha1, 20, disk_sha1);
-		entry = (struct cache_entry *)
-			disk_cache_get(device->physical_device->disk_cache,
-				       disk_sha1, NULL);
-		if (!entry)
-			return NULL;
-	}
-
-	if (!entry->variants[0]) {
-		struct radv_shader_variant *variant;
-		char *p = entry->code;
-		struct cache_entry_variant_info info;
-
-		variant = calloc(1, sizeof(struct radv_shader_variant));
-		if (!variant)
-			return NULL;
-
-		memcpy(&info, p, sizeof(struct cache_entry_variant_info));
-		p += sizeof(struct cache_entry_variant_info);
-
-		variant->code_size = entry->code_sizes[0];
-		variant->config = info.config;
-		variant->info = info.variant_info;
-		variant->rsrc1 = info.rsrc1;
-		variant->rsrc2 = info.rsrc2;
-		variant->ref_count = 1;
-
-		void *ptr = radv_alloc_shader_memory(device, variant);
-		memcpy(ptr, p, entry->code_sizes[0]);
-
-		entry->variants[0] = variant;
-	}
-
-	p_atomic_inc(&entry->variants[0]->ref_count);
-	return entry->variants[0];
-}
-
 bool
 radv_create_shader_variants_from_pipeline_cache(struct radv_device *device,
 						struct radv_pipeline_cache *cache,
@@ -357,79 +303,6 @@ radv_pipeline_cache_add_entry(struct radv_pipeline_cache *cache,
 	radv_pipeline_cache_set_entry(cache, entry);
 }
 
-struct radv_shader_variant *
-radv_pipeline_cache_insert_shader(struct radv_device *device,
-				  struct radv_pipeline_cache *cache,
-				  const unsigned char *sha1,
-				  struct radv_shader_variant *variant,
-				  const void *code, unsigned code_size)
-{
-	if (!cache)
-		cache = device->mem_cache;
-
-	pthread_mutex_lock(&cache->mutex);
-	struct cache_entry *entry = radv_pipeline_cache_search_unlocked(cache, sha1);
-	if (entry) {
-		if (entry->variants[0]) {
-			radv_shader_variant_destroy(cache->device, variant);
-			variant = entry->variants[0];
-		} else {
-			entry->variants[0] = variant;
-		}
-		p_atomic_inc(&variant->ref_count);
-		pthread_mutex_unlock(&cache->mutex);
-		return variant;
-	}
-
-	entry = vk_alloc(&cache->alloc, sizeof(*entry) + sizeof(struct cache_entry_variant_info) + code_size, 8,
-			 VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
-	if (!entry) {
-		pthread_mutex_unlock(&cache->mutex);
-		return variant;
-	}
-
-	memset(entry, 0, sizeof(*entry));
-
-	char* p = entry->code;
-	struct cache_entry_variant_info info;
-
-	info.config = variant->config;
-	info.variant_info = variant->info;
-	info.rsrc1 = variant->rsrc1;
-	info.rsrc2 = variant->rsrc2;
-	memcpy(p, &info, sizeof(struct cache_entry_variant_info));
-	p += sizeof(struct cache_entry_variant_info);
-
-	memcpy(entry->sha1, sha1, 20);
-	memcpy(p, code, code_size);
-
-	entry->code_sizes[0] = code_size;
-
-	/* Set variant to NULL so we have reproducible cache items */
-	entry->variants[0] = NULL;
-
-	/* Always add cache items to disk. This will allow collection of
-	 * compiled shaders by third parties such as steam, even if the app
-	 * implements its own pipeline cache.
-	 */
-	if (device->physical_device->disk_cache) {
-		uint8_t disk_sha1[20];
-		disk_cache_compute_key(device->physical_device->disk_cache, sha1, 20,
-				       disk_sha1);
-		disk_cache_put(device->physical_device->disk_cache,
-			       disk_sha1, entry, entry_size(entry), NULL);
-	}
-
-	entry->variants[0] = variant;
-	p_atomic_inc(&variant->ref_count);
-
-	radv_pipeline_cache_add_entry(cache, entry);
-
-	cache->modified = true;
-	pthread_mutex_unlock(&cache->mutex);
-	return variant;
-}
-
 void
 radv_pipeline_cache_insert_shaders(struct radv_device *device,
 				   struct radv_pipeline_cache *cache,
@@ -324,17 +324,7 @@ void
 radv_pipeline_cache_load(struct radv_pipeline_cache *cache,
			 const void *data, size_t size);
 
-struct radv_shader_variant *
-radv_create_shader_variant_from_pipeline_cache(struct radv_device *device,
-					       struct radv_pipeline_cache *cache,
-					       const unsigned char *sha1);
-
-struct radv_shader_variant *
-radv_pipeline_cache_insert_shader(struct radv_device *device,
-				  struct radv_pipeline_cache *cache,
-				  const unsigned char *sha1,
-				  struct radv_shader_variant *variant,
-				  const void *code, unsigned code_size);
 struct radv_shader_variant;
 
 bool
 radv_create_shader_variants_from_pipeline_cache(struct radv_device *device,
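For reference, a sketch of the two surviving cache entry points, reconstructed from their call sites and context lines in the hunks above; parameter names and qualifiers are illustrative, and the authoritative declarations are the ones kept in radv_private.h:

	bool
	radv_create_shader_variants_from_pipeline_cache(struct radv_device *device,
	                                                struct radv_pipeline_cache *cache,
	                                                const unsigned char *sha1,
	                                                struct radv_shader_variant **variants);

	void
	radv_pipeline_cache_insert_shaders(struct radv_device *device,
	                                   struct radv_pipeline_cache *cache,
	                                   const unsigned char *sha1,
	                                   struct radv_shader_variant **variants,
	                                   const void **code,
	                                   unsigned *code_sizes);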