anv: Rework pipeline caching

The original pipeline cache that Kristian wrote was based on a now-false
premise that the shaders can be stored in the pipeline cache.  The Vulkan
1.0 spec explicitly states that the pipeline cache object is transient and
you are allowed to delete it after using it to create a pipeline with no
ill effects.  As nice as Kristian's design was, it doesn't jibe with the
expectation provided by the Vulkan spec.

The new pipeline cache uses reference-counted anv_shader_bin objects that
are backed by a large state pool.  The cache itself is just a hash table
mapping key hashes to anv_shader_bin objects.  This has the added
advantage of removing one more hand-rolled hash table from mesa.

Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Cc: "12.0" <mesa-stable@lists.freedesktop.org>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=97476
Acked-by: Kristian Høgsberg Kristensen <krh@bitplanet.net>
This commit is contained in:
Jason Ekstrand
2016-08-25 01:49:49 -07:00
parent 6899718470
commit 10f9901bce
8 changed files with 315 additions and 427 deletions

View File

@@ -757,7 +757,7 @@ anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
return VK_SUCCESS; return VK_SUCCESS;
} }
struct anv_pipeline_bind_map *map = &pipeline->bindings[stage]; struct anv_pipeline_bind_map *map = &pipeline->shaders[stage]->bind_map;
if (bias + map->surface_count == 0) { if (bias + map->surface_count == 0) {
*bt_state = (struct anv_state) { 0, }; *bt_state = (struct anv_state) { 0, };
return VK_SUCCESS; return VK_SUCCESS;
@@ -922,7 +922,7 @@ anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
return VK_SUCCESS; return VK_SUCCESS;
} }
struct anv_pipeline_bind_map *map = &pipeline->bindings[stage]; struct anv_pipeline_bind_map *map = &pipeline->shaders[stage]->bind_map;
if (map->sampler_count == 0) { if (map->sampler_count == 0) {
*state = (struct anv_state) { 0, }; *state = (struct anv_state) { 0, };
return VK_SUCCESS; return VK_SUCCESS;
@@ -1096,7 +1096,7 @@ anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
struct anv_push_constants *data = struct anv_push_constants *data =
cmd_buffer->state.push_constants[stage]; cmd_buffer->state.push_constants[stage];
const struct brw_stage_prog_data *prog_data = const struct brw_stage_prog_data *prog_data =
cmd_buffer->state.pipeline->prog_data[stage]; anv_shader_bin_get_prog_data(cmd_buffer->state.pipeline->shaders[stage]);
/* If we don't actually have any push constants, bail. */ /* If we don't actually have any push constants, bail. */
if (data == NULL || prog_data == NULL || prog_data->nr_params == 0) if (data == NULL || prog_data == NULL || prog_data->nr_params == 0)

View File

@@ -881,7 +881,6 @@ VkResult anv_CreateDevice(
anv_block_pool_init(&device->instruction_block_pool, device, 128 * 1024); anv_block_pool_init(&device->instruction_block_pool, device, 128 * 1024);
anv_state_pool_init(&device->instruction_state_pool, anv_state_pool_init(&device->instruction_state_pool,
&device->instruction_block_pool); &device->instruction_block_pool);
anv_pipeline_cache_init(&device->default_pipeline_cache, device);
anv_block_pool_init(&device->surface_state_block_pool, device, 4096); anv_block_pool_init(&device->surface_state_block_pool, device, 4096);

View File

@@ -205,6 +205,12 @@ void anv_DestroyPipeline(
pAllocator ? pAllocator : &device->alloc); pAllocator ? pAllocator : &device->alloc);
if (pipeline->blend_state.map) if (pipeline->blend_state.map)
anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state); anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
if (pipeline->shaders[s])
anv_shader_bin_unref(device, pipeline->shaders[s]);
}
anv_free2(&device->alloc, pAllocator, pipeline); anv_free2(&device->alloc, pAllocator, pipeline);
} }
@@ -394,15 +400,34 @@ anv_fill_binding_table(struct brw_stage_prog_data *prog_data, unsigned bias)
prog_data->binding_table.image_start = bias; prog_data->binding_table.image_start = bias;
} }
static struct anv_shader_bin *
anv_pipeline_upload_kernel(struct anv_pipeline *pipeline,
struct anv_pipeline_cache *cache,
const void *key_data, uint32_t key_size,
const void *kernel_data, uint32_t kernel_size,
const void *prog_data, uint32_t prog_data_size,
const struct anv_pipeline_bind_map *bind_map)
{
if (cache) {
return anv_pipeline_cache_upload_kernel(cache, key_data, key_size,
kernel_data, kernel_size,
prog_data, prog_data_size,
bind_map);
} else {
return anv_shader_bin_create(pipeline->device, key_data, key_size,
kernel_data, kernel_size,
prog_data, prog_data_size, bind_map);
}
}
static void static void
anv_pipeline_add_compiled_stage(struct anv_pipeline *pipeline, anv_pipeline_add_compiled_stage(struct anv_pipeline *pipeline,
gl_shader_stage stage, gl_shader_stage stage,
const struct brw_stage_prog_data *prog_data, struct anv_shader_bin *shader)
struct anv_pipeline_bind_map *map)
{ {
pipeline->prog_data[stage] = prog_data; pipeline->shaders[stage] = shader;
pipeline->active_stages |= mesa_to_vk_shader_stage(stage); pipeline->active_stages |= mesa_to_vk_shader_stage(stage);
pipeline->bindings[stage] = *map;
} }
static VkResult static VkResult
@@ -415,21 +440,20 @@ anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
{ {
const struct brw_compiler *compiler = const struct brw_compiler *compiler =
pipeline->device->instance->physicalDevice.compiler; pipeline->device->instance->physicalDevice.compiler;
const struct brw_stage_prog_data *stage_prog_data;
struct anv_pipeline_bind_map map; struct anv_pipeline_bind_map map;
struct brw_vs_prog_key key; struct brw_vs_prog_key key;
uint32_t kernel = NO_KERNEL; struct anv_shader_bin *bin = NULL;
unsigned char sha1[20]; unsigned char sha1[20];
populate_vs_prog_key(&pipeline->device->info, &key); populate_vs_prog_key(&pipeline->device->info, &key);
if (module->size > 0) { if (cache) {
anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
pipeline->layout, spec_info); pipeline->layout, spec_info);
kernel = anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map); bin = anv_pipeline_cache_search(cache, sha1, 20);
} }
if (kernel == NO_KERNEL) { if (bin == NULL) {
struct brw_vs_prog_data prog_data = { 0, }; struct brw_vs_prog_data prog_data = { 0, };
struct anv_pipeline_binding surface_to_descriptor[256]; struct anv_pipeline_binding surface_to_descriptor[256];
struct anv_pipeline_binding sampler_to_descriptor[256]; struct anv_pipeline_binding sampler_to_descriptor[256];
@@ -468,28 +492,29 @@ anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
} }
stage_prog_data = &prog_data.base.base; bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
kernel = anv_pipeline_cache_upload_kernel(cache, shader_code, code_size,
module->size > 0 ? sha1 : NULL, &prog_data, sizeof(prog_data), &map);
shader_code, code_size, if (!bin) {
&stage_prog_data, sizeof(prog_data), ralloc_free(mem_ctx);
&map); return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
ralloc_free(mem_ctx); ralloc_free(mem_ctx);
} }
const struct brw_vs_prog_data *vs_prog_data = const struct brw_vs_prog_data *vs_prog_data =
(const struct brw_vs_prog_data *) stage_prog_data; (const struct brw_vs_prog_data *)anv_shader_bin_get_prog_data(bin);
if (vs_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8) { if (vs_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8) {
pipeline->vs_simd8 = kernel; pipeline->vs_simd8 = bin->kernel.offset;
pipeline->vs_vec4 = NO_KERNEL; pipeline->vs_vec4 = NO_KERNEL;
} else { } else {
pipeline->vs_simd8 = NO_KERNEL; pipeline->vs_simd8 = NO_KERNEL;
pipeline->vs_vec4 = kernel; pipeline->vs_vec4 = bin->kernel.offset;
} }
anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_VERTEX, anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_VERTEX, bin);
stage_prog_data, &map);
return VK_SUCCESS; return VK_SUCCESS;
} }
@@ -504,21 +529,20 @@ anv_pipeline_compile_gs(struct anv_pipeline *pipeline,
{ {
const struct brw_compiler *compiler = const struct brw_compiler *compiler =
pipeline->device->instance->physicalDevice.compiler; pipeline->device->instance->physicalDevice.compiler;
const struct brw_stage_prog_data *stage_prog_data;
struct anv_pipeline_bind_map map; struct anv_pipeline_bind_map map;
struct brw_gs_prog_key key; struct brw_gs_prog_key key;
uint32_t kernel = NO_KERNEL; struct anv_shader_bin *bin = NULL;
unsigned char sha1[20]; unsigned char sha1[20];
populate_gs_prog_key(&pipeline->device->info, &key); populate_gs_prog_key(&pipeline->device->info, &key);
if (module->size > 0) { if (cache) {
anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
pipeline->layout, spec_info); pipeline->layout, spec_info);
kernel = anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map); bin = anv_pipeline_cache_search(cache, sha1, 20);
} }
if (kernel == NO_KERNEL) { if (bin == NULL) {
struct brw_gs_prog_data prog_data = { 0, }; struct brw_gs_prog_data prog_data = { 0, };
struct anv_pipeline_binding surface_to_descriptor[256]; struct anv_pipeline_binding surface_to_descriptor[256];
struct anv_pipeline_binding sampler_to_descriptor[256]; struct anv_pipeline_binding sampler_to_descriptor[256];
@@ -556,20 +580,20 @@ anv_pipeline_compile_gs(struct anv_pipeline *pipeline,
} }
/* TODO: SIMD8 GS */ /* TODO: SIMD8 GS */
stage_prog_data = &prog_data.base.base; bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
kernel = anv_pipeline_cache_upload_kernel(cache, shader_code, code_size,
module->size > 0 ? sha1 : NULL, &prog_data, sizeof(prog_data), &map);
shader_code, code_size, if (!bin) {
&stage_prog_data, sizeof(prog_data), ralloc_free(mem_ctx);
&map); return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
ralloc_free(mem_ctx); ralloc_free(mem_ctx);
} }
pipeline->gs_kernel = kernel; pipeline->gs_kernel = bin->kernel.offset;
anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_GEOMETRY, anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_GEOMETRY, bin);
stage_prog_data, &map);
return VK_SUCCESS; return VK_SUCCESS;
} }
@@ -585,21 +609,20 @@ anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
{ {
const struct brw_compiler *compiler = const struct brw_compiler *compiler =
pipeline->device->instance->physicalDevice.compiler; pipeline->device->instance->physicalDevice.compiler;
const struct brw_stage_prog_data *stage_prog_data;
struct anv_pipeline_bind_map map; struct anv_pipeline_bind_map map;
struct brw_wm_prog_key key; struct brw_wm_prog_key key;
struct anv_shader_bin *bin = NULL;
unsigned char sha1[20]; unsigned char sha1[20];
populate_wm_prog_key(&pipeline->device->info, info, extra, &key); populate_wm_prog_key(&pipeline->device->info, info, extra, &key);
if (module->size > 0) { if (cache) {
anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
pipeline->layout, spec_info); pipeline->layout, spec_info);
pipeline->ps_ksp0 = bin = anv_pipeline_cache_search(cache, sha1, 20);
anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map);
} }
if (pipeline->ps_ksp0 == NO_KERNEL) { if (bin == NULL) {
struct brw_wm_prog_data prog_data = { 0, }; struct brw_wm_prog_data prog_data = { 0, };
struct anv_pipeline_binding surface_to_descriptor[256]; struct anv_pipeline_binding surface_to_descriptor[256];
struct anv_pipeline_binding sampler_to_descriptor[256]; struct anv_pipeline_binding sampler_to_descriptor[256];
@@ -688,19 +711,20 @@ anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
} }
stage_prog_data = &prog_data.base; bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
pipeline->ps_ksp0 = shader_code, code_size,
anv_pipeline_cache_upload_kernel(cache, &prog_data, sizeof(prog_data), &map);
module->size > 0 ? sha1 : NULL, if (!bin) {
shader_code, code_size, ralloc_free(mem_ctx);
&stage_prog_data, sizeof(prog_data), return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
&map); }
ralloc_free(mem_ctx); ralloc_free(mem_ctx);
} }
anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_FRAGMENT, pipeline->ps_ksp0 = bin->kernel.offset;
stage_prog_data, &map);
anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_FRAGMENT, bin);
return VK_SUCCESS; return VK_SUCCESS;
} }
@@ -715,21 +739,20 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
{ {
const struct brw_compiler *compiler = const struct brw_compiler *compiler =
pipeline->device->instance->physicalDevice.compiler; pipeline->device->instance->physicalDevice.compiler;
const struct brw_stage_prog_data *stage_prog_data;
struct anv_pipeline_bind_map map; struct anv_pipeline_bind_map map;
struct brw_cs_prog_key key; struct brw_cs_prog_key key;
uint32_t kernel = NO_KERNEL; struct anv_shader_bin *bin = NULL;
unsigned char sha1[20]; unsigned char sha1[20];
populate_cs_prog_key(&pipeline->device->info, &key); populate_cs_prog_key(&pipeline->device->info, &key);
if (module->size > 0) { if (cache) {
anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
pipeline->layout, spec_info); pipeline->layout, spec_info);
kernel = anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map); bin = anv_pipeline_cache_search(cache, sha1, 20);
} }
if (module->size == 0 || kernel == NO_KERNEL) { if (bin == NULL) {
struct brw_cs_prog_data prog_data = { 0, }; struct brw_cs_prog_data prog_data = { 0, };
struct anv_pipeline_binding surface_to_descriptor[256]; struct anv_pipeline_binding surface_to_descriptor[256];
struct anv_pipeline_binding sampler_to_descriptor[256]; struct anv_pipeline_binding sampler_to_descriptor[256];
@@ -761,20 +784,20 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
} }
stage_prog_data = &prog_data.base; bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
kernel = anv_pipeline_cache_upload_kernel(cache, shader_code, code_size,
module->size > 0 ? sha1 : NULL, &prog_data, sizeof(prog_data), &map);
shader_code, code_size, if (!bin) {
&stage_prog_data, sizeof(prog_data), ralloc_free(mem_ctx);
&map); return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
ralloc_free(mem_ctx); ralloc_free(mem_ctx);
} }
pipeline->cs_simd = kernel; pipeline->cs_simd = bin->kernel.offset;
anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_COMPUTE, anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_COMPUTE, bin);
stage_prog_data, &map);
return VK_SUCCESS; return VK_SUCCESS;
} }
@@ -1168,8 +1191,7 @@ anv_pipeline_init(struct anv_pipeline *pipeline,
/* When we free the pipeline, we detect stages based on the NULL status /* When we free the pipeline, we detect stages based on the NULL status
* of various prog_data pointers. Make them NULL by default. * of various prog_data pointers. Make them NULL by default.
*/ */
memset(pipeline->prog_data, 0, sizeof(pipeline->prog_data)); memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
memset(pipeline->bindings, 0, sizeof(pipeline->bindings));
pipeline->vs_simd8 = NO_KERNEL; pipeline->vs_simd8 = NO_KERNEL;
pipeline->vs_vec4 = NO_KERNEL; pipeline->vs_vec4 = NO_KERNEL;
@@ -1278,6 +1300,11 @@ anv_pipeline_init(struct anv_pipeline *pipeline,
return VK_SUCCESS; return VK_SUCCESS;
compile_fail: compile_fail:
for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
if (pipeline->shaders[s])
anv_shader_bin_unref(device, pipeline->shaders[s]);
}
anv_reloc_list_finish(&pipeline->batch_relocs, alloc); anv_reloc_list_finish(&pipeline->batch_relocs, alloc);
return result; return result;
@@ -1295,9 +1322,6 @@ anv_graphics_pipeline_create(
ANV_FROM_HANDLE(anv_device, device, _device); ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache); ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
if (cache == NULL)
cache = &device->default_pipeline_cache;
switch (device->info.gen) { switch (device->info.gen) {
case 7: case 7:
if (device->info.is_haswell) if (device->info.is_haswell)
@@ -1351,9 +1375,6 @@ static VkResult anv_compute_pipeline_create(
ANV_FROM_HANDLE(anv_device, device, _device); ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache); ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
if (cache == NULL)
cache = &device->default_pipeline_cache;
switch (device->info.gen) { switch (device->info.gen) {
case 7: case 7:
if (device->info.is_haswell) if (device->info.is_haswell)

View File

@@ -22,6 +22,7 @@
*/ */
#include "util/mesa-sha1.h" #include "util/mesa-sha1.h"
#include "util/hash_table.h"
#include "util/debug.h" #include "util/debug.h"
#include "anv_private.h" #include "anv_private.h"
@@ -147,67 +148,55 @@ anv_shader_bin_write_data(const struct anv_shader_bin *shader, void *data)
* dual_src_blend. * dual_src_blend.
*/ */
static uint32_t
shader_bin_key_hash_func(const void *void_key)
{
const struct shader_bin_key *key = void_key;
return _mesa_hash_data(key->data, key->size);
}
static bool
shader_bin_key_compare_func(const void *void_a, const void *void_b)
{
const struct shader_bin_key *a = void_a, *b = void_b;
if (a->size != b->size)
return false;
return memcmp(a->data, b->data, a->size) == 0;
}
void void
anv_pipeline_cache_init(struct anv_pipeline_cache *cache, anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
struct anv_device *device) struct anv_device *device,
bool cache_enabled)
{ {
cache->device = device; cache->device = device;
anv_state_stream_init(&cache->program_stream,
&device->instruction_block_pool);
pthread_mutex_init(&cache->mutex, NULL); pthread_mutex_init(&cache->mutex, NULL);
cache->kernel_count = 0; if (cache_enabled) {
cache->total_size = 0; cache->cache = _mesa_hash_table_create(NULL, shader_bin_key_hash_func,
cache->table_size = 1024; shader_bin_key_compare_func);
const size_t byte_size = cache->table_size * sizeof(cache->hash_table[0]); } else {
cache->hash_table = malloc(byte_size); cache->cache = NULL;
}
/* We don't consider allocation failure fatal, we just start with a 0-sized
* cache. */
if (cache->hash_table == NULL ||
!env_var_as_boolean("ANV_ENABLE_PIPELINE_CACHE", true))
cache->table_size = 0;
else
memset(cache->hash_table, 0xff, byte_size);
} }
void void
anv_pipeline_cache_finish(struct anv_pipeline_cache *cache) anv_pipeline_cache_finish(struct anv_pipeline_cache *cache)
{ {
anv_state_stream_finish(&cache->program_stream);
pthread_mutex_destroy(&cache->mutex); pthread_mutex_destroy(&cache->mutex);
free(cache->hash_table);
}
struct cache_entry { if (cache->cache) {
unsigned char sha1[20]; /* This is a bit unfortunate. In order to keep things from randomly
uint32_t prog_data_size; * going away, the shader cache has to hold a reference to all shader
uint32_t kernel_size; * binaries it contains. We unref them when we destroy the cache.
uint32_t surface_count; */
uint32_t sampler_count; struct hash_entry *entry;
uint32_t image_count; hash_table_foreach(cache->cache, entry)
anv_shader_bin_unref(cache->device, entry->data);
char prog_data[0]; _mesa_hash_table_destroy(cache->cache, NULL);
}
/* kernel follows prog_data at next 64 byte aligned address */
};
static uint32_t
entry_size(struct cache_entry *entry)
{
/* This returns the number of bytes needed to serialize an entry, which
* doesn't include the alignment padding bytes.
*/
struct brw_stage_prog_data *prog_data = (void *)entry->prog_data;
const uint32_t param_size =
prog_data->nr_params * sizeof(*prog_data->param);
const uint32_t map_size =
entry->surface_count * sizeof(struct anv_pipeline_binding) +
entry->sampler_count * sizeof(struct anv_pipeline_binding);
return sizeof(*entry) + entry->prog_data_size + param_size + map_size;
} }
void void
@@ -236,221 +225,94 @@ anv_hash_shader(unsigned char *hash, const void *key, size_t key_size,
_mesa_sha1_final(ctx, hash); _mesa_sha1_final(ctx, hash);
} }
static uint32_t static struct anv_shader_bin *
anv_pipeline_cache_search_unlocked(struct anv_pipeline_cache *cache, anv_pipeline_cache_search_locked(struct anv_pipeline_cache *cache,
const unsigned char *sha1, const void *key_data, uint32_t key_size)
const struct brw_stage_prog_data **prog_data,
struct anv_pipeline_bind_map *map)
{ {
const uint32_t mask = cache->table_size - 1; uint32_t vla[1 + DIV_ROUND_UP(key_size, sizeof(uint32_t))];
const uint32_t start = (*(uint32_t *) sha1); struct shader_bin_key *key = (void *)vla;
key->size = key_size;
memcpy(key->data, key_data, key_size);
for (uint32_t i = 0; i < cache->table_size; i++) { struct hash_entry *entry = _mesa_hash_table_search(cache->cache, key);
const uint32_t index = (start + i) & mask; if (entry)
const uint32_t offset = cache->hash_table[index]; return entry->data;
else
if (offset == ~0) return NULL;
return NO_KERNEL;
struct cache_entry *entry =
cache->program_stream.block_pool->map + offset;
if (memcmp(entry->sha1, sha1, sizeof(entry->sha1)) == 0) {
if (prog_data) {
assert(map);
void *p = entry->prog_data;
*prog_data = p;
p += entry->prog_data_size;
p += (*prog_data)->nr_params * sizeof(*(*prog_data)->param);
map->surface_count = entry->surface_count;
map->sampler_count = entry->sampler_count;
map->image_count = entry->image_count;
map->surface_to_descriptor = p;
p += map->surface_count * sizeof(struct anv_pipeline_binding);
map->sampler_to_descriptor = p;
}
return offset + align_u32(entry_size(entry), 64);
}
}
/* This can happen if the pipeline cache is disabled via
* ANV_ENABLE_PIPELINE_CACHE=false
*/
return NO_KERNEL;
} }
uint32_t struct anv_shader_bin *
anv_pipeline_cache_search(struct anv_pipeline_cache *cache, anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
const unsigned char *sha1, const void *key_data, uint32_t key_size)
const struct brw_stage_prog_data **prog_data,
struct anv_pipeline_bind_map *map)
{ {
uint32_t kernel; if (!cache->cache)
return NULL;
pthread_mutex_lock(&cache->mutex); pthread_mutex_lock(&cache->mutex);
kernel = anv_pipeline_cache_search_unlocked(cache, sha1, prog_data, map); struct anv_shader_bin *shader =
anv_pipeline_cache_search_locked(cache, key_data, key_size);
pthread_mutex_unlock(&cache->mutex); pthread_mutex_unlock(&cache->mutex);
return kernel; /* We increment refcount before handing it to the caller */
if (shader)
anv_shader_bin_ref(shader);
return shader;
} }
static void static struct anv_shader_bin *
anv_pipeline_cache_set_entry(struct anv_pipeline_cache *cache, anv_pipeline_cache_add_shader(struct anv_pipeline_cache *cache,
struct cache_entry *entry, uint32_t entry_offset) const void *key_data, uint32_t key_size,
const void *kernel_data, uint32_t kernel_size,
const void *prog_data, uint32_t prog_data_size,
const struct anv_pipeline_bind_map *bind_map)
{ {
const uint32_t mask = cache->table_size - 1; struct anv_shader_bin *shader =
const uint32_t start = (*(uint32_t *) entry->sha1); anv_pipeline_cache_search_locked(cache, key_data, key_size);
if (shader)
return shader;
/* We'll always be able to insert when we get here. */ struct anv_shader_bin *bin =
assert(cache->kernel_count < cache->table_size / 2); anv_shader_bin_create(cache->device, key_data, key_size,
kernel_data, kernel_size,
prog_data, prog_data_size, bind_map);
if (!bin)
return NULL;
for (uint32_t i = 0; i < cache->table_size; i++) { _mesa_hash_table_insert(cache->cache, anv_shader_bin_get_key(bin), bin);
const uint32_t index = (start + i) & mask;
if (cache->hash_table[index] == ~0) {
cache->hash_table[index] = entry_offset;
break;
}
}
cache->total_size += entry_size(entry) + entry->kernel_size; return bin;
cache->kernel_count++;
} }
static VkResult struct anv_shader_bin *
anv_pipeline_cache_grow(struct anv_pipeline_cache *cache)
{
const uint32_t table_size = cache->table_size * 2;
const uint32_t old_table_size = cache->table_size;
const size_t byte_size = table_size * sizeof(cache->hash_table[0]);
uint32_t *table;
uint32_t *old_table = cache->hash_table;
table = malloc(byte_size);
if (table == NULL)
return VK_ERROR_OUT_OF_HOST_MEMORY;
cache->hash_table = table;
cache->table_size = table_size;
cache->kernel_count = 0;
cache->total_size = 0;
memset(cache->hash_table, 0xff, byte_size);
for (uint32_t i = 0; i < old_table_size; i++) {
const uint32_t offset = old_table[i];
if (offset == ~0)
continue;
struct cache_entry *entry =
cache->program_stream.block_pool->map + offset;
anv_pipeline_cache_set_entry(cache, entry, offset);
}
free(old_table);
return VK_SUCCESS;
}
static void
anv_pipeline_cache_add_entry(struct anv_pipeline_cache *cache,
struct cache_entry *entry, uint32_t entry_offset)
{
if (cache->kernel_count == cache->table_size / 2)
anv_pipeline_cache_grow(cache);
/* Failing to grow that hash table isn't fatal, but may mean we don't
* have enough space to add this new kernel. Only add it if there's room.
*/
if (cache->kernel_count < cache->table_size / 2)
anv_pipeline_cache_set_entry(cache, entry, entry_offset);
}
uint32_t
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache, anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
const unsigned char *sha1, const void *key_data, uint32_t key_size,
const void *kernel, size_t kernel_size, const void *kernel_data, uint32_t kernel_size,
const struct brw_stage_prog_data **prog_data, const void *prog_data, uint32_t prog_data_size,
size_t prog_data_size, const struct anv_pipeline_bind_map *bind_map)
struct anv_pipeline_bind_map *map)
{ {
pthread_mutex_lock(&cache->mutex); if (cache->cache) {
pthread_mutex_lock(&cache->mutex);
/* Before uploading, check again that another thread didn't upload this struct anv_shader_bin *bin =
* shader while we were compiling it. anv_pipeline_cache_add_shader(cache, key_data, key_size,
*/ kernel_data, kernel_size,
if (sha1) { prog_data, prog_data_size, bind_map);
uint32_t cached_kernel =
anv_pipeline_cache_search_unlocked(cache, sha1, prog_data, map); pthread_mutex_unlock(&cache->mutex);
if (cached_kernel != NO_KERNEL) {
pthread_mutex_unlock(&cache->mutex); /* We increment refcount before handing it to the caller */
return cached_kernel; anv_shader_bin_ref(bin);
}
return bin;
} else {
/* In this case, we're not caching it so the caller owns it entirely */
return anv_shader_bin_create(cache->device, key_data, key_size,
kernel_data, kernel_size,
prog_data, prog_data_size, bind_map);
} }
struct cache_entry *entry;
assert((*prog_data)->nr_pull_params == 0);
assert((*prog_data)->nr_image_params == 0);
const uint32_t param_size =
(*prog_data)->nr_params * sizeof(*(*prog_data)->param);
const uint32_t map_size =
map->surface_count * sizeof(struct anv_pipeline_binding) +
map->sampler_count * sizeof(struct anv_pipeline_binding);
const uint32_t preamble_size =
align_u32(sizeof(*entry) + prog_data_size + param_size + map_size, 64);
const uint32_t size = preamble_size + kernel_size;
assert(size < cache->program_stream.block_pool->block_size);
const struct anv_state state =
anv_state_stream_alloc(&cache->program_stream, size, 64);
entry = state.map;
entry->prog_data_size = prog_data_size;
entry->surface_count = map->surface_count;
entry->sampler_count = map->sampler_count;
entry->image_count = map->image_count;
entry->kernel_size = kernel_size;
void *p = entry->prog_data;
memcpy(p, *prog_data, prog_data_size);
p += prog_data_size;
memcpy(p, (*prog_data)->param, param_size);
((struct brw_stage_prog_data *)entry->prog_data)->param = p;
p += param_size;
memcpy(p, map->surface_to_descriptor,
map->surface_count * sizeof(struct anv_pipeline_binding));
map->surface_to_descriptor = p;
p += map->surface_count * sizeof(struct anv_pipeline_binding);
memcpy(p, map->sampler_to_descriptor,
map->sampler_count * sizeof(struct anv_pipeline_binding));
map->sampler_to_descriptor = p;
if (sha1) {
assert(anv_pipeline_cache_search_unlocked(cache, sha1,
NULL, NULL) == NO_KERNEL);
memcpy(entry->sha1, sha1, sizeof(entry->sha1));
anv_pipeline_cache_add_entry(cache, entry, state.offset);
}
pthread_mutex_unlock(&cache->mutex);
memcpy(state.map + preamble_size, kernel, kernel_size);
if (!cache->device->info.has_llc)
anv_state_clflush(state);
*prog_data = (const struct brw_stage_prog_data *) entry->prog_data;
return state.offset + preamble_size;
} }
struct cache_header { struct cache_header {
@@ -469,6 +331,9 @@ anv_pipeline_cache_load(struct anv_pipeline_cache *cache,
struct cache_header header; struct cache_header header;
uint8_t uuid[VK_UUID_SIZE]; uint8_t uuid[VK_UUID_SIZE];
if (cache->cache == NULL)
return;
if (size < sizeof(header)) if (size < sizeof(header))
return; return;
memcpy(&header, data, sizeof(header)); memcpy(&header, data, sizeof(header));
@@ -484,48 +349,62 @@ anv_pipeline_cache_load(struct anv_pipeline_cache *cache,
if (memcmp(header.uuid, uuid, VK_UUID_SIZE) != 0) if (memcmp(header.uuid, uuid, VK_UUID_SIZE) != 0)
return; return;
void *end = (void *) data + size; const void *end = data + size;
void *p = (void *) data + header.header_size; const void *p = data + header.header_size;
while (p < end) { /* Count is the total number of valid entries */
struct cache_entry *entry = p; uint32_t count;
if (p + sizeof(count) >= end)
return;
memcpy(&count, p, sizeof(count));
p += align_u32(sizeof(count), 8);
void *data = entry->prog_data; for (uint32_t i = 0; i < count; i++) {
struct anv_shader_bin bin;
if (p + sizeof(bin) > end)
break;
memcpy(&bin, p, sizeof(bin));
p += align_u32(sizeof(struct anv_shader_bin), 8);
/* Make a copy of prog_data so that it's mutable */ const void *prog_data = p;
uint8_t prog_data_tmp[512]; p += align_u32(bin.prog_data_size, 8);
assert(entry->prog_data_size <= sizeof(prog_data_tmp));
memcpy(prog_data_tmp, data, entry->prog_data_size);
struct brw_stage_prog_data *prog_data = (void *)prog_data_tmp;
data += entry->prog_data_size;
prog_data->param = data; struct shader_bin_key key;
data += prog_data->nr_params * sizeof(*prog_data->param); if (p + sizeof(key) > end)
break;
memcpy(&key, p, sizeof(key));
const void *key_data = p + sizeof(key);
p += align_u32(sizeof(key) + key.size, 8);
struct anv_pipeline_binding *surface_to_descriptor = data; /* We're going to memcpy this so getting rid of const is fine */
data += entry->surface_count * sizeof(struct anv_pipeline_binding); struct anv_pipeline_binding *bindings = (void *)p;
struct anv_pipeline_binding *sampler_to_descriptor = data; p += align_u32((bin.bind_map.surface_count + bin.bind_map.sampler_count) *
data += entry->sampler_count * sizeof(struct anv_pipeline_binding); sizeof(struct anv_pipeline_binding), 8);
void *kernel = data; bin.bind_map.surface_to_descriptor = bindings;
bin.bind_map.sampler_to_descriptor = bindings + bin.bind_map.surface_count;
struct anv_pipeline_bind_map map = { const void *kernel_data = p;
.surface_count = entry->surface_count, p += align_u32(bin.kernel_size, 8);
.sampler_count = entry->sampler_count,
.image_count = entry->image_count,
.surface_to_descriptor = surface_to_descriptor,
.sampler_to_descriptor = sampler_to_descriptor
};
const struct brw_stage_prog_data *const_prog_data = prog_data; if (p > end)
break;
anv_pipeline_cache_upload_kernel(cache, entry->sha1, anv_pipeline_cache_add_shader(cache, key_data, key.size,
kernel, entry->kernel_size, kernel_data, bin.kernel_size,
&const_prog_data, prog_data, bin.prog_data_size,
entry->prog_data_size, &map); &bin.bind_map);
p = kernel + entry->kernel_size;
} }
} }
static bool
pipeline_cache_enabled()
{
static int enabled = -1;
if (enabled < 0)
enabled = env_var_as_boolean("ANV_ENABLE_PIPELINE_CACHE", true);
return enabled;
}
VkResult anv_CreatePipelineCache( VkResult anv_CreatePipelineCache(
VkDevice _device, VkDevice _device,
const VkPipelineCacheCreateInfo* pCreateInfo, const VkPipelineCacheCreateInfo* pCreateInfo,
@@ -544,7 +423,7 @@ VkResult anv_CreatePipelineCache(
if (cache == NULL) if (cache == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
anv_pipeline_cache_init(cache, device); anv_pipeline_cache_init(cache, device, pipeline_cache_enabled());
if (pCreateInfo->initialDataSize > 0) if (pCreateInfo->initialDataSize > 0)
anv_pipeline_cache_load(cache, anv_pipeline_cache_load(cache,
@@ -579,9 +458,16 @@ VkResult anv_GetPipelineCacheData(
ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache); ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
struct cache_header *header; struct cache_header *header;
const size_t size = sizeof(*header) + cache->total_size;
if (pData == NULL) { if (pData == NULL) {
size_t size = align_u32(sizeof(*header), 8) +
align_u32(sizeof(uint32_t), 8);
if (cache->cache) {
struct hash_entry *entry;
hash_table_foreach(cache->cache, entry)
size += anv_shader_bin_data_size(entry->data);
}
*pDataSize = size; *pDataSize = size;
return VK_SUCCESS; return VK_SUCCESS;
} }
@@ -598,25 +484,25 @@ VkResult anv_GetPipelineCacheData(
header->vendor_id = 0x8086; header->vendor_id = 0x8086;
header->device_id = device->chipset_id; header->device_id = device->chipset_id;
anv_device_get_cache_uuid(header->uuid); anv_device_get_cache_uuid(header->uuid);
p += header->header_size; p += align_u32(header->header_size, 8);
struct cache_entry *entry; uint32_t *count = p;
for (uint32_t i = 0; i < cache->table_size; i++) { p += align_u32(sizeof(*count), 8);
if (cache->hash_table[i] == ~0) *count = 0;
continue;
entry = cache->program_stream.block_pool->map + cache->hash_table[i]; if (cache->cache) {
const uint32_t size = entry_size(entry); struct hash_entry *entry;
if (end < p + size + entry->kernel_size) hash_table_foreach(cache->cache, entry) {
break; struct anv_shader_bin *shader = entry->data;
size_t data_size = anv_shader_bin_data_size(entry->data);
if (p + data_size > end)
break;
memcpy(p, entry, size); anv_shader_bin_write_data(shader, p);
p += size; p += data_size;
void *kernel = (void *) entry + align_u32(size, 64); (*count)++;
}
memcpy(p, kernel, entry->kernel_size);
p += entry->kernel_size;
} }
*pDataSize = p - pData; *pDataSize = p - pData;
@@ -624,25 +510,6 @@ VkResult anv_GetPipelineCacheData(
return VK_SUCCESS; return VK_SUCCESS;
} }
static void
anv_pipeline_cache_merge(struct anv_pipeline_cache *dst,
struct anv_pipeline_cache *src)
{
for (uint32_t i = 0; i < src->table_size; i++) {
const uint32_t offset = src->hash_table[i];
if (offset == ~0)
continue;
struct cache_entry *entry =
src->program_stream.block_pool->map + offset;
if (anv_pipeline_cache_search(dst, entry->sha1, NULL, NULL) != NO_KERNEL)
continue;
anv_pipeline_cache_add_entry(dst, entry, offset);
}
}
VkResult anv_MergePipelineCaches( VkResult anv_MergePipelineCaches(
VkDevice _device, VkDevice _device,
VkPipelineCache destCache, VkPipelineCache destCache,
@@ -651,10 +518,23 @@ VkResult anv_MergePipelineCaches(
{ {
ANV_FROM_HANDLE(anv_pipeline_cache, dst, destCache); ANV_FROM_HANDLE(anv_pipeline_cache, dst, destCache);
if (!dst->cache)
return VK_SUCCESS;
for (uint32_t i = 0; i < srcCacheCount; i++) { for (uint32_t i = 0; i < srcCacheCount; i++) {
ANV_FROM_HANDLE(anv_pipeline_cache, src, pSrcCaches[i]); ANV_FROM_HANDLE(anv_pipeline_cache, src, pSrcCaches[i]);
if (!src->cache)
continue;
anv_pipeline_cache_merge(dst, src); struct hash_entry *entry;
hash_table_foreach(src->cache, entry) {
struct anv_shader_bin *bin = entry->data;
if (_mesa_hash_table_search(dst->cache, anv_shader_bin_get_key(bin)))
continue;
anv_shader_bin_ref(bin);
_mesa_hash_table_insert(dst->cache, anv_shader_bin_get_key(bin), bin);
}
} }
return VK_SUCCESS; return VK_SUCCESS;

View File

@@ -400,7 +400,7 @@ struct anv_fixed_size_state_pool {
}; };
#define ANV_MIN_STATE_SIZE_LOG2 6 #define ANV_MIN_STATE_SIZE_LOG2 6
#define ANV_MAX_STATE_SIZE_LOG2 10 #define ANV_MAX_STATE_SIZE_LOG2 17
#define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2 + 1) #define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2 + 1)
@@ -658,31 +658,27 @@ struct anv_queue {
struct anv_pipeline_cache { struct anv_pipeline_cache {
struct anv_device * device; struct anv_device * device;
struct anv_state_stream program_stream;
pthread_mutex_t mutex; pthread_mutex_t mutex;
uint32_t total_size; struct hash_table * cache;
uint32_t table_size;
uint32_t kernel_count;
uint32_t * hash_table;
}; };
struct anv_pipeline_bind_map; struct anv_pipeline_bind_map;
void anv_pipeline_cache_init(struct anv_pipeline_cache *cache, void anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
struct anv_device *device); struct anv_device *device,
bool cache_enabled);
void anv_pipeline_cache_finish(struct anv_pipeline_cache *cache); void anv_pipeline_cache_finish(struct anv_pipeline_cache *cache);
uint32_t anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
const unsigned char *sha1, struct anv_shader_bin *
const struct brw_stage_prog_data **prog_data, anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
struct anv_pipeline_bind_map *map); const void *key, uint32_t key_size);
uint32_t anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache, struct anv_shader_bin *
const unsigned char *sha1, anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
const void *kernel, const void *key_data, uint32_t key_size,
size_t kernel_size, const void *kernel_data, uint32_t kernel_size,
const struct brw_stage_prog_data **prog_data, const void *prog_data, uint32_t prog_data_size,
size_t prog_data_size, const struct anv_pipeline_bind_map *bind_map);
struct anv_pipeline_bind_map *map);
struct anv_device { struct anv_device {
VK_LOADER_DATA _loader_data; VK_LOADER_DATA _loader_data;
@@ -705,7 +701,6 @@ struct anv_device {
struct anv_block_pool instruction_block_pool; struct anv_block_pool instruction_block_pool;
struct anv_state_pool instruction_state_pool; struct anv_state_pool instruction_state_pool;
struct anv_pipeline_cache default_pipeline_cache;
struct anv_block_pool surface_state_block_pool; struct anv_block_pool surface_state_block_pool;
struct anv_state_pool surface_state_pool; struct anv_state_pool surface_state_pool;
@@ -1519,12 +1514,12 @@ struct anv_pipeline {
struct anv_dynamic_state dynamic_state; struct anv_dynamic_state dynamic_state;
struct anv_pipeline_layout * layout; struct anv_pipeline_layout * layout;
struct anv_pipeline_bind_map bindings[MESA_SHADER_STAGES];
bool use_repclear; bool use_repclear;
bool needs_data_cache; bool needs_data_cache;
const struct brw_stage_prog_data * prog_data[MESA_SHADER_STAGES]; struct anv_shader_bin * shaders[MESA_SHADER_STAGES];
struct { struct {
uint32_t start[MESA_SHADER_GEOMETRY + 1]; uint32_t start[MESA_SHADER_GEOMETRY + 1];
uint32_t size[MESA_SHADER_GEOMETRY + 1]; uint32_t size[MESA_SHADER_GEOMETRY + 1];
@@ -1574,29 +1569,22 @@ anv_pipeline_has_stage(const struct anv_pipeline *pipeline,
return (pipeline->active_stages & mesa_to_vk_shader_stage(stage)) != 0; return (pipeline->active_stages & mesa_to_vk_shader_stage(stage)) != 0;
} }
static inline const struct brw_vs_prog_data * #define ANV_DECL_GET_PROG_DATA_FUNC(prefix, stage) \
get_vs_prog_data(struct anv_pipeline *pipeline) static inline const struct brw_##prefix##_prog_data * \
{ get_##prefix##_prog_data(struct anv_pipeline *pipeline) \
return (const struct brw_vs_prog_data *) pipeline->prog_data[MESA_SHADER_VERTEX]; { \
if (anv_pipeline_has_stage(pipeline, stage)) { \
return (const struct brw_##prefix##_prog_data *) \
anv_shader_bin_get_prog_data(pipeline->shaders[stage]); \
} else { \
return NULL; \
} \
} }
static inline const struct brw_gs_prog_data * ANV_DECL_GET_PROG_DATA_FUNC(vs, MESA_SHADER_VERTEX)
get_gs_prog_data(struct anv_pipeline *pipeline) ANV_DECL_GET_PROG_DATA_FUNC(gs, MESA_SHADER_GEOMETRY)
{ ANV_DECL_GET_PROG_DATA_FUNC(wm, MESA_SHADER_FRAGMENT)
return (const struct brw_gs_prog_data *) pipeline->prog_data[MESA_SHADER_GEOMETRY]; ANV_DECL_GET_PROG_DATA_FUNC(cs, MESA_SHADER_COMPUTE)
}
static inline const struct brw_wm_prog_data *
get_wm_prog_data(struct anv_pipeline *pipeline)
{
return (const struct brw_wm_prog_data *) pipeline->prog_data[MESA_SHADER_FRAGMENT];
}
static inline const struct brw_cs_prog_data *
get_cs_prog_data(struct anv_pipeline *pipeline)
{
return (const struct brw_cs_prog_data *) pipeline->prog_data[MESA_SHADER_COMPUTE];
}
struct anv_graphics_pipeline_create_info { struct anv_graphics_pipeline_create_info {
/** /**

View File

@@ -318,7 +318,8 @@ get_pipeline_state_l3_weights(const struct anv_pipeline *pipeline)
if (!anv_pipeline_has_stage(pipeline, i)) if (!anv_pipeline_has_stage(pipeline, i))
continue; continue;
const struct brw_stage_prog_data *prog_data = pipeline->prog_data[i]; const struct brw_stage_prog_data *prog_data =
anv_shader_bin_get_prog_data(pipeline->shaders[i]);
needs_dc |= pipeline->needs_data_cache; needs_dc |= pipeline->needs_data_cache;
needs_slm |= prog_data->total_shared; needs_slm |= prog_data->total_shared;

View File

@@ -63,8 +63,7 @@ genX(compute_pipeline_create)(
/* When we free the pipeline, we detect stages based on the NULL status /* When we free the pipeline, we detect stages based on the NULL status
* of various prog_data pointers. Make them NULL by default. * of various prog_data pointers. Make them NULL by default.
*/ */
memset(pipeline->prog_data, 0, sizeof(pipeline->prog_data)); memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
memset(pipeline->bindings, 0, sizeof(pipeline->bindings));
pipeline->vs_simd8 = NO_KERNEL; pipeline->vs_simd8 = NO_KERNEL;
pipeline->vs_vec4 = NO_KERNEL; pipeline->vs_vec4 = NO_KERNEL;

View File

@@ -671,7 +671,7 @@ emit_cb_state(struct anv_pipeline *pipeline,
uint32_t surface_count = 0; uint32_t surface_count = 0;
struct anv_pipeline_bind_map *map; struct anv_pipeline_bind_map *map;
if (anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) { if (anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
map = &pipeline->bindings[MESA_SHADER_FRAGMENT]; map = &pipeline->shaders[MESA_SHADER_FRAGMENT]->bind_map;
surface_count = map->surface_count; surface_count = map->surface_count;
} }