anv: Get ready for more pipeline stages

This makes a bunch of loops use ARRAY_SIZE instead of MESA_SHADER_STAGES,
extends a few per-stage arrays, and adds array-length asserts.

Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/8637>
Author:       Jason Ekstrand
Date:         2021-01-21 16:58:50 -06:00
Committed-by: Marge Bot
Parent:       f366f6a071
Commit:       dc05daf0e5

7 changed files with 23 additions and 16 deletions
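
The recurring pattern below is Mesa's ARRAY_SIZE macro (from src/util/macros.h), which derives an array's element count from its type, paired with an assert on any stage index that arrives from outside. MESA_VULKAN_SHADER_STAGES counts the Vulkan stage set, which is about to grow past the classic MESA_SHADER_STAGES. A minimal, self-contained sketch of the idiom (all names here are illustrative):

   #include <assert.h>
   #include <stdio.h>

   /* Equivalent to Mesa's ARRAY_SIZE from src/util/macros.h; only valid
    * on true arrays, not on pointers. */
   #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

   static int per_stage_count[6];

   int main(void)
   {
      /* Sizing the loop from the array itself keeps the loop correct
       * when the array later gains entries for new stages. */
      for (unsigned s = 0; s < ARRAY_SIZE(per_stage_count); s++)
         per_stage_count[s] = (int)s;

      /* Guard an externally supplied index before using it. */
      unsigned stage = 3;
      assert(stage < ARRAY_SIZE(per_stage_count));
      printf("%d\n", per_stage_count[stage]);
      return 0;
   }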


@@ -1414,7 +1414,7 @@ anv_scratch_pool_init(struct anv_device *device, struct anv_scratch_pool *pool)
 void
 anv_scratch_pool_finish(struct anv_device *device, struct anv_scratch_pool *pool)
 {
-   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+   for (unsigned s = 0; s < ARRAY_SIZE(pool->bos[0]); s++) {
       for (unsigned i = 0; i < 16; i++) {
          if (pool->bos[i][s] != NULL)
             anv_device_release_bo(device, pool->bos[i][s]);
@@ -1432,6 +1432,7 @@ anv_scratch_pool_alloc(struct anv_device *device, struct anv_scratch_pool *pool,
    unsigned scratch_size_log2 = ffs(per_thread_scratch / 2048);
    assert(scratch_size_log2 < 16);
+   assert(stage < ARRAY_SIZE(pool->bos[0]));
    struct anv_bo *bo = p_atomic_read(&pool->bos[scratch_size_log2][stage]);
    if (bo != NULL)
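
Note the bound ARRAY_SIZE(pool->bos[0]) above: the BO table is two-dimensional, indexed [scratch size][stage], so ARRAY_SIZE of the whole array gives the outer (size) dimension while ARRAY_SIZE of one row gives the per-stage dimension. A sketch with made-up dimensions:

   #include <stdio.h>

   #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

   /* Stand-in for the scratch pool's table: 16 scratch sizes x N stages. */
   static void *bos[16][8];

   int main(void)
   {
      printf("%zu sizes, %zu stages\n",
             ARRAY_SIZE(bos),      /* outer dimension: 16 */
             ARRAY_SIZE(bos[0]));  /* inner dimension: 8  */
      return 0;
   }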


@@ -449,14 +449,17 @@ set_dirty_for_bind_map(struct anv_cmd_buffer *cmd_buffer,
                        gl_shader_stage stage,
                        const struct anv_pipeline_bind_map *map)
 {
+   assert(stage < ARRAY_SIZE(cmd_buffer->state.surface_sha1s));
    if (mem_update(cmd_buffer->state.surface_sha1s[stage],
                   map->surface_sha1, sizeof(map->surface_sha1)))
       cmd_buffer->state.descriptors_dirty |= mesa_to_vk_shader_stage(stage);
+   assert(stage < ARRAY_SIZE(cmd_buffer->state.sampler_sha1s));
    if (mem_update(cmd_buffer->state.sampler_sha1s[stage],
                   map->sampler_sha1, sizeof(map->sampler_sha1)))
       cmd_buffer->state.descriptors_dirty |= mesa_to_vk_shader_stage(stage);
+   assert(stage < ARRAY_SIZE(cmd_buffer->state.push_sha1s));
    if (mem_update(cmd_buffer->state.push_sha1s[stage],
                   map->push_sha1, sizeof(map->push_sha1)))
       cmd_buffer->state.push_constants_dirty |= mesa_to_vk_shader_stage(stage);
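
mem_update() is not shown in this hunk; a plausible sketch of such a compare-and-copy helper (an assumption about its shape, not the verbatim anv definition) is below. It overwrites the stored SHA-1 and reports whether anything changed, so dirty bits are only set when a stage's bind map really differs:

   #include <stdbool.h>
   #include <string.h>

   /* Copy src over dst, returning true only if the contents changed. */
   static bool
   mem_update(void *dst, const void *src, size_t size)
   {
      if (memcmp(dst, src, size) == 0)
         return false;
      memcpy(dst, src, size);
      return true;
   }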


@@ -253,7 +253,7 @@ void anv_GetDescriptorSetLayoutSupport(
    ANV_FROM_HANDLE(anv_device, device, _device);
    const struct anv_physical_device *pdevice = device->physical;
-   uint32_t surface_count[MESA_SHADER_STAGES] = { 0, };
+   uint32_t surface_count[MESA_VULKAN_SHADER_STAGES] = { 0, };
    VkDescriptorType varying_desc_type = VK_DESCRIPTOR_TYPE_MAX_ENUM;
    bool needs_descriptor_buffer = false;
@@ -315,7 +315,7 @@ void anv_GetDescriptorSetLayoutSupport(
       }
    }
-   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+   for (unsigned s = 0; s < ARRAY_SIZE(surface_count); s++) {
       if (needs_descriptor_buffer)
          surface_count[s] += 1;
    }
@@ -334,7 +334,7 @@ void anv_GetDescriptorSetLayoutSupport(
    }
    bool supported = true;
-   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+   for (unsigned s = 0; s < ARRAY_SIZE(surface_count); s++) {
       /* Our maximum binding table size is 240 and we need to reserve 8 for
        * render targets.
        */
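
The comment pins down the arithmetic: with a 240-entry binding table and 8 entries reserved for render targets, a stage can use at most 232 surfaces. A sketch of that check (macro names are assumptions, not necessarily anv's):

   #include <stdbool.h>

   #define MAX_BINDING_TABLE_SIZE 240  /* hardware binding table entries */
   #define RESERVED_RT_ENTRIES      8  /* held back for render targets   */

   /* Does a stage with this many surfaces fit in the binding table? */
   static bool
   stage_fits(unsigned surface_count)
   {
      return surface_count <= MAX_BINDING_TABLE_SIZE - RESERVED_RT_ENTRIES;
   }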


@@ -1980,7 +1980,7 @@ anv_get_physical_device_properties_1_1(struct anv_physical_device *pdevice,
    p->subgroupSize = BRW_SUBGROUP_SIZE;
    VkShaderStageFlags scalar_stages = 0;
-   for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
+   for (unsigned stage = 0; stage < MESA_VULKAN_SHADER_STAGES; stage++) {
       if (pdevice->compiler->scalar_stage[stage])
          scalar_stages |= mesa_to_vk_shader_stage(stage);
    }
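
mesa_to_vk_shader_stage() works in loops like this because, for the classic stages, Vulkan's stage flag bits follow the gl_shader_stage order: VK_SHADER_STAGE_VERTEX_BIT (0x1) is bit 0, VK_SHADER_STAGE_COMPUTE_BIT (0x20) is bit 5. A sketch assuming the usual 1 << stage mapping:

   #include <stdio.h>

   /* Classic gl_shader_stage ordering (illustrative). */
   enum { VERT, TESC, TESE, GEOM, FRAG, COMP };

   /* For these stages the Vulkan flag is a single bit whose position
    * matches the enum value: VK_SHADER_STAGE_VERTEX_BIT is 0x1,
    * VK_SHADER_STAGE_COMPUTE_BIT is 0x20. */
   static unsigned
   mesa_to_vk_shader_stage(unsigned stage)
   {
      return 1u << stage;
   }

   int main(void)
   {
      printf("0x%x 0x%x\n", mesa_to_vk_shader_stage(VERT),
             mesa_to_vk_shader_stage(COMP)); /* prints 0x1 0x20 */
      return 0;
   }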


@@ -331,7 +331,7 @@ void anv_DestroyPipeline(
    if (gfx_pipeline->cps_state.map)
       anv_state_pool_free(&device->dynamic_state_pool, gfx_pipeline->cps_state);
-   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+   for (unsigned s = 0; s < ARRAY_SIZE(gfx_pipeline->shaders); s++) {
       if (gfx_pipeline->shaders[s])
          anv_shader_bin_unref(device, gfx_pipeline->shaders[s]);
    }
@@ -666,7 +666,7 @@ anv_pipeline_hash_graphics(struct anv_graphics_pipeline *pipeline,
    const bool rba = pipeline->base.device->robust_buffer_access;
    _mesa_sha1_update(&ctx, &rba, sizeof(rba));
-   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+   for (unsigned s = 0; s < ARRAY_SIZE(pipeline->shaders); s++) {
       if (stages[s].entrypoint) {
          _mesa_sha1_update(&ctx, stages[s].shader_sha1,
                            sizeof(stages[s].shader_sha1));
@@ -1419,7 +1419,7 @@ anv_pipeline_compile_graphics(struct anv_graphics_pipeline *pipeline,
    unsigned char sha1[20];
    anv_pipeline_hash_graphics(pipeline, layout, stages, sha1);
-   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+   for (unsigned s = 0; s < ARRAY_SIZE(pipeline->shaders); s++) {
       if (!stages[s].entrypoint)
          continue;
@@ -1433,7 +1433,7 @@ anv_pipeline_compile_graphics(struct anv_graphics_pipeline *pipeline,
    if (!skip_cache_lookup) {
       unsigned found = 0;
       unsigned cache_hits = 0;
-      for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+      for (unsigned s = 0; s < ARRAY_SIZE(pipeline->shaders); s++) {
          if (!stages[s].entrypoint)
             continue;
@@ -1463,7 +1463,7 @@ anv_pipeline_compile_graphics(struct anv_graphics_pipeline *pipeline,
             VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
       }
       /* We found all our shaders in the cache. We're done. */
-      for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+      for (unsigned s = 0; s < ARRAY_SIZE(pipeline->shaders); s++) {
          if (!stages[s].entrypoint)
             continue;
@@ -1491,7 +1491,7 @@ anv_pipeline_compile_graphics(struct anv_graphics_pipeline *pipeline,
        * references to the shaders in the cache. We'll get them out of the
        * cache again as part of the compilation process.
        */
-      for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+      for (unsigned s = 0; s < ARRAY_SIZE(pipeline->shaders); s++) {
          stages[s].feedback.flags = 0;
          if (pipeline->shaders[s]) {
             anv_shader_bin_unref(pipeline->base.device, pipeline->shaders[s]);
@@ -1506,7 +1506,7 @@ anv_pipeline_compile_graphics(struct anv_graphics_pipeline *pipeline,
    void *pipeline_ctx = ralloc_context(NULL);
-   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+   for (unsigned s = 0; s < ARRAY_SIZE(pipeline->shaders); s++) {
       if (!stages[s].entrypoint)
          continue;
@@ -1551,7 +1551,7 @@ anv_pipeline_compile_graphics(struct anv_graphics_pipeline *pipeline,
    /* Walk backwards to link */
    struct anv_pipeline_stage *next_stage = NULL;
-   for (int s = MESA_SHADER_STAGES - 1; s >= 0; s--) {
+   for (int s = ARRAY_SIZE(pipeline->shaders) - 1; s >= 0; s--) {
       if (!stages[s].entrypoint)
          continue;
@@ -1596,7 +1596,7 @@ anv_pipeline_compile_graphics(struct anv_graphics_pipeline *pipeline,
    }
    struct anv_pipeline_stage *prev_stage = NULL;
-   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+   for (unsigned s = 0; s < ARRAY_SIZE(pipeline->shaders); s++) {
       if (!stages[s].entrypoint)
          continue;
@@ -1730,7 +1730,7 @@ done:
 fail:
    ralloc_free(pipeline_ctx);
-   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+   for (unsigned s = 0; s < ARRAY_SIZE(pipeline->shaders); s++) {
       if (pipeline->shaders[s])
          anv_shader_bin_unref(pipeline->base.device, pipeline->shaders[s]);
    }
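
One detail worth noting in these hunks: linking walks the stage array last-to-first ("Walk backwards to link"), so each stage can be linked against its successor before its own compilation is finalized. A toy illustration of the traversal order:

   #include <stdio.h>

   #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

   int main(void)
   {
      const char *stages[] = { "vs", "tcs", "tes", "gs", "fs" };
      const char *next = NULL;

      /* Visit stages back to front, handing each one its successor. */
      for (int s = (int)ARRAY_SIZE(stages) - 1; s >= 0; s--) {
         printf("link %s -> %s\n", stages[s], next ? next : "(none)");
         next = stages[s];
      }
      return 0;
   }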


@@ -3347,7 +3347,7 @@ struct anv_semaphore {
 void anv_semaphore_reset_temporary(struct anv_device *device,
                                    struct anv_semaphore *semaphore);
-#define ANV_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)
+#define ANV_STAGE_MASK ((1 << MESA_VULKAN_SHADER_STAGES) - 1)
 #define anv_foreach_stage(stage, stage_bits) \
    for (gl_shader_stage stage, \
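
ANV_STAGE_MASK widens along with the stage count, and anv_foreach_stage (cut off at the hunk boundary above) iterates over the set bits of such a mask. The underlying idiom as a runnable sketch:

   #include <stdio.h>
   #include <strings.h> /* ffs() */

   /* Visit each set bit of a stage mask, lowest to highest. */
   static void
   foreach_stage(unsigned stage_bits)
   {
      while (stage_bits) {
         int stage = ffs((int)stage_bits) - 1; /* lowest set bit */
         printf("stage %d\n", stage);
         stage_bits &= ~(1u << stage);         /* clear it, continue */
      }
   }

   int main(void)
   {
      foreach_stage(0x13); /* stages 0, 1, and 4 */
      return 0;
   }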


@@ -2946,10 +2946,13 @@ flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer,
       if ((vk_stage & dirty) == 0)
          continue;
+      assert(stage < ARRAY_SIZE(cmd_buffer->state.samplers));
       result = emit_samplers(cmd_buffer, pipe_state, shaders[i],
                              &cmd_buffer->state.samplers[stage]);
       if (result != VK_SUCCESS)
          break;
+      assert(stage < ARRAY_SIZE(cmd_buffer->state.binding_tables));
       result = emit_binding_table(cmd_buffer, pipe_state, shaders[i],
                                   &cmd_buffer->state.binding_tables[stage]);
       if (result != VK_SUCCESS)
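
The surrounding shape here: per-stage state lives in fixed arrays, a dirty bitmask selects which stages to re-emit, and each index is asserted before the arrays are touched. A self-contained sketch of that gating (all names illustrative, not anv's):

   #include <assert.h>
   #include <stdio.h>

   #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

   static int samplers[8];
   static int binding_tables[8];

   static void
   flush(unsigned dirty)
   {
      for (unsigned stage = 0; stage < ARRAY_SIZE(samplers); stage++) {
         if ((dirty & (1u << stage)) == 0)
            continue; /* nothing changed for this stage */

         assert(stage < ARRAY_SIZE(samplers));
         samplers[stage]++;        /* stands in for emit_samplers() */

         assert(stage < ARRAY_SIZE(binding_tables));
         binding_tables[stage]++;  /* stands in for emit_binding_table() */
      }
   }

   int main(void)
   {
      flush(0x5); /* stages 0 and 2 are dirty */
      printf("%d %d %d\n", samplers[0], samplers[1], samplers[2]);
      return 0;
   }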