anv: More carefully dirty state in BindDescriptorSets

Instead of dirtying all graphics or all compute based on binding point,
we're now much more careful.  We first check to see if the actual
descriptor set changed and then only dirty the stages used by that
descriptor set.  For dynamic offsets, we keep a per-stage bitfield of
which offsets are actually used in that stage, and we only dirty push
constants and descriptors if that stage has dynamic offsets AND those
offsets actually change.

Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Author: Jason Ekstrand
Date:   2019-11-07 11:44:08 -06:00
parent ca8117b5d5
commit 22f16ff54a
4 changed files with 51 additions and 22 deletions
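
As a rough illustration of the strategy the message describes, here is a
standalone C sketch (not anv code; names such as toy_bind_set, stage_dyn_mask
and NUM_STAGES are invented for the example):

#include <stdint.h>
#include <stdio.h>

#define NUM_STAGES      6   /* toy model: VS, TCS, TES, GS, FS, CS */
#define MAX_SETS        8
#define MAX_DYN_OFFSETS 16

struct toy_set_layout {
   uint32_t stage_mask;                  /* which stages use this set */
   uint32_t dyn_count;                   /* dynamic offsets in this set */
   uint16_t stage_dyn_mask[NUM_STAGES];  /* which offsets each stage uses */
};

struct toy_cmd_state {
   const void *bound_set[MAX_SETS];
   uint32_t dyn_offset[MAX_DYN_OFFSETS];
   uint32_t descriptors_dirty;           /* bitmask of stages */
   uint32_t push_constants_dirty;
};

/* Returns the stages that actually need re-emission. */
static uint32_t
toy_bind_set(struct toy_cmd_state *st, const struct toy_set_layout *layout,
             uint32_t set_index, const void *set, const uint32_t *offsets)
{
   uint32_t dirty = 0;

   /* Only dirty if the bound set actually changed. */
   if (st->bound_set[set_index] != set) {
      st->bound_set[set_index] = set;
      dirty |= layout->stage_mask;
   }

   /* Figure out which dynamic offsets actually changed... */
   uint32_t changed = 0;
   for (unsigned i = 0; i < layout->dyn_count; i++) {
      if (st->dyn_offset[i] != offsets[i]) {
         st->dyn_offset[i] = offsets[i];
         changed |= 1u << i;
      }
   }

   /* ...and only dirty the stages that use one of the changed offsets. */
   for (unsigned s = 0; s < NUM_STAGES; s++) {
      if (layout->stage_dyn_mask[s] & changed)
         dirty |= 1u << s;
   }

   st->descriptors_dirty |= dirty;
   st->push_constants_dirty |= dirty;
   return dirty;
}

int main(void)
{
   struct toy_cmd_state st = {0};
   struct toy_set_layout layout = {
      .stage_mask = 0x3,              /* set used by stages 0 and 1 */
      .dyn_count = 2,
      .stage_dyn_mask = { 0x1, 0x2 }, /* stage 0 uses offset 0, stage 1 offset 1 */
   };
   int set_a;
   uint32_t offsets[2] = { 0, 256 };

   /* First bind: the set is new, so both of its stages are dirtied. */
   printf("0x%x\n", (unsigned)toy_bind_set(&st, &layout, 0, &set_a, offsets));

   /* Rebind with only stage 1's offset changed: only stage 1 is dirtied. */
   offsets[1] = 512;
   printf("0x%x\n", (unsigned)toy_bind_set(&st, &layout, 0, &set_a, offsets));
   return 0;
}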


@@ -572,53 +572,65 @@ anv_cmd_buffer_bind_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
    struct anv_descriptor_set_layout *set_layout =
       layout->set[set_index].layout;

-   struct anv_cmd_pipeline_state *pipe_state;
+   VkShaderStageFlags stages = set_layout->shader_stages &
+      (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE ?
+       VK_SHADER_STAGE_COMPUTE_BIT : VK_SHADER_STAGE_ALL_GRAPHICS);
+
+   VkShaderStageFlags dirty_stages = 0;
    if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
-      pipe_state = &cmd_buffer->state.compute.base;
+      if (cmd_buffer->state.compute.base.descriptors[set_index] != set) {
+         cmd_buffer->state.compute.base.descriptors[set_index] = set;
+         dirty_stages |= stages;
+      }
    } else {
       assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
-      pipe_state = &cmd_buffer->state.gfx.base;
+      if (cmd_buffer->state.gfx.base.descriptors[set_index] != set) {
+         cmd_buffer->state.gfx.base.descriptors[set_index] = set;
+         dirty_stages |= stages;
+      }
    }
-   pipe_state->descriptors[set_index] = set;
+
+   /* If it's a push descriptor set, we have to flag things as dirty
+    * regardless of whether or not the CPU-side data structure changed as we
+    * may have edited in-place.
+    */
+   if (set->pool == NULL)
+      dirty_stages |= stages;

    if (dynamic_offsets) {
       if (set_layout->dynamic_offset_count > 0) {
          uint32_t dynamic_offset_start =
             layout->set[set_index].dynamic_offset_start;

-         anv_foreach_stage(stage, set_layout->shader_stages) {
+         anv_foreach_stage(stage, stages) {
             struct anv_push_constants *push =
                &cmd_buffer->state.push_constants[stage];
+            uint32_t *push_offsets =
+               &push->dynamic_offsets[dynamic_offset_start];

             /* Assert that everything is in range */
             assert(set_layout->dynamic_offset_count <= *dynamic_offset_count);
             assert(dynamic_offset_start + set_layout->dynamic_offset_count <=
                    ARRAY_SIZE(push->dynamic_offsets));

-            typed_memcpy(&push->dynamic_offsets[dynamic_offset_start],
-                         *dynamic_offsets, set_layout->dynamic_offset_count);
+            unsigned mask = set_layout->stage_dynamic_offsets[stage];
+            STATIC_ASSERT(MAX_DYNAMIC_BUFFERS <= sizeof(mask) * 8);
+            while (mask) {
+               int i = u_bit_scan(&mask);
+               if (push_offsets[i] != (*dynamic_offsets)[i]) {
+                  push_offsets[i] = (*dynamic_offsets)[i];
+                  dirty_stages |= mesa_to_vk_shader_stage(stage);
+               }
+            }
          }

          *dynamic_offsets += set_layout->dynamic_offset_count;
          *dynamic_offset_count -= set_layout->dynamic_offset_count;
-
-         if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
-            cmd_buffer->state.push_constants_dirty |=
-               VK_SHADER_STAGE_COMPUTE_BIT;
-         } else {
-            cmd_buffer->state.push_constants_dirty |=
-               VK_SHADER_STAGE_ALL_GRAPHICS;
-         }
       }
    }

-   if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
-      cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
-   } else {
-      assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
-      cmd_buffer->state.descriptors_dirty |=
-         set_layout->shader_stages & VK_SHADER_STAGE_ALL_GRAPHICS;
-   }
+   cmd_buffer->state.descriptors_dirty |= dirty_stages;
+   cmd_buffer->state.push_constants_dirty |= dirty_stages;
 }

 void anv_CmdBindDescriptorSets(
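
The new loop above only walks the offsets a given stage actually uses, by
bit-scanning set_layout->stage_dynamic_offsets[stage].  A standalone sketch of
that pattern (u_bit_scan is approximated with the GCC/Clang builtin
__builtin_ctz; the offsets and mask values are invented for the example):

#include <stdint.h>
#include <stdio.h>

/* Like Mesa's u_bit_scan(): return the index of the lowest set bit in *mask
 * and clear that bit. */
static int
bit_scan(unsigned *mask)
{
   int i = __builtin_ctz(*mask);
   *mask &= *mask - 1;
   return i;
}

int main(void)
{
   uint32_t push_offsets[4] = { 0, 64, 128, 0 };   /* last values pushed */
   uint32_t new_offsets[4]  = { 0, 64, 256, 999 }; /* values just bound */
   unsigned used_mask = 0x5;          /* this stage only uses offsets 0 and 2 */
   int dirty = 0;

   while (used_mask) {
      int i = bit_scan(&used_mask);
      if (push_offsets[i] != new_offsets[i]) {
         push_offsets[i] = new_offsets[i];
         dirty = 1;
      }
   }

   /* Offset 3 changed as well, but this stage does not use it, so it alone
    * would not have marked the stage dirty. */
   printf("stage dirty: %d\n", dirty);   /* prints 1, because offset 2 changed */
   return 0;
}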


@@ -469,7 +469,15 @@ VkResult anv_CreateDescriptorSetLayout(
       case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
       case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
          set_layout->binding[b].dynamic_offset_index = dynamic_offset_count;
+         anv_foreach_stage(s, binding->stageFlags) {
+            STATIC_ASSERT(MAX_DYNAMIC_BUFFERS <=
+                          sizeof(set_layout->stage_dynamic_offsets[s]) * 8);
+            set_layout->stage_dynamic_offsets[s] |=
+               BITFIELD_RANGE(set_layout->binding[b].dynamic_offset_index,
+                              binding->descriptorCount);
+         }
          dynamic_offset_count += binding->descriptorCount;
+         assert(dynamic_offset_count < MAX_DYNAMIC_BUFFERS);
          break;

       default:
@@ -603,6 +611,7 @@ VkResult anv_CreatePipelineLayout(
          dynamic_offset_count += set_layout->binding[b].array_size;
       }
    }
+   assert(dynamic_offset_count < MAX_DYNAMIC_BUFFERS);

    struct mesa_sha1 ctx;
    _mesa_sha1_init(&ctx);
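
The stage_dynamic_offsets masks consumed by the bind-time loop are built above
with BITFIELD_RANGE, i.e. descriptorCount consecutive bits starting at the
binding's dynamic_offset_index.  A standalone sketch of that construction
(bitfield_range and the two-binding example are invented; the helper assumes
start + count < 32):

#include <stdint.h>
#include <stdio.h>

/* Like Mesa's BITFIELD_RANGE(start, count) for small ranges: `count` bits
 * set, starting at bit `start`. */
static uint32_t
bitfield_range(unsigned start, unsigned count)
{
   return ((1u << count) - 1) << start;
}

int main(void)
{
   /* Two dynamic-buffer bindings: binding A has 2 descriptors used by the
    * vertex stage, binding B has 1 descriptor used by the fragment stage.
    * Dynamic offset indices are handed out in binding order. */
   uint16_t stage_mask[2] = { 0, 0 };   /* [0] = vertex, [1] = fragment */
   unsigned dyn_index = 0;

   stage_mask[0] |= bitfield_range(dyn_index, 2);   /* binding A -> bits 0-1 */
   dyn_index += 2;
   stage_mask[1] |= bitfield_range(dyn_index, 1);   /* binding B -> bit 2 */
   dyn_index += 1;

   printf("vertex mask   = 0x%x\n", (unsigned)stage_mask[0]);   /* 0x3 */
   printf("fragment mask = 0x%x\n", (unsigned)stage_mask[1]);   /* 0x4 */
   return 0;
}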


@@ -1803,6 +1803,9 @@ struct anv_descriptor_set_layout {
    /* Number of dynamic offsets used by this descriptor set */
    uint16_t dynamic_offset_count;

+   /* For each shader stage, which offsets apply to that stage */
+   uint16_t stage_dynamic_offsets[MESA_SHADER_STAGES];
+
    /* Size of the descriptor buffer for this descriptor set */
    uint32_t descriptor_buffer_size;


@@ -3544,6 +3544,11 @@ genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
       genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);

       anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
+
+      /* The workgroup size of the pipeline affects our push constant layout
+       * so flag push constants as dirty if we change the pipeline.
+       */
+      cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
    }

    if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||