tu: Fix context faults loading unused descriptor sets

The app is allowed to leave descriptor sets unbound when no shader in the
pipeline statically uses them, which would've caused a context fault since
CP_LOAD_STATE6 would try to load descriptors that don't exist. Fix this by
not preloading descriptors from unused descriptor sets. We could do more
fine-grained accounting of which descriptors are used, but this is enough
to fix the problem.

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5400>
Author:       Connor Abbott
Date:         2020-06-09 14:40:58 +02:00
Committed by: Marge Bot
Parent:       a751051248
Commit:       334204823e

3 changed files with 28 additions and 0 deletions
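
Illustrative only, not part of this commit's diff: the change boils down to (1) recording, per shader, a bitmask of the descriptor sets it statically uses, (2) OR-ing those masks into the pipeline, and (3) skipping the CP_LOAD_STATE6 prefetch for any set whose bit is clear. Below is a minimal standalone C sketch of that pattern; the fake_* types and function names are hypothetical stand-ins, not the driver's real structures.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for the driver's tu_shader/tu_pipeline. */
struct fake_shader {
   uint32_t active_desc_sets;  /* bit i set => shader statically uses set i */
};

struct fake_pipeline {
   uint32_t active_desc_sets;  /* union of all stages' masks */
   unsigned num_sets;          /* sets declared in the pipeline layout */
};

/* OR the per-stage masks together, like the parse_shader_stages hunk below. */
static void
gather_active_sets(struct fake_pipeline *pipeline,
                   struct fake_shader **shaders, unsigned count)
{
   uint32_t desc_sets = 0;
   for (unsigned i = 0; i < count; i++) {
      if (!shaders[i])
         continue;
      desc_sets |= shaders[i]->active_desc_sets;
   }
   pipeline->active_desc_sets = desc_sets;
}

/* Mirror the check added to tu6_emit_load_state(): a set the pipeline never
 * uses may have nothing bound, so prefetching its descriptors could fault. */
static void
emit_load_state(const struct fake_pipeline *pipeline)
{
   for (unsigned i = 0; i < pipeline->num_sets; i++) {
      if (!(pipeline->active_desc_sets & (1u << i)))
         continue;  /* statically unused: skip the useless, unsafe prefetch */
      printf("prefetch descriptors for set %u\n", i);
   }
}

int main(void)
{
   struct fake_shader vs = { .active_desc_sets = 1u << 0 };
   struct fake_shader fs = { .active_desc_sets = 1u << 2 };
   struct fake_shader *stages[] = { &vs, NULL, &fs };

   struct fake_pipeline pipeline = { .num_sets = 4 };
   gather_active_sets(&pipeline, stages, 3);
   emit_load_state(&pipeline);  /* prints set 0 and set 2 only */
   return 0;
}

Running the sketch prefetches only sets 0 and 2, mirroring how the real code now skips sets the shaders never reference.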

@@ -185,6 +185,25 @@ tu6_emit_load_state(struct tu_pipeline *pipeline, bool compute)
    struct tu_pipeline_layout *layout = pipeline->layout;
    for (unsigned i = 0; i < layout->num_sets; i++) {
+      /* From 13.2.7. Descriptor Set Binding:
+       *
+       *    A compatible descriptor set must be bound for all set numbers that
+       *    any shaders in a pipeline access, at the time that a draw or
+       *    dispatch command is recorded to execute using that pipeline.
+       *    However, if none of the shaders in a pipeline statically use any
+       *    bindings with a particular set number, then no descriptor set need
+       *    be bound for that set number, even if the pipeline layout includes
+       *    a non-trivial descriptor set layout for that set number.
+       *
+       * This means that descriptor sets unused by the pipeline may have a
+       * garbage or 0 BINDLESS_BASE register, which will cause context faults
+       * when prefetching descriptors from these sets. Skip prefetching for
+       * descriptors from them to avoid this. This is also an optimization,
+       * since these prefetches would be useless.
+       */
+      if (!(pipeline->active_desc_sets & (1u << i)))
+         continue;
+
       struct tu_descriptor_set_layout *set_layout = layout->set[i].layout;
       for (unsigned j = 0; j < set_layout->binding_count; j++) {
          struct tu_descriptor_set_binding_layout *binding = &set_layout->binding[j];
@@ -2273,6 +2292,7 @@ tu_pipeline_builder_parse_shader_stages(struct tu_pipeline_builder *builder,
    }
    pipeline->active_stages = stages;
+   uint32_t desc_sets = 0;
    for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
       if (!builder->shaders[i])
          continue;
@@ -2280,7 +2300,9 @@ tu_pipeline_builder_parse_shader_stages(struct tu_pipeline_builder *builder,
       tu_pipeline_set_linkage(&pipeline->program.link[i],
                               builder->shaders[i],
                               &builder->shaders[i]->variants[0]);
+      desc_sets |= builder->shaders[i]->active_desc_sets;
    }
+   pipeline->active_desc_sets = desc_sets;

    if (builder->shaders[MESA_SHADER_FRAGMENT]) {
       memcpy(pipeline->program.input_attachment_idx,

@@ -1289,6 +1289,7 @@ struct tu_shader
    struct tu_push_constant_range push_consts;
    unsigned attachment_idx[MAX_RTS];
+   uint8_t active_desc_sets;

    /* This may be true for vertex shaders. When true, variants[1] is the
     * binning variant and binning_binary is non-NULL.
@@ -1345,6 +1346,7 @@ struct tu_pipeline
    bool need_indirect_descriptor_sets;
    VkShaderStageFlags active_stages;
+   uint32_t active_desc_sets;

    struct tu_streamout_state streamout;

@@ -135,6 +135,8 @@ lower_vulkan_resource_index(nir_builder *b, nir_intrinsic_instr *instr,
       &set_layout->binding[binding];
    uint32_t base;

+   shader->active_desc_sets |= 1u << set;
+
    switch (binding_layout->type) {
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
@@ -175,6 +177,8 @@ build_bindless(nir_builder *b, nir_deref_instr *deref, bool is_sampler,
    const struct tu_descriptor_set_binding_layout *bind_layout =
       &layout->set[set].layout->binding[binding];

+   shader->active_desc_sets |= 1u << set;
+
    nir_ssa_def *desc_offset;
    unsigned descriptor_stride;
    if (bind_layout->type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT) {
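
For context, and again illustrative rather than part of this commit: the app-side pattern the fix makes safe is binding descriptor sets only for the set numbers the shaders actually use. A hedged Vulkan sketch, with hypothetical handles passed in by the caller:

#include <vulkan/vulkan.h>

/* Hypothetical helper: the pipeline layout declares sets 0 and 1, but the
 * shaders statically use only set 0, so the app legally leaves set 1 unbound
 * (Vulkan spec, "Descriptor Set Binding"). Before this fix, turnip's
 * descriptor prefetch could context-fault on the unbound set. */
static void
record_draw_with_unused_set(VkCommandBuffer cmd, VkPipeline pipeline,
                            VkPipelineLayout layout, VkDescriptorSet set0)
{
   vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
   vkCmdBindDescriptorSets(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, layout,
                           0 /* firstSet */, 1 /* descriptorSetCount */, &set0,
                           0 /* dynamicOffsetCount */, NULL);
   /* Set 1 is intentionally never bound. */
   vkCmdDraw(cmd, 3, 1, 0, 0);
}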