anv: add analysis for push descriptor uses and store it in shader cache

We'll use this information to avoid:
   - binding table emission
   - allocation of surface states
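The check this enables boils down to combining two of the new bitfields. Below is a minimal, self-contained sketch of that logic, hypothetical and not part of the patch: the three field names mirror the new struct anv_push_descriptor_info introduced here, while the struct name, main() and the prints are scaffolding invented for the example.

/* Hypothetical illustration only; field names mirror anv_push_descriptor_info. */
#include <stdint.h>
#include <stdio.h>

struct push_descriptor_info {
   uint32_t used_descriptors;               /* one bit per descriptor in the push set */
   uint32_t fully_promoted_ubo_descriptors; /* UBO bindings entirely turned into push constants */
   uint8_t  used_set_buffer;                /* shader loads from the push set's buffer itself */
};

int main(void)
{
   struct push_descriptor_info info = {
      .used_descriptors               = 0x7, /* descriptors 0, 1, 2 are used */
      .fully_promoted_ubo_descriptors = 0x3, /* descriptors 0 and 1 are fully pushed */
      .used_set_buffer                = 0,
   };

   /* Surface states and binding table entries are only needed for
    * descriptors that are used but not fully promoted. */
   uint32_t needs_surfaces = info.used_descriptors &
                             ~info.fully_promoted_ubo_descriptors;

   printf("binding table needed: %s (mask 0x%x)\n",
          needs_surfaces ? "yes" : "no", (unsigned)needs_surfaces);
   printf("push descriptor buffer surface needed: %s\n",
          info.used_set_buffer ? "yes" : "no");
   return 0;
}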

v2: Fix anv_nir_push_desc_ubo_fully_promoted()

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Emma Anholt <emma@anholt.net>
Reviewed-by: Tapani Pälli <tapani.palli@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/19050>
Author: Lionel Landwerlin
Date: 2022-10-12 02:00:41 +03:00
Committed by: Marge Bot
Commit: ff91c5ca42 (parent 01e282f23f)
9 changed files with 389 additions and 18 deletions


@@ -63,12 +63,14 @@ upload_blorp_shader(struct blorp_batch *batch, uint32_t stage,
.surface_count = 0,
.sampler_count = 0,
};
struct anv_push_descriptor_info push_desc_info = {};
struct anv_shader_bin *bin =
anv_device_upload_kernel(device, device->internal_cache, stage,
key, key_size, kernel, kernel_size,
prog_data, prog_data_size,
NULL, 0, NULL, &bind_map);
NULL, 0, NULL, &bind_map,
&push_desc_info);
if (!bin)
return false;


@@ -82,6 +82,17 @@ void anv_nir_validate_push_layout(struct brw_stage_prog_data *prog_data,
bool anv_nir_add_base_work_group_id(nir_shader *shader);
uint32_t anv_nir_compute_used_push_descriptors(nir_shader *shader,
const struct anv_pipeline_layout *layout);
bool anv_nir_loads_push_desc_buffer(nir_shader *nir,
const struct anv_pipeline_layout *layout,
const struct anv_pipeline_bind_map *bind_map);
uint32_t anv_nir_push_desc_ubo_fully_promoted(nir_shader *nir,
const struct anv_pipeline_layout *layout,
const struct anv_pipeline_bind_map *bind_map);
#ifdef __cplusplus
}
#endif


@@ -1351,6 +1351,7 @@ anv_nir_apply_pipeline_layout(nir_shader *shader,
map->surface_to_descriptor[map->surface_count] =
(struct anv_pipeline_binding) {
.set = ANV_DESCRIPTOR_SET_DESCRIPTORS,
.binding = UINT32_MAX,
.index = s,
};
state.set[s].desc_offset = map->surface_count;
@@ -1440,6 +1441,7 @@ anv_nir_apply_pipeline_layout(nir_shader *shader,
map->surface_to_descriptor[map->surface_count++] =
(struct anv_pipeline_binding) {
.set = set,
.binding = b,
.index = binding->descriptor_index + i,
.plane = p,
};
@@ -1450,6 +1452,7 @@ anv_nir_apply_pipeline_layout(nir_shader *shader,
map->surface_to_descriptor[map->surface_count++] =
(struct anv_pipeline_binding) {
.set = set,
.binding = b,
.index = binding->descriptor_index + i,
.dynamic_offset_index =
layout->set[set].dynamic_offset_start +
@@ -1483,6 +1486,7 @@ anv_nir_apply_pipeline_layout(nir_shader *shader,
map->sampler_to_descriptor[map->sampler_count++] =
(struct anv_pipeline_binding) {
.set = set,
.binding = b,
.index = binding->descriptor_index + i,
.plane = p,
};


@@ -0,0 +1,242 @@
/*
* Copyright © 2022 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "anv_nir.h"
const struct anv_descriptor_set_layout *
anv_pipeline_layout_get_push_set(const struct anv_pipeline_layout *layout,
uint8_t *set_idx)
{
for (unsigned s = 0; s < ARRAY_SIZE(layout->set); s++) {
struct anv_descriptor_set_layout *set_layout = layout->set[s].layout;
if (!set_layout ||
!(set_layout->flags &
VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR))
continue;
if (set_idx)
*set_idx = s;
return set_layout;
}
return NULL;
}
/* This function returns a bitfield of used descriptors in the push descriptor
* set. It must be called before anv_nir_apply_pipeline_layout(), as the
* information it relies on is lost once the pipeline layout has been applied.
*/
uint32_t
anv_nir_compute_used_push_descriptors(nir_shader *shader,
const struct anv_pipeline_layout *layout)
{
uint8_t push_set;
const struct anv_descriptor_set_layout *push_set_layout =
anv_pipeline_layout_get_push_set(layout, &push_set);
if (push_set_layout == NULL)
return 0;
uint32_t used_push_bindings = 0;
nir_foreach_variable_with_modes(var, shader,
nir_var_uniform |
nir_var_image |
nir_var_mem_ubo |
nir_var_mem_ssbo) {
if (var->data.descriptor_set == push_set) {
uint32_t desc_idx =
push_set_layout->binding[var->data.binding].descriptor_index;
assert(desc_idx < MAX_PUSH_DESCRIPTORS);
used_push_bindings |= BITFIELD_BIT(desc_idx);
}
}
nir_foreach_function(function, shader) {
if (!function->impl)
continue;
nir_foreach_block(block, function->impl) {
nir_foreach_instr(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_vulkan_resource_index)
continue;
uint8_t set = nir_intrinsic_desc_set(intrin);
if (set != push_set)
continue;
uint32_t binding = nir_intrinsic_binding(intrin);
uint32_t desc_idx =
push_set_layout->binding[binding].descriptor_index;
assert(desc_idx < MAX_PUSH_DESCRIPTORS);
used_push_bindings |= BITFIELD_BIT(desc_idx);
}
}
}
return used_push_bindings;
}
/* This function checks whether the shader accesses the push descriptor
* buffer. This function must be called after anv_nir_compute_push_layout().
*/
bool
anv_nir_loads_push_desc_buffer(nir_shader *nir,
const struct anv_pipeline_layout *layout,
const struct anv_pipeline_bind_map *bind_map)
{
uint8_t push_set;
const struct anv_descriptor_set_layout *push_set_layout =
anv_pipeline_layout_get_push_set(layout, &push_set);
if (push_set_layout == NULL)
return false;
nir_foreach_function(function, nir) {
if (!function->impl)
continue;
nir_foreach_block(block, function->impl) {
nir_foreach_instr(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_load_ubo)
continue;
const nir_const_value *const_bt_idx =
nir_src_as_const_value(intrin->src[0]);
if (const_bt_idx == NULL)
continue;
const unsigned bt_idx = const_bt_idx[0].u32;
const struct anv_pipeline_binding *binding =
&bind_map->surface_to_descriptor[bt_idx];
if (binding->set == ANV_DESCRIPTOR_SET_DESCRIPTORS &&
binding->index == push_set)
return true;
}
}
}
return false;
}
/* This function computes a bitfield of all the UBO bindings in the push
* descriptor set that are fully promoted to push constants. If a binding's
* bit in the field is set, the corresponding binding table entry will not be
* accessed by the shader. This function must be called after
* anv_nir_compute_push_layout().
*/
uint32_t
anv_nir_push_desc_ubo_fully_promoted(nir_shader *nir,
const struct anv_pipeline_layout *layout,
const struct anv_pipeline_bind_map *bind_map)
{
uint8_t push_set;
const struct anv_descriptor_set_layout *push_set_layout =
anv_pipeline_layout_get_push_set(layout, &push_set);
if (push_set_layout == NULL)
return 0;
uint32_t ubos_fully_promoted = 0;
for (uint32_t b = 0; b < push_set_layout->binding_count; b++) {
const struct anv_descriptor_set_binding_layout *bind_layout =
&push_set_layout->binding[b];
if (bind_layout->type == -1)
continue;
assert(bind_layout->descriptor_index < MAX_PUSH_DESCRIPTORS);
if (bind_layout->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER)
ubos_fully_promoted |= BITFIELD_BIT(bind_layout->descriptor_index);
}
nir_foreach_function(function, nir) {
if (!function->impl)
continue;
nir_foreach_block(block, function->impl) {
nir_foreach_instr(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_load_ubo)
continue;
const nir_const_value *const_bt_idx =
nir_src_as_const_value(intrin->src[0]);
if (const_bt_idx == NULL)
continue;
const unsigned bt_idx = const_bt_idx[0].u32;
/* Skip if this isn't a load from push descriptor buffer. */
const struct anv_pipeline_binding *binding =
&bind_map->surface_to_descriptor[bt_idx];
if (binding->set != push_set)
continue;
const uint32_t desc_idx =
push_set_layout->binding[binding->binding].descriptor_index;
assert(desc_idx < MAX_PUSH_DESCRIPTORS);
bool promoted = false;
/* If the offset of the load is dynamic, we can't tell whether it was
* promoted or not.
*/
const nir_const_value *const_load_offset =
nir_src_as_const_value(intrin->src[1]);
if (const_load_offset != NULL) {
/* Check if the load was promoted to a push constant. */
const unsigned load_offset = const_load_offset[0].u32;
const int load_bytes = nir_intrinsic_dest_components(intrin) *
(nir_dest_bit_size(intrin->dest) / 8);
for (unsigned i = 0; i < ARRAY_SIZE(bind_map->push_ranges); i++) {
if (bind_map->push_ranges[i].start * 32 <= load_offset &&
(bind_map->push_ranges[i].start +
bind_map->push_ranges[i].length) * 32 >=
(load_offset + load_bytes)) {
promoted = true;
break;
}
}
}
if (!promoted)
ubos_fully_promoted &= ~BITFIELD_BIT(desc_idx);
}
}
}
return ubos_fully_promoted;
}
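For reference, the promotion test above reduces to an interval check in units of 32 bytes (the unit anv_push_range uses for start and length). The standalone sketch below restates just that arithmetic with invented names (struct push_range, load_is_promoted); it deliberately ignores which binding each push range belongs to, so it only illustrates the containment check, not the full pass.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Both fields are in units of 32 bytes, matching anv_push_range. */
struct push_range { uint32_t start, length; };

static bool
load_is_promoted(const struct push_range *ranges, unsigned num_ranges,
                 uint32_t load_offset, uint32_t load_bytes)
{
   for (unsigned i = 0; i < num_ranges; i++) {
      /* The load is promoted when [load_offset, load_offset + load_bytes)
       * lies entirely within one pushed range. */
      if (ranges[i].start * 32 <= load_offset &&
          (ranges[i].start + ranges[i].length) * 32 >= load_offset + load_bytes)
         return true;
   }
   return false;
}

int main(void)
{
   const struct push_range ranges[1] = { { .start = 2, .length = 2 } }; /* bytes 64..127 */
   assert(load_is_promoted(ranges, 1, 64, 16));   /* 16-byte load at offset 64: inside */
   assert(!load_is_promoted(ranges, 1, 120, 16)); /* crosses the end of the range */
   return 0;
}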


@@ -662,10 +662,14 @@ struct anv_pipeline_stage {
nir_shader *nir;
struct anv_push_descriptor_info push_desc_info;
struct anv_pipeline_binding surface_to_descriptor[256];
struct anv_pipeline_binding sampler_to_descriptor[256];
struct anv_pipeline_bind_map bind_map;
bool uses_bt_for_push_descs;
union brw_any_prog_data prog_data;
uint32_t num_stats;
@@ -787,6 +791,38 @@ anv_pipeline_hash_ray_tracing_combined_shader(struct anv_ray_tracing_pipeline *p
_mesa_sha1_final(&ctx, sha1_out);
}
static void
anv_stage_nullify_unused_push_desc_surfaces(struct anv_pipeline_stage *stage,
struct anv_pipeline_layout *layout)
{
uint8_t push_set;
const struct anv_descriptor_set_layout *push_set_layout =
anv_pipeline_layout_get_push_set(layout, &push_set);
if (push_set_layout == NULL)
return;
const uint32_t to_keep_descriptors =
stage->push_desc_info.used_descriptors &
~stage->push_desc_info.fully_promoted_ubo_descriptors;
for (unsigned s = 0; s < stage->bind_map.surface_count; s++) {
if (stage->bind_map.surface_to_descriptor[s].set == ANV_DESCRIPTOR_SET_DESCRIPTORS &&
stage->bind_map.surface_to_descriptor[s].index == push_set &&
!stage->push_desc_info.used_set_buffer)
stage->bind_map.surface_to_descriptor[s].set = ANV_DESCRIPTOR_SET_NULL;
if (stage->bind_map.surface_to_descriptor[s].set == push_set) {
const uint32_t binding =
stage->bind_map.surface_to_descriptor[s].binding;
const uint32_t desc_index =
push_set_layout->binding[binding].descriptor_index;
if (!(BITFIELD_BIT(desc_index) & to_keep_descriptors))
stage->bind_map.surface_to_descriptor[s].set = ANV_DESCRIPTOR_SET_NULL;
}
}
}
static nir_shader *
anv_pipeline_stage_get_nir(struct anv_pipeline *pipeline,
struct vk_pipeline_cache *cache,
@@ -887,6 +923,9 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
NIR_PASS(_, nir, brw_nir_lower_ray_queries, &pdevice->info);
stage->push_desc_info.used_descriptors =
anv_nir_compute_used_push_descriptors(nir, layout);
/* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
NIR_PASS_V(nir, anv_nir_apply_pipeline_layout,
pdevice, pipeline->device->robust_buffer_access,
@@ -962,6 +1001,12 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
gl_shader_stage_is_mesh(nir->info.stage))
NIR_PASS(_, nir, brw_nir_lower_cs_intrinsics);
stage->push_desc_info.used_set_buffer =
anv_nir_loads_push_desc_buffer(nir, layout, &stage->bind_map);
stage->push_desc_info.fully_promoted_ubo_descriptors =
anv_nir_push_desc_ubo_fully_promoted(nir, layout, &stage->bind_map);
anv_stage_nullify_unused_push_desc_surfaces(stage, layout);
stage->nir = nir;
}
@@ -1266,12 +1311,15 @@ anv_pipeline_link_fs(const struct brw_compiler *compiler,
rt_bindings[rt] = (struct anv_pipeline_binding) {
.set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
.index = rt,
.binding = UINT32_MAX,
};
} else {
/* Setup a null render target */
rt_bindings[rt] = (struct anv_pipeline_binding) {
.set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
.index = UINT32_MAX,
.binding = UINT32_MAX,
};
}
}
@@ -1453,6 +1501,14 @@ anv_pipeline_add_executables(struct anv_pipeline *pipeline,
}
pipeline->ray_queries = MAX2(pipeline->ray_queries, bin->prog_data->ray_queries);
if (bin->push_desc_info.used_set_buffer) {
pipeline->use_push_descriptor_buffer |=
mesa_to_vk_shader_stage(stage->stage);
}
if (bin->push_desc_info.used_descriptors &
~bin->push_desc_info.fully_promoted_ubo_descriptors)
pipeline->use_push_descriptor |= mesa_to_vk_shader_stage(stage->stage);
}
static void
@@ -1921,7 +1977,8 @@ anv_graphics_pipeline_compile(struct anv_graphics_pipeline *pipeline,
brw_prog_data_size(s),
stage->stats, stage->num_stats,
stage->nir->xfb_info,
&stage->bind_map);
&stage->bind_map,
&stage->push_desc_info);
if (!bin) {
ralloc_free(stage_ctx);
result = vk_error(pipeline, VK_ERROR_OUT_OF_HOST_MEMORY);
@@ -2034,6 +2091,7 @@ anv_pipeline_compile_cs(struct anv_compute_pipeline *pipeline,
stage.bind_map.surface_count = 1;
stage.bind_map.surface_to_descriptor[0] = (struct anv_pipeline_binding) {
.set = ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS,
.binding = UINT32_MAX,
};
stage.nir = anv_pipeline_stage_get_nir(&pipeline->base, cache, mem_ctx, &stage);
@@ -2081,7 +2139,8 @@ anv_pipeline_compile_cs(struct anv_compute_pipeline *pipeline,
&stage.prog_data.base,
sizeof(stage.prog_data.cs),
stage.stats, stage.num_stats,
NULL, &stage.bind_map);
NULL, &stage.bind_map,
&stage.push_desc_info);
if (!bin) {
ralloc_free(mem_ctx);
return vk_error(pipeline, VK_ERROR_OUT_OF_HOST_MEMORY);
@@ -2444,7 +2503,8 @@ compile_upload_rt_shader(struct anv_ray_tracing_pipeline *pipeline,
&stage->prog_data.base,
sizeof(stage->prog_data.bs),
stage->stats, 1,
NULL, &empty_bind_map);
NULL, &empty_bind_map,
&stage->push_desc_info);
if (bin == NULL)
return vk_error(pipeline, VK_ERROR_OUT_OF_HOST_MEMORY);
@@ -2872,6 +2932,7 @@ anv_device_init_rt_shaders(struct anv_device *device)
trampoline_nir->info.subgroup_size = SUBGROUP_SIZE_REQUIRE_16;
struct anv_push_descriptor_info push_desc_info = {};
struct anv_pipeline_bind_map bind_map = {
.surface_count = 0,
.sampler_count = 0,
@@ -2900,7 +2961,8 @@ anv_device_init_rt_shaders(struct anv_device *device)
trampoline_prog_data.base.program_size,
&trampoline_prog_data.base,
sizeof(trampoline_prog_data),
NULL, 0, NULL, &bind_map);
NULL, 0, NULL, &bind_map,
&push_desc_info);
ralloc_free(tmp_ctx);
@@ -2932,6 +2994,7 @@ anv_device_init_rt_shaders(struct anv_device *device)
NIR_PASS_V(trivial_return_nir, brw_nir_lower_rt_intrinsics, device->info);
struct anv_push_descriptor_info push_desc_info = {};
struct anv_pipeline_bind_map bind_map = {
.surface_count = 0,
.sampler_count = 0,
@@ -2953,7 +3016,8 @@ anv_device_init_rt_shaders(struct anv_device *device)
&return_key, sizeof(return_key),
return_data, return_prog_data.base.program_size,
&return_prog_data.base, sizeof(return_prog_data),
NULL, 0, NULL, &bind_map);
NULL, 0, NULL, &bind_map,
&push_desc_info);
ralloc_free(tmp_ctx);


@@ -73,7 +73,8 @@ anv_shader_bin_create(struct anv_device *device,
uint32_t prog_data_size,
const struct brw_compile_stats *stats, uint32_t num_stats,
const nir_xfb_info *xfb_info_in,
const struct anv_pipeline_bind_map *bind_map)
const struct anv_pipeline_bind_map *bind_map,
const struct anv_push_descriptor_info *push_desc_info)
{
VK_MULTIALLOC(ma);
VK_MULTIALLOC_DECL(&ma, struct anv_shader_bin, shader, 1);
@@ -171,6 +172,8 @@ anv_shader_bin_create(struct anv_device *device,
shader->xfb_info = NULL;
}
typed_memcpy(&shader->push_desc_info, push_desc_info, 1);
shader->bind_map = *bind_map;
typed_memcpy(surface_to_descriptor, bind_map->surface_to_descriptor,
bind_map->surface_count);
@@ -216,6 +219,10 @@ anv_shader_bin_serialize(struct vk_pipeline_cache_object *object,
blob_write_uint32(blob, 0);
}
blob_write_uint32(blob, shader->push_desc_info.used_descriptors);
blob_write_uint32(blob, shader->push_desc_info.fully_promoted_ubo_descriptors);
blob_write_uint8(blob, shader->push_desc_info.used_set_buffer);
blob_write_bytes(blob, shader->bind_map.surface_sha1,
sizeof(shader->bind_map.surface_sha1));
blob_write_bytes(blob, shader->bind_map.sampler_sha1,
@@ -278,6 +285,11 @@ anv_shader_bin_deserialize(struct vk_device *vk_device,
if (xfb_size)
xfb_info = blob_read_bytes(blob, xfb_size);
struct anv_push_descriptor_info push_desc_info = {};
push_desc_info.used_descriptors = blob_read_uint32(blob);
push_desc_info.fully_promoted_ubo_descriptors = blob_read_uint32(blob);
push_desc_info.used_set_buffer = blob_read_uint8(blob);
struct anv_pipeline_bind_map bind_map = {};
blob_copy_bytes(blob, bind_map.surface_sha1, sizeof(bind_map.surface_sha1));
blob_copy_bytes(blob, bind_map.sampler_sha1, sizeof(bind_map.sampler_sha1));
@@ -308,7 +320,8 @@ anv_shader_bin_deserialize(struct vk_device *vk_device,
key_data, key_size,
kernel_data, kernel_size,
&prog_data.base, prog_data_size,
stats, num_stats, xfb_info, &bind_map);
stats, num_stats, xfb_info, &bind_map,
&push_desc_info);
if (shader == NULL)
return NULL;
@@ -350,7 +363,8 @@ anv_device_upload_kernel(struct anv_device *device,
const struct brw_compile_stats *stats,
uint32_t num_stats,
const nir_xfb_info *xfb_info,
const struct anv_pipeline_bind_map *bind_map)
const struct anv_pipeline_bind_map *bind_map,
const struct anv_push_descriptor_info *push_desc_info)
{
/* Use the default pipeline cache if none is specified */
if (cache == NULL)
@@ -362,7 +376,8 @@ anv_device_upload_kernel(struct anv_device *device,
kernel_data, kernel_size,
prog_data, prog_data_size,
stats, num_stats,
xfb_info, bind_map);
xfb_info, bind_map,
push_desc_info);
if (shader == NULL)
return NULL;
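The cache side of this change is just three extra scalars in the shader blob, written and read back in the same order (two uint32_t followed by one uint8_t). Below is a self-contained mock of that round trip; it uses plain memcpy rather than Mesa's util/blob helpers, and write_info/read_info are invented names for the example.

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct push_descriptor_info {
   uint32_t used_descriptors;
   uint32_t fully_promoted_ubo_descriptors;
   uint8_t  used_set_buffer;
};

/* Field order and widths must match between writer and reader, exactly as in
 * anv_shader_bin_serialize()/anv_shader_bin_deserialize(). */
static size_t write_info(uint8_t *out, const struct push_descriptor_info *info)
{
   size_t o = 0;
   memcpy(out + o, &info->used_descriptors, 4);               o += 4;
   memcpy(out + o, &info->fully_promoted_ubo_descriptors, 4); o += 4;
   memcpy(out + o, &info->used_set_buffer, 1);                o += 1;
   return o;
}

static size_t read_info(struct push_descriptor_info *info, const uint8_t *in)
{
   size_t o = 0;
   memcpy(&info->used_descriptors, in + o, 4);               o += 4;
   memcpy(&info->fully_promoted_ubo_descriptors, in + o, 4); o += 4;
   memcpy(&info->used_set_buffer, in + o, 1);                o += 1;
   return o;
}

int main(void)
{
   struct push_descriptor_info in = { 0x5, 0x1, 1 }, out = { 0, 0, 0 };
   uint8_t buf[16];
   size_t written = write_info(buf, &in);
   assert(read_info(&out, buf) == written);
   assert(out.used_descriptors == in.used_descriptors);
   assert(out.fully_promoted_ubo_descriptors == in.fully_promoted_ubo_descriptors);
   assert(out.used_set_buffer == in.used_set_buffer);
   return 0;
}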


@@ -1102,6 +1102,7 @@ struct anv_queue {
struct nir_xfb_info;
struct anv_pipeline_bind_map;
struct anv_push_descriptor_info;
extern const struct vk_pipeline_cache_object_ops *const anv_cache_import_ops[2];
@@ -1122,7 +1123,8 @@ anv_device_upload_kernel(struct anv_device *device,
const struct brw_compile_stats *stats,
uint32_t num_stats,
const struct nir_xfb_info *xfb_info,
const struct anv_pipeline_bind_map *bind_map);
const struct anv_pipeline_bind_map *bind_map,
const struct anv_push_descriptor_info *push_desc_info);
struct nir_shader;
struct nir_shader_compiler_options;
@@ -1950,6 +1952,11 @@ struct anv_pipeline_binding {
*/
uint32_t index;
/** Binding in the descriptor set. Not valid for any of the
* ANV_DESCRIPTOR_SET_*
*/
uint32_t binding;
/** The descriptor set this surface corresponds to.
*
* The special ANV_DESCRIPTOR_SET_* values above indicates that this
@@ -1967,11 +1974,6 @@ struct anv_pipeline_binding {
/** For a storage image, whether it requires a lowered surface */
uint8_t lowered_storage_surface;
/** Pad to 64 bits so that there are no holes and we can safely memcmp
* assuming POD zero-initialization.
*/
uint8_t pad;
};
struct anv_push_range {
@@ -2004,6 +2006,10 @@ struct anv_pipeline_layout {
unsigned char sha1[20];
};
const struct anv_descriptor_set_layout *
anv_pipeline_layout_get_push_set(const struct anv_pipeline_layout *layout,
uint8_t *set_idx);
struct anv_buffer {
struct vk_buffer vk;
@@ -2849,6 +2855,17 @@ struct anv_pipeline_bind_map {
struct anv_push_range push_ranges[4];
};
struct anv_push_descriptor_info {
/* A bitfield of the descriptors used in the push descriptor set. */
uint32_t used_descriptors;
/* A bitfield of UBO bindings fully promoted to push constants. */
uint32_t fully_promoted_ubo_descriptors;
/* Whether the shader accesses the push descriptor set's buffer. */
uint8_t used_set_buffer;
};
struct anv_shader_bin {
struct vk_pipeline_cache_object base;
@@ -2865,6 +2882,8 @@ struct anv_shader_bin {
struct nir_xfb_info *xfb_info;
struct anv_push_descriptor_info push_desc_info;
struct anv_pipeline_bind_map bind_map;
};
@@ -2877,7 +2896,8 @@ anv_shader_bin_create(struct anv_device *device,
uint32_t prog_data_size,
const struct brw_compile_stats *stats, uint32_t num_stats,
const struct nir_xfb_info *xfb_info,
const struct anv_pipeline_bind_map *bind_map);
const struct anv_pipeline_bind_map *bind_map,
const struct anv_push_descriptor_info *push_desc_info);
static inline void
anv_shader_bin_ref(struct anv_shader_bin *shader)
@@ -2935,6 +2955,16 @@ struct anv_pipeline {
uint32_t ray_queries;
/**
* Mask of stages that are accessing push descriptors.
*/
VkShaderStageFlags use_push_descriptor;
/**
* Mask of stages that are accessing the push descriptors buffer.
*/
VkShaderStageFlags use_push_descriptor_buffer;
struct util_dynarray executables;
const struct intel_l3_config * l3_config;


@@ -44,6 +44,7 @@ get_shader_bin(struct anv_device *device,
assert(kernel_data.prog_data.base.nr_params <= ARRAY_SIZE(dummy_param));
kernel_data.prog_data.base.param = dummy_param;
struct anv_push_descriptor_info push_desc_info = {};
struct anv_pipeline_bind_map bind_map = {
.kernel_args_size = kernel_data.args_size,
.kernel_arg_count = kernel_data.arg_count,
@@ -57,7 +58,8 @@ get_shader_bin(struct anv_device *device,
kernel_data.prog_data.base.program_size,
&kernel_data.prog_data.base,
sizeof(kernel_data.prog_data),
NULL, 0, NULL, &bind_map);
NULL, 0, NULL, &bind_map,
&push_desc_info);
/* The cache already has a reference and it's not going anywhere so there
* is no need to hold a second reference.


@@ -147,6 +147,7 @@ libanv_files = files(
'anv_nir_lower_multiview.c',
'anv_nir_lower_ubo_loads.c',
'anv_nir_lower_ycbcr_textures.c',
'anv_nir_push_descriptor_analysis.c',
'anv_perf.c',
'anv_pipeline.c',
'anv_pipeline_cache.c',