anv: drop support for VK_NV_mesh_shader

Reviewed-by: Ivan Briano <ivan.briano@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24071>

commit 87dd96bbbe (committed by Marge Bot)
parent ed72d6e2a7
@@ -39,12 +39,6 @@ specific to ANV:
 Experimental features
 ---------------------
 
-:envvar:`ANV_EXPERIMENTAL_NV_MESH_SHADER`
-   If defined to ``1`` or ``true``, this advertise support for
-   :ext:`VK_NV_mesh_shader` extension for platforms that have hardware
-   support for it.
-
-
 .. _`Bindless model`:
 
 Binding Model
@@ -91,8 +91,7 @@ void
 anv_device_init_blorp(struct anv_device *device)
 {
    const struct blorp_config config = {
-      .use_mesh_shading = device->physical->vk.supported_extensions.NV_mesh_shader ||
-                          device->physical->vk.supported_extensions.EXT_mesh_shader,
+      .use_mesh_shading = device->physical->vk.supported_extensions.EXT_mesh_shader,
    };
 
    blorp_init(&device->blorp, device, &device->isl_dev, &config);
@@ -570,8 +570,7 @@ anv_cmd_buffer_bind_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
    switch (bind_point) {
    case VK_PIPELINE_BIND_POINT_GRAPHICS:
       stages &= VK_SHADER_STAGE_ALL_GRAPHICS |
-                ((cmd_buffer->device->vk.enabled_extensions.NV_mesh_shader ||
-                  cmd_buffer->device->vk.enabled_extensions.EXT_mesh_shader) ?
+                (cmd_buffer->device->vk.enabled_extensions.EXT_mesh_shader ?
                    (VK_SHADER_STAGE_TASK_BIT_EXT |
                     VK_SHADER_STAGE_MESH_BIT_EXT) : 0);
       pipe_state = &cmd_buffer->state.gfx.base;
@@ -201,8 +201,6 @@ get_device_extensions(const struct anv_physical_device *device,
    */
   const bool mesh_shader_enabled = device->info.has_mesh_shading &&
                                    debug_get_bool_option("ANV_MESH_SHADER", false);
-   const bool nv_mesh_shading_enabled =
-      debug_get_bool_option("ANV_EXPERIMENTAL_NV_MESH_SHADER", false);
 
    *ext = (struct vk_device_extension_table) {
       .KHR_8bit_storage = true,
@@ -384,8 +382,6 @@ get_device_extensions(const struct anv_physical_device *device,
       .INTEL_shader_integer_functions2 = true,
       .EXT_multi_draw = true,
       .NV_compute_shader_derivatives = true,
-      .NV_mesh_shader = mesh_shader_enabled &&
-                        nv_mesh_shading_enabled,
       .VALVE_mutable_descriptor_type = true,
    };
 }
@@ -404,8 +400,7 @@ get_features(const struct anv_physical_device *pdevice,
    const bool rt_enabled = ANV_SUPPORT_RT && pdevice->info.has_ray_tracing;
 
    const bool mesh_shader =
-      pdevice->vk.supported_extensions.EXT_mesh_shader ||
-      pdevice->vk.supported_extensions.NV_mesh_shader;
+      pdevice->vk.supported_extensions.EXT_mesh_shader;
 
    *features = (struct vk_features) {
       /* Vulkan 1.0 */
@@ -632,8 +627,8 @@ get_features(const struct anv_physical_device *pdevice,
       .stippledSmoothLines = false,
 
       /* VK_NV_mesh_shader */
-      .taskShaderNV = mesh_shader,
-      .meshShaderNV = mesh_shader,
+      .taskShaderNV = false,
+      .meshShaderNV = false,
 
       /* VK_EXT_mesh_shader */
       .taskShader = mesh_shader,
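For context (not part of the diff): after this change anv still accepts the VkPhysicalDeviceMeshShaderFeaturesNV query but reports taskShaderNV/meshShaderNV as false, so applications have to detect mesh/task support through the EXT feature struct. A minimal application-side sketch, assuming the device already advertises VK_EXT_mesh_shader (the helper name is illustrative):

   #include <stdbool.h>
   #include <vulkan/vulkan.h>

   /* Illustrative helper: query the VK_EXT_mesh_shader feature bits instead
    * of relying on the NV ones, which anv now always reports as false. */
   static bool has_ext_mesh_and_task(VkPhysicalDevice physical_device)
   {
      VkPhysicalDeviceMeshShaderFeaturesEXT mesh_features = {
         .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_EXT,
      };
      VkPhysicalDeviceFeatures2 features2 = {
         .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
         .pNext = &mesh_features,
      };
      vkGetPhysicalDeviceFeatures2(physical_device, &features2);

      return mesh_features.meshShader && mesh_features.taskShader;
   }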
@@ -1812,8 +1807,7 @@ anv_get_physical_device_properties_1_1(struct anv_physical_device *pdevice,
                       VK_SHADER_STAGE_INTERSECTION_BIT_KHR |
                       VK_SHADER_STAGE_CALLABLE_BIT_KHR;
    }
-   if (pdevice->vk.supported_extensions.NV_mesh_shader ||
-       pdevice->vk.supported_extensions.EXT_mesh_shader) {
+   if (pdevice->vk.supported_extensions.EXT_mesh_shader) {
       scalar_stages |= VK_SHADER_STAGE_TASK_BIT_EXT |
                        VK_SHADER_STAGE_MESH_BIT_EXT;
    }
@@ -2230,66 +2224,6 @@ void anv_GetPhysicalDeviceProperties2(
          break;
       }
 
-      case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV: {
-         VkPhysicalDeviceMeshShaderPropertiesNV *props =
-            (VkPhysicalDeviceMeshShaderPropertiesNV *)ext;
-
-         /* Bounded by the maximum representable size in
-          * 3DSTATE_MESH_SHADER_BODY::SharedLocalMemorySize. Same for Task.
-          */
-         const uint32_t max_slm_size = 64 * 1024;
-
-         /* Bounded by the maximum representable size in
-          * 3DSTATE_MESH_SHADER_BODY::LocalXMaximum. Same for Task.
-          */
-         const uint32_t max_workgroup_size = 1 << 10;
-
-         /* Bounded by the maximum representable count in
-          * 3DSTATE_MESH_SHADER_BODY::MaximumPrimitiveCount.
-          */
-         const uint32_t max_primitives = 1024;
-
-         /* TODO(mesh): Multiview. */
-         const uint32_t max_view_count = 1;
-
-         props->maxDrawMeshTasksCount = UINT32_MAX;
-
-         /* TODO(mesh): Implement workgroup Y and Z sizes larger than one by
-          * mapping them to/from the single value that HW provides us
-          * (currently used for X).
-          */
-
-         props->maxTaskWorkGroupInvocations = max_workgroup_size;
-         props->maxTaskWorkGroupSize[0] = max_workgroup_size;
-         props->maxTaskWorkGroupSize[1] = 1;
-         props->maxTaskWorkGroupSize[2] = 1;
-         props->maxTaskTotalMemorySize = max_slm_size;
-         props->maxTaskOutputCount = UINT16_MAX;
-
-         props->maxMeshWorkGroupInvocations = max_workgroup_size;
-         props->maxMeshWorkGroupSize[0] = max_workgroup_size;
-         props->maxMeshWorkGroupSize[1] = 1;
-         props->maxMeshWorkGroupSize[2] = 1;
-         props->maxMeshTotalMemorySize = max_slm_size / max_view_count;
-         props->maxMeshOutputPrimitives = max_primitives / max_view_count;
-         props->maxMeshMultiviewViewCount = max_view_count;
-
-         /* Depends on what indices can be represented with IndexFormat. For
-          * now we always use U32, so bound to the maximum unique vertices we
-          * need for the maximum primitives.
-          *
-          * TODO(mesh): Revisit this if we drop "U32" IndexFormat when adding
-          * support for others.
-          */
-         props->maxMeshOutputVertices = 3 * props->maxMeshOutputPrimitives;
-
-
-         props->meshOutputPerVertexGranularity = 32;
-         props->meshOutputPerPrimitiveGranularity = 32;
-
-         break;
-      }
-
       case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_EXT: {
          VkPhysicalDeviceMeshShaderPropertiesEXT *properties =
             (VkPhysicalDeviceMeshShaderPropertiesEXT *)ext;
@@ -345,20 +345,10 @@ anv_mesh_convert_attrs_prim_to_vert(struct nir_shader *nir,
       nir_ssa_def *src_vertex;
       nir_ssa_def *prim_indices;
 
-      if (nir->info.mesh.nv) {
-         /* flat array, but we can deref each index directly */
-         nir_ssa_def *index_index =
-            nir_imul(&b, primitive, nir_imm_int(&b, vertices_per_primitive));
-         index_index = nir_iadd(&b, index_index, nir_imm_int(&b, provoking_vertex));
-         indexed_primitive_indices_deref = nir_build_deref_array(&b, primitive_indices_deref, index_index);
-         src_vertex = nir_load_deref(&b, indexed_primitive_indices_deref);
-         prim_indices = NULL;
-      } else {
-         /* array of vectors, we have to extract index out of array deref */
-         indexed_primitive_indices_deref = nir_build_deref_array(&b, primitive_indices_deref, primitive);
-         prim_indices = nir_load_deref(&b, indexed_primitive_indices_deref);
-         src_vertex = nir_channel(&b, prim_indices, provoking_vertex);
-      }
+      /* array of vectors, we have to extract index out of array deref */
+      indexed_primitive_indices_deref = nir_build_deref_array(&b, primitive_indices_deref, primitive);
+      prim_indices = nir_load_deref(&b, indexed_primitive_indices_deref);
+      src_vertex = nir_channel(&b, prim_indices, provoking_vertex);
 
       nir_ssa_def *dst_vertex = nir_load_deref(&b, vertex_deref);
 
@@ -378,17 +368,13 @@ anv_mesh_convert_attrs_prim_to_vert(struct nir_shader *nir,
          nir_copy_deref(&b, dst, src);
       }
 
-      if (nir->info.mesh.nv) {
-         nir_store_deref(&b, indexed_primitive_indices_deref, dst_vertex, 1);
-      } else {
-         /* replace one component of primitive indices vector */
-         nir_ssa_def *new_val =
-            nir_vector_insert_imm(&b, prim_indices, dst_vertex, provoking_vertex);
+      /* replace one component of primitive indices vector */
+      nir_ssa_def *new_val =
+         nir_vector_insert_imm(&b, prim_indices, dst_vertex, provoking_vertex);
 
       /* and store complete vector */
       nir_store_deref(&b, indexed_primitive_indices_deref, new_val,
                       BITFIELD_MASK(vertices_per_primitive));
-      }
 
       nir_store_deref(&b, vertex_deref, nir_iadd_imm(&b, dst_vertex, 1), 1);
 
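For context (not part of the diff): the dropped nv branches handled NV_mesh_shader's primitive indices, which that extension exposes as one flat scalar array, while EXT_mesh_shader exposes an array of per-primitive vectors. That is why the removed path computed primitive * vertices_per_primitive + provoking_vertex and the kept path derefs the primitive and reads or rewrites a single channel. A plain-C sketch of the two layouts (types and helper names are illustrative, not driver code):

   #include <stdint.h>

   /* NV_mesh_shader view: one flat index array; the provoking vertex of
    * primitive 'prim' lives at prim * verts_per_prim + provoking_vertex. */
   static uint32_t read_provoking_nv(const uint32_t *flat_indices, uint32_t prim,
                                     uint32_t verts_per_prim,
                                     uint32_t provoking_vertex)
   {
      return flat_indices[prim * verts_per_prim + provoking_vertex];
   }

   /* EXT_mesh_shader view: an array of per-primitive vectors; the provoking
    * vertex is one channel of the primitive's vector. */
   typedef struct { uint32_t v[3]; } uvec3;

   static uint32_t read_provoking_ext(const uvec3 *prim_indices, uint32_t prim,
                                      uint32_t provoking_vertex)
   {
      return prim_indices[prim].v[provoking_vertex];
   }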
@@ -176,7 +176,7 @@ anv_shader_stage_to_nir(struct anv_device *device,
          .int64_atomics = true,
          .integer_functions2 = true,
          .mesh_shading = pdevice->vk.supported_extensions.EXT_mesh_shader,
-         .mesh_shading_nv = pdevice->vk.supported_extensions.NV_mesh_shader,
+         .mesh_shading_nv = false,
          .min_lod = true,
          .multiview = true,
          .physical_storage_buffer_address = true,
@@ -925,14 +925,8 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
       });
    }
 
-   if ((nir->info.stage == MESA_SHADER_MESH ||
-        nir->info.stage == MESA_SHADER_TASK) && !nir->info.mesh.nv) {
-      /* We can't/shouldn't lower id to index for NV_mesh_shader, because:
-       * 3DMESH_1D doesn't expose registers needed for
-       * nir_intrinsic_load_num_workgroups (generated by this pass)
-       * and we can't unify NV with EXT, because 3DMESH_3D doesn't support
-       * vkCmdDrawMeshTasksNV.firstTask.
-       */
+   if (nir->info.stage == MESA_SHADER_MESH ||
+       nir->info.stage == MESA_SHADER_TASK) {
       nir_lower_compute_system_values_options options = {
            .lower_cs_local_id_to_index = true,
            .lower_workgroup_id_to_index = true,
@@ -1966,8 +1960,7 @@ anv_pipeline_nir_preprocess(struct anv_pipeline *pipeline,
    };
    brw_preprocess_nir(compiler, stage->nir, &opts);
 
-   if (stage->nir->info.stage == MESA_SHADER_MESH &&
-       !stage->nir->info.mesh.nv) {
+   if (stage->nir->info.stage == MESA_SHADER_MESH) {
       NIR_PASS(_, stage->nir, anv_nir_lower_set_vtx_and_prim_count);
       NIR_PASS(_, stage->nir, nir_opt_dce);
       NIR_PASS(_, stage->nir, nir_remove_dead_variables, nir_var_shader_out, NULL);
@@ -3104,8 +3097,7 @@ anv_graphics_pipeline_create(struct anv_device *device,
                          pipeline->base.base.active_stages & VK_SHADER_STAGE_MESH_BIT_EXT);
 
    if (anv_pipeline_is_mesh(pipeline)) {
-      assert(device->physical->vk.supported_extensions.NV_mesh_shader ||
-             device->physical->vk.supported_extensions.EXT_mesh_shader);
+      assert(device->physical->vk.supported_extensions.EXT_mesh_shader);
    }
 
    /* After we've imported all the libraries' layouts, import the pipeline
@@ -3141,8 +3133,7 @@ anv_graphics_pipeline_create(struct anv_device *device,
         pipeline->base.base.active_stages |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
 
      if (anv_pipeline_is_mesh(pipeline))
-        assert(device->physical->vk.supported_extensions.NV_mesh_shader ||
-               device->physical->vk.supported_extensions.EXT_mesh_shader);
+        assert(device->physical->vk.supported_extensions.EXT_mesh_shader);
 
      anv_graphics_pipeline_emit(pipeline, &state);
 
@@ -5311,42 +5311,6 @@ void genX(CmdEndTransformFeedbackEXT)(
 }
 
 #if GFX_VERx10 >= 125
-void
-genX(CmdDrawMeshTasksNV)(
-    VkCommandBuffer commandBuffer,
-    uint32_t taskCount,
-    uint32_t firstTask)
-{
-   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
-
-   if (anv_batch_has_error(&cmd_buffer->batch))
-      return;
-
-   anv_measure_snapshot(cmd_buffer,
-                        INTEL_SNAPSHOT_DRAW,
-                        "draw mesh", taskCount);
-
-   trace_intel_begin_draw_mesh(&cmd_buffer->trace);
-
-   /* TODO(mesh): Check if this is not emitting more packets than we need. */
-   genX(cmd_buffer_flush_gfx_state)(cmd_buffer);
-
-   if (cmd_buffer->state.conditional_render_enabled)
-      genX(cmd_emit_conditional_render_predicate)(cmd_buffer);
-
-   /* BSpec 54016 says: "The values passed for Starting ThreadGroup ID X
-    * and ThreadGroup Count X shall not cause TGIDs to exceed (2^32)-1."
-    */
-   assert((int64_t)firstTask + taskCount - 1 <= UINT32_MAX);
-
-   anv_batch_emit(&cmd_buffer->batch, GENX(3DMESH_1D), m) {
-      m.PredicateEnable = cmd_buffer->state.conditional_render_enabled;
-      m.ThreadGroupCountX = taskCount;
-      m.StartingThreadGroupIDX = firstTask;
-   }
-
-   trace_intel_end_draw_mesh(&cmd_buffer->trace, taskCount, 1, 1);
-}
 
 void
 genX(CmdDrawMeshTasksEXT)(
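For context (not part of the diff): the entry point removed above implemented vkCmdDrawMeshTasksNV, which takes a 1D taskCount plus a firstTask offset (mapped to 3DMESH_1D::StartingThreadGroupIDX), while vkCmdDrawMeshTasksEXT, whose implementation remains below, takes a three-dimensional workgroup count and has no firstTask. A minimal application-side sketch of the migration (the helper name is illustrative; the EXT entry point is fetched through vkGetDeviceProcAddr as usual for extensions):

   #include <vulkan/vulkan.h>

   /* Illustrative helper: record one mesh draw through VK_EXT_mesh_shader.
    * The old call would have been vkCmdDrawMeshTasksNV(cmd, group_count, 0),
    * i.e. a 1D count plus a firstTask offset. */
   static void record_mesh_draw(VkDevice device, VkCommandBuffer cmd,
                                uint32_t group_count)
   {
      PFN_vkCmdDrawMeshTasksEXT draw_mesh_tasks =
         (PFN_vkCmdDrawMeshTasksEXT)
            vkGetDeviceProcAddr(device, "vkCmdDrawMeshTasksEXT");

      /* 3D workgroup count, no firstTask equivalent. */
      draw_mesh_tasks(cmd, group_count, 1, 1);
   }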
@@ -5383,43 +5347,8 @@ genX(CmdDrawMeshTasksEXT)(
 }
 
 #define GFX125_3DMESH_TG_COUNT 0x26F0
-#define GFX125_3DMESH_STARTING_TGID 0x26F4
 #define GFX10_3DPRIM_XP(n) (0x2690 + (n) * 4) /* n = { 0, 1, 2 } */
 
-static void
-mesh_load_indirect_parameters_3dmesh_1d(struct anv_cmd_buffer *cmd_buffer,
-                                        struct mi_builder *b,
-                                        struct anv_address addr,
-                                        bool emit_xp0,
-                                        uint32_t xp0)
-{
-   const size_t taskCountOff = offsetof(VkDrawMeshTasksIndirectCommandNV, taskCount);
-   const size_t firstTaskOff = offsetof(VkDrawMeshTasksIndirectCommandNV, firstTask);
-
-   mi_store(b, mi_reg32(GFX125_3DMESH_TG_COUNT),
-            mi_mem32(anv_address_add(addr, taskCountOff)));
-
-   mi_store(b, mi_reg32(GFX125_3DMESH_STARTING_TGID),
-            mi_mem32(anv_address_add(addr, firstTaskOff)));
-
-   if (emit_xp0)
-      mi_store(b, mi_reg32(GFX10_3DPRIM_XP(0)), mi_imm(xp0));
-}
-
-static void
-emit_indirect_3dmesh_1d(struct anv_batch *batch,
-                        bool predicate_enable,
-                        bool uses_drawid)
-{
-   uint32_t len = GENX(3DMESH_1D_length) + uses_drawid;
-   uint32_t *dw = anv_batch_emitn(batch, len, GENX(3DMESH_1D),
-                                  .PredicateEnable = predicate_enable,
-                                  .IndirectParameterEnable = true,
-                                  .ExtendedParameter0Present = uses_drawid);
-   if (uses_drawid)
-      dw[len - 1] = 0;
-}
-
 static void
 mesh_load_indirect_parameters_3dmesh_3d(struct anv_cmd_buffer *cmd_buffer,
                                         struct mi_builder *b,
@@ -5458,54 +5387,6 @@ emit_indirect_3dmesh_3d(struct anv_batch *batch,
       dw[len - 1] = 0;
 }
 
-void
-genX(CmdDrawMeshTasksIndirectNV)(
-    VkCommandBuffer commandBuffer,
-    VkBuffer _buffer,
-    VkDeviceSize offset,
-    uint32_t drawCount,
-    uint32_t stride)
-{
-   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
-   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
-   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
-   const struct brw_task_prog_data *task_prog_data = get_task_prog_data(pipeline);
-   const struct brw_mesh_prog_data *mesh_prog_data = get_mesh_prog_data(pipeline);
-   struct anv_cmd_state *cmd_state = &cmd_buffer->state;
-
-   if (anv_batch_has_error(&cmd_buffer->batch))
-      return;
-
-   anv_measure_snapshot(cmd_buffer,
-                        INTEL_SNAPSHOT_DRAW,
-                        "draw mesh indirect", drawCount);
-
-   trace_intel_begin_draw_mesh_indirect(&cmd_buffer->trace);
-
-   genX(cmd_buffer_flush_gfx_state)(cmd_buffer);
-
-   if (cmd_state->conditional_render_enabled)
-      genX(cmd_emit_conditional_render_predicate)(cmd_buffer);
-
-   bool uses_drawid = (task_prog_data && task_prog_data->uses_drawid) ||
-                       mesh_prog_data->uses_drawid;
-   struct mi_builder b;
-   mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
-
-   for (uint32_t i = 0; i < drawCount; i++) {
-      struct anv_address draw = anv_address_add(buffer->address, offset);
-
-      mesh_load_indirect_parameters_3dmesh_1d(cmd_buffer, &b, draw, uses_drawid, i);
-
-      emit_indirect_3dmesh_1d(&cmd_buffer->batch,
-            cmd_state->conditional_render_enabled, uses_drawid);
-
-      offset += stride;
-   }
-
-   trace_intel_end_draw_mesh_indirect(&cmd_buffer->trace, drawCount);
-}
-
 void
 genX(CmdDrawMeshTasksIndirectEXT)(
     VkCommandBuffer commandBuffer,
@@ -5554,60 +5435,6 @@ genX(CmdDrawMeshTasksIndirectEXT)(
    trace_intel_end_draw_mesh_indirect(&cmd_buffer->trace, drawCount);
 }
 
-void
-genX(CmdDrawMeshTasksIndirectCountNV)(
-    VkCommandBuffer commandBuffer,
-    VkBuffer _buffer,
-    VkDeviceSize offset,
-    VkBuffer _countBuffer,
-    VkDeviceSize countBufferOffset,
-    uint32_t maxDrawCount,
-    uint32_t stride)
-{
-   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
-   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
-   ANV_FROM_HANDLE(anv_buffer, count_buffer, _countBuffer);
-   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
-   const struct brw_task_prog_data *task_prog_data = get_task_prog_data(pipeline);
-   const struct brw_mesh_prog_data *mesh_prog_data = get_mesh_prog_data(pipeline);
-
-   if (anv_batch_has_error(&cmd_buffer->batch))
-      return;
-
-   anv_measure_snapshot(cmd_buffer,
-                        INTEL_SNAPSHOT_DRAW,
-                        "draw mesh indirect count", 0);
-
-   trace_intel_begin_draw_mesh_indirect_count(&cmd_buffer->trace);
-
-   genX(cmd_buffer_flush_gfx_state)(cmd_buffer);
-
-   bool uses_drawid = (task_prog_data && task_prog_data->uses_drawid) ||
-                       mesh_prog_data->uses_drawid;
-
-   struct mi_builder b;
-   mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
-
-   struct mi_value max =
-      prepare_for_draw_count_predicate(
-         cmd_buffer, &b,
-         anv_address_add(count_buffer->address, countBufferOffset));
-
-   for (uint32_t i = 0; i < maxDrawCount; i++) {
-      struct anv_address draw = anv_address_add(buffer->address, offset);
-
-      emit_draw_count_predicate_cond(cmd_buffer, &b, i, max);
-
-      mesh_load_indirect_parameters_3dmesh_1d(cmd_buffer, &b, draw, uses_drawid, i);
-
-      emit_indirect_3dmesh_1d(&cmd_buffer->batch, true, uses_drawid);
-
-      offset += stride;
-   }
-
-   trace_intel_end_draw_mesh_indirect_count(&cmd_buffer->trace, maxDrawCount);
-}
-
 void
 genX(CmdDrawMeshTasksIndirectCountEXT)(
     VkCommandBuffer commandBuffer,
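For context (not part of the diff): the indirect command layouts differ as well. VkDrawMeshTasksIndirectCommandNV is { taskCount, firstTask } (see the offsetof() uses in the removed helpers), while VkDrawMeshTasksIndirectCommandEXT is a 3D group count, so indirect buffers written for the NV path cannot be reused unchanged. A small sketch of filling an EXT-style indirect buffer (the helper name is illustrative):

   #include <stdint.h>
   #include <vulkan/vulkan.h>

   /* Illustrative helper: write EXT-layout indirect mesh draws into a mapped
    * buffer that will be consumed by vkCmdDrawMeshTasksIndirectEXT. */
   static void fill_indirect_mesh_draws(VkDrawMeshTasksIndirectCommandEXT *cmds,
                                        uint32_t draw_count, uint32_t groups_x)
   {
      for (uint32_t i = 0; i < draw_count; i++) {
         cmds[i] = (VkDrawMeshTasksIndirectCommandEXT) {
            .groupCountX = groups_x,
            .groupCountY = 1,
            .groupCountZ = 1,
         };
      }
   }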
@@ -76,8 +76,7 @@ emit_common_so_memcpy(struct anv_batch *batch, struct anv_device *device,
    /* Disable Mesh, we can't have this and streamout enabled at the same
    * time.
    */
-   if (device->vk.enabled_extensions.NV_mesh_shader ||
-       device->vk.enabled_extensions.EXT_mesh_shader) {
+   if (device->vk.enabled_extensions.EXT_mesh_shader) {
      anv_batch_emit(batch, GENX(3DSTATE_MESH_CONTROL), mesh);
      anv_batch_emit(batch, GENX(3DSTATE_TASK_CONTROL), task);
   }
@@ -333,8 +333,7 @@ genX(emit_urb_setup)(struct anv_device *device, struct anv_batch *batch,
      }
   }
 #if GFX_VERx10 >= 125
-   if (device->physical->vk.supported_extensions.NV_mesh_shader ||
-       device->physical->vk.supported_extensions.EXT_mesh_shader) {
+   if (device->physical->vk.supported_extensions.EXT_mesh_shader) {
      anv_batch_emit(batch, GENX(3DSTATE_URB_ALLOC_MESH), zero);
      anv_batch_emit(batch, GENX(3DSTATE_URB_ALLOC_TASK), zero);
   }
@@ -1870,8 +1869,7 @@ genX(graphics_pipeline_emit)(struct anv_graphics_pipeline *pipeline,
 #if GFX_VERx10 >= 125
   const struct anv_device *device = pipeline->base.base.device;
   /* Disable Mesh. */
-   if (device->vk.enabled_extensions.NV_mesh_shader ||
-       device->vk.enabled_extensions.EXT_mesh_shader) {
+   if (device->vk.enabled_extensions.EXT_mesh_shader) {
      struct anv_batch *batch = &pipeline->base.base.batch;
 
      anv_batch_emit(batch, GENX(3DSTATE_MESH_CONTROL), zero);
@@ -139,8 +139,7 @@ genX(emit_simpler_shader_init_fragment)(struct anv_simple_shader *state)
   anv_batch_emit(batch, GENX(3DSTATE_DS), DS);
 
 #if GFX_VERx10 >= 125
-   if (device->vk.enabled_extensions.NV_mesh_shader ||
-       device->vk.enabled_extensions.EXT_mesh_shader) {
+   if (device->vk.enabled_extensions.EXT_mesh_shader) {
      anv_batch_emit(batch, GENX(3DSTATE_MESH_CONTROL), mesh);
      anv_batch_emit(batch, GENX(3DSTATE_TASK_CONTROL), task);
   }