nir: Convert to nir_foreach_function_impl
Done by hand at each call site, but it went very quickly with funny
Vim motions and common regexes. This is a very common idiom in NIR.

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/23807>

commit 190b1fdc64
parent 19daa9283c
committed by Marge Bot
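
The conversion is mechanical: NIR's nir_foreach_function_impl macro folds the
usual nir_foreach_function loop plus its if (function->impl) guard into a
single iterator over function implementations. A minimal sketch of the idiom,
where do_something_with() is a hypothetical stand-in for a pass body, not a
helper from this commit:

   /* Before: walk every function, skipping declarations with no body. */
   nir_foreach_function(function, shader) {
      if (function->impl) {
         do_something_with(function->impl);   /* hypothetical pass body */
      }
   }

   /* After: visit only functions that have an implementation, receiving
    * the nir_function_impl directly.
    */
   nir_foreach_function_impl(impl, shader) {
      do_something_with(impl);
   }

As the diff below shows, the payoff is one less level of nesting at every
call site, which is why the whole change is a reindent plus a rename of
function->impl to impl.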
@@ -369,14 +369,12 @@ nir_find_inlinable_uniforms(nir_shader *shader)
    uint32_t uni_offsets[MAX_INLINABLE_UNIFORMS];
    uint8_t num_offsets = 0;
 
-   nir_foreach_function(function, shader) {
-      if (function->impl) {
-         nir_metadata_require(function->impl, nir_metadata_loop_analysis,
-                              nir_var_all, false);
+   nir_foreach_function_impl(impl, shader) {
+      nir_metadata_require(impl, nir_metadata_loop_analysis,
+                           nir_var_all, false);
 
-         foreach_list_typed(nir_cf_node, node, node, &function->impl->body)
-            process_node(node, NULL, uni_offsets, &num_offsets);
-      }
+      foreach_list_typed(nir_cf_node, node, node, &impl->body)
+         process_node(node, NULL, uni_offsets, &num_offsets);
    }
 
    for (int i = 0; i < num_offsets; i++)
@@ -392,86 +390,84 @@ nir_inline_uniforms(nir_shader *shader, unsigned num_uniforms,
    if (!num_uniforms)
       return;
 
-   nir_foreach_function(function, shader) {
-      if (function->impl) {
-         nir_builder b = nir_builder_create(function->impl);
-         nir_foreach_block(block, function->impl) {
-            nir_foreach_instr_safe(instr, block) {
-               if (instr->type != nir_instr_type_intrinsic)
-                  continue;
+   nir_foreach_function_impl(impl, shader) {
+      nir_builder b = nir_builder_create(impl);
+      nir_foreach_block(block, impl) {
+         nir_foreach_instr_safe(instr, block) {
+            if (instr->type != nir_instr_type_intrinsic)
+               continue;
 
-               nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
+            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
 
-               /* Only replace UBO 0 with constant offsets. */
-               if (intr->intrinsic == nir_intrinsic_load_ubo &&
-                   nir_src_is_const(intr->src[0]) &&
-                   nir_src_as_uint(intr->src[0]) == 0 &&
-                   nir_src_is_const(intr->src[1]) &&
-                   /* TODO: Can't handle other bit sizes for now. */
-                   intr->dest.ssa.bit_size == 32) {
-                  int num_components = intr->dest.ssa.num_components;
-                  uint32_t offset = nir_src_as_uint(intr->src[1]) / 4;
+            /* Only replace UBO 0 with constant offsets. */
+            if (intr->intrinsic == nir_intrinsic_load_ubo &&
+                nir_src_is_const(intr->src[0]) &&
+                nir_src_as_uint(intr->src[0]) == 0 &&
+                nir_src_is_const(intr->src[1]) &&
+                /* TODO: Can't handle other bit sizes for now. */
+                intr->dest.ssa.bit_size == 32) {
+               int num_components = intr->dest.ssa.num_components;
+               uint32_t offset = nir_src_as_uint(intr->src[1]) / 4;
 
-                  if (num_components == 1) {
-                     /* Just replace the uniform load to constant load. */
-                     for (unsigned i = 0; i < num_uniforms; i++) {
-                        if (offset == uniform_dw_offsets[i]) {
-                           b.cursor = nir_before_instr(&intr->instr);
-                           nir_ssa_def *def = nir_imm_int(&b, uniform_values[i]);
-                           nir_ssa_def_rewrite_uses(&intr->dest.ssa, def);
-                           nir_instr_remove(&intr->instr);
-                           break;
-                        }
-                     }
-                  } else {
-                     /* Lower vector uniform load to scalar and replace each
-                      * found component load with constant load.
-                      */
-                     uint32_t max_offset = offset + num_components;
-                     nir_ssa_def *components[NIR_MAX_VEC_COMPONENTS] = {0};
-                     bool found = false;
+               if (num_components == 1) {
+                  /* Just replace the uniform load to constant load. */
+                  for (unsigned i = 0; i < num_uniforms; i++) {
+                     if (offset == uniform_dw_offsets[i]) {
+                        b.cursor = nir_before_instr(&intr->instr);
+                        nir_ssa_def *def = nir_imm_int(&b, uniform_values[i]);
+                        nir_ssa_def_rewrite_uses(&intr->dest.ssa, def);
+                        nir_instr_remove(&intr->instr);
+                        break;
+                     }
+                  }
+               } else {
+                  /* Lower vector uniform load to scalar and replace each
+                   * found component load with constant load.
+                   */
+                  uint32_t max_offset = offset + num_components;
+                  nir_ssa_def *components[NIR_MAX_VEC_COMPONENTS] = {0};
+                  bool found = false;
 
-                     b.cursor = nir_before_instr(&intr->instr);
+                  b.cursor = nir_before_instr(&intr->instr);
 
-                     /* Find component to replace. */
-                     for (unsigned i = 0; i < num_uniforms; i++) {
-                        uint32_t uni_offset = uniform_dw_offsets[i];
-                        if (uni_offset >= offset && uni_offset < max_offset) {
-                           int index = uni_offset - offset;
-                           components[index] = nir_imm_int(&b, uniform_values[i]);
-                           found = true;
-                        }
-                     }
+                  /* Find component to replace. */
+                  for (unsigned i = 0; i < num_uniforms; i++) {
+                     uint32_t uni_offset = uniform_dw_offsets[i];
+                     if (uni_offset >= offset && uni_offset < max_offset) {
+                        int index = uni_offset - offset;
+                        components[index] = nir_imm_int(&b, uniform_values[i]);
+                        found = true;
+                     }
+                  }
 
-                     if (!found)
-                        continue;
+                  if (!found)
+                     continue;
 
-                     /* Create per-component uniform load. */
-                     for (unsigned i = 0; i < num_components; i++) {
-                        if (!components[i]) {
-                           uint32_t scalar_offset = (offset + i) * 4;
-                           components[i] = nir_load_ubo(&b, 1, intr->dest.ssa.bit_size,
-                                                        intr->src[0].ssa,
-                                                        nir_imm_int(&b, scalar_offset));
-                           nir_intrinsic_instr *load =
-                              nir_instr_as_intrinsic(components[i]->parent_instr);
-                           nir_intrinsic_set_align(load, NIR_ALIGN_MUL_MAX, scalar_offset);
-                           nir_intrinsic_set_range_base(load, scalar_offset);
-                           nir_intrinsic_set_range(load, 4);
-                        }
-                     }
+                  /* Create per-component uniform load. */
+                  for (unsigned i = 0; i < num_components; i++) {
+                     if (!components[i]) {
+                        uint32_t scalar_offset = (offset + i) * 4;
+                        components[i] = nir_load_ubo(&b, 1, intr->dest.ssa.bit_size,
+                                                     intr->src[0].ssa,
+                                                     nir_imm_int(&b, scalar_offset));
+                        nir_intrinsic_instr *load =
+                           nir_instr_as_intrinsic(components[i]->parent_instr);
+                        nir_intrinsic_set_align(load, NIR_ALIGN_MUL_MAX, scalar_offset);
+                        nir_intrinsic_set_range_base(load, scalar_offset);
+                        nir_intrinsic_set_range(load, 4);
+                     }
+                  }
 
-                     /* Replace the original uniform load. */
-                     nir_ssa_def_rewrite_uses(&intr->dest.ssa,
-                                              nir_vec(&b, components, num_components));
-                     nir_instr_remove(&intr->instr);
-                  }
-               }
-            }
-         }
+                  /* Replace the original uniform load. */
+                  nir_ssa_def_rewrite_uses(&intr->dest.ssa,
+                                           nir_vec(&b, components, num_components));
+                  nir_instr_remove(&intr->instr);
+               }
+            }
+         }
+      }
 
-         nir_metadata_preserve(function->impl, nir_metadata_block_index |
-                                               nir_metadata_dominance);
-      }
+      nir_metadata_preserve(impl, nir_metadata_block_index |
+                                  nir_metadata_dominance);
    }
 }
|