radv: Move radv_nir_lower_fs_intrinsics to new file.

Also ran clang-format on the affected code.

Signed-off-by: Timur Kristóf <timur.kristof@gmail.com>
Reviewed-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21971>

Author: Timur Kristóf
Date: 2023-03-29 18:46:11 +02:00
Committed by: Marge Bot
Parent: 838defc5eb
Commit: 87e7dfb5f8
6 changed files with 164 additions and 136 deletions


@@ -73,6 +73,7 @@ libradv_files = files(
  'nir/radv_nir.h',
  'nir/radv_nir_apply_pipeline_layout.c',
  'nir/radv_nir_lower_abi.c',
  'nir/radv_nir_lower_fs_intrinsics.c',
  'nir/radv_nir_lower_primitive_shading_rate.c',
  'nir/radv_nir_lower_ray_queries.c',
  'nir/radv_nir_lower_vs_inputs.c',


@@ -59,6 +59,9 @@ bool radv_nir_lower_vs_inputs(nir_shader *shader, const struct radv_pipeline_sta
bool radv_nir_lower_primitive_shading_rate(nir_shader *nir, enum amd_gfx_level gfx_level);
bool radv_nir_lower_fs_intrinsics(nir_shader *nir, const struct radv_pipeline_stage *fs_stage,
                                  const struct radv_pipeline_key *key);

#ifdef __cplusplus
}
#endif


@@ -0,0 +1,159 @@
/*
* Copyright © 2016 Red Hat.
* Copyright © 2016 Bas Nieuwenhuizen
* Copyright © 2023 Valve Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/

#include "nir.h"
#include "nir_builder.h"
#include "radv_nir.h"
#include "radv_private.h"

bool
radv_nir_lower_fs_intrinsics(nir_shader *nir, const struct radv_pipeline_stage *fs_stage,
                             const struct radv_pipeline_key *key)
{
   const struct radv_shader_info *info = &fs_stage->info;
   const struct radv_shader_args *args = &fs_stage->args;
   nir_function_impl *impl = nir_shader_get_entrypoint(nir);
   bool progress = false;

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_foreach_block (block, impl) {
      nir_foreach_instr_safe (instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         b.cursor = nir_after_instr(&intrin->instr);

         switch (intrin->intrinsic) {
         case nir_intrinsic_load_sample_mask_in: {
            nir_ssa_def *sample_coverage =
               nir_load_vector_arg_amd(&b, 1, .base = args->ac.sample_coverage.arg_index);

            nir_ssa_def *def = NULL;
            if (info->ps.uses_sample_shading || key->ps.sample_shading_enable) {
               /* gl_SampleMaskIn[0] = (SampleCoverage & (1 << gl_SampleID)). */
               nir_ssa_def *sample_id = nir_load_sample_id(&b);
               def = nir_iand(&b, sample_coverage, nir_ishl(&b, nir_imm_int(&b, 1u), sample_id));
            } else {
               def = sample_coverage;
            }

            nir_ssa_def_rewrite_uses(&intrin->dest.ssa, def);
            nir_instr_remove(instr);
            progress = true;
            break;
         }
         case nir_intrinsic_load_frag_coord: {
            if (!key->adjust_frag_coord_z)
               continue;

            if (!(nir_ssa_def_components_read(&intrin->dest.ssa) & (1 << 2)))
               continue;

            nir_ssa_def *frag_z = nir_channel(&b, &intrin->dest.ssa, 2);

            /* adjusted_frag_z = fddx_fine(frag_z) * 0.0625 + frag_z */
            nir_ssa_def *adjusted_frag_z = nir_fddx_fine(&b, frag_z);
            adjusted_frag_z = nir_ffma_imm1(&b, adjusted_frag_z, 0.0625f, frag_z);

            /* VRS Rate X = Ancillary[2:3] */
            nir_ssa_def *ancillary =
               nir_load_vector_arg_amd(&b, 1, .base = args->ac.ancillary.arg_index);
            nir_ssa_def *x_rate = nir_ubfe_imm(&b, ancillary, 2, 2);

            /* xRate = xRate == 0x1 ? adjusted_frag_z : frag_z. */
            nir_ssa_def *cond = nir_ieq_imm(&b, x_rate, 1);
            frag_z = nir_bcsel(&b, cond, adjusted_frag_z, frag_z);

            nir_ssa_def *new_dest = nir_vector_insert_imm(&b, &intrin->dest.ssa, frag_z, 2);
            nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa, new_dest, new_dest->parent_instr);

            progress = true;
            break;
         }
         case nir_intrinsic_load_barycentric_at_sample: {
            nir_ssa_def *num_samples = nir_load_rasterization_samples_amd(&b);
            nir_ssa_def *new_dest;

            if (key->dynamic_rasterization_samples) {
               nir_ssa_def *res1, *res2;

               nir_push_if(&b, nir_ieq_imm(&b, num_samples, 1));
               {
                  res1 = nir_load_barycentric_pixel(
                     &b, 32, .interp_mode = nir_intrinsic_interp_mode(intrin));
               }
               nir_push_else(&b, NULL);
               {
                  nir_ssa_def *sample_pos =
                     nir_load_sample_positions_amd(&b, 32, intrin->src[0].ssa, num_samples);

                  /* sample_pos -= 0.5 */
                  sample_pos = nir_fsub(&b, sample_pos, nir_imm_float(&b, 0.5f));

                  res2 = nir_load_barycentric_at_offset(
                     &b, 32, sample_pos, .interp_mode = nir_intrinsic_interp_mode(intrin));
               }
               nir_pop_if(&b, NULL);

               new_dest = nir_if_phi(&b, res1, res2);
            } else {
               if (!key->ps.num_samples) {
                  new_dest = nir_load_barycentric_pixel(
                     &b, 32, .interp_mode = nir_intrinsic_interp_mode(intrin));
               } else {
                  nir_ssa_def *sample_pos =
                     nir_load_sample_positions_amd(&b, 32, intrin->src[0].ssa, num_samples);

                  /* sample_pos -= 0.5 */
                  sample_pos = nir_fsub(&b, sample_pos, nir_imm_float(&b, 0.5f));

                  new_dest = nir_load_barycentric_at_offset(
                     &b, 32, sample_pos, .interp_mode = nir_intrinsic_interp_mode(intrin));
               }
            }

            nir_ssa_def_rewrite_uses(&intrin->dest.ssa, new_dest);
            nir_instr_remove(instr);
            progress = true;
            break;
         }
         default:
            break;
         }
      }
   }

   if (progress)
      nir_metadata_preserve(impl, 0);
   else
      nir_metadata_preserve(impl, nir_metadata_all);

   return progress;
}


@@ -3013,7 +3013,7 @@ radv_postprocess_nir(struct radv_device *device, const struct radv_pipeline_layo
      if (!pipeline_key->optimisations_disabled) {
         NIR_PASS(_, stage->nir, nir_opt_cse);
      }
      NIR_PASS(_, stage->nir, radv_lower_fs_intrinsics, stage, pipeline_key);
      NIR_PASS(_, stage->nir, radv_nir_lower_fs_intrinsics, stage, pipeline_key);
   }

   enum nir_lower_non_uniform_access_type lower_non_uniform_access_types =


@@ -377,138 +377,6 @@ lower_intrinsics(nir_shader *nir, const struct radv_pipeline_key *key)
   return progress;
}

bool
radv_lower_fs_intrinsics(nir_shader *nir, const struct radv_pipeline_stage *fs_stage,
                         const struct radv_pipeline_key *key)
{
   const struct radv_shader_info *info = &fs_stage->info;
   const struct radv_shader_args *args = &fs_stage->args;
   nir_function_impl *impl = nir_shader_get_entrypoint(nir);
   bool progress = false;

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         b.cursor = nir_after_instr(&intrin->instr);

         switch (intrin->intrinsic) {
         case nir_intrinsic_load_sample_mask_in: {
            nir_ssa_def *sample_coverage =
               nir_load_vector_arg_amd(&b, 1, .base = args->ac.sample_coverage.arg_index);

            nir_ssa_def *def = NULL;
            if (info->ps.uses_sample_shading || key->ps.sample_shading_enable) {
               /* gl_SampleMaskIn[0] = (SampleCoverage & (1 << gl_SampleID)). */
               nir_ssa_def *sample_id = nir_load_sample_id(&b);
               def = nir_iand(&b, sample_coverage, nir_ishl(&b, nir_imm_int(&b, 1u), sample_id));
            } else {
               def = sample_coverage;
            }

            nir_ssa_def_rewrite_uses(&intrin->dest.ssa, def);
            nir_instr_remove(instr);
            progress = true;
            break;
         }
         case nir_intrinsic_load_frag_coord: {
            if (!key->adjust_frag_coord_z)
               continue;

            if (!(nir_ssa_def_components_read(&intrin->dest.ssa) & (1 << 2)))
               continue;

            nir_ssa_def *frag_z = nir_channel(&b, &intrin->dest.ssa, 2);

            /* adjusted_frag_z = fddx_fine(frag_z) * 0.0625 + frag_z */
            nir_ssa_def *adjusted_frag_z = nir_fddx_fine(&b, frag_z);
            adjusted_frag_z = nir_ffma_imm1(&b, adjusted_frag_z, 0.0625f, frag_z);

            /* VRS Rate X = Ancillary[2:3] */
            nir_ssa_def *ancillary =
               nir_load_vector_arg_amd(&b, 1, .base = args->ac.ancillary.arg_index);
            nir_ssa_def *x_rate = nir_ubfe_imm(&b, ancillary, 2, 2);

            /* xRate = xRate == 0x1 ? adjusted_frag_z : frag_z. */
            nir_ssa_def *cond = nir_ieq_imm(&b, x_rate, 1);
            frag_z = nir_bcsel(&b, cond, adjusted_frag_z, frag_z);

            nir_ssa_def *new_dest = nir_vector_insert_imm(&b, &intrin->dest.ssa, frag_z, 2);
            nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa, new_dest, new_dest->parent_instr);

            progress = true;
            break;
         }
         case nir_intrinsic_load_barycentric_at_sample: {
            nir_ssa_def *num_samples = nir_load_rasterization_samples_amd(&b);
            nir_ssa_def *new_dest;

            if (key->dynamic_rasterization_samples) {
               nir_ssa_def *res1, *res2;

               nir_push_if(&b, nir_ieq_imm(&b, num_samples, 1));
               {
                  res1 = nir_load_barycentric_pixel(&b, 32,
                                                    .interp_mode = nir_intrinsic_interp_mode(intrin));
               }
               nir_push_else(&b, NULL);
               {
                  nir_ssa_def *sample_pos =
                     nir_load_sample_positions_amd(&b, 32, intrin->src[0].ssa, num_samples);

                  /* sample_pos -= 0.5 */
                  sample_pos = nir_fsub(&b, sample_pos, nir_imm_float(&b, 0.5f));

                  res2 = nir_load_barycentric_at_offset(&b, 32, sample_pos,
                                                        .interp_mode = nir_intrinsic_interp_mode(intrin));
               }
               nir_pop_if(&b, NULL);

               new_dest = nir_if_phi(&b, res1, res2);
            } else {
               if (!key->ps.num_samples) {
                  new_dest =
                     nir_load_barycentric_pixel(&b, 32,
                                                .interp_mode = nir_intrinsic_interp_mode(intrin));
               } else {
                  nir_ssa_def *sample_pos =
                     nir_load_sample_positions_amd(&b, 32, intrin->src[0].ssa, num_samples);

                  /* sample_pos -= 0.5 */
                  sample_pos = nir_fsub(&b, sample_pos, nir_imm_float(&b, 0.5f));

                  new_dest =
                     nir_load_barycentric_at_offset(&b, 32, sample_pos,
                                                    .interp_mode = nir_intrinsic_interp_mode(intrin));
               }
            }

            nir_ssa_def_rewrite_uses(&intrin->dest.ssa, new_dest);
            nir_instr_remove(instr);
            progress = true;
            break;
         }
         default:
            break;
         }
      }
   }

   if (progress)
      nir_metadata_preserve(impl, 0);
   else
      nir_metadata_preserve(impl, nir_metadata_all);

   return progress;
}

static bool
is_sincos(const nir_instr *instr, const void *_)
{


@@ -759,9 +759,6 @@ bool radv_consider_culling(const struct radv_physical_device *pdevice, struct ni
void radv_get_nir_options(struct radv_physical_device *device);
bool radv_lower_fs_intrinsics(nir_shader *nir, const struct radv_pipeline_stage *fs_stage,
                              const struct radv_pipeline_key *key);

nir_shader *create_rt_shader(struct radv_device *device,
                             const VkRayTracingPipelineCreateInfoKHR *pCreateInfo,
                             struct radv_ray_tracing_module *groups,