asahi: Gather flat/linear shaded input info from uncompiled FS

We need to propagate shading model metadata from the FS to the VS in
order to lay out the uniforms in the right order. This means we need
VS variants that depend on this data.

We could use the existing shader info structure, but that applies to
compiled shaders, which would introduce a dependency of the VS compile
on the FS compile. This information does not change across FS variants,
so we can instead introduce an agx_uncompiled_shader_info structure and
gather it early, at precompilation time.

Signed-off-by: Asahi Lina <lina@asahilina.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/23998>
Asahi Lina authored on 2023-06-28 19:07:20 +09:00; committed by Marge Bot
commit 90834353a1, parent 49994dc8cb
5 changed files with 62 additions and 16 deletions
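For context, a minimal sketch of how the gathered fragment-stage info could feed a vertex-shader variant key, which is the motivation described in the commit message above. The key structure and helper names below are illustrative assumptions and are not part of this commit.

/* Hypothetical sketch: fold the uncompiled-FS interpolation masks into a VS
 * variant key so the VS can lay out its varyings/uniforms to match the FS,
 * without depending on any compiled FS variant. Names are illustrative. */
#include <stdint.h>

struct example_vs_key {
   uint64_t fs_inputs_flat_shaded;
   uint64_t fs_inputs_linear_shaded;
};

static void
example_fill_vs_key(struct example_vs_key *key,
                    const struct agx_uncompiled_shader_info *fs_info)
{
   /* Copy the masks gathered at FS precompile time (see the diff below). */
   key->fs_inputs_flat_shaded = fs_info->inputs_flat_shaded;
   key->fs_inputs_linear_shaded = fs_info->inputs_linear_shaded;
}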


@@ -2094,33 +2094,60 @@ agx_gather_texcoords(nir_builder *b, nir_instr *instr, void *data)
    return false;
 }
 
+struct interp_masks {
+   uint64_t flat;
+   uint64_t linear;
+};
+
 static bool
-agx_gather_flat(nir_builder *b, nir_instr *instr, void *data)
+agx_gather_interp(nir_builder *b, nir_instr *instr, void *data)
 {
-   uint64_t *mask = data;
+   struct interp_masks *masks = data;
    if (instr->type != nir_instr_type_intrinsic)
       return false;
 
    nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
-   if (intr->intrinsic != nir_intrinsic_load_input)
-      return false;
 
-   nir_io_semantics sem = nir_intrinsic_io_semantics(intr);
-   *mask |= BITFIELD64_BIT(sem.location);
+   if (intr->intrinsic == nir_intrinsic_load_input) {
+      nir_io_semantics sem = nir_intrinsic_io_semantics(intr);
+      masks->flat |= BITFIELD64_BIT(sem.location);
+   } else if (intr->intrinsic == nir_intrinsic_load_interpolated_input &&
+              nir_intrinsic_interp_mode(nir_src_as_intrinsic(intr->src[0])) ==
+                 INTERP_MODE_NOPERSPECTIVE) {
+      nir_io_semantics sem = nir_intrinsic_io_semantics(intr);
+      masks->linear |= BITFIELD64_BIT(sem.location);
+   }
+
    return false;
 }
 
 /*
- * Build a bit mask of varyings (by location) that are flatshaded or used as
- * texture coordinates. This information is needed by lower_mediump_io.
+ * Build a bit mask of varyings (by location) that are flatshaded and linear
+ * shaded. This information is needed by lower_mediump_io and
+ * agx_uncompiled_shader_info.
+ */
+static struct interp_masks
+agx_interp_masks(nir_shader *nir)
+{
+   assert(nir->info.stage == MESA_SHADER_FRAGMENT);
+
+   struct interp_masks masks = {0};
+
+   nir_shader_instructions_pass(nir, agx_gather_interp, nir_metadata_all,
+                                &masks);
+
+   return masks;
+}
+
+/*
+ * Build a bit mask of varyings (by location) that are used as texture
+ * coordinates. This information is needed by lower_mediump_io.
  */
 static uint64_t
-agx_fp32_varying_mask(nir_shader *nir)
+agx_texcoord_mask(nir_shader *nir)
 {
    assert(nir->info.stage == MESA_SHADER_FRAGMENT);
 
    uint64_t mask = 0;
 
-   nir_shader_instructions_pass(nir, agx_gather_flat, nir_metadata_all, &mask);
    nir_shader_instructions_pass(nir, agx_gather_texcoords, nir_metadata_all,
                                 &mask);
 
    return mask;
@@ -2287,8 +2314,12 @@ agx_compile_function_nir(nir_shader *nir, nir_function_impl *impl,
  * lowered here to avoid duplicate work with shader variants.
  */
 void
-agx_preprocess_nir(nir_shader *nir, bool support_lod_bias)
+agx_preprocess_nir(nir_shader *nir, bool support_lod_bias,
+                   struct agx_uncompiled_shader_info *out)
 {
+   if (out)
+      memset(out, 0, sizeof(*out));
+
    NIR_PASS_V(nir, nir_lower_vars_to_ssa);
 
    if (nir->info.stage == MESA_SHADER_VERTEX) {
@@ -2313,6 +2344,9 @@ agx_preprocess_nir(nir_shader *nir, bool support_lod_bias)
               glsl_type_size, 0);
    NIR_PASS_V(nir, nir_lower_ssbo);
 
    if (nir->info.stage == MESA_SHADER_FRAGMENT) {
+      uint64_t texcoord = agx_texcoord_mask(nir);
+      struct interp_masks masks = agx_interp_masks(nir);
+
       NIR_PASS_V(nir, agx_nir_lower_frag_sidefx);
 
       /* Interpolate varyings at fp16 and write to the tilebuffer at fp16. As an
@@ -2322,7 +2356,12 @@ agx_preprocess_nir(nir_shader *nir, bool support_lod_bias)
        */
       NIR_PASS_V(nir, nir_lower_mediump_io,
                  nir_var_shader_in | nir_var_shader_out,
-                 ~agx_fp32_varying_mask(nir), false);
+                 ~(masks.flat | texcoord), false);
+
+      if (out) {
+         out->inputs_flat_shaded = masks.flat;
+         out->inputs_linear_shaded = masks.linear;
+      }
    }
 
    /* Clean up deref gunk after lowering I/O */


@@ -76,6 +76,11 @@ union agx_varyings {
    struct agx_varyings_fs fs;
 };
 
+struct agx_uncompiled_shader_info {
+   uint64_t inputs_flat_shaded;
+   uint64_t inputs_linear_shaded;
+};
+
 struct agx_shader_info {
    union agx_varyings varyings;
@@ -171,7 +176,8 @@ struct agx_shader_key {
    };
 };
 
-void agx_preprocess_nir(nir_shader *nir, bool support_lod_bias);
+void agx_preprocess_nir(nir_shader *nir, bool support_lod_bias,
+                        struct agx_uncompiled_shader_info *out);
 
 bool agx_nir_lower_discard_zs_emit(nir_shader *s);
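A usage sketch of the new signature (not taken from this commit; the variable names here are assumptions): a caller preprocesses the uncompiled fragment NIR once and keeps the gathered masks alongside it, as the gallium driver does further down with &so->info.

/* Sketch: preprocess an uncompiled fragment shader and record which varying
 * locations it reads flat (load_input) or noperspective ("linear").
 * fs_nir is assumed to be a fragment-stage nir_shader owned by the caller. */
struct agx_uncompiled_shader_info info;
agx_preprocess_nir(fs_nir, true /* support_lod_bias */, &info);

/* Bit N corresponds to varying location N (gl_varying_slot). */
uint64_t flat_mask   = info.inputs_flat_shaded;
uint64_t linear_mask = info.inputs_linear_shaded;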


@@ -17,7 +17,7 @@ agx_compile_meta_shader(struct agx_meta_cache *cache, nir_shader *shader,
    struct util_dynarray binary;
    util_dynarray_init(&binary, NULL);
 
-   agx_preprocess_nir(shader, false);
+   agx_preprocess_nir(shader, false, NULL);
 
    if (tib) {
       agx_nir_lower_tilebuffer(shader, tib, NULL, NULL);
       agx_nir_lower_monolithic_msaa(


@@ -1595,7 +1595,7 @@ agx_create_shader_state(struct pipe_context *pctx,
    blob_finish(&blob);
 
    so->nir = nir;
-   agx_preprocess_nir(nir, true);
+   agx_preprocess_nir(nir, true, &so->info);
 
    /* For shader-db, precompile a shader with a default key. This could be
     * improved but hopefully this is acceptable for now.
@@ -1674,7 +1674,7 @@ agx_create_compute_state(struct pipe_context *pctx,
    blob_finish(&blob);
 
    so->nir = nir;
-   agx_preprocess_nir(nir, true);
+   agx_preprocess_nir(nir, true, &so->info);
    agx_get_shader_variant(agx_screen(pctx->screen), so, &pctx->debug, &key);
 
    /* We're done with the NIR, throw it away */


@@ -171,6 +171,7 @@ struct agx_uncompiled_shader {
    enum pipe_shader_type type;
    const struct nir_shader *nir;
    uint8_t nir_sha1[20];
+   struct agx_uncompiled_shader_info info;
    struct hash_table *variants;
 
    /* For compute kernels */