radv,aco: optimize computing the sample mask for per-sample shading

I don't know what these ps_iter_masks values were introduced for, but it
seems like we can optimize this by just doing:

gl_SampleMaskIn[0] = (SampleCoverage & (1 << gl_SampleID))

AMDGPU-PRO and AMDVLK apply the same formula to compute the
sample mask when per-sample shading is enabled.
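
As an illustration only (not part of the patch), here is a minimal C
sketch of what the new formula computes; the helper name and the example
coverage value are made up:

    /* gl_SampleMaskIn[0] = (SampleCoverage & (1 << gl_SampleID)) */
    static unsigned sample_mask_in(unsigned sample_coverage, unsigned sample_id)
    {
       return sample_coverage & (1u << sample_id);
    }

    /* e.g. with SampleCoverage = 0xb (samples 0, 1 and 3 covered):
     *    sample_mask_in(0xb, 1) == 0x2   -> only the invocation's own sample bit
     *    sample_mask_in(0xb, 2) == 0x0   -> sample 2 is not covered
     */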

No fossils-db changes.

Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Reviewed-by: Timur Kristóf <timur.kristof@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/7377>
commit 03f260cb27
parent c63bcda22c
Author: Samuel Pitoiset
Date:   2020-10-29 16:47:54 +01:00

2 changed files with 5 additions and 27 deletions


@@ -6797,26 +6797,16 @@ void visit_load_sample_mask_in(isel_context *ctx, nir_intrinsic_instr *instr) {
       log2_ps_iter_samples = ctx->options->key.fs.log2_ps_iter_samples;
    }
 
-   /* The bit pattern matches that used by fixed function fragment
-    * processing. */
-   static const unsigned ps_iter_masks[] = {
-      0xffff, /* not used */
-      0x5555,
-      0x1111,
-      0x0101,
-      0x0001,
-   };
-   assert(log2_ps_iter_samples < ARRAY_SIZE(ps_iter_masks));
-
    Builder bld(ctx->program, ctx->block);
 
    Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
 
    if (log2_ps_iter_samples) {
+      /* gl_SampleMaskIn[0] = (SampleCoverage & (1 << gl_SampleID)). */
       Temp sample_id = bld.vop3(aco_opcode::v_bfe_u32, bld.def(v1),
                                 get_arg(ctx, ctx->args->ac.ancillary), Operand(8u), Operand(4u));
-      Temp ps_iter_mask = bld.copy(bld.def(v1), Operand(ps_iter_masks[log2_ps_iter_samples]));
-      Temp mask = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), sample_id, ps_iter_mask);
+      Temp mask = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), sample_id,
+                           bld.copy(bld.def(v1), Operand(1u)));
       bld.vop2(aco_opcode::v_and_b32, Definition(dst), mask, get_arg(ctx, ctx->args->ac.sample_coverage));
    } else {
       bld.copy(Definition(dst), get_arg(ctx, ctx->args->ac.sample_coverage));
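
For reference, a rough per-lane C model of the sequence emitted above,
assuming (as the v_bfe_u32 operands suggest) that gl_SampleID sits in
bits 8..11 of the ancillary VGPR; the function name is made up:

    static unsigned model_sample_mask_in(unsigned ancillary, unsigned sample_coverage)
    {
       unsigned sample_id = (ancillary >> 8) & 0xf; /* v_bfe_u32 ancillary, 8, 4 */
       unsigned mask = 1u << sample_id;             /* v_lshlrev_b32: 1 << sample_id */
       return sample_coverage & mask;               /* v_and_b32 with sample_coverage */
    }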


@@ -744,23 +744,11 @@ static LLVMValueRef load_sample_mask_in(struct ac_shader_abi *abi)
       log2_ps_iter_samples = ctx->args->options->key.fs.log2_ps_iter_samples;
    }
 
-   /* The bit pattern matches that used by fixed function fragment
-    * processing. */
-   static const uint16_t ps_iter_masks[] = {
-      0xffff, /* not used */
-      0x5555,
-      0x1111,
-      0x0101,
-      0x0001,
-   };
-   assert(log2_ps_iter_samples < ARRAY_SIZE(ps_iter_masks));
-
-   uint32_t ps_iter_mask = ps_iter_masks[log2_ps_iter_samples];
-
    LLVMValueRef result, sample_id;
    if (log2_ps_iter_samples) {
+      /* gl_SampleMaskIn[0] = (SampleCoverage & (1 << gl_SampleID)). */
       sample_id = ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->ac.ancillary), 8, 4);
-      sample_id = LLVMBuildShl(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, ps_iter_mask, false), sample_id, "");
+      sample_id = LLVMBuildShl(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, 1, false), sample_id, "");
       result = LLVMBuildAnd(ctx->ac.builder, sample_id,
                             ac_get_arg(&ctx->ac, ctx->args->ac.sample_coverage), "");
    } else {