/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "nir_constant_expressions.h"
#include <math.h>

/*
 * Implements SSA-based constant folding.
 */
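
/* Attempt to fold a single ALU instruction.  Folding only succeeds when
 * the destination is SSA and every source is a load_const; on success the
 * instruction is replaced with a load_const holding the evaluated result.
 */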
static bool
constant_fold_alu_instr(nir_alu_instr *instr, void *mem_ctx)
{
   nir_const_value src[NIR_MAX_VEC_COMPONENTS][NIR_MAX_VEC_COMPONENTS];

   if (!instr->dest.dest.is_ssa)
      return false;

   /* If any outputs/inputs have unsized types, then we need to guess the
    * bit-size.  In this case, the validator ensures that all bit-sizes
    * match, so we can just take the bit-size from the first output/input
    * with an unsized type.  If all the outputs/inputs are sized, then we
    * don't need to guess the bit-size at all because the code we generate
    * for constant opcodes in this case already knows the sizes of the
    * types involved and does not need the provided bit-size for anything
    * (although it still requires a valid bit-size to be passed in).
    */
   unsigned bit_size = 0;
   if (!nir_alu_type_get_type_size(nir_op_infos[instr->op].output_type))
      bit_size = instr->dest.dest.ssa.bit_size;

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      if (!instr->src[i].src.is_ssa)
         return false;

      if (bit_size == 0 &&
          !nir_alu_type_get_type_size(nir_op_infos[instr->op].input_types[i]))
         bit_size = instr->src[i].src.ssa->bit_size;

      nir_instr *src_instr = instr->src[i].src.ssa->parent_instr;

      if (src_instr->type != nir_instr_type_load_const)
         return false;
      nir_load_const_instr *load_const = nir_instr_as_load_const(src_instr);
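
      /* Gather this source's constant components, honoring the swizzle. */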
      for (unsigned j = 0; j < nir_ssa_alu_instr_src_components(instr, i);
           j++) {
         src[i][j] = load_const->value[instr->src[i].swizzle[j]];
      }

      /* We shouldn't have any source modifiers in the optimization loop. */
      assert(!instr->src[i].abs && !instr->src[i].negate);
   }
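
   /* No unsized types were involved, so the bit-size is unused by the
    * generated constant-evaluation code; 32 is just a valid placeholder.
    */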
   if (bit_size == 0)
      bit_size = 32;

   /* We shouldn't have any saturate modifiers in the optimization loop. */
   assert(!instr->dest.saturate);

   nir_const_value dest[NIR_MAX_VEC_COMPONENTS];
   nir_const_value *srcs[NIR_MAX_VEC_COMPONENTS];
   memset(dest, 0, sizeof(dest));
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; ++i)
      srcs[i] = src[i];
   nir_eval_const_opcode(instr->op, dest, instr->dest.dest.ssa.num_components,
                         bit_size, srcs);
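
   /* The evaluation succeeded, so materialize the result as a load_const
    * and make every user of the ALU result read from it instead.
    */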
   nir_load_const_instr *new_instr =
      nir_load_const_instr_create(mem_ctx,
                                  instr->dest.dest.ssa.num_components,
                                  instr->dest.dest.ssa.bit_size);

   memcpy(new_instr->value, dest,
          sizeof(*new_instr->value) * new_instr->def.num_components);

   nir_instr_insert_before(&instr->instr, &new_instr->instr);

   nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa,
                            nir_src_for_ssa(&new_instr->def));

   nir_instr_remove(&instr->instr);
   ralloc_free(instr);

   return true;
}
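
/* Fold demote_if/discard_if intrinsics with a constant condition: a true
 * condition becomes an unconditional demote/discard, while a false
 * condition means the intrinsic can never fire and is removed outright.
 */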
static bool
constant_fold_intrinsic_instr(nir_intrinsic_instr *instr)
{
   bool progress = false;

   if ((instr->intrinsic == nir_intrinsic_demote_if ||
        instr->intrinsic == nir_intrinsic_discard_if) &&
       nir_src_is_const(instr->src[0])) {
      if (nir_src_as_bool(instr->src[0])) {
         /* This method of getting a nir_shader * from a nir_instr is
          * admittedly gross, but given the rarity of hitting this case I
          * think it's preferable to plumbing an otherwise unused
          * nir_shader * parameter through four functions to get here.
          */
         nir_cf_node *cf_node = &instr->instr.block->cf_node;
         nir_function_impl *impl = nir_cf_node_get_function(cf_node);
         nir_shader *shader = impl->function->shader;

         nir_intrinsic_op op = instr->intrinsic == nir_intrinsic_discard_if ?
                               nir_intrinsic_discard :
                               nir_intrinsic_demote;
         nir_intrinsic_instr *new_instr = nir_intrinsic_instr_create(shader, op);
         nir_instr_insert_before(&instr->instr, &new_instr->instr);
         nir_instr_remove(&instr->instr);
         progress = true;
      } else {
         /* We're not discarding; just delete the instruction. */
         nir_instr_remove(&instr->instr);
         progress = true;
      }
   }

   return progress;
}
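
/* Fold every instruction in a block that we know how to fold. */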
static bool
constant_fold_block(nir_block *block, void *mem_ctx)
{
   bool progress = false;

   nir_foreach_instr_safe(instr, block) {
      switch (instr->type) {
      case nir_instr_type_alu:
         progress |= constant_fold_alu_instr(nir_instr_as_alu(instr), mem_ctx);
         break;
      case nir_instr_type_intrinsic:
         progress |=
            constant_fold_intrinsic_instr(nir_instr_as_intrinsic(instr));
         break;
      default:
         /* Don't know how to constant fold */
         break;
      }
   }

   return progress;
}
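
/* Run constant folding across one function implementation. */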
static bool
nir_opt_constant_folding_impl(nir_function_impl *impl)
{
   void *mem_ctx = ralloc_parent(impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      progress |= constant_fold_block(block, mem_ctx);
   }
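
   /* Folding rewrites instructions in place and never adds or removes basic
    * blocks, so block indices and dominance information survive any progress
    * we made.  If nothing changed, all metadata is still valid; clearing the
    * not-properly-reset debug flag records that skipping
    * nir_metadata_preserve() here is deliberate.
    */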
   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
#ifndef NDEBUG
      impl->valid_metadata &= ~nir_metadata_not_properly_reset;
#endif
   }

   return progress;
}
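
/* Pass entry point: fold constants in every function that has an
 * implementation and report whether anything changed.
 */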
bool
nir_opt_constant_folding(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= nir_opt_constant_folding_impl(function->impl);
   }

   return progress;
}