2010-05-05 10:37:25 -07:00
|
|
|
/*
|
|
|
|
* Copyright © 2010 Intel Corporation
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice (including the next
|
|
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
|
|
* Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
|
|
|
* DEALINGS IN THE SOFTWARE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/**
|
2010-07-26 22:50:29 -07:00
|
|
|
* \file ir_optimization.h
|
2010-05-05 10:37:25 -07:00
|
|
|
*
|
|
|
|
* Prototypes for optimization passes to be called by the compiler and drivers.
|
|
|
|
*/
|
|
|
|
|
2017-03-20 16:04:08 +00:00
|
|
|
#ifndef GLSL_IR_OPTIMIZATION_H
|
|
|
|
#define GLSL_IR_OPTIMIZATION_H
|
|
|
|
|
2018-04-08 13:13:08 -04:00
|
|
|
struct gl_linked_shader;
|
|
|
|
struct gl_shader_program;
|
|
|
|
|
2010-11-18 17:54:07 -08:00
|
|
|
/* Operations for lower_instructions() */
|
2011-08-27 18:32:58 -05:00
|
|
|
#define SUB_TO_ADD_NEG 0x01
|
2017-01-16 16:39:06 +01:00
|
|
|
#define FDIV_TO_MUL_RCP 0x02
|
2011-08-27 18:32:58 -05:00
|
|
|
#define EXP_TO_EXP2 0x04
|
|
|
|
#define POW_TO_EXP2 0x08
|
|
|
|
#define LOG_TO_LOG2 0x10
|
glsl: Improve precision of mod(x,y)
Currently, Mesa uses the lowering pass MOD_TO_FRACT to implement
mod(x,y) as y * fract(x/y). This implementation has a downside though:
it introduces precision errors due to the fract() operation. Even worse,
since the result of fract() is multiplied by y, the larger y gets the
larger the precision error we produce, so for large enough numbers the
precision loss is significant. Some examples on i965:
Operation Precision error
-----------------------------------------------------
mod(-1.951171875, 1.9980468750) 0.0000000447
mod(121.57, 13.29) 0.0000023842
mod(3769.12, 321.99) 0.0000762939
mod(3769.12, 1321.99) 0.0001220703
mod(-987654.125, 123456.984375) 0.0160663128
mod( 987654.125, 123456.984375) 0.0312500000
This patch replaces the current lowering pass with a different one
(MOD_TO_FLOOR) that follows the recommended implementation in the GLSL
man pages:
mod(x,y) = x - y * floor(x/y)
This implementation eliminates the precision errors at the expense of
an additional add instruction on some systems. On systems that can do
negate with multiply-add in a single operation this new implementation
would come at no additional cost.
v2 (Ian Romanick)
- Do not clone operands because when they are expressions we would be
duplicating them and that can lead to suboptimal code.
Fixes the following 16 dEQP tests:
dEQP-GLES3.functional.shaders.builtin_functions.precision.mod.mediump_*
dEQP-GLES3.functional.shaders.builtin_functions.precision.mod.highp_*
Reviewed-by: Ian Romanick <ian.d.romanick@intel.com>
2015-01-20 17:09:59 +01:00
|
|
|
#define MOD_TO_FLOOR 0x20
|
2011-08-27 18:32:58 -05:00
|
|
|
#define INT_DIV_TO_MUL_RCP 0x40
|
2016-01-07 15:54:16 -08:00
|
|
|
#define LDEXP_TO_ARITH 0x80
|
|
|
|
#define CARRY_TO_ARITH 0x100
|
|
|
|
#define BORROW_TO_ARITH 0x200
|
|
|
|
#define SAT_TO_CLAMP 0x400
|
|
|
|
#define DOPS_TO_DFRAC 0x800
|
|
|
|
#define DFREXP_DLDEXP_TO_ARITH 0x1000
|
2016-06-23 16:16:21 -07:00
|
|
|
#define BIT_COUNT_TO_MATH 0x02000
|
2016-06-23 16:57:23 -07:00
|
|
|
#define EXTRACT_TO_SHIFTS 0x04000
|
2016-06-23 23:16:10 -07:00
|
|
|
#define INSERT_TO_SHIFTS 0x08000
|
2016-06-24 00:11:26 -07:00
|
|
|
#define REVERSE_TO_SHIFTS 0x10000
|
2016-06-24 01:17:23 -07:00
|
|
|
#define FIND_LSB_TO_FLOAT_CAST 0x20000
|
2016-06-24 01:53:33 -07:00
|
|
|
#define FIND_MSB_TO_FLOAT_CAST 0x40000
|
2016-06-27 15:21:21 -07:00
|
|
|
#define IMUL_HIGH_TO_MUL 0x80000
|
2017-01-16 16:39:06 +01:00
|
|
|
#define DDIV_TO_MUL_RCP 0x100000
|
|
|
|
#define DIV_TO_MUL_RCP (FDIV_TO_MUL_RCP | DDIV_TO_MUL_RCP)
|
2017-03-17 14:58:22 +01:00
|
|
|
#define SQRT_TO_ABS_SQRT 0x200000
|
2019-02-27 14:02:54 -08:00
|
|
|
#define MUL64_TO_MUL_AND_MUL_HIGH 0x400000
|
2010-11-18 17:54:07 -08:00
|
|
|
|
2016-10-14 18:17:16 -07:00
|
|
|
/* Operations for lower_64bit_integer_instructions() */
|
|
|
|
#define MUL64 (1U << 0)
|
2016-10-17 13:55:54 -07:00
|
|
|
#define SIGN64 (1U << 1)
|
2016-10-17 17:55:18 -07:00
|
|
|
#define DIV64 (1U << 2)
|
2016-10-18 16:47:14 -07:00
|
|
|
#define MOD64 (1U << 3)
|
2016-10-14 18:17:16 -07:00
|
|
|
|
2012-11-19 15:15:32 -08:00
|
|
|
/**
|
|
|
|
* \see class lower_packing_builtins_visitor
|
|
|
|
*/
|
|
|
|
enum lower_packing_builtins_op {
|
|
|
|
LOWER_PACK_UNPACK_NONE = 0x0000,
|
|
|
|
|
|
|
|
LOWER_PACK_SNORM_2x16 = 0x0001,
|
|
|
|
LOWER_UNPACK_SNORM_2x16 = 0x0002,
|
|
|
|
|
|
|
|
LOWER_PACK_UNORM_2x16 = 0x0004,
|
|
|
|
LOWER_UNPACK_UNORM_2x16 = 0x0008,
|
|
|
|
|
|
|
|
LOWER_PACK_HALF_2x16 = 0x0010,
|
|
|
|
LOWER_UNPACK_HALF_2x16 = 0x0020,
|
|
|
|
|
2016-01-21 11:46:22 -08:00
|
|
|
LOWER_PACK_SNORM_4x8 = 0x0040,
|
|
|
|
LOWER_UNPACK_SNORM_4x8 = 0x0080,
|
2013-01-21 15:31:00 -08:00
|
|
|
|
2016-01-21 11:46:22 -08:00
|
|
|
LOWER_PACK_UNORM_4x8 = 0x0100,
|
|
|
|
LOWER_UNPACK_UNORM_4x8 = 0x0200,
|
2013-01-21 15:31:00 -08:00
|
|
|
|
2016-01-21 11:46:22 -08:00
|
|
|
LOWER_PACK_USE_BFI = 0x0400,
|
|
|
|
LOWER_PACK_USE_BFE = 0x0800,
|
2012-11-19 15:15:32 -08:00
|
|
|
};
|
|
|
|
|
2011-10-21 11:17:39 -07:00
|
|
|
bool do_common_optimization(exec_list *ir, bool linked,
|
|
|
|
bool uniform_locations_assigned,
|
2014-04-06 23:25:00 -07:00
|
|
|
const struct gl_shader_compiler_options *options,
|
|
|
|
bool native_integers);
|
2010-08-10 13:06:49 -07:00
|
|
|
|
2016-04-29 14:40:26 -07:00
|
|
|
bool ir_constant_fold(ir_rvalue **rvalue);
|
|
|
|
|
2014-02-28 20:11:32 -08:00
|
|
|
bool do_rebalance_tree(exec_list *instructions);
|
2014-02-28 17:49:20 -08:00
|
|
|
bool do_algebraic(exec_list *instructions, bool native_integers,
|
|
|
|
const struct gl_shader_compiler_options *options);
|
2014-10-22 20:48:21 -07:00
|
|
|
bool opt_conditional_discard(exec_list *instructions);
|
2010-05-05 11:45:30 -07:00
|
|
|
bool do_constant_folding(exec_list *instructions);
|
2010-05-12 12:10:41 -07:00
|
|
|
bool do_constant_variable(exec_list *instructions);
|
|
|
|
bool do_constant_variable_unlinked(exec_list *instructions);
|
2011-01-25 10:28:13 +10:00
|
|
|
bool do_copy_propagation_elements(exec_list *instructions);
|
2010-08-09 17:03:46 -07:00
|
|
|
bool do_constant_propagation(exec_list *instructions);
|
2013-06-12 13:23:48 +02:00
|
|
|
void do_dead_builtin_varyings(struct gl_context *ctx,
|
2016-06-30 14:55:40 +10:00
|
|
|
gl_linked_shader *producer,
|
|
|
|
gl_linked_shader *consumer,
|
2013-06-12 13:23:48 +02:00
|
|
|
unsigned num_tfeedback_decls,
|
|
|
|
class tfeedback_decl *tfeedback_decls);
|
2011-10-21 11:17:39 -07:00
|
|
|
bool do_dead_code(exec_list *instructions, bool uniform_locations_assigned);
|
2010-05-05 10:37:25 -07:00
|
|
|
bool do_dead_code_local(exec_list *instructions);
|
2010-07-27 11:28:26 -07:00
|
|
|
bool do_dead_code_unlinked(exec_list *instructions);
|
2010-08-05 10:09:12 -07:00
|
|
|
bool do_dead_functions(exec_list *instructions);
|
2013-04-17 17:30:25 -07:00
|
|
|
bool opt_flip_matrices(exec_list *instructions);
|
2010-05-05 11:45:30 -07:00
|
|
|
bool do_function_inlining(exec_list *instructions);
|
glsl: add continue/break/return unification/elimination pass (v2)
Changes in v2:
- Base class renamed to ir_control_flow_visitor
- Tried to comply with coding style
This is a new pass that supersedes ir_if_return and "lowers" jumps
to if/else structures.
Currently it causes no regressions on softpipe and nv40, but I'm not sure
whether the piglit glsl tests are thorough enough, so consider this
experimental.
It can be asked to:
1. Pull jumps out of ifs where possible
2. Remove all "continue"s, replacing them with an "execute flag"
3. Replace all "break" with a single conditional one at the end of the loop
4. Replace all "return"s with a single return at the end of the function,
for the main function and/or other functions
This gives several great benefits:
1. All functions can be inlined after this pass
2. nv40 and other pre-DX10 chips without "continue" can be supported
3. nv30 and other pre-DX10 chips with no control flow at all are better supported
Note that for full effect we should also teach the unroller to unroll
loops with a fixed maximum number of iterations but with the canonical
conditional "break" that this pass will insert if asked to.
Continues are lowered by adding a per-loop "execute flag", initialized to
TRUE, that when cleared inhibits all execution until the end of the loop.
Breaks are lowered to continues, plus setting a "break flag" that is checked
at the end of the loop, and trigger the unique "break".
Returns are lowered to breaks/continues, plus adding a "return flag" that
causes loops to break again out of their enclosing loops until all the
loops are exited: then the "execute flag" logic will ignore everything
until the end of the function.
Note that "continue" and "return" can also be implemented by adding
a dummy loop and using break.
However, this is bad for hardware with limited nesting depth, and
prevents further optimization, and thus is not currently performed.
2010-09-07 00:24:08 +02:00
|
|
|
bool do_lower_jumps(exec_list *instructions, bool pull_out_jumps = true, bool lower_sub_return = true, bool lower_main_return = false, bool lower_continue = false, bool lower_break = false);
|
2010-09-30 20:07:27 -07:00
|
|
|
bool do_lower_texture_projection(exec_list *instructions);
|
2010-05-05 11:45:30 -07:00
|
|
|
bool do_if_simplification(exec_list *instructions);
|
2013-04-03 23:56:57 -07:00
|
|
|
bool opt_flatten_nested_if_blocks(exec_list *instructions);
|
2010-11-24 22:02:26 -08:00
|
|
|
bool do_discard_simplification(exec_list *instructions);
|
2016-07-03 17:01:09 +02:00
|
|
|
bool lower_if_to_cond_assign(gl_shader_stage stage, exec_list *instructions,
|
2016-07-03 17:11:07 +02:00
|
|
|
unsigned max_depth = 0, unsigned min_branch_cost = 0);
|
2010-07-12 11:04:07 -07:00
|
|
|
bool do_mat_op_to_vec(exec_list *instructions);
|
glsl: Optimize min/max expression trees
Original patch by Petri Latvala <petri.latvala@intel.com>:
Add an optimization pass that drops min/max expression operands that
can be proven to not contribute to the final result. The algorithm is
similar to alpha-beta pruning on a minmax search, from the field of
AI.
This optimization pass can optimize min/max expressions where operands
are min/max expressions. Such code can appear in shaders by itself, or
as the result of clamp() or AMD_shader_trinary_minmax functions.
This optimization pass improves the generated code for piglit's
AMD_shader_trinary_minmax tests as follows:
total instructions in shared programs: 75 -> 67 (-10.67%)
instructions in affected programs: 60 -> 52 (-13.33%)
GAINED: 0
LOST: 0
All tests (max3, min3, mid3) improved.
A full shader-db run:
total instructions in shared programs: 4293603 -> 4293575 (-0.00%)
instructions in affected programs: 1188 -> 1160 (-2.36%)
GAINED: 0
LOST: 0
Improvements happen in Guacamelee and Serious Sam 3. One shader from
Dungeon Defenders is hurt by shader-db metrics (26 -> 28), because of
dropping of a (constant float (0.00000)) operand, which was
compiled to a saturate modifier.
Version 2 by Iago Toral Quiroga <itoral@igalia.com>:
Changes from review feedback:
- Squashed various cosmetic changes sent by Matt Turner.
- Make less_all_components return an enum rather than setting a class member.
(Suggested by Matt Turner). Also, renamed it to compare_components.
- Make less_all_components, smaller_constant and larger_constant static.
(Suggested by Matt Turner)
- Change mixmax_range to call its limits "low" and "high" instead of
"range[0]" and "range[1]". (Suggested by Connor Abbot).
- Use ir_builder swizzle helpers in swizzle_if_required(). (Suggested by
Connor Abbot).
- Make the logic clearer by rearranging the code and commenting.
(Suggested by Connor Abbot).
- Added comment to explain why we need to recurse twice. (Suggested by
Connor Abbot).
- If we cannot prune an expression, do not return early. Instead, attempt
to prune its children. (Suggested by Connor Abbot).
Other changes:
- Instead of having a global "valid" visitor member, let the various functions
that can determine this status return a boolean and check for its value
to decide what to do in each case. This is more flexible and allows to
recurse into children of parents that could not be pruned due to invalid
ranges (so related to the last bullet in the review feedback).
- Make sure we always check if a range is valid before working with it. Since
any use of get_range, combine_range or range_intersection can invalidate
a range we should check for this situation every time we use any of these
functions.
Version 3 by Iago Toral Quiroga <itoral@igalia.com>:
Changes from review feedback:
- Now we can make get_range, combine_range and range_intersection static too
(suggested by Connor Abbot).
- Do not return NULL when looking for the larger or greater constant into
mixed vector constants. Instead, produce a new constant by doing a
component-wise minmax. With this we can also remove of the validations when
we call into these functions (suggested by Connor Abbot).
- Add a comment explaining the meaning of the baserange argument in
prune_expression (suggested by Connor Abbot).
Other changes:
- Eliminate minmax expressions operating on constant vectors with mixed values
by resolving them.
No piglit regressions observed with Version 3.
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=76861
Reviewed-by: Connor Abbott <cwabbott0@gmail.com>
2014-07-29 12:36:31 +03:00
|
|
|
bool do_minmax_prune(exec_list *instructions);
|
2010-08-05 11:01:09 -07:00
|
|
|
bool do_structure_splitting(exec_list *instructions);
|
2017-10-31 23:37:14 -07:00
|
|
|
bool optimize_swizzles(exec_list *instructions);
|
2013-12-21 11:28:05 -08:00
|
|
|
bool do_vectorize(exec_list *instructions);
|
2010-07-30 17:04:49 -07:00
|
|
|
bool do_tree_grafting(exec_list *instructions);
|
2010-07-06 17:53:32 -07:00
|
|
|
bool do_vec_index_to_cond_assign(exec_list *instructions);
|
2010-05-11 11:31:09 -07:00
|
|
|
bool do_vec_index_to_swizzle(exec_list *instructions);
|
2010-11-25 01:09:26 -08:00
|
|
|
bool lower_discard(exec_list *instructions);
|
2012-05-15 12:27:15 +01:00
|
|
|
void lower_discard_flow(exec_list *instructions);
|
2010-11-18 17:54:07 -08:00
|
|
|
bool lower_instructions(exec_list *instructions, unsigned what_to_lower);
|
2010-09-09 15:20:09 -07:00
|
|
|
bool lower_noise(exec_list *instructions);
|
2015-05-28 23:24:08 +02:00
|
|
|
bool lower_variable_index_to_cond_assign(gl_shader_stage stage,
|
|
|
|
exec_list *instructions, bool lower_input, bool lower_output,
|
|
|
|
bool lower_temp, bool lower_uniform);
|
2010-11-16 12:01:42 -08:00
|
|
|
bool lower_quadop_vector(exec_list *instructions, bool dont_lower_swz);
|
2019-11-08 08:17:17 +02:00
|
|
|
bool lower_const_arrays_to_uniforms(exec_list *instructions, unsigned stage, unsigned max_uniform_components);
|
2016-06-30 14:55:40 +10:00
|
|
|
bool lower_clip_cull_distance(struct gl_shader_program *prog,
|
|
|
|
gl_linked_shader *shader);
|
2019-10-12 00:19:50 -04:00
|
|
|
ir_variable * lower_xfb_varying(void *mem_ctx,
|
|
|
|
gl_linked_shader *shader,
|
|
|
|
const char *old_var_name);
|
2014-07-21 21:59:37 -04:00
|
|
|
void lower_output_reads(unsigned stage, exec_list *instructions);
|
2012-11-19 15:15:32 -08:00
|
|
|
bool lower_packing_builtins(exec_list *instructions, int op_mask);
|
2017-10-10 13:58:43 +02:00
|
|
|
void lower_shared_reference(struct gl_context *ctx,
|
|
|
|
struct gl_shader_program *prog,
|
|
|
|
struct gl_linked_shader *shader);
|
2016-06-30 14:55:40 +10:00
|
|
|
void lower_ubo_reference(struct gl_linked_shader *shader,
|
2017-07-24 10:24:53 +10:00
|
|
|
bool clamp_block_indices, bool use_std430_as_default);
|
2014-04-26 17:45:35 -07:00
|
|
|
void lower_packed_varyings(void *mem_ctx,
|
2016-11-05 09:31:21 -04:00
|
|
|
unsigned locations_used,
|
|
|
|
const uint8_t *components,
|
|
|
|
ir_variable_mode mode,
|
2016-06-30 14:55:40 +10:00
|
|
|
unsigned gs_input_vertices,
|
|
|
|
gl_linked_shader *shader,
|
2019-10-12 00:05:03 -04:00
|
|
|
bool disable_varying_packing,
|
|
|
|
bool disable_xfb_packing,
|
|
|
|
bool xfb_enabled);
|
2013-03-18 14:45:53 -07:00
|
|
|
bool lower_vector_insert(exec_list *instructions, bool lower_nonconstant_index);
|
2016-06-30 14:55:40 +10:00
|
|
|
bool lower_vector_derefs(gl_linked_shader *shader);
|
|
|
|
void lower_named_interface_blocks(void *mem_ctx, gl_linked_shader *shader);
|
2010-09-13 14:25:26 -07:00
|
|
|
bool optimize_redundant_jumps(exec_list *instructions);
|
2010-10-02 22:57:17 -07:00
|
|
|
bool optimize_split_arrays(exec_list *instructions, bool linked);
|
2014-02-11 09:41:44 +10:00
|
|
|
bool lower_offset_arrays(exec_list *instructions);
|
glsl: Eliminate unused built-in variables after compilation
After compilation (and before linking) we can eliminate quite a few
built-in variables. Basically, any uniform or constant (e.g.,
gl_MaxVertexTextureImageUnits) that isn't used (with one exception) can
be eliminated. System values, vertex shader inputs (with one
exception), and fragment shader outputs that are not used and not
re-declared in the shader text can also be removed.
gl_ModelViewProjectMatrix and gl_Vertex are used by the built-in
function ftransform. There are some complications with eliminating
these variables (see the comment in the patch), so they are not
eliminated.
Valgrind massif results for a trimmed apitrace of dota2:
n time(i) total(B) useful-heap(B) extra-heap(B) stacks(B)
Before (32-bit): 46 40,661,487,174 75,116,800 68,854,065 6,262,735 0
After (32-bit): 50 40,564,927,443 69,185,408 63,683,871 5,501,537 0
Before (64-bit): 64 37,200,329,700 104,872,672 96,514,546 8,358,126 0
After (64-bit): 59 36,822,048,449 96,526,888 89,113,000 7,413,888 0
A real savings of 4.9MiB on 32-bit and 7.0MiB on 64-bit.
v2: Don't remove any built-in with Transpose in the name.
v3: Fix comment typo noticed by Anuj.
Signed-off-by: Ian Romanick <ian.d.romanick@intel.com>
Suggested-by: Eric Anholt <eric@anholt.net>
Reviewed-by: Matt Turner <mattst88@gmail.com>
Acked-by: Anuj Phogat <anuj.phogat@gmail.com>
Cc: Eric Anholt <eric@anholt.net>
2014-05-28 17:09:45 -07:00
|
|
|
void optimize_dead_builtin_variables(exec_list *instructions,
|
|
|
|
enum ir_variable_mode other);
|
2016-06-30 14:55:40 +10:00
|
|
|
bool lower_tess_level(gl_linked_shader *shader);
|
2011-07-18 18:48:39 -07:00
|
|
|
|
2016-06-30 14:55:40 +10:00
|
|
|
bool lower_vertex_id(gl_linked_shader *shader);
|
2017-10-21 15:15:41 -04:00
|
|
|
bool lower_cs_derived(gl_linked_shader *shader);
|
2018-02-14 11:53:49 -08:00
|
|
|
bool lower_blend_equation_advanced(gl_linked_shader *shader, bool coherent);
|
2014-06-19 12:06:42 -07:00
|
|
|
|
2019-10-15 16:20:26 +02:00
|
|
|
bool lower_builtins(exec_list *instructions);
|
2015-04-23 13:34:14 +10:00
|
|
|
bool lower_subroutine(exec_list *instructions, struct _mesa_glsl_parse_state *state);
|
2016-03-17 13:58:40 -07:00
|
|
|
void propagate_invariance(exec_list *instructions);
|
2015-04-23 13:34:14 +10:00
|
|
|
|
2017-09-18 15:04:03 -05:00
|
|
|
namespace ir_builder { class ir_factory; };
|
|
|
|
|
|
|
|
ir_variable *compare_index_block(ir_builder::ir_factory &body,
|
|
|
|
ir_variable *index,
|
|
|
|
unsigned base, unsigned components);
|
2016-10-14 18:17:16 -07:00
|
|
|
|
|
|
|
bool lower_64bit_integer_instructions(exec_list *instructions,
|
|
|
|
unsigned what_to_lower);
|
2017-03-20 16:04:08 +00:00
|
|
|
|
2019-04-19 15:36:58 +02:00
|
|
|
bool lower_precision(exec_list *instructions);
|
|
|
|
|
2017-03-20 16:04:08 +00:00
|
|
|
#endif /* GLSL_IR_OPTIMIZATION_H */
|