2010-10-10 15:42:37 -07:00
|
|
|
/*
|
|
|
|
* Copyright © 2010 Intel Corporation
|
|
|
|
*
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
|
|
*
|
|
|
|
* The above copyright notice and this permission notice (including the next
|
|
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
|
|
* Software.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
|
|
* IN THE SOFTWARE.
|
|
|
|
*
|
|
|
|
* Authors:
|
|
|
|
* Eric Anholt <eric@anholt.net>
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2012-04-10 12:01:50 -07:00
|
|
|
#pragma once
|
|
|
|
|
2011-05-03 10:55:50 -07:00
|
|
|
#include "brw_shader.h"
|
2015-02-06 01:11:18 +02:00
|
|
|
#include "brw_ir_fs.h"
|
2015-06-03 19:59:44 +03:00
|
|
|
#include "brw_fs_builder.h"
|
2016-01-18 12:54:03 +02:00
|
|
|
#include "compiler/nir/nir.h"
|
2010-10-10 15:42:37 -07:00
|
|
|
|
2014-06-14 22:53:40 -07:00
|
|
|
struct bblock_t;
|
2012-06-06 10:57:54 -07:00
|
|
|
namespace {
|
2013-01-07 19:42:38 -08:00
|
|
|
struct acp_entry;
|
2012-06-06 10:57:54 -07:00
|
|
|
}
|
2012-05-10 16:10:15 -07:00
|
|
|
|
2012-06-05 11:37:22 -07:00
|
|
|
namespace brw {
|
|
|
|
class fs_live_variables;
|
|
|
|
}
|
|
|
|
|
2015-06-29 22:50:28 -07:00
|
|
|
struct brw_gs_compile;
|
|
|
|
|
2015-06-25 10:55:51 -07:00
|
|
|
/**
 * Return a copy of \p reg advanced by \p delta logical components.
 *
 * The byte distance covered by one component depends on the register file
 * and on the builder's dispatch width (see fs_reg::component_size()).
 * A BAD_FILE register is passed through untouched, and immediates cannot
 * be offset at all (\p delta must be 0 for IMM).
 */
static inline fs_reg
offset(fs_reg reg, const brw::fs_builder &bld, unsigned delta)
{
   switch (reg.file) {
   case BAD_FILE:
      /* Nothing to offset in a null register. */
      break;
   case IMM:
      /* Immediates have no per-channel layout to step through. */
      assert(delta == 0);
      break;
   case ARF:
   case FIXED_GRF:
   case MRF:
   case VGRF:
   case ATTR:
   case UNIFORM:
      return byte_offset(reg,
                         delta * reg.component_size(bld.dispatch_width()));
   }
   return reg;
}
|
|
|
|
|
2012-11-09 01:05:47 -08:00
|
|
|
/**
 * The fragment shader front-end.
 *
 * Translates either GLSL IR or Mesa IR (for ARB_fragment_program) into FS IR.
 *
 * NOTE(review): the nir_emit_*() entry points below suggest the front-end
 * now consumes NIR; the sentence above may be stale — confirm and update.
 */
class fs_visitor : public backend_shader
{
public:
   /* Per-stage constructor (VS/TCS/TES/FS/CS); input_vue_map is only
    * meaningful for stages that read a VUE (defaults to NULL). */
   fs_visitor(const struct brw_compiler *compiler, void *log_data,
              void *mem_ctx,
              const void *key,
              struct brw_stage_prog_data *prog_data,
              struct gl_program *prog,
              const nir_shader *shader,
              unsigned dispatch_width,
              int shader_time_index,
              const struct brw_vue_map *input_vue_map = NULL);
   /* GS-specific constructor; stores gs_compile for geometry state. */
   fs_visitor(const struct brw_compiler *compiler, void *log_data,
              void *mem_ctx,
              struct brw_gs_compile *gs_compile,
              struct brw_gs_prog_data *prog_data,
              const nir_shader *shader,
              int shader_time_index);
   void init();
   ~fs_visitor();

   fs_reg vgrf(const glsl_type *const type);
   void import_uniforms(fs_visitor *v);
   void setup_uniform_clipplane_values(gl_clip_plane *clip_planes);
   void compute_clip_distance(gl_clip_plane *clip_planes);

   fs_inst *get_instruction_generating_reg(fs_inst *start,
                                           fs_inst *end,
                                           const fs_reg &reg);

   void VARYING_PULL_CONSTANT_LOAD(const brw::fs_builder &bld,
                                   const fs_reg &dst,
                                   const fs_reg &surf_index,
                                   const fs_reg &varying_offset,
                                   uint32_t const_offset);
   void DEP_RESOLVE_MOV(const brw::fs_builder &bld, int grf);

   /* Top-level per-stage drivers: emit IR, optimize, allocate registers.
    * Return false on failure (see failed/fail_msg below). */
   bool run_fs(bool do_rep_send);
   bool run_vs(gl_clip_plane *clip_planes);
   bool run_tcs_single_patch();
   bool run_tes();
   bool run_gs();
   bool run_cs();
   void optimize();
   void allocate_registers();

   /* Thread-payload / URB layout setup, per stage and hardware generation. */
   void setup_fs_payload_gen4();
   void setup_fs_payload_gen6();
   void setup_vs_payload();
   void setup_gs_payload();
   void setup_cs_payload();
   void fixup_3src_null_dest();
   void assign_curb_setup();
   void calculate_urb_setup();
   void assign_urb_setup();
   void convert_attr_sources_to_hw_regs(fs_inst *inst);
   void assign_vs_urb_setup();
   void assign_tcs_single_patch_urb_setup();
   void assign_tes_urb_setup();
   void assign_gs_urb_setup();

   /* Register allocation and spilling. */
   bool assign_regs(bool allow_spilling);
   void assign_regs_trivial();
   void calculate_payload_ranges(int payload_node_count,
                                 int *payload_last_use_ip);
   void setup_payload_interference(struct ra_graph *g, int payload_reg_count,
                                   int first_payload_node);
   int choose_spill_reg(struct ra_graph *g);
   void spill_reg(int spill_reg);
   void split_virtual_grfs();
   bool compact_virtual_grfs();
   void assign_constant_locations();
   void lower_constant_loads();
   void invalidate_live_intervals();
   void calculate_live_intervals();
   void calculate_register_pressure();
   void validate();

   /* Optimization passes; each returns true when it made progress. */
   bool opt_algebraic();
   bool opt_redundant_discard_jumps();
   bool opt_cse();
   bool opt_cse_local(bblock_t *block);
   bool opt_copy_propagate();
   bool try_copy_propagate(fs_inst *inst, int arg, acp_entry *entry);
   bool try_constant_propagate(fs_inst *inst, acp_entry *entry);
   bool opt_copy_propagate_local(void *mem_ctx, bblock_t *block,
                                 exec_list *acp);
   bool opt_drop_redundant_mov_to_flags();
   bool opt_register_renaming();
   bool register_coalesce();
   bool compute_to_mrf();
   bool eliminate_find_live_channel();
   bool dead_code_eliminate();
   bool remove_duplicate_mrf_writes();

   bool opt_sampler_eot();
   bool virtual_grf_interferes(int a, int b);
   void schedule_instructions(instruction_scheduler_mode mode);
   void insert_gen4_send_dependency_workarounds();
   void insert_gen4_pre_send_dependency_workarounds(bblock_t *block,
                                                    fs_inst *inst);
   void insert_gen4_post_send_dependency_workarounds(bblock_t *block,
                                                     fs_inst *inst);
   /* fail() aborts the current compile; no16() records (in a SIMD8 compile)
    * that a SIMD16 compile can never succeed, so it can be skipped. */
   void vfail(const char *msg, va_list args);
   void fail(const char *msg, ...);
   void no16(const char *msg);

   /* Lowering passes run before code generation. */
   void lower_uniform_pull_constant_loads();
   bool lower_load_payload();
   bool lower_pack();
   bool lower_d2f();
   bool lower_logical_sends();
   bool lower_integer_multiplication();
   bool lower_minmax();
   bool lower_simd_width();
   bool opt_combine_constants();

   /* FS-specific emission helpers. */
   void emit_dummy_fs();
   void emit_repclear_shader();
   fs_reg *emit_fragcoord_interpolation(bool pixel_center_integer,
                                        bool origin_upper_left);
   fs_inst *emit_linterp(const fs_reg &attr, const fs_reg &interp,
                         glsl_interp_qualifier interpolation_mode,
                         bool is_centroid, bool is_sample);
   fs_reg *emit_frontfacing_interpolation();
   fs_reg *emit_samplepos_setup();
   fs_reg *emit_sampleid_setup();
   fs_reg *emit_samplemaskin_setup();
   /* Recursive so it can handle struct/array varyings (SSO inputs). */
   void emit_general_interpolation(fs_reg *attr, const char *name,
                                   const glsl_type *type,
                                   glsl_interp_qualifier interpolation_mode,
                                   int *location, bool mod_centroid,
                                   bool mod_sample);
   fs_reg *emit_vs_system_value(int location);
   void emit_interpolation_setup_gen4();
   void emit_interpolation_setup_gen6();
   void compute_sample_position(fs_reg dst, fs_reg int_sample_pos);
   fs_reg emit_mcs_fetch(const fs_reg &coordinate, unsigned components,
                         const fs_reg &sampler);
   void emit_gen6_gather_wa(uint8_t wa, fs_reg dst);
   fs_reg resolve_source_modifiers(const fs_reg &src);
   void emit_discard_jump();
   bool opt_peephole_sel();
   bool opt_peephole_predicated_break();
   bool opt_saturate_propagation();
   bool opt_cmod_propagation();
   bool opt_zero_samples();
   void emit_unspill(bblock_t *block, fs_inst *inst, fs_reg reg,
                     uint32_t spill_offset, int count);
   void emit_spill(bblock_t *block, fs_inst *inst, fs_reg reg,
                   uint32_t spill_offset, int count);

   /* NIR front-end: walks the nir_shader and emits FS IR. */
   void emit_nir_code();
   void nir_setup_inputs();
   void nir_setup_single_output_varying(fs_reg *reg, const glsl_type *type,
                                        unsigned *location);
   void nir_setup_outputs();
   void nir_setup_uniforms();
   void nir_emit_system_values();
   void nir_emit_impl(nir_function_impl *impl);
   void nir_emit_cf_list(exec_list *list);
   void nir_emit_if(nir_if *if_stmt);
   void nir_emit_loop(nir_loop *loop);
   void nir_emit_block(nir_block *block);
   void nir_emit_instr(nir_instr *instr);
   void nir_emit_alu(const brw::fs_builder &bld, nir_alu_instr *instr);
   void nir_emit_load_const(const brw::fs_builder &bld,
                            nir_load_const_instr *instr);
   void nir_emit_undef(const brw::fs_builder &bld,
                       nir_ssa_undef_instr *instr);
   void nir_emit_vs_intrinsic(const brw::fs_builder &bld,
                              nir_intrinsic_instr *instr);
   void nir_emit_tcs_intrinsic(const brw::fs_builder &bld,
                               nir_intrinsic_instr *instr);
   void nir_emit_gs_intrinsic(const brw::fs_builder &bld,
                              nir_intrinsic_instr *instr);
   void nir_emit_fs_intrinsic(const brw::fs_builder &bld,
                              nir_intrinsic_instr *instr);
   void nir_emit_cs_intrinsic(const brw::fs_builder &bld,
                              nir_intrinsic_instr *instr);
   void nir_emit_intrinsic(const brw::fs_builder &bld,
                           nir_intrinsic_instr *instr);
   void nir_emit_tes_intrinsic(const brw::fs_builder &bld,
                               nir_intrinsic_instr *instr);
   void nir_emit_ssbo_atomic(const brw::fs_builder &bld,
                             int op, nir_intrinsic_instr *instr);
   void nir_emit_shared_atomic(const brw::fs_builder &bld,
                               int op, nir_intrinsic_instr *instr);
   void nir_emit_texture(const brw::fs_builder &bld,
                         nir_tex_instr *instr);
   void nir_emit_jump(const brw::fs_builder &bld,
                      nir_jump_instr *instr);
   fs_reg get_nir_src(nir_src src);
   fs_reg get_nir_dest(nir_dest dest);
   fs_reg get_nir_image_deref(const nir_deref_var *deref);
   fs_reg get_indirect_offset(nir_intrinsic_instr *instr);
   void emit_percomp(const brw::fs_builder &bld, const fs_inst &inst,
                     unsigned wr_mask);

   bool optimize_extract_to_float(nir_alu_instr *instr,
                                  const fs_reg &result);
   bool optimize_frontfacing_ternary(nir_alu_instr *instr,
                                     const fs_reg &result);

   void emit_alpha_test();
   fs_inst *emit_single_fb_write(const brw::fs_builder &bld,
                                 fs_reg color1, fs_reg color2,
                                 fs_reg src0_alpha, unsigned components);
   void emit_fb_writes();
   void emit_urb_writes(const fs_reg &gs_vertex_count = fs_reg());
   void set_gs_stream_control_data_bits(const fs_reg &vertex_count,
                                        unsigned stream_id);
   void emit_gs_control_data_bits(const fs_reg &vertex_count);
   void emit_gs_end_primitive(const nir_src &vertex_count_nir_src);
   void emit_gs_vertex(const nir_src &vertex_count_nir_src,
                       unsigned stream_id);
   void emit_gs_thread_end();
   void emit_gs_input_load(const fs_reg &dst, const nir_src &vertex_src,
                           unsigned base_offset, const nir_src &offset_src,
                           unsigned num_components);
   void emit_cs_terminate();
   fs_reg *emit_cs_local_invocation_id_setup();
   fs_reg *emit_cs_work_group_id_setup();

   void emit_barrier();

   /* INTEL_DEBUG=shader_time instrumentation. */
   void emit_shader_time_begin();
   void emit_shader_time_end();
   void SHADER_TIME_ADD(const brw::fs_builder &bld,
                        int shader_time_subindex,
                        fs_reg value);

   fs_reg get_timestamp(const brw::fs_builder &bld);

   struct brw_reg interp_reg(int location, int channel);

   int implied_mrf_writes(fs_inst *inst);

   virtual void dump_instructions();
   virtual void dump_instructions(const char *name);
   void dump_instruction(backend_instruction *inst);
   void dump_instruction(backend_instruction *inst, FILE *file);

   /* Compile inputs: stage key, sampler key, prog data and source program. */
   const void *const key;
   const struct brw_sampler_prog_key_data *key_tex;

   struct brw_gs_compile *gs_compile;

   struct brw_stage_prog_data *prog_data;
   struct gl_program *prog;

   const struct brw_vue_map *input_vue_map;

   /* Live-interval data, indexed by virtual GRF number. */
   int *virtual_grf_start;
   int *virtual_grf_end;
   brw::fs_live_variables *live_intervals;

   int *regs_live_at_ip;

   /** Number of uniform variable components visited. */
   unsigned uniforms;

   /** Byte-offset for the next available spot in the scratch space buffer. */
   unsigned last_scratch;

   /**
    * Array mapping UNIFORM register numbers to the pull parameter index,
    * or -1 if this uniform register isn't being uploaded as a pull constant.
    */
   int *pull_constant_loc;

   /**
    * Array mapping UNIFORM register numbers to the push parameter index,
    * or -1 if this uniform register isn't being uploaded as a push constant.
    */
   int *push_constant_loc;

   /* FS output registers. */
   fs_reg frag_depth;
   fs_reg frag_stencil;
   fs_reg sample_mask;
   fs_reg outputs[VARYING_SLOT_MAX];
   unsigned output_components[VARYING_SLOT_MAX];
   fs_reg dual_src_output;
   bool do_dual_src;
   int first_non_payload_grf;
   /** Either BRW_MAX_GRF or GEN7_MRF_HACK_START */
   unsigned max_grf;

   /* NIR-to-FS-IR register mappings. */
   fs_reg *nir_locals;
   fs_reg *nir_ssa_values;
   fs_reg nir_inputs;
   fs_reg nir_outputs;
   fs_reg *nir_system_values;

   /* Failure state: set by fail()/no16() above. */
   bool failed;
   char *fail_msg;
   bool simd16_unsupported;
   char *no16_msg;

   /** Register numbers for thread payload fields. */
   struct thread_payload {
      uint8_t source_depth_reg;
      uint8_t source_w_reg;
      uint8_t aa_dest_stencil_reg;
      uint8_t dest_depth_reg;
      uint8_t sample_pos_reg;
      uint8_t sample_mask_in_reg;
      uint8_t barycentric_coord_reg[BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT];
      uint8_t local_invocation_id_reg;

      /** The number of thread payload registers the hardware will supply. */
      uint8_t num_regs;
   } payload;

   bool source_depth_to_render_target;
   bool runtime_check_aads_emit;

   /* Interpolation setup results (FS). */
   fs_reg pixel_x;
   fs_reg pixel_y;
   fs_reg wpos_w;
   fs_reg pixel_w;
   fs_reg delta_xy[BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT];
   fs_reg shader_start_time;
   fs_reg userplane[MAX_CLIP_PLANES];
   fs_reg final_gs_vertex_count;
   fs_reg control_data_bits;
   fs_reg invocation_id;

   unsigned grf_used;
   bool spilled_any_registers;

   const unsigned dispatch_width; /**< 8 or 16 */
   unsigned min_dispatch_width;

   int shader_time_index;

   unsigned promoted_constants;
   brw::fs_builder bld;
};
|
|
|
|
|
2012-11-09 01:05:47 -08:00
|
|
|
/**
 * The fragment shader code generator.
 *
 * Translates FS IR to actual i965 assembly code.
 */
class fs_generator
{
public:
   fs_generator(const struct brw_compiler *compiler, void *log_data,
                void *mem_ctx,
                const void *key,
                struct brw_stage_prog_data *prog_data,
                unsigned promoted_constants,
                bool runtime_check_aads_emit,
                gl_shader_stage stage);
   ~fs_generator();

   /* Enable dump-to-stderr of the generated assembly for this shader. */
   void enable_debug(const char *shader_name);
   int generate_code(const cfg_t *cfg, int dispatch_width);
   const unsigned *get_assembly(unsigned int *assembly_size);

private:
   /* Per-opcode emission helpers, dispatched from generate_code(). */
   void fire_fb_write(fs_inst *inst,
                      struct brw_reg payload,
                      struct brw_reg implied_header,
                      GLuint nr);
   void generate_fb_write(fs_inst *inst, struct brw_reg payload);
   void generate_urb_read(fs_inst *inst, struct brw_reg dst, struct brw_reg payload);
   void generate_urb_write(fs_inst *inst, struct brw_reg payload);
   void generate_cs_terminate(fs_inst *inst, struct brw_reg payload);
   void generate_stencil_ref_packing(fs_inst *inst, struct brw_reg dst,
                                     struct brw_reg src);
   void generate_barrier(fs_inst *inst, struct brw_reg src);
   void generate_blorp_fb_write(fs_inst *inst, struct brw_reg payload);
   void generate_linterp(fs_inst *inst, struct brw_reg dst,
                         struct brw_reg *src);
   void generate_tex(fs_inst *inst, struct brw_reg dst, struct brw_reg src,
                     struct brw_reg surface_index,
                     struct brw_reg sampler_index);
   void generate_get_buffer_size(fs_inst *inst, struct brw_reg dst,
                                 struct brw_reg src,
                                 struct brw_reg surf_index);
   /* Math-box emission, per hardware generation. */
   void generate_math_gen6(fs_inst *inst,
                           struct brw_reg dst,
                           struct brw_reg src0,
                           struct brw_reg src1);
   void generate_math_gen4(fs_inst *inst,
                           struct brw_reg dst,
                           struct brw_reg src);
   void generate_math_g45(fs_inst *inst,
                          struct brw_reg dst,
                          struct brw_reg src);
   void generate_ddx(enum opcode op, struct brw_reg dst, struct brw_reg src);
   void generate_ddy(enum opcode op, struct brw_reg dst, struct brw_reg src,
                     bool negate_value);
   /* Scratch (spill) space access. */
   void generate_scratch_write(fs_inst *inst, struct brw_reg src);
   void generate_scratch_read(fs_inst *inst, struct brw_reg dst);
   void generate_scratch_read_gen7(fs_inst *inst, struct brw_reg dst);
   void generate_uniform_pull_constant_load(fs_inst *inst, struct brw_reg dst,
                                            struct brw_reg index,
                                            struct brw_reg offset);
   void generate_uniform_pull_constant_load_gen7(fs_inst *inst,
                                                 struct brw_reg dst,
                                                 struct brw_reg surf_index,
                                                 struct brw_reg offset);
   void generate_varying_pull_constant_load(fs_inst *inst, struct brw_reg dst,
                                            struct brw_reg index,
                                            struct brw_reg offset);
   void generate_varying_pull_constant_load_gen7(fs_inst *inst,
                                                 struct brw_reg dst,
                                                 struct brw_reg index,
                                                 struct brw_reg offset);
   void generate_mov_dispatch_to_flags(fs_inst *inst);

   void generate_pixel_interpolator_query(fs_inst *inst,
                                          struct brw_reg dst,
                                          struct brw_reg src,
                                          struct brw_reg msg_data,
                                          unsigned msg_type);

   void generate_set_sample_id(fs_inst *inst,
                               struct brw_reg dst,
                               struct brw_reg src0,
                               struct brw_reg src1);

   void generate_set_simd4x2_offset(fs_inst *inst,
                                    struct brw_reg dst,
                                    struct brw_reg offset);
   void generate_discard_jump(fs_inst *inst);

   void generate_pack_half_2x16_split(fs_inst *inst,
                                      struct brw_reg dst,
                                      struct brw_reg x,
                                      struct brw_reg y);
   void generate_unpack_half_2x16_split(fs_inst *inst,
                                        struct brw_reg dst,
                                        struct brw_reg src);

   void generate_shader_time_add(fs_inst *inst,
                                 struct brw_reg payload,
                                 struct brw_reg offset,
                                 struct brw_reg value);

   void generate_mov_indirect(fs_inst *inst,
                              struct brw_reg dst,
                              struct brw_reg reg,
                              struct brw_reg indirect_byte_offset);

   /* Re-points HALT instructions recorded in discard_halt_patches. */
   bool patch_discard_jumps_to_fb_writes();

   const struct brw_compiler *compiler;
   void *log_data; /* Passed to compiler->*_log functions */

   const struct brw_device_info *devinfo;

   struct brw_codegen *p;
   const void * const key;
   struct brw_stage_prog_data * const prog_data;

   unsigned dispatch_width; /**< 8 or 16 */

   exec_list discard_halt_patches;
   unsigned promoted_constants;
   bool runtime_check_aads_emit;
   bool debug_flag;
   const char *shader_name;
   gl_shader_stage stage;
   void *mem_ctx;
};
|
|
|
|
|
2011-10-07 12:26:50 -07:00
|
|
|
bool brw_do_channel_expressions(struct exec_list *instructions);
|
|
|
|
bool brw_do_vector_splitting(struct exec_list *instructions);
|