v3d: Use the new lower_to_scratch implementation for indirects on temps.
We can use the same register spilling infrastructure for our loads/stores of indirect access of temp variables, instead of doing an if ladder. Cuts 50% of instructions and max-temps from 2 KSP shaders in shader-db, and causes several other KSP shaders with large bodies and large loop counts to no longer be force-unrolled.

The change was originally motivated by NOLTIS slightly modifying register pressure in the piglit temp mat4 array read/write tests, triggering register allocation failures.
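For intuition, here is a minimal C sketch contrasting the two lowering strategies the message describes. The helper names, the 4-element array size, and tmu_read() are hypothetical stand-ins rather than driver code; only the shape of the generated instruction sequence matters:

#include <stdint.h>

/* Hypothetical stub standing in for the hardware's general TMU read path. */
static uint32_t tmu_read(uint32_t addr) { (void)addr; return 0; }

/* Old approach: an if ladder keeps every array element live in registers
 * and emits a compare+select per element. */
static uint32_t
read_if_ladder(const uint32_t temp[4], uint32_t idx)
{
        uint32_t v = temp[0];
        v = (idx == 1) ? temp[1] : v;
        v = (idx == 2) ? temp[2] : v;
        v = (idx == 3) ? temp[3] : v;
        return v;
}

/* New approach: one address computation plus one TMU read against the
 * thread's per-channel scratch space, so instruction count and register
 * pressure no longer scale with the array size. */
static uint32_t
read_scratch(uint32_t spill_base, uint32_t idx, uint32_t channels)
{
        return tmu_read(spill_base + idx * 4 * channels); /* byte address */
}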
@@ -41,6 +41,7 @@ BROADCOM_FILES = \
 	compiler/v3d_compiler.h \
 	compiler/v3d_nir_lower_image_load_store.c \
 	compiler/v3d_nir_lower_io.c \
+	compiler/v3d_nir_lower_scratch.c \
 	compiler/v3d_nir_lower_txf_ms.c \
 	qpu/qpu_disasm.c \
 	qpu/qpu_disasm.h \
@@ -37,6 +37,7 @@ libbroadcom_compiler_files = files(
         'v3d_compiler.h',
         'v3d_nir_lower_io.c',
         'v3d_nir_lower_image_load_store.c',
+        'v3d_nir_lower_scratch.c',
         'v3d_nir_lower_txf_ms.c',
 )
@@ -132,9 +132,11 @@ v3d_general_tmu_op(nir_intrinsic_instr *instr)
         case nir_intrinsic_load_ubo:
         case nir_intrinsic_load_uniform:
         case nir_intrinsic_load_shared:
+        case nir_intrinsic_load_scratch:
                 return GENERAL_TMU_READ_OP_READ;
         case nir_intrinsic_store_ssbo:
         case nir_intrinsic_store_shared:
+        case nir_intrinsic_store_scratch:
                 return GENERAL_TMU_WRITE_OP_WRITE;
         case nir_intrinsic_ssbo_atomic_add:
         case nir_intrinsic_shared_atomic_add:
@@ -177,7 +179,7 @@ v3d_general_tmu_op(nir_intrinsic_instr *instr)
  */
 static void
 ntq_emit_tmu_general(struct v3d_compile *c, nir_intrinsic_instr *instr,
-                     bool is_shared)
+                     bool is_shared_or_scratch)
 {
         /* XXX perf: We should turn add/sub of 1 to inc/dec.  Perhaps NIR
          * wants to have support for inc/dec?
@@ -185,8 +187,9 @@ ntq_emit_tmu_general(struct v3d_compile *c, nir_intrinsic_instr *instr,

         uint32_t tmu_op = v3d_general_tmu_op(instr);
         bool is_store = (instr->intrinsic == nir_intrinsic_store_ssbo ||
+                         instr->intrinsic == nir_intrinsic_store_scratch ||
                          instr->intrinsic == nir_intrinsic_store_shared);
-        bool has_index = !is_shared;
+        bool has_index = !is_shared_or_scratch;

         int offset_src;
         int tmu_writes = 1; /* address */
@@ -194,6 +197,7 @@ ntq_emit_tmu_general(struct v3d_compile *c, nir_intrinsic_instr *instr,
                 offset_src = 0;
         } else if (instr->intrinsic == nir_intrinsic_load_ssbo ||
                    instr->intrinsic == nir_intrinsic_load_ubo ||
+                   instr->intrinsic == nir_intrinsic_load_scratch ||
                    instr->intrinsic == nir_intrinsic_load_shared) {
                 offset_src = 0 + has_index;
         } else if (is_store) {
@@ -244,13 +248,18 @@ ntq_emit_tmu_general(struct v3d_compile *c, nir_intrinsic_instr *instr,
                 offset = vir_uniform(c, QUNIFORM_UBO_ADDR,
                                      v3d_unit_data_create(index, const_offset));
                 const_offset = 0;
-        } else if (is_shared) {
-                const_offset += nir_intrinsic_base(instr);
-
-                /* Shared variables have no buffer index, and all start from a
-                 * common base that we set up at the start of dispatch
+        } else if (is_shared_or_scratch) {
+                /* Shared and scratch variables have no buffer index, and all
+                 * start from a common base that we set up at the start of
+                 * dispatch.
                  */
-                offset = c->cs_shared_offset;
+                if (instr->intrinsic == nir_intrinsic_load_scratch ||
+                    instr->intrinsic == nir_intrinsic_store_scratch) {
+                        offset = c->spill_base;
+                } else {
+                        offset = c->cs_shared_offset;
+                        const_offset += nir_intrinsic_base(instr);
+                }
         } else {
                 offset = vir_uniform(c, QUNIFORM_SSBO_OFFSET,
                                      nir_src_as_uint(instr->src[is_store ?
@@ -1629,6 +1638,8 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
         case nir_intrinsic_shared_atomic_comp_swap:
         case nir_intrinsic_load_shared:
         case nir_intrinsic_store_shared:
+        case nir_intrinsic_load_scratch:
+        case nir_intrinsic_store_scratch:
                 ntq_emit_tmu_general(c, instr, true);
                 break;

@@ -2308,6 +2319,11 @@ nir_to_vir(struct v3d_compile *c)
                 break;
         }

+        if (c->s->scratch_size) {
+                v3d_setup_spill_base(c);
+                c->spill_size += V3D_CHANNELS * c->s->scratch_size;
+        }
+
         if (c->s->info.stage == MESA_SHADER_FRAGMENT)
                 ntq_setup_fs_inputs(c);
         else
@@ -2524,7 +2540,7 @@ v3d_nir_to_vir(struct v3d_compile *c)
                 vir_remove_thrsw(c);
         }

-        if (c->spill_size &&
+        if (c->spills &&
             (V3D_DEBUG & (V3D_DEBUG_VIR |
                           v3d_debug_flag_for_shader_stage(c->s->info.stage)))) {
                 fprintf(stderr, "%s prog %d/%d spilled VIR:\n",
@@ -749,6 +749,7 @@ struct qreg vir_uniform(struct v3d_compile *c,
                         enum quniform_contents contents,
                         uint32_t data);
 void vir_schedule_instructions(struct v3d_compile *c);
+void v3d_setup_spill_base(struct v3d_compile *c);
 struct v3d_qpu_instr v3d_qpu_nop(void);

 struct qreg vir_emit_def(struct v3d_compile *c, struct qinst *inst);
@@ -796,6 +797,7 @@ bool vir_opt_small_immediates(struct v3d_compile *c);
 bool vir_opt_vpm(struct v3d_compile *c);
 void v3d_nir_lower_blend(nir_shader *s, struct v3d_compile *c);
 void v3d_nir_lower_io(nir_shader *s, struct v3d_compile *c);
+void v3d_nir_lower_scratch(nir_shader *s);
 void v3d_nir_lower_txf_ms(nir_shader *s, struct v3d_compile *c);
 void v3d_nir_lower_image_load_store(nir_shader *s);
 void vir_lower_uniforms(struct v3d_compile *c);
src/broadcom/compiler/v3d_nir_lower_scratch.c (new file, 153 lines)
@@ -0,0 +1,153 @@
/*
 * Copyright © 2018 Intel Corporation
 * Copyright © 2018 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "v3d_compiler.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_format_convert.h"

/** @file v3d_nir_lower_scratch.c
 *
 * Swizzles around the addresses of
 * nir_intrinsic_load_scratch/nir_intrinsic_store_scratch so that a QPU stores
 * a cacheline at a time per dword of scratch access, scalarizing and removing
 * writemasks in the process.
 */
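
/* Worked example of the resulting layout, assuming V3D_CHANNELS == 16
 * (illustrative; the channel count is a hardware parameter, not defined
 * in this file): dword d of channel e's scratch space ends up at
 *
 *         spill_base + d * 16 * 4 + e * 4
 *
 * so dword d of all 16 channels shares one 64-byte cacheline, with EIDX
 * supplying bits [5:2] of the address via spill_offset.  That is why
 * v3d_nir_scratch_offset() below only has to scale the incoming byte
 * offset by V3D_CHANNELS.
 */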

static nir_ssa_def *
v3d_nir_scratch_offset(nir_builder *b, nir_intrinsic_instr *instr)
{
        bool is_store = instr->intrinsic == nir_intrinsic_store_scratch;
        nir_ssa_def *offset = nir_ssa_for_src(b, instr->src[is_store ? 1 : 0], 1);

        assert(nir_intrinsic_align_mul(instr) >= 4);
        assert(nir_intrinsic_align_offset(instr) == 0);

        /* The spill_offset register will already have the subgroup ID (EIDX)
         * shifted and ORed in at bit 2, so all we need to do is to move the
         * dword index up above V3D_CHANNELS.
         */
        return nir_imul_imm(b, offset, V3D_CHANNELS);
}

static void
v3d_nir_lower_load_scratch(nir_builder *b, nir_intrinsic_instr *instr)
{
        b->cursor = nir_before_instr(&instr->instr);

        nir_ssa_def *offset = v3d_nir_scratch_offset(b, instr);

        nir_ssa_def *chans[NIR_MAX_VEC_COMPONENTS];
        for (int i = 0; i < instr->num_components; i++) {
                nir_ssa_def *chan_offset =
                        nir_iadd_imm(b, offset, V3D_CHANNELS * i * 4);

                nir_intrinsic_instr *chan_instr =
                        nir_intrinsic_instr_create(b->shader, instr->intrinsic);
                chan_instr->num_components = 1;
                nir_ssa_dest_init(&chan_instr->instr, &chan_instr->dest, 1,
                                  instr->dest.ssa.bit_size, NULL);

                chan_instr->src[0] = nir_src_for_ssa(chan_offset);

                nir_intrinsic_set_align(chan_instr, 4, 0);

                nir_builder_instr_insert(b, &chan_instr->instr);

                chans[i] = &chan_instr->dest.ssa;
        }

        nir_ssa_def *result = nir_vec(b, chans, instr->num_components);
        nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(result));
        nir_instr_remove(&instr->instr);
}

static void
v3d_nir_lower_store_scratch(nir_builder *b, nir_intrinsic_instr *instr)
{
        b->cursor = nir_before_instr(&instr->instr);

        nir_ssa_def *offset = v3d_nir_scratch_offset(b, instr);
        nir_ssa_def *value = nir_ssa_for_src(b, instr->src[0],
                                             instr->num_components);

        for (int i = 0; i < instr->num_components; i++) {
                if (!(nir_intrinsic_write_mask(instr) & (1 << i)))
                        continue;

                nir_ssa_def *chan_offset =
                        nir_iadd_imm(b, offset, V3D_CHANNELS * i * 4);

                nir_intrinsic_instr *chan_instr =
                        nir_intrinsic_instr_create(b->shader, instr->intrinsic);
                chan_instr->num_components = 1;

                chan_instr->src[0] = nir_src_for_ssa(nir_channel(b, value, i));
                chan_instr->src[1] = nir_src_for_ssa(chan_offset);
                nir_intrinsic_set_write_mask(chan_instr, 0x1);
                nir_intrinsic_set_align(chan_instr, 4, 0);

                nir_builder_instr_insert(b, &chan_instr->instr);
        }

        nir_instr_remove(&instr->instr);
}

void
v3d_nir_lower_scratch(nir_shader *s)
{
        nir_foreach_function(function, s) {
                if (!function->impl)
                        continue;

                nir_builder b;
                nir_builder_init(&b, function->impl);

                nir_foreach_block(block, function->impl) {
                        nir_foreach_instr_safe(instr, block) {
                                if (instr->type != nir_instr_type_intrinsic)
                                        continue;

                                nir_intrinsic_instr *intr =
                                        nir_instr_as_intrinsic(instr);

                                switch (intr->intrinsic) {
                                case nir_intrinsic_load_scratch:
                                        v3d_nir_lower_load_scratch(&b, intr);
                                        break;
                                case nir_intrinsic_store_scratch:
                                        v3d_nir_lower_store_scratch(&b, intr);
                                        break;
                                default:
                                        break;
                                }
                        }
                }

                nir_metadata_preserve(function->impl,
                                      nir_metadata_block_index |
                                      nir_metadata_dominance);
        }
}
@@ -579,6 +579,12 @@ v3d_lower_nir(struct v3d_compile *c)

         NIR_PASS_V(c->s, nir_lower_tex, &tex_options);
         NIR_PASS_V(c->s, nir_lower_system_values);
+
+        NIR_PASS_V(c->s, nir_lower_vars_to_scratch,
+                   nir_var_function_temp,
+                   0,
+                   glsl_get_natural_size_align_bytes);
+        NIR_PASS_V(c->s, v3d_nir_lower_scratch);
 }

 static void
@@ -156,7 +156,7 @@ v3d_choose_spill_node(struct v3d_compile *c, struct ra_graph *g,
 /* The spill offset for this thread takes a bit of setup, so do it once at
  * program start.
  */
-static void
+void
 v3d_setup_spill_base(struct v3d_compile *c)
 {
         c->cursor = vir_before_block(vir_entry_block(c));
@@ -185,6 +185,8 @@ v3d_setup_spill_base(struct v3d_compile *c)
         /* Make sure that we don't spill the spilling setup instructions. */
         for (int i = start_num_temps; i < c->num_temps; i++)
                 BITSET_CLEAR(c->spillable, i);
+
+        c->cursor = vir_after_block(c->cur_block);
 }

 static void
@@ -310,8 +310,9 @@ v3d_screen_get_shader_param(struct pipe_screen *pscreen, unsigned shader,
                 return 0;
         case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
         case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
-        case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
                 return 0;
+        case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
+                return 1;
         case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
                 return 1;
         case PIPE_SHADER_CAP_SUBROUTINES: