nir: Make image lowering optionally handle the !bindless case as well.

iris was doing this internally, but let's rename the function and move the
iris code there.

Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/merge_requests/3728>
This commit is contained in:
Eric Anholt
2020-01-23 10:49:02 -08:00
committed by Marge Bot
parent cad2d6583c
commit 7342b859af
4 changed files with 33 additions and 14 deletions

View File

@@ -35,7 +35,7 @@ bool gl_nir_lower_atomics(nir_shader *shader,
const struct gl_shader_program *shader_program,
bool use_binding_as_idx);
bool gl_nir_lower_bindless_images(nir_shader *shader);
bool gl_nir_lower_images(nir_shader *shader, bool bindless_only);
bool gl_nir_lower_samplers(nir_shader *shader,
const struct gl_shader_program *shader_program);
bool gl_nir_lower_samplers_as_deref(nir_shader *shader,

View File

@@ -24,10 +24,10 @@
/**
* \file
*
* Lower bindless image operations by turning the image_deref_* into a
* bindless_image_* intrinsic and adding a load_deref on the previous deref
* source. All applicable indicies are also set so that fetching the variable
* in the backend wouldn't be needed anymore.
 * Lower image operations by turning the image_deref_* into an image_* on an
 * index number or a bindless_image_* intrinsic on a load_deref of the previous
 * deref source. All applicable indices are also set so that fetching the
 * variable in the backend wouldn't be needed anymore.
*/
#include "compiler/nir/nir.h"
@@ -36,8 +36,16 @@
#include "compiler/glsl/gl_nir.h"
static void
type_size_align_1(const struct glsl_type *type, unsigned *size, unsigned *align)
{
*size = 1;
*align = 1;
}
static bool
lower_impl(nir_builder *b, nir_instr *instr) {
lower_impl(nir_builder *b, nir_instr *instr, bool bindless_only)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
@@ -70,17 +78,28 @@ lower_impl(nir_builder *b, nir_instr *instr) {
return false;
}
if (bindless_only) {
if (deref->mode == nir_var_uniform && !var->data.bindless)
return false;
}
b->cursor = nir_before_instr(instr);
nir_ssa_def *handle = nir_load_deref(b, deref);
nir_rewrite_image_intrinsic(intrinsic, handle, true);
nir_ssa_def *src;
if (var->data.bindless) {
src = nir_load_deref(b, deref);
} else {
src = nir_iadd_imm(b,
nir_build_deref_offset(b, deref, type_size_align_1),
var->data.driver_location);
}
nir_rewrite_image_intrinsic(intrinsic, src, var->data.bindless);
return true;
}
bool
gl_nir_lower_bindless_images(nir_shader *shader)
gl_nir_lower_images(nir_shader *shader, bool bindless_only)
{
bool progress = false;
@@ -91,7 +110,7 @@ gl_nir_lower_bindless_images(nir_shader *shader)
nir_foreach_block(block, function->impl)
nir_foreach_instr(instr, block)
progress |= lower_impl(&b, instr);
progress |= lower_impl(&b, instr, bindless_only);
}
}

View File

@@ -401,7 +401,7 @@ st_nir_preprocess(struct st_context *st, struct gl_program *prog,
}
/* before buffers and vars_to_ssa */
NIR_PASS_V(nir, gl_nir_lower_bindless_images);
NIR_PASS_V(nir, gl_nir_lower_images, true);
/* TODO: Change GLSL to not lower shared memory. */
if (prog->nir->info.stage == MESA_SHADER_COMPUTE &&

View File

@@ -62,7 +62,7 @@ compile_shader(char **argv)
NIR_PASS_V(nir[i], nir_lower_alu_to_scalar, NULL, NULL);
/* before buffers and vars_to_ssa */
NIR_PASS_V(nir[i], gl_nir_lower_bindless_images);
NIR_PASS_V(nir[i], gl_nir_lower_images, true);
NIR_PASS_V(nir[i], gl_nir_lower_buffers, prog);
NIR_PASS_V(nir[i], nir_opt_constant_folding);