nir/load_store_vectorize: add data as callback args

Signed-off-by: Rhys Perry <pendingchaos02@gmail.com>
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4202>
Rhys Perry
2020-03-13 15:43:16 +00:00
committed by Marge Bot
parent 00c8bec47b
commit f199b7188b
7 changed files with 17 additions and 8 deletions

@@ -3032,7 +3032,8 @@ static bool
 mem_vectorize_callback(unsigned align_mul, unsigned align_offset,
                        unsigned bit_size,
                        unsigned num_components,
-                       nir_intrinsic_instr *low, nir_intrinsic_instr *high)
+                       nir_intrinsic_instr *low, nir_intrinsic_instr *high,
+                       void *data)
 {
    if (num_components > 4)
       return false;

@@ -5010,12 +5010,14 @@ typedef bool (*nir_should_vectorize_mem_func)(unsigned align_mul,
                                               unsigned align_offset,
                                               unsigned bit_size,
                                               unsigned num_components,
-                                              nir_intrinsic_instr *low, nir_intrinsic_instr *high);
+                                              nir_intrinsic_instr *low, nir_intrinsic_instr *high,
+                                              void *data);
 
 typedef struct {
    nir_should_vectorize_mem_func callback;
    nir_variable_mode modes;
    nir_variable_mode robust_modes;
+   void *cb_data;
 } nir_load_store_vectorize_options;
 
 bool nir_opt_load_store_vectorize(nir_shader *shader, const nir_load_store_vectorize_options *options);
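
As a usage sketch of the new plumbing: the caller stores an arbitrary pointer in cb_data when filling out nir_load_store_vectorize_options, and the pass hands that same pointer back as the new void *data argument on every callback invocation. The struct and function names below are hypothetical, not part of this commit; only nir_load_store_vectorize_options, its cb_data field, the void *data parameter, and nir_opt_load_store_vectorize come from the code above.

/* Hypothetical driver-side sketch: pass per-compile limits through cb_data. */
#include "nir.h"

struct my_vectorize_limits {
   unsigned max_components;
};

static bool
my_should_vectorize_mem(unsigned align_mul, unsigned align_offset,
                        unsigned bit_size, unsigned num_components,
                        nir_intrinsic_instr *low, nir_intrinsic_instr *high,
                        void *data)
{
   /* "data" is exactly the pointer the caller stored in options.cb_data. */
   const struct my_vectorize_limits *limits = data;
   return bit_size <= 32 && num_components <= limits->max_components;
}

static bool
my_run_load_store_vectorize(nir_shader *shader)
{
   struct my_vectorize_limits limits = { .max_components = 4 };
   const nir_load_store_vectorize_options options = {
      .callback = my_should_vectorize_mem,
      .modes = nir_var_mem_ssbo | nir_var_mem_global,
      .cb_data = &limits,
   };
   return nir_opt_load_store_vectorize(shader, &options);
}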

@@ -649,7 +649,8 @@ new_bitsize_acceptable(struct vectorize_ctx *ctx, unsigned new_bit_size,
    if (!ctx->options->callback(low->align_mul,
                                low->align_offset,
                                new_bit_size, new_num_components,
-                               low->intrin, high->intrin))
+                               low->intrin, high->intrin,
+                               ctx->options->cb_data))
       return false;
 
    if (low->is_store) {

@@ -73,7 +73,8 @@ protected:
    static bool mem_vectorize_callback(unsigned align_mul, unsigned align_offset,
                                       unsigned bit_size,
                                       unsigned num_components,
-                                      nir_intrinsic_instr *low, nir_intrinsic_instr *high);
+                                      nir_intrinsic_instr *low, nir_intrinsic_instr *high,
+                                      void *data);
    static void shared_type_info(const struct glsl_type *type, unsigned *size, unsigned *align);
 
    std::string swizzle(nir_alu_instr *instr, int src);
@@ -359,7 +360,8 @@ bool nir_load_store_vectorize_test::test_alu_def(
 bool nir_load_store_vectorize_test::mem_vectorize_callback(
    unsigned align_mul, unsigned align_offset, unsigned bit_size,
    unsigned num_components,
-   nir_intrinsic_instr *low, nir_intrinsic_instr *high)
+   nir_intrinsic_instr *low, nir_intrinsic_instr *high,
+   void *data)
 {
    /* Calculate a simple alignment, like how nir_intrinsic_align() does. */
    uint32_t align = align_mul;

@@ -150,7 +150,8 @@ ir3_nir_should_vectorize_mem(unsigned align_mul, unsigned align_offset,
                              unsigned bit_size,
                              unsigned num_components,
                              nir_intrinsic_instr *low,
-                             nir_intrinsic_instr *high)
+                             nir_intrinsic_instr *high,
+                             void *data)
 {
    assert(bit_size >= 8);
    if (bit_size != 32)

@@ -2204,7 +2204,8 @@ ntt_should_vectorize_instr(const nir_instr *instr, void *data)
 static bool
 ntt_should_vectorize_io(unsigned align, unsigned bit_size,
                         unsigned num_components, unsigned high_offset,
-                        nir_intrinsic_instr *low, nir_intrinsic_instr *high)
+                        nir_intrinsic_instr *low, nir_intrinsic_instr *high,
+                        void *data)
 {
    if (bit_size != 32)
       return false;

@@ -969,7 +969,8 @@ brw_nir_should_vectorize_mem(unsigned align_mul, unsigned align_offset,
                              unsigned bit_size,
                              unsigned num_components,
                              nir_intrinsic_instr *low,
-                             nir_intrinsic_instr *high)
+                             nir_intrinsic_instr *high,
+                             void *data)
 {
    /* Don't combine things to generate 64-bit loads/stores. We have to split
     * those back into 32-bit ones anyway and UBO loads aren't split in NIR so