nir: prepare for bumping up max components to 16
OpenCL knows vectors of size 8 and 16.

v2: rebased on master (nir_swizzle rework)
    rework more declarations with nir_component_mask_t
    adjust print_var_decl

Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Signed-off-by: Karol Herbst <kherbst@redhat.com>
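As a rough sketch of the follow-up this change prepares for (not part of this commit): once every hard-coded 4 goes through NIR_MAX_VEC_COMPONENTS and channel masks use nir_component_mask_t, the actual bump to 16 would only have to touch the two definitions in nir.h, along the lines of the hypothetical snippet below. count_used_channels is an illustrative helper, not an existing NIR function; the value 16 and the uint16_t typedef are assumptions about the later patch.

#include <stdint.h>

/* Hypothetical future values: 16 components need a 16-bit channel mask,
 * so nir_component_mask_t would have to grow from uint8_t to uint16_t. */
#define NIR_MAX_VEC_COMPONENTS 16
typedef uint16_t nir_component_mask_t;

/* Illustrative helper: code written against the macro and the typedef,
 * like the loops touched in this patch, keeps working without edits. */
static inline unsigned
count_used_channels(nir_component_mask_t mask)
{
   unsigned n = 0;
   for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
      if (mask & (1u << i))
         n++;
   }
   return n;
}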
@@ -251,7 +251,7 @@ nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
    nir_src_copy(&dest->src, &src->src, &instr->instr);
    dest->abs = src->abs;
    dest->negate = src->negate;
-   for (unsigned i = 0; i < 4; i++)
+   for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
       dest->swizzle[i] = src->swizzle[i];
 }
 
@@ -421,10 +421,8 @@ alu_src_init(nir_alu_src *src)
 {
    src_init(&src->src);
    src->abs = src->negate = false;
-   src->swizzle[0] = 0;
-   src->swizzle[1] = 1;
-   src->swizzle[2] = 2;
-   src->swizzle[3] = 3;
+   for (int i = 0; i < NIR_MAX_VEC_COMPONENTS; ++i)
+      src->swizzle[i] = i;
 }
 
 nir_alu_instr *
@@ -1426,10 +1424,10 @@ nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
       nir_if_rewrite_condition(use_src->parent_if, new_src);
 }
 
-uint8_t
+nir_component_mask_t
 nir_ssa_def_components_read(const nir_ssa_def *def)
 {
-   uint8_t read_mask = 0;
+   nir_component_mask_t read_mask = 0;
    nir_foreach_use(use, def) {
       if (use->parent_instr->type == nir_instr_type_alu) {
          nir_alu_instr *alu = nir_instr_as_alu(use->parent_instr);
@@ -1437,7 +1435,7 @@ nir_ssa_def_components_read(const nir_ssa_def *def)
          int src_idx = alu_src - &alu->src[0];
          assert(src_idx >= 0 && src_idx < nir_op_infos[alu->op].num_inputs);
 
-         for (unsigned c = 0; c < 4; c++) {
+         for (unsigned c = 0; c < NIR_MAX_VEC_COMPONENTS; c++) {
            if (!nir_alu_instr_channel_used(alu, src_idx, c))
               continue;
 
@@ -57,6 +57,8 @@ extern "C" {
 
 #define NIR_FALSE 0u
 #define NIR_TRUE (~0u)
+#define NIR_MAX_VEC_COMPONENTS 4
+typedef uint8_t nir_component_mask_t;
 
 /** Defines a cast function
  *
@@ -115,16 +117,16 @@ typedef enum {
 } nir_rounding_mode;
 
 typedef union {
-   float f32[4];
-   double f64[4];
-   int8_t i8[4];
-   uint8_t u8[4];
-   int16_t i16[4];
-   uint16_t u16[4];
-   int32_t i32[4];
-   uint32_t u32[4];
-   int64_t i64[4];
-   uint64_t u64[4];
+   float f32[NIR_MAX_VEC_COMPONENTS];
+   double f64[NIR_MAX_VEC_COMPONENTS];
+   int8_t i8[NIR_MAX_VEC_COMPONENTS];
+   uint8_t u8[NIR_MAX_VEC_COMPONENTS];
+   int16_t i16[NIR_MAX_VEC_COMPONENTS];
+   uint16_t u16[NIR_MAX_VEC_COMPONENTS];
+   int32_t i32[NIR_MAX_VEC_COMPONENTS];
+   uint32_t u32[NIR_MAX_VEC_COMPONENTS];
+   int64_t i64[NIR_MAX_VEC_COMPONENTS];
+   uint64_t u64[NIR_MAX_VEC_COMPONENTS];
 } nir_const_value;
 
 typedef struct nir_constant {
@@ -135,7 +137,7 @@ typedef struct nir_constant {
     * by the type associated with the \c nir_variable. Constants may be
     * scalars, vectors, or matrices.
     */
-   nir_const_value values[4];
+   nir_const_value values[NIR_MAX_VEC_COMPONENTS];
 
    /* we could get this from the var->type but makes clone *much* easier to
    * not have to care about the type.
@@ -697,7 +699,7 @@ typedef struct {
    * a statement like "foo.xzw = bar.zyx" would have a writemask of 1101b and
    * a swizzle of {2, x, 1, 0} where x means "don't care."
    */
-   uint8_t swizzle[4];
+   uint8_t swizzle[NIR_MAX_VEC_COMPONENTS];
 } nir_alu_src;
 
 typedef struct {
@@ -712,7 +714,7 @@ typedef struct {
 
    bool saturate;
 
-   unsigned write_mask : 4; /* ignored if dest.is_ssa is true */
+   unsigned write_mask : NIR_MAX_VEC_COMPONENTS; /* ignored if dest.is_ssa is true */
 } nir_alu_dest;
 
 typedef enum {
@@ -841,14 +843,14 @@ typedef struct {
    /**
    * The number of components in each input
    */
-   unsigned input_sizes[4];
+   unsigned input_sizes[NIR_MAX_VEC_COMPONENTS];
 
    /**
    * The type of vector that each input takes. Note that negate and
    * absolute value are only allowed on inputs with int or float type and
    * behave differently on the two.
    */
-   nir_alu_type input_types[4];
+   nir_alu_type input_types[NIR_MAX_VEC_COMPONENTS];
 
    nir_op_algebraic_property algebraic_properties;
 } nir_op_info;
@@ -2420,7 +2422,7 @@ void nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src);
 void nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
                                     nir_instr *after_me);
 
-uint8_t nir_ssa_def_components_read(const nir_ssa_def *def);
+nir_component_mask_t nir_ssa_def_components_read(const nir_ssa_def *def);
 
 /*
  * finds the next basic block in source-code order, returns NULL if there is
@@ -361,7 +361,8 @@ nir_build_alu(nir_builder *build, nir_op op, nir_ssa_def *src0,
     * scalar value was passed into a multiply with a vector).
     */
    for (unsigned i = 0; i < op_info->num_inputs; i++) {
-      for (unsigned j = instr->src[i].src.ssa->num_components; j < 4; j++) {
+      for (unsigned j = instr->src[i].src.ssa->num_components;
+           j < NIR_MAX_VEC_COMPONENTS; j++) {
          instr->src[i].swizzle[j] = instr->src[i].src.ssa->num_components - 1;
       }
    }
@@ -433,10 +434,10 @@ static inline nir_ssa_def *
 nir_swizzle(nir_builder *build, nir_ssa_def *src, const unsigned *swiz,
             unsigned num_components, bool use_fmov)
 {
-   assert(num_components <= 4);
+   assert(num_components <= NIR_MAX_VEC_COMPONENTS);
    nir_alu_src alu_src = { NIR_SRC_INIT };
    alu_src.src = nir_src_for_ssa(src);
-   for (unsigned i = 0; i < num_components && i < 4; i++)
+   for (unsigned i = 0; i < num_components && i < NIR_MAX_VEC_COMPONENTS; i++)
       alu_src.swizzle[i] = swiz[i];
 
    return use_fmov ? nir_fmov_alu(build, alu_src, num_components) :
@@ -486,11 +487,11 @@ nir_channel(nir_builder *b, nir_ssa_def *def, unsigned c)
 }
 
 static inline nir_ssa_def *
-nir_channels(nir_builder *b, nir_ssa_def *def, unsigned mask)
+nir_channels(nir_builder *b, nir_ssa_def *def, nir_component_mask_t mask)
 {
-   unsigned num_channels = 0, swizzle[4] = { 0, 0, 0, 0 };
+   unsigned num_channels = 0, swizzle[NIR_MAX_VEC_COMPONENTS] = { 0 };
 
-   for (unsigned i = 0; i < 4; i++) {
+   for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
       if ((mask & (1 << i)) == 0)
          continue;
       swizzle[num_channels++] = i;
@@ -526,7 +527,9 @@ nir_ssa_for_src(nir_builder *build, nir_src src, int num_components)
 static inline nir_ssa_def *
 nir_ssa_for_alu_src(nir_builder *build, nir_alu_instr *instr, unsigned srcn)
 {
-   static uint8_t trivial_swizzle[4] = { 0, 1, 2, 3 };
+   static uint8_t trivial_swizzle[NIR_MAX_VEC_COMPONENTS];
+   for (int i = 0; i < NIR_MAX_VEC_COMPONENTS; ++i)
+      trivial_swizzle[i] = i;
    nir_alu_src *src = &instr->src[srcn];
    unsigned num_components = nir_ssa_alu_instr_src_components(instr, srcn);
 
@@ -209,9 +209,9 @@ lower_alu_instr_scalar(nir_alu_instr *instr, nir_builder *b)
       return false;
 
    unsigned num_components = instr->dest.dest.ssa.num_components;
-   nir_ssa_def *comps[] = { NULL, NULL, NULL, NULL };
+   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS] = { NULL };
 
-   for (chan = 0; chan < 4; chan++) {
+   for (chan = 0; chan < NIR_MAX_VEC_COMPONENTS; chan++) {
       if (!(instr->dest.write_mask & (1 << chan)))
          continue;
 
@@ -225,7 +225,7 @@ lower_alu_instr_scalar(nir_alu_instr *instr, nir_builder *b)
                                     0 : chan);
 
          nir_alu_src_copy(&lower->src[i], &instr->src[i], lower);
-         for (int j = 0; j < 4; j++)
+         for (int j = 0; j < NIR_MAX_VEC_COMPONENTS; j++)
            lower->src[i].swizzle[j] = instr->src[i].swizzle[src_chan];
       }
 
@@ -38,7 +38,7 @@ lower_load_input_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
 
    assert(intr->dest.is_ssa);
 
-   nir_ssa_def *loads[4];
+   nir_ssa_def *loads[NIR_MAX_VEC_COMPONENTS];
 
    for (unsigned i = 0; i < intr->num_components; i++) {
       nir_intrinsic_instr *chan_intr =
@@ -177,7 +177,7 @@ lower_load_to_scalar_early(nir_builder *b, nir_intrinsic_instr *intr,
 
    assert(intr->dest.is_ssa);
 
-   nir_ssa_def *loads[4];
+   nir_ssa_def *loads[NIR_MAX_VEC_COMPONENTS];
 
    nir_variable **chan_vars;
    if (var->data.mode == nir_var_shader_in) {
@@ -46,7 +46,7 @@ lower_load_const_instr_scalar(nir_load_const_instr *lower)
    b.cursor = nir_before_instr(&lower->instr);
 
    /* Emit the individual loads. */
-   nir_ssa_def *loads[4];
+   nir_ssa_def *loads[NIR_MAX_VEC_COMPONENTS];
    for (unsigned i = 0; i < lower->def.num_components; i++) {
       nir_load_const_instr *load_comp =
          nir_load_const_instr_create(b.shader, 1, lower->def.bit_size);
@@ -41,7 +41,7 @@ struct constant_fold_state {
 static bool
 constant_fold_alu_instr(nir_alu_instr *instr, void *mem_ctx)
 {
-   nir_const_value src[4];
+   nir_const_value src[NIR_MAX_VEC_COMPONENTS];
 
    if (!instr->dest.dest.is_ssa)
       return false;
@@ -389,7 +389,7 @@ load_from_ssa_entry_value(struct copy_prop_var_state *state,
|
|||||||
const struct glsl_type *type = entry->dst->type;
|
const struct glsl_type *type = entry->dst->type;
|
||||||
unsigned num_components = glsl_get_vector_elements(type);
|
unsigned num_components = glsl_get_vector_elements(type);
|
||||||
|
|
||||||
uint8_t available = 0;
|
nir_component_mask_t available = 0;
|
||||||
bool all_same = true;
|
bool all_same = true;
|
||||||
for (unsigned i = 0; i < num_components; i++) {
|
for (unsigned i = 0; i < num_components; i++) {
|
||||||
if (value->ssa[i])
|
if (value->ssa[i])
|
||||||
@@ -422,7 +422,7 @@ load_from_ssa_entry_value(struct copy_prop_var_state *state,
|
|||||||
intrin->intrinsic == nir_intrinsic_load_deref ? &intrin->dest.ssa : NULL;
|
intrin->intrinsic == nir_intrinsic_load_deref ? &intrin->dest.ssa : NULL;
|
||||||
|
|
||||||
bool keep_intrin = false;
|
bool keep_intrin = false;
|
||||||
nir_ssa_def *comps[4];
|
nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
|
||||||
for (unsigned i = 0; i < num_components; i++) {
|
for (unsigned i = 0; i < num_components; i++) {
|
||||||
if (value->ssa[i]) {
|
if (value->ssa[i]) {
|
||||||
comps[i] = nir_channel(b, value->ssa[i], i);
|
comps[i] = nir_channel(b, value->ssa[i], i);
|
||||||
|
@@ -186,9 +186,9 @@ print_alu_src(nir_alu_instr *instr, unsigned src, print_state *state)
    print_src(&instr->src[src].src, state);
 
    bool print_swizzle = false;
-   unsigned used_channels = 0;
+   nir_component_mask_t used_channels = 0;
 
-   for (unsigned i = 0; i < 4; i++) {
+   for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
       if (!nir_alu_instr_channel_used(instr, src, i))
          continue;
 
@@ -204,7 +204,7 @@ print_alu_src(nir_alu_instr *instr, unsigned src, print_state *state)
 
    if (print_swizzle || used_channels != live_channels) {
       fprintf(fp, ".");
-      for (unsigned i = 0; i < 4; i++) {
+      for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
         if (!nir_alu_instr_channel_used(instr, src, i))
            continue;
 
@@ -227,7 +227,7 @@ print_alu_dest(nir_alu_dest *dest, print_state *state)
    if (!dest->dest.is_ssa &&
       dest->write_mask != (1 << dest->dest.reg.reg->num_components) - 1) {
       fprintf(fp, ".");
-      for (unsigned i = 0; i < 4; i++)
+      for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
         if ((dest->write_mask >> i) & 1)
            fprintf(fp, "%c", "xyzw"[i]);
    }
@@ -491,6 +491,7 @@ print_var_decl(nir_variable *var, print_state *state)
    switch (var->data.mode) {
    case nir_var_shader_in:
    case nir_var_shader_out:
+      assert(num_components <= 4);
       if (num_components < 4 && num_components != 0) {
          const char *xyzw = "xyzw";
          for (int i = 0; i < num_components; i++)
@@ -41,7 +41,7 @@ match_expression(const nir_search_expression *expr, nir_alu_instr *instr,
                  unsigned num_components, const uint8_t *swizzle,
                  struct match_state *state);
 
-static const uint8_t identity_swizzle[] = { 0, 1, 2, 3 };
+static const uint8_t identity_swizzle[NIR_MAX_VEC_COMPONENTS] = { 0, 1, 2, 3 };
 
 /**
  * Check if a source produces a value of the given type.
@@ -97,7 +97,7 @@ match_value(const nir_search_value *value, nir_alu_instr *instr, unsigned src,
             unsigned num_components, const uint8_t *swizzle,
             struct match_state *state)
 {
-   uint8_t new_swizzle[4];
+   uint8_t new_swizzle[NIR_MAX_VEC_COMPONENTS];
 
    /* Searching only works on SSA values because, if it's not SSA, we can't
    * know if the value changed between one instance of that value in the
@@ -167,7 +167,7 @@ match_value(const nir_search_value *value, nir_alu_instr *instr, unsigned src,
          state->variables[var->variable].abs = false;
          state->variables[var->variable].negate = false;
 
-         for (unsigned i = 0; i < 4; ++i) {
+         for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; ++i) {
            if (i < num_components)
               state->variables[var->variable].swizzle[i] = new_swizzle[i];
            else
@@ -606,7 +606,7 @@ nir_alu_instr *
 nir_replace_instr(nir_alu_instr *instr, const nir_search_expression *search,
                   const nir_search_value *replace, void *mem_ctx)
 {
-   uint8_t swizzle[4] = { 0, 0, 0, 0 };
+   uint8_t swizzle[NIR_MAX_VEC_COMPONENTS] = { 0 };
 
    for (unsigned i = 0; i < instr->dest.dest.ssa.num_components; ++i)
       swizzle[i] = i;
@@ -230,9 +230,9 @@ validate_alu_src(nir_alu_instr *instr, unsigned index, validate_state *state)
 
    unsigned num_components = nir_src_num_components(src->src);
    if (!src->src.is_ssa && src->src.reg.reg->is_packed)
-      num_components = 4; /* can't check anything */
-   for (unsigned i = 0; i < 4; i++) {
-      validate_assert(state, src->swizzle[i] < 4);
+      num_components = NIR_MAX_VEC_COMPONENTS; /* can't check anything */
+   for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
+      validate_assert(state, src->swizzle[i] < NIR_MAX_VEC_COMPONENTS);
 
       if (nir_alu_instr_channel_used(instr, index, i))
          validate_assert(state, src->swizzle[i] < num_components);
@@ -3087,7 +3087,7 @@ vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
       unsigned elems = count - 3;
       assume(elems >= 1);
       if (glsl_type_is_vector_or_scalar(type)) {
-         nir_ssa_def *srcs[4];
+         nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS];
          for (unsigned i = 0; i < elems; i++)
            srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
          val->ssa->def =
@@ -246,7 +246,7 @@ vtn_handle_bitcast(struct vtn_builder *b, struct vtn_ssa_value *dest,
    unsigned dest_components = glsl_get_vector_elements(dest->type);
    vtn_assert(src_bit_size * src_components == dest_bit_size * dest_components);
 
-   nir_ssa_def *dest_chan[4];
+   nir_ssa_def *dest_chan[NIR_MAX_VEC_COMPONENTS];
    if (src_bit_size > dest_bit_size) {
       vtn_assert(src_bit_size % dest_bit_size == 0);
       unsigned divisor = src_bit_size / dest_bit_size;