nir: Split NIR_INTRINSIC_TYPE into separate src/dest indices
We're about to introduce conversion ops which are going to want two
different types. We may as well just split the one we have rather than
end up with three. There are a couple of places where this is mildly
inconvenient, but most of the time I find it actually nicer.

Reviewed-by: Jesse Natalie <jenatali@microsoft.com>
Reviewed-by: Daniel Stone <daniels@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/6945>
Committed by: Marge Bot
Parent: 4c70f1ba2f
Commit: 0aa08ae2f6
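As orientation for the diff below, here is a minimal sketch (not part of the commit, written against the accessors it introduces) of how a pass is expected to query the split indices: loads describe the data they produce via DEST_TYPE, stores describe the data they consume via SRC_TYPE, and code that previously called nir_intrinsic_type() now checks which of the two indices the intrinsic actually has, as nir_rewrite_image_intrinsic() and scan_io_usage() do in the hunks below.

#include "nir.h"

/* Illustrative sketch only -- not part of this commit.  It mirrors the
 * pattern used by passes in this series: prefer whichever of the two new
 * indices the intrinsic carries.
 */
static nir_alu_type
io_data_type(const nir_intrinsic_instr *intr)
{
   /* Loads (load_input, load_uniform, image loads, ...) carry DEST_TYPE. */
   if (nir_intrinsic_has_dest_type(intr))
      return nir_intrinsic_dest_type(intr);

   /* Stores (store_output, image stores, ...) carry SRC_TYPE. */
   if (nir_intrinsic_has_src_type(intr))
      return nir_intrinsic_src_type(intr);

   /* Intrinsics with neither index have no ALU type attached. */
   return nir_type_invalid;
}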
@@ -302,7 +302,7 @@ v3d_emit_ms_output(struct v3d_compile *c, nir_builder *b,
 store->num_components = 4;
 nir_intrinsic_set_base(store, sample);
 nir_intrinsic_set_component(store, 0);
-nir_intrinsic_set_type(store, type);
+nir_intrinsic_set_src_type(store, type);
 store->src[0] = nir_src_for_ssa(color);
 store->src[1] = nir_src_for_ssa(nir_imm_int(b, rt));
 nir_builder_instr_insert(b, &store->instr);
@@ -322,7 +322,7 @@ v3d_nir_lower_logic_op_instr(struct v3d_compile *c,
 c->msaa_per_sample_output = true;
 
 nir_src *offset = &intr->src[1];
-nir_alu_type type = nir_intrinsic_type(intr);
+nir_alu_type type = nir_intrinsic_src_type(intr);
 for (int i = 0; i < V3D_MAX_SAMPLES; i++) {
 nir_ssa_def *sample =
 v3d_nir_emit_logic_op(c, b, frag_color, rt, i);
@@ -1344,10 +1344,13 @@ nir_visitor::visit(ir_call *ir)
 
 if (op == nir_intrinsic_image_deref_size) {
 instr->num_components = instr->dest.ssa.num_components;
-} else if (op == nir_intrinsic_image_deref_load ||
-op == nir_intrinsic_image_deref_store) {
+} else if (op == nir_intrinsic_image_deref_load) {
 instr->num_components = 4;
-nir_intrinsic_set_type(instr,
+nir_intrinsic_set_dest_type(instr,
 nir_get_nir_type_for_glsl_base_type(type->sampled_type));
+} else if (op == nir_intrinsic_image_deref_store) {
+instr->num_components = 4;
+nir_intrinsic_set_src_type(instr,
+nir_get_nir_type_for_glsl_base_type(type->sampled_type));
 }
 
@@ -2295,9 +2295,16 @@ nir_rewrite_image_intrinsic(nir_intrinsic_instr *intrin, nir_ssa_def *src,
 bool bindless)
 {
 enum gl_access_qualifier access = nir_intrinsic_access(intrin);
-nir_alu_type type = nir_type_invalid;
-if (nir_intrinsic_infos[intrin->intrinsic].index_map[NIR_INTRINSIC_TYPE])
-type = nir_intrinsic_type(intrin);
+
+/* Image intrinsics only have one of these */
+assert(!nir_intrinsic_has_src_type(intrin) ||
+!nir_intrinsic_has_dest_type(intrin));
+
+nir_alu_type data_type = nir_type_invalid;
+if (nir_intrinsic_has_src_type(intrin))
+data_type = nir_intrinsic_src_type(intrin);
+if (nir_intrinsic_has_dest_type(intrin))
+data_type = nir_intrinsic_dest_type(intrin);
 
 switch (intrin->intrinsic) {
 #define CASE(op) \
@@ -2336,8 +2343,10 @@ nir_rewrite_image_intrinsic(nir_intrinsic_instr *intrin, nir_ssa_def *src,
 nir_intrinsic_set_image_array(intrin, glsl_sampler_type_is_array(deref->type));
 nir_intrinsic_set_access(intrin, access | var->data.access);
 nir_intrinsic_set_format(intrin, var->data.image.format);
-if (nir_intrinsic_infos[intrin->intrinsic].index_map[NIR_INTRINSIC_TYPE])
-nir_intrinsic_set_type(intrin, type);
+if (nir_intrinsic_has_src_type(intrin))
+nir_intrinsic_set_src_type(intrin, data_type);
+if (nir_intrinsic_has_dest_type(intrin))
+nir_intrinsic_set_dest_type(intrin, data_type);
 
 nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
 nir_src_for_ssa(src));
@@ -1736,9 +1736,14 @@ typedef enum {
 NIR_INTRINSIC_DESC_TYPE,
 
 /**
-* The nir_alu_type of a uniform/input/output
+* The nir_alu_type of input data to a store
 */
-NIR_INTRINSIC_TYPE,
+NIR_INTRINSIC_SRC_TYPE,
+
+/**
+* The nir_alu_type of the data output from a load
+*/
+NIR_INTRINSIC_DEST_TYPE,
 
 /**
 * The swizzle mask for the instructions
@@ -1936,7 +1941,8 @@ INTRINSIC_IDX_ACCESSORS(format, FORMAT, enum pipe_format)
 INTRINSIC_IDX_ACCESSORS(align_mul, ALIGN_MUL, unsigned)
 INTRINSIC_IDX_ACCESSORS(align_offset, ALIGN_OFFSET, unsigned)
 INTRINSIC_IDX_ACCESSORS(desc_type, DESC_TYPE, unsigned)
-INTRINSIC_IDX_ACCESSORS(type, TYPE, nir_alu_type)
+INTRINSIC_IDX_ACCESSORS(src_type, SRC_TYPE, nir_alu_type)
+INTRINSIC_IDX_ACCESSORS(dest_type, DEST_TYPE, nir_alu_type)
 INTRINSIC_IDX_ACCESSORS(swizzle_mask, SWIZZLE_MASK, unsigned)
 INTRINSIC_IDX_ACCESSORS(driver_location, DRIVER_LOCATION, unsigned)
 INTRINSIC_IDX_ACCESSORS(memory_semantics, MEMORY_SEMANTICS, nir_memory_semantics)
@@ -186,14 +186,14 @@ nir_gather_ssa_types(nir_function_impl *impl,
 case nir_intrinsic_load_uniform:
 assert(intrin->dest.is_ssa);
 set_type(intrin->dest.ssa.index,
-nir_intrinsic_type(intrin),
+nir_intrinsic_dest_type(intrin),
 float_types, int_types, &progress);
 break;
 
 case nir_intrinsic_store_output:
 assert(intrin->src[0].is_ssa);
 set_type(intrin->src[0].ssa->index,
-nir_intrinsic_type(intrin),
+nir_intrinsic_src_type(intrin),
 float_types, int_types, &progress);
 break;
 
@@ -124,8 +124,10 @@ ALIGN_MUL = "NIR_INTRINSIC_ALIGN_MUL"
 ALIGN_OFFSET = "NIR_INTRINSIC_ALIGN_OFFSET"
 # The vulkan descriptor type for vulkan_resource_index
 DESC_TYPE = "NIR_INTRINSIC_DESC_TYPE"
-# The nir_alu_type of a uniform/input/output
-TYPE = "NIR_INTRINSIC_TYPE"
+# The nir_alu_type of input data to a store
+SRC_TYPE = "NIR_INTRINSIC_SRC_TYPE"
+# The nir_alu_type of the data output from a load
+DEST_TYPE = "NIR_INTRINSIC_DEST_TYPE"
 # The swizzle mask for quad_swizzle_amd & masked_swizzle_amd
 SWIZZLE_MASK = "NIR_INTRINSIC_SWIZZLE_MASK"
 # Driver location of attribute
@@ -399,8 +401,8 @@ def image(name, src_comp=[], extra_indices=[], **kwargs):
 intrinsic("bindless_image_" + name, src_comp=[1] + src_comp,
 indices=[IMAGE_DIM, IMAGE_ARRAY, FORMAT, ACCESS] + extra_indices, **kwargs)
 
-image("load", src_comp=[4, 1, 1], extra_indices=[TYPE], dest_comp=0, flags=[CAN_ELIMINATE])
-image("store", src_comp=[4, 1, 0, 1], extra_indices=[TYPE])
+image("load", src_comp=[4, 1, 1], extra_indices=[DEST_TYPE], dest_comp=0, flags=[CAN_ELIMINATE])
+image("store", src_comp=[4, 1, 0, 1], extra_indices=[SRC_TYPE])
 image("atomic_add", src_comp=[4, 1, 1], dest_comp=1)
 image("atomic_imin", src_comp=[4, 1, 1], dest_comp=1)
 image("atomic_umin", src_comp=[4, 1, 1], dest_comp=1)
@@ -746,15 +748,15 @@ def load(name, src_comp, indices=[], flags=[]):
 flags=flags)
 
 # src[] = { offset }.
-load("uniform", [1], [BASE, RANGE, TYPE], [CAN_ELIMINATE, CAN_REORDER])
+load("uniform", [1], [BASE, RANGE, DEST_TYPE], [CAN_ELIMINATE, CAN_REORDER])
 # src[] = { buffer_index, offset }.
 load("ubo", [-1, 1], [ACCESS, ALIGN_MUL, ALIGN_OFFSET, RANGE_BASE, RANGE], flags=[CAN_ELIMINATE, CAN_REORDER])
 # src[] = { buffer_index, offset in vec4 units }
 load("ubo_vec4", [-1, 1], [ACCESS, COMPONENT], flags=[CAN_ELIMINATE, CAN_REORDER])
 # src[] = { offset }.
-load("input", [1], [BASE, COMPONENT, TYPE, IO_SEMANTICS], [CAN_ELIMINATE, CAN_REORDER])
+load("input", [1], [BASE, COMPONENT, DEST_TYPE, IO_SEMANTICS], [CAN_ELIMINATE, CAN_REORDER])
 # src[] = { vertex_id, offset }.
-load("input_vertex", [1, 1], [BASE, COMPONENT, TYPE, IO_SEMANTICS], [CAN_ELIMINATE, CAN_REORDER])
+load("input_vertex", [1, 1], [BASE, COMPONENT, DEST_TYPE, IO_SEMANTICS], [CAN_ELIMINATE, CAN_REORDER])
 # src[] = { vertex, offset }.
 load("per_vertex_input", [1, 1], [BASE, COMPONENT, IO_SEMANTICS], [CAN_ELIMINATE, CAN_REORDER])
 # src[] = { barycoord, offset }.
@@ -794,7 +796,7 @@ def store(name, srcs, indices=[], flags=[]):
 intrinsic("store_" + name, [0] + srcs, indices=indices, flags=flags)
 
 # src[] = { value, offset }.
-store("output", [1], [BASE, WRMASK, COMPONENT, TYPE, IO_SEMANTICS])
+store("output", [1], [BASE, WRMASK, COMPONENT, SRC_TYPE, IO_SEMANTICS])
 # src[] = { value, vertex, offset }.
 store("per_vertex_output", [1, 1], [BASE, WRMASK, COMPONENT, IO_SEMANTICS])
 # src[] = { value, block_index, offset }
@@ -928,7 +930,7 @@ load("tlb_color_v3d", [1], [BASE, COMPONENT], [])
 #
 # src[] = { value, render_target }
 # BASE = sample index
-store("tlb_sample_color_v3d", [1], [BASE, COMPONENT, TYPE], [])
+store("tlb_sample_color_v3d", [1], [BASE, COMPONENT, SRC_TYPE], [])
 
 # V3D-specific intrinsic to load the number of layers attached to
 # the target framebuffer
@@ -101,7 +101,7 @@ lower_cl_images_to_tex_impl(nir_function_impl *impl)
 
 assert(num_srcs == 3);
 
-tex->dest_type = nir_intrinsic_type(intrin);
+tex->dest_type = nir_intrinsic_dest_type(intrin);
 nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, NULL);
 break;
 }
@@ -239,7 +239,7 @@ static nir_ssa_def *
 emit_load(struct lower_io_state *state,
 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
 unsigned component, unsigned num_components, unsigned bit_size,
-nir_alu_type type)
+nir_alu_type dest_type)
 {
 nir_builder *b = &state->builder;
 const nir_shader *nir = b->shader;
@@ -302,7 +302,7 @@ emit_load(struct lower_io_state *state,
 if (load->intrinsic == nir_intrinsic_load_input ||
 load->intrinsic == nir_intrinsic_load_input_vertex ||
 load->intrinsic == nir_intrinsic_load_uniform)
-nir_intrinsic_set_type(load, type);
+nir_intrinsic_set_dest_type(load, dest_type);
 
 if (load->intrinsic != nir_intrinsic_load_uniform) {
 nir_io_semantics semantics = {0};
@@ -386,7 +386,7 @@ static void
 emit_store(struct lower_io_state *state, nir_ssa_def *data,
 nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
 unsigned component, unsigned num_components,
-nir_component_mask_t write_mask, nir_alu_type type)
+nir_component_mask_t write_mask, nir_alu_type src_type)
 {
 nir_builder *b = &state->builder;
 nir_variable_mode mode = var->data.mode;
@@ -408,7 +408,7 @@ emit_store(struct lower_io_state *state, nir_ssa_def *data,
 nir_intrinsic_set_component(store, component);
 
 if (store->intrinsic == nir_intrinsic_store_output)
-nir_intrinsic_set_type(store, type);
+nir_intrinsic_set_src_type(store, src_type);
 
 nir_intrinsic_set_write_mask(store, write_mask);
 
@@ -49,7 +49,7 @@ lower_load_input_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
 
 nir_intrinsic_set_base(chan_intr, nir_intrinsic_base(intr));
 nir_intrinsic_set_component(chan_intr, nir_intrinsic_component(intr) + i);
-nir_intrinsic_set_type(chan_intr, nir_intrinsic_type(intr));
+nir_intrinsic_set_dest_type(chan_intr, nir_intrinsic_dest_type(intr));
 /* offset */
 nir_src_copy(&chan_intr->src[0], &intr->src[0], chan_intr);
 
@@ -82,7 +82,7 @@ lower_store_output_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
 nir_intrinsic_set_base(chan_intr, nir_intrinsic_base(intr));
 nir_intrinsic_set_write_mask(chan_intr, 0x1);
 nir_intrinsic_set_component(chan_intr, nir_intrinsic_component(intr) + i);
-nir_intrinsic_set_type(chan_intr, nir_intrinsic_type(intr));
+nir_intrinsic_set_src_type(chan_intr, nir_intrinsic_src_type(intr));
 
 /* value */
 chan_intr->src[0] = nir_src_for_ssa(nir_channel(b, value, i));
@@ -50,26 +50,26 @@ nir_lower_mediump_outputs(nir_shader *nir)
 if (!nir_intrinsic_io_semantics(intr).medium_precision)
 break; /* can't lower */
 
-switch (nir_intrinsic_type(intr)) {
+switch (nir_intrinsic_src_type(intr)) {
 case nir_type_float32:
 b.cursor = nir_before_instr(&intr->instr);
 nir_instr_rewrite_src(&intr->instr, &intr->src[0],
 nir_src_for_ssa(nir_f2f16(&b, intr->src[0].ssa)));
-nir_intrinsic_set_type(intr, nir_type_float16);
+nir_intrinsic_set_src_type(intr, nir_type_float16);
 break;
 
 case nir_type_int32:
 b.cursor = nir_before_instr(&intr->instr);
 nir_instr_rewrite_src(&intr->instr, &intr->src[0],
 nir_src_for_ssa(nir_i2i16(&b, intr->src[0].ssa)));
-nir_intrinsic_set_type(intr, nir_type_int16);
+nir_intrinsic_set_src_type(intr, nir_type_int16);
 break;
 
 case nir_type_uint32:
 b.cursor = nir_before_instr(&intr->instr);
 nir_instr_rewrite_src(&intr->instr, &intr->src[0],
 nir_src_for_ssa(nir_u2u16(&b, intr->src[0].ssa)));
-nir_intrinsic_set_type(intr, nir_type_uint16);
+nir_intrinsic_set_src_type(intr, nir_type_uint16);
 break;
 
 default:;
@@ -53,7 +53,7 @@ lower_impl(nir_function_impl *impl)
 
 nir_intrinsic_set_base(load, shader->num_inputs++);
 nir_intrinsic_set_component(load, 0);
-nir_intrinsic_set_type(load, nir_type_float32);
+nir_intrinsic_set_dest_type(load, nir_type_float32);
 
 nir_io_semantics load_sem = {0};
 load_sem.location = VERT_ATTRIB_EDGEFLAG;
@@ -70,7 +70,7 @@ lower_impl(nir_function_impl *impl)
 
 nir_intrinsic_set_base(store, shader->num_outputs++);
 nir_intrinsic_set_component(store, 0);
-nir_intrinsic_set_type(store, nir_type_float32);
+nir_intrinsic_set_src_type(store, nir_type_float32);
 nir_intrinsic_set_write_mask(store, 0x1);
 
 nir_io_semantics semantics = {0};
@@ -837,7 +837,8 @@ print_intrinsic_instr(nir_intrinsic_instr *instr, print_state *state)
 [NIR_INTRINSIC_ALIGN_MUL] = "align_mul",
 [NIR_INTRINSIC_ALIGN_OFFSET] = "align_offset",
 [NIR_INTRINSIC_DESC_TYPE] = "desc_type",
-[NIR_INTRINSIC_TYPE] = "type",
+[NIR_INTRINSIC_SRC_TYPE] = "src_type",
+[NIR_INTRINSIC_DEST_TYPE] = "dest_type",
 [NIR_INTRINSIC_SWIZZLE_MASK] = "swizzle_mask",
 [NIR_INTRINSIC_DRIVER_LOCATION] = "driver_location",
 [NIR_INTRINSIC_MEMORY_SEMANTICS] = "mem_semantics",
@@ -898,9 +899,15 @@ print_intrinsic_instr(nir_intrinsic_instr *instr, print_state *state)
 break;
 }
 
-case NIR_INTRINSIC_TYPE: {
-fprintf(fp, " type=");
-print_alu_type(nir_intrinsic_type(instr), state);
+case NIR_INTRINSIC_SRC_TYPE: {
+fprintf(fp, " src_type=");
+print_alu_type(nir_intrinsic_src_type(instr), state);
+break;
+}
+
+case NIR_INTRINSIC_DEST_TYPE: {
+fprintf(fp, " src_type=");
+print_alu_type(nir_intrinsic_dest_type(instr), state);
 break;
 }
 
@@ -3237,7 +3237,7 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
 intrin->src[4] = nir_src_for_ssa(image.lod);
 
 if (opcode == SpvOpImageWrite)
-nir_intrinsic_set_type(intrin, nir_get_nir_type_for_glsl_type(value->type));
+nir_intrinsic_set_src_type(intrin, nir_get_nir_type_for_glsl_type(value->type));
 break;
 }
 
@@ -3292,7 +3292,7 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
 vtn_push_nir_ssa(b, w[2], result);
 
 if (opcode == SpvOpImageRead)
-nir_intrinsic_set_type(intrin, nir_get_nir_type_for_glsl_type(type->type));
+nir_intrinsic_set_dest_type(intrin, nir_get_nir_type_for_glsl_type(type->type));
 } else {
 nir_builder_instr_insert(&b->nb, &intrin->instr);
 }
@@ -118,9 +118,12 @@ ir3_get_type_for_image_intrinsic(const nir_intrinsic_instr *instr)
 switch (instr->intrinsic) {
 case nir_intrinsic_image_load:
 case nir_intrinsic_bindless_image_load:
+type = nir_alu_type_get_base_type(nir_intrinsic_dest_type(instr));
+break;
+
 case nir_intrinsic_image_store:
 case nir_intrinsic_bindless_image_store:
-type = nir_alu_type_get_base_type(nir_intrinsic_type(instr));
+type = nir_alu_type_get_base_type(nir_intrinsic_src_type(instr));
 break;
 
 case nir_intrinsic_image_atomic_add:
@@ -694,8 +694,8 @@ ttn_src_for_file_and_index(struct ttn_compile *c, unsigned file, unsigned index,
 
 load = nir_intrinsic_instr_create(b->shader, op);
 if (op == nir_intrinsic_load_uniform) {
-nir_intrinsic_set_type(load, src_is_float ? nir_type_float :
-nir_type_int);
+nir_intrinsic_set_dest_type(load, src_is_float ? nir_type_float :
+nir_type_int);
 }
 
 load->num_components = 4;
@@ -158,7 +158,7 @@ etna_lower_io(nir_shader *shader, struct etna_shader_variant *v)
 load->num_components = 2;
 load->src[0] = nir_src_for_ssa(nir_imm_float(&b, 0.0f));
 nir_ssa_dest_init(&load->instr, &load->dest, 2, 32, NULL);
-nir_intrinsic_set_type(load, nir_type_float);
+nir_intrinsic_set_dest_type(load, nir_type_float);
 
 nir_builder_instr_insert(&b, &load->instr);
 
@@ -41,7 +41,7 @@ lower_load_uniform_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
 
 nir_intrinsic_set_base(chan_intr, nir_intrinsic_base(intr) * 4 + i);
 nir_intrinsic_set_range(chan_intr, nir_intrinsic_range(intr) * 4);
-nir_intrinsic_set_type(chan_intr, nir_intrinsic_type(intr));
+nir_intrinsic_set_dest_type(chan_intr, nir_intrinsic_dest_type(intr));
 
 chan_intr->src[0] =
 nir_src_for_ssa(nir_imul_imm(b, intr->src[0].ssa, 4));
@@ -78,7 +78,7 @@ lima_nir_split_load_input_block(nir_block *block, nir_builder *b)
 new_intrin->num_components = nir_dest_num_components(alu->dest.dest);
 nir_intrinsic_set_base(new_intrin, nir_intrinsic_base(intrin));
 nir_intrinsic_set_component(new_intrin, nir_intrinsic_component(intrin) + swizzle);
-nir_intrinsic_set_type(new_intrin, nir_intrinsic_type(intrin));
+nir_intrinsic_set_dest_type(new_intrin, nir_intrinsic_dest_type(intrin));
 
 /* offset */
 nir_src_copy(&new_intrin->src[0], &intrin->src[0], new_intrin);
@@ -154,8 +154,10 @@ static void scan_io_usage(struct si_shader_info *info, nir_intrinsic_instr *intr
 }
 }
 
-if (nir_intrinsic_has_type(intr))
-info->output_type[loc] = nir_intrinsic_type(intr);
+if (nir_intrinsic_has_src_type(intr))
+info->output_type[loc] = nir_intrinsic_src_type(intr);
+else if (nir_intrinsic_has_dest_type(intr))
+info->output_type[loc] = nir_intrinsic_dest_type(intr);
 else
 info->output_type[loc] = nir_type_float32;
 
@@ -166,11 +168,11 @@ static void scan_io_usage(struct si_shader_info *info, nir_intrinsic_instr *intr
 semantic >= FRAG_RESULT_DATA0 && semantic <= FRAG_RESULT_DATA7) {
 unsigned index = semantic - FRAG_RESULT_DATA0;
 
-if (nir_intrinsic_type(intr) == nir_type_float16)
+if (nir_intrinsic_src_type(intr) == nir_type_float16)
 info->output_color_types |= SI_TYPE_FLOAT16 << (index * 2);
-else if (nir_intrinsic_type(intr) == nir_type_int16)
+else if (nir_intrinsic_src_type(intr) == nir_type_int16)
 info->output_color_types |= SI_TYPE_INT16 << (index * 2);
-else if (nir_intrinsic_type(intr) == nir_type_uint16)
+else if (nir_intrinsic_src_type(intr) == nir_type_uint16)
 info->output_color_types |= SI_TYPE_UINT16 << (index * 2);
 }
 }
@@ -90,8 +90,8 @@ bi_load(enum bi_class T, nir_intrinsic_instr *instr)
 if (info->has_dest)
 load.dest = pan_dest_index(&instr->dest);
 
-if (info->has_dest && nir_intrinsic_has_type(instr))
-load.dest_type = nir_intrinsic_type(instr);
+if (info->has_dest && nir_intrinsic_has_dest_type(instr))
+load.dest_type = nir_intrinsic_dest_type(instr);
 
 nir_src *offset = nir_get_io_offset_src(instr);
 
@@ -136,7 +136,7 @@ bi_emit_frag_out(bi_context *ctx, nir_intrinsic_instr *instr)
 },
 .src_types = {
 nir_type_uint32,
-nir_intrinsic_type(instr)
+nir_intrinsic_src_type(instr)
 },
 .swizzle = {
 { 0 },
@@ -161,7 +161,7 @@ bi_emit_frag_out(bi_context *ctx, nir_intrinsic_instr *instr)
 BIR_INDEX_PASS | BIFROST_SRC_CONST_HI,
 },
 .src_types = {
-nir_intrinsic_type(instr),
+nir_intrinsic_src_type(instr),
 nir_type_uint32
 },
 .swizzle = {
@@ -189,7 +189,7 @@ bi_load_with_r61(enum bi_class T, nir_intrinsic_instr *instr)
 ld.src[2] = BIR_INDEX_REGISTER | 62;
 ld.src_types[1] = nir_type_uint32;
 ld.src_types[2] = nir_type_uint32;
-ld.format = nir_intrinsic_type(instr);
+ld.format = nir_intrinsic_dest_type(instr);
 return ld;
 }
 
@@ -1736,7 +1736,7 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
 nir_alu_type t =
 (is_ubo || is_global || is_shared) ? nir_type_uint :
 (is_interp) ? nir_type_float :
-nir_intrinsic_type(instr);
+nir_intrinsic_dest_type(instr);
 
 t = nir_alu_type_get_base_type(t);
 
@@ -1948,7 +1948,7 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
 st.load_store.arg_1 = 0x9E;
 st.load_store.arg_2 = 0x1E;
 
-switch (nir_alu_type_get_base_type(nir_intrinsic_type(instr))) {
+switch (nir_alu_type_get_base_type(nir_intrinsic_src_type(instr))) {
 case nir_type_uint:
 case nir_type_bool:
 st.op = midgard_op_st_vary_32u;