ir3: Plumb through bindless support
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4358>
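Summary of the new descriptor-mode selection, as a condensed illustrative sketch rather than the literal code (see the emit_cat5() hunk below; S2EN, B and A1EN stand for the IR3_INSTR_S2EN, IR3_INSTR_B and IR3_INSTR_A1EN instruction flags):

    /* Illustrative sketch only; the authoritative logic is in emit_cat5() below.
     * S2EN: samp/tex index comes from a register.
     * B:    bindless descriptor mode.
     * A1EN: part of the index/base is taken from a1.x.
     */
    if (S2EN) {
        desc_mode = B ? (A1EN ? CAT5_BINDLESS_A1_UNIFORM
                              : CAT5_BINDLESS_UNIFORM)
                      : CAT5_NONUNIFORM;
    } else if (B) {
        desc_mode = A1EN ? CAT5_BINDLESS_A1_IMM : CAT5_BINDLESS_IMM;
    } /* else: legacy encoding with immediate samp/tex, no descriptor mode */

On the frontend side, get_tex_samp_tex_src() picks the matching flags: small constant bindless indices are packed directly into the instruction, larger constant indices go through a1.x (A1EN), and dynamic indices fall back to the register-based .s2en form.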
@@ -841,6 +841,12 @@ intrinsic("store_global_ir3", [0, 2, 1], indices=[WRMASK, ACCESS, ALIGN_MUL, ALI
 # const_index[] = { access, align_mul, align_offset }
 intrinsic("load_global_ir3", [2, 1], dest_comp=0, indices=[ACCESS, ALIGN_MUL, ALIGN_OFFSET], flags=[CAN_ELIMINATE])
 
+# IR3-specific bindless handle specifier. Similar to vulkan_resource_index, but
+# without the binding because the hardware expects a single flattened index
+# rather than a (binding, index) pair. We may also want to use this with GL.
+# Note that this doesn't actually turn into a HW instruction.
+intrinsic("bindless_resource_ir3", [1], dest_comp=1, indices=[DESC_SET], flags=[CAN_ELIMINATE, CAN_REORDER])
+
 # Intrinsics used by the Midgard/Bifrost blend pipeline. These are defined
 # within a blend shader to read/write the raw value from the tile buffer,
 # without applying any format conversion in the process. If the shader needs
@@ -487,17 +487,38 @@ static int emit_cat5(struct ir3_instruction *instr, void *ptr,
       cat5->src2 = reg(src2, info, instr->repeat, IR3_REG_HALF);
    }
 
+   if (instr->flags & IR3_INSTR_B) {
+      cat5->s2en_bindless.base_hi = instr->cat5.tex_base >> 1;
+      cat5->base_lo = instr->cat5.tex_base & 1;
+   }
+
    if (instr->flags & IR3_INSTR_S2EN) {
       struct ir3_register *samp_tex = instr->regs[1];
       iassert(samp_tex->flags & IR3_REG_HALF);
-      cat5->s2en_bindless.src3 = reg(samp_tex, info, instr->repeat, IR3_REG_HALF);
-      /* TODO: This should probably be CAT5_UNIFORM, at least on a6xx, as
-       * this is what the blob does and it is presumably faster, but first
-       * we should confirm it is actually nonuniform and figure out when the
-       * whole descriptor mode mechanism was introduced.
-       */
-      cat5->s2en_bindless.desc_mode = CAT5_NONUNIFORM;
+      cat5->s2en_bindless.src3 = reg(samp_tex, info, instr->repeat,
+            (instr->flags & IR3_INSTR_B) ? 0 : IR3_REG_HALF);
+      if (instr->flags & IR3_INSTR_B) {
+         if (instr->flags & IR3_INSTR_A1EN) {
+            cat5->s2en_bindless.desc_mode = CAT5_BINDLESS_A1_UNIFORM;
+         } else {
+            cat5->s2en_bindless.desc_mode = CAT5_BINDLESS_UNIFORM;
+         }
+      } else {
+         /* TODO: This should probably be CAT5_UNIFORM, at least on a6xx,
+          * as this is what the blob does and it is presumably faster, but
+          * first we should confirm it is actually nonuniform and figure
+          * out when the whole descriptor mode mechanism was introduced.
+          */
+         cat5->s2en_bindless.desc_mode = CAT5_NONUNIFORM;
+      }
       iassert(!(instr->cat5.samp | instr->cat5.tex));
+   } else if (instr->flags & IR3_INSTR_B) {
+      cat5->s2en_bindless.src3 = instr->cat5.samp;
+      if (instr->flags & IR3_INSTR_A1EN) {
+         cat5->s2en_bindless.desc_mode = CAT5_BINDLESS_A1_IMM;
+      } else {
+         cat5->s2en_bindless.desc_mode = CAT5_BINDLESS_IMM;
+      }
    } else {
       cat5->norm.samp = instr->cat5.samp;
       cat5->norm.tex = instr->cat5.tex;
@@ -509,7 +530,7 @@ static int emit_cat5(struct ir3_instruction *instr, void *ptr,
    cat5->is_3d = !!(instr->flags & IR3_INSTR_3D);
    cat5->is_a = !!(instr->flags & IR3_INSTR_A);
    cat5->is_s = !!(instr->flags & IR3_INSTR_S);
-   cat5->is_s2en_bindless = !!(instr->flags & IR3_INSTR_S2EN);
+   cat5->is_s2en_bindless = !!(instr->flags & (IR3_INSTR_S2EN | IR3_INSTR_B));
    cat5->is_o = !!(instr->flags & IR3_INSTR_O);
    cat5->is_p = !!(instr->flags & IR3_INSTR_P);
    cat5->opc = instr->opc;
@@ -523,13 +544,11 @@ static int emit_cat5(struct ir3_instruction *instr, void *ptr,
 static int emit_cat6_a6xx(struct ir3_instruction *instr, void *ptr,
       struct ir3_info *info)
 {
-   struct ir3_register *src1, *src2;
+   struct ir3_register *src1, *src2, *ssbo;
    instr_cat6_a6xx_t *cat6 = ptr;
    bool has_dest = (instr->opc == OPC_LDIB || instr->opc == OPC_LDC);
 
-   /* first reg should be SSBO binding point: */
-   iassert(instr->regs[1]->flags & IR3_REG_IMMED);
-
+   ssbo = instr->regs[1];
    src1 = instr->regs[2];
 
    if (has_dest) {
@@ -552,7 +571,20 @@ static int emit_cat6_a6xx(struct ir3_instruction *instr, void *ptr,
 
    cat6->src1 = reg(src1, info, instr->repeat, 0);
    cat6->src2 = reg(src2, info, instr->repeat, 0);
-   cat6->ssbo = instr->regs[1]->iim_val;
+   cat6->ssbo = reg(ssbo, info, instr->repeat, IR3_REG_IMMED);
 
+   if (instr->flags & IR3_INSTR_B) {
+      if (ssbo->flags & IR3_REG_IMMED) {
+         cat6->desc_mode = CAT6_BINDLESS_IMM;
+      } else {
+         cat6->desc_mode = CAT6_BINDLESS_UNIFORM;
+      }
+      cat6->base = instr->cat6.base;
+   } else {
+      /* TODO figure out mode for indirect SSBO index in !bindless */
+      iassert(ssbo->flags & IR3_REG_IMMED);
+      cat6->desc_mode = CAT6_IMM;
+   }
+
    switch (instr->opc) {
    case OPC_ATOMIC_ADD:
@@ -218,11 +218,15 @@ struct ir3_instruction {
       IR3_INSTR_S2EN  = 0x200,
       IR3_INSTR_G     = 0x400,
       IR3_INSTR_SAT   = 0x800,
+      /* (cat5/cat6) Bindless */
+      IR3_INSTR_B     = 0x1000,
+      /* (cat5-only) Get some parts of the encoding from a1.x */
+      IR3_INSTR_A1EN  = 0x2000,
       /* meta-flags, for intermediate stages of IR, ie.
        * before register assignment is done:
        */
-      IR3_INSTR_MARK  = 0x1000,
-      IR3_INSTR_UNUSED= 0x2000,
+      IR3_INSTR_MARK  = 0x4000,
+      IR3_INSTR_UNUSED= 0x8000,
    } flags;
    uint8_t repeat;
    uint8_t nop;
@@ -253,6 +257,7 @@ struct ir3_instruction {
       } cat2;
       struct {
          unsigned samp, tex;
+         unsigned tex_base : 3;
         type_t type;
      } cat5;
      struct {
@@ -262,6 +267,7 @@ struct ir3_instruction {
         int iim_val : 3;   /* for ldgb/stgb, # of components */
         unsigned d : 3;
         bool typed : 1;
+        unsigned base : 3;
      } cat6;
      struct {
         unsigned w : 1;    /* write */
@@ -284,6 +290,8 @@ struct ir3_instruction {
      struct {
         unsigned samp, tex;
         unsigned input_offset;
+        unsigned samp_base : 3;
+        unsigned tex_base : 3;
      } prefetch;
      struct {
         /* maps back to entry in ir3_shader_variant::inputs table: */
@@ -1559,9 +1567,11 @@ ir3_SAM(struct ir3_block *block, opc_t opc, type_t type,
    struct ir3_instruction *sam;
 
    sam = ir3_instr_create(block, opc);
-   sam->flags |= flags | IR3_INSTR_S2EN;
+   sam->flags |= flags;
    __ssa_dst(sam)->wrmask = wrmask;
-   __ssa_src(sam, samp_tex, IR3_REG_HALF);
+   if (flags & IR3_INSTR_S2EN) {
+      __ssa_src(sam, samp_tex, IR3_REG_HALF);
+   }
    if (src0) {
      __ssa_src(sam, src0, 0)->wrmask = (1 << (src0->regs_count - 1)) - 1;
    }
@@ -1594,6 +1604,7 @@ INSTR2(ATOMIC_MAX)
 INSTR2(ATOMIC_AND)
 INSTR2(ATOMIC_OR)
 INSTR2(ATOMIC_XOR)
+INSTR2(LDC)
 #if GPU >= 600
 INSTR3(STIB);
 INSTR2(LDIB);
@@ -37,6 +37,42 @@
  * encoding compared to a4xx/a5xx.
  */
 
+static void
+handle_bindless_cat6(struct ir3_instruction *instr, nir_src rsrc)
+{
+   nir_intrinsic_instr *intrin = ir3_bindless_resource(rsrc);
+   if (!intrin)
+      return;
+
+   instr->flags |= IR3_INSTR_B;
+   instr->cat6.base = nir_intrinsic_desc_set(intrin);
+}
+
+static struct ir3_instruction *
+ssbo_idx(struct ir3_context *ctx, nir_src src)
+{
+   if (ir3_bindless_resource(src)) {
+      ctx->so->bindless_ibo = true;
+      return ir3_get_src(ctx, &src)[0];
+   } else {
+      /* can this be non-const buffer_index? how do we handle that? */
+      int ibo_idx = ir3_ssbo_to_ibo(ctx->so->shader, nir_src_as_uint(src));
+      return create_immed(ctx->block, ibo_idx);
+   }
+}
+
+static struct ir3_instruction *
+image_idx(struct ir3_context *ctx, nir_src src)
+{
+   if (ir3_bindless_resource(src)) {
+      ctx->so->bindless_ibo = true;
+      return ir3_get_src(ctx, &src)[0];
+   } else {
+      /* can this be non-const buffer_index? how do we handle that? */
+      int ibo_idx = ir3_image_to_ibo(ctx->so->shader, nir_src_as_uint(src));
+      return create_immed(ctx->block, ibo_idx);
+   }
+}
+
 
 /* src[] = { buffer_index, offset }. No const_index */
 static void
@@ -47,18 +83,16 @@ emit_intrinsic_load_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
    struct ir3_instruction *offset;
    struct ir3_instruction *ldib;
 
-   /* can this be non-const buffer_index? how do we handle that? */
-   int ibo_idx = ir3_ssbo_to_ibo(ctx->so->shader, nir_src_as_uint(intr->src[0]));
-
    offset = ir3_get_src(ctx, &intr->src[2])[0];
 
-   ldib = ir3_LDIB(b, create_immed(b, ibo_idx), 0, offset, 0);
+   ldib = ir3_LDIB(b, ssbo_idx(ctx, intr->src[0]), 0, offset, 0);
    ldib->regs[0]->wrmask = MASK(intr->num_components);
    ldib->cat6.iim_val = intr->num_components;
    ldib->cat6.d = 1;
    ldib->cat6.type = TYPE_U32;
    ldib->barrier_class = IR3_BARRIER_BUFFER_R;
    ldib->barrier_conflict = IR3_BARRIER_BUFFER_W;
+   handle_bindless_cat6(ldib, intr->src[0]);
 
    ir3_split_dest(b, dst, ldib, 0, intr->num_components);
 }
@@ -76,20 +110,18 @@ emit_intrinsic_store_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
    unsigned wrmask = intr->const_index[0];
    unsigned ncomp = ffs(~wrmask) - 1;
 
-   /* can this be non-const buffer_index? how do we handle that? */
-   int ibo_idx = ir3_ssbo_to_ibo(ctx->so->shader, nir_src_as_uint(intr->src[1]));
-
    /* src0 is offset, src1 is value:
    */
    val = ir3_create_collect(ctx, ir3_get_src(ctx, &intr->src[0]), ncomp);
    offset = ir3_get_src(ctx, &intr->src[3])[0];
 
-   stib = ir3_STIB(b, create_immed(b, ibo_idx), 0, offset, 0, val, 0);
+   stib = ir3_STIB(b, ssbo_idx(ctx, intr->src[1]), 0, offset, 0, val, 0);
    stib->cat6.iim_val = ncomp;
    stib->cat6.d = 1;
    stib->cat6.type = TYPE_U32;
    stib->barrier_class = IR3_BARRIER_BUFFER_W;
    stib->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
+   handle_bindless_cat6(stib, intr->src[1]);
 
    array_insert(b, b->keeps, stib);
 }
@@ -118,10 +150,7 @@ emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
    struct ir3_instruction *atomic, *ibo, *src0, *src1, *data, *dummy;
    type_t type = TYPE_U32;
 
-   /* can this be non-const buffer_index? how do we handle that? */
-   int ibo_idx = ir3_ssbo_to_ibo(ctx->so->shader,
-         nir_src_as_uint(intr->src[0]));
-   ibo = create_immed(b, ibo_idx);
+   ibo = ssbo_idx(ctx, intr->src[0]);
 
    data = ir3_get_src(ctx, &intr->src[2])[0];
 
@@ -196,6 +225,7 @@ emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
    atomic->cat6.type = type;
    atomic->barrier_class = IR3_BARRIER_BUFFER_W;
    atomic->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
+   handle_bindless_cat6(atomic, intr->src[0]);
 
    /* even if nothing consume the result, we can't DCE the instruction: */
    array_insert(b, b->keeps, atomic);
@@ -203,6 +233,30 @@ emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
    return atomic;
 }
 
+/* src[] = { deref, coord, sample_index }. const_index[] = {} */
+static void
+emit_intrinsic_load_image(struct ir3_context *ctx, nir_intrinsic_instr *intr,
+      struct ir3_instruction **dst)
+{
+   struct ir3_block *b = ctx->block;
+   struct ir3_instruction *ldib;
+   struct ir3_instruction * const *coords = ir3_get_src(ctx, &intr->src[1]);
+   unsigned ncoords = ir3_get_image_coords(intr, NULL);
+
+   ldib = ir3_LDIB(b, image_idx(ctx, intr->src[0]), 0,
+         ir3_create_collect(ctx, coords, ncoords), 0);
+   ldib->regs[0]->wrmask = MASK(intr->num_components);
+   ldib->cat6.iim_val = intr->num_components;
+   ldib->cat6.d = ncoords;
+   ldib->cat6.type = ir3_get_type_for_image_intrinsic(intr);
+   ldib->cat6.typed = true;
+   ldib->barrier_class = IR3_BARRIER_IMAGE_R;
+   ldib->barrier_conflict = IR3_BARRIER_IMAGE_W;
+   handle_bindless_cat6(ldib, intr->src[0]);
+
+   ir3_split_dest(b, dst, ldib, 0, intr->num_components);
+}
+
 /* src[] = { deref, coord, sample_index, value }. const_index[] = {} */
 static void
 emit_intrinsic_store_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
@@ -212,14 +266,12 @@ emit_intrinsic_store_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
    struct ir3_instruction * const *value = ir3_get_src(ctx, &intr->src[3]);
    struct ir3_instruction * const *coords = ir3_get_src(ctx, &intr->src[1]);
    unsigned ncoords = ir3_get_image_coords(intr, NULL);
-   unsigned slot = nir_src_as_uint(intr->src[0]);
-   unsigned ibo_idx = ir3_image_to_ibo(ctx->so->shader, slot);
    enum pipe_format format = nir_intrinsic_format(intr);
    unsigned ncomp = ir3_get_num_components_for_image_format(format);
 
    /* src0 is offset, src1 is value:
    */
-   stib = ir3_STIB(b, create_immed(b, ibo_idx), 0,
+   stib = ir3_STIB(b, image_idx(ctx, intr->src[0]), 0,
         ir3_create_collect(ctx, coords, ncoords), 0,
         ir3_create_collect(ctx, value, ncomp), 0);
    stib->cat6.iim_val = ncomp;
@@ -228,6 +280,7 @@ emit_intrinsic_store_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
    stib->cat6.typed = true;
    stib->barrier_class = IR3_BARRIER_IMAGE_W;
    stib->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W;
+   handle_bindless_cat6(stib, intr->src[0]);
 
    array_insert(b, b->keeps, stib);
 }
@@ -241,10 +294,8 @@ emit_intrinsic_atomic_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
    struct ir3_instruction * const *coords = ir3_get_src(ctx, &intr->src[1]);
    struct ir3_instruction *value = ir3_get_src(ctx, &intr->src[3])[0];
    unsigned ncoords = ir3_get_image_coords(intr, NULL);
-   unsigned slot = nir_src_as_uint(intr->src[0]);
-   unsigned ibo_idx = ir3_image_to_ibo(ctx->so->shader, slot);
 
-   ibo = create_immed(b, ibo_idx);
+   ibo = image_idx(ctx, intr->src[0]);
 
    /* So this gets a bit creative:
    *
@@ -261,7 +312,8 @@ emit_intrinsic_atomic_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
    dummy = create_immed(b, 0);
    src0 = ir3_create_collect(ctx, coords, ncoords);
 
-   if (intr->intrinsic == nir_intrinsic_image_atomic_comp_swap) {
+   if (intr->intrinsic == nir_intrinsic_image_atomic_comp_swap ||
+         intr->intrinsic == nir_intrinsic_bindless_image_atomic_comp_swap) {
      struct ir3_instruction *compare = ir3_get_src(ctx, &intr->src[4])[0];
      src1 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
         dummy, compare, value
@@ -274,29 +326,39 @@ emit_intrinsic_atomic_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 
    switch (intr->intrinsic) {
    case nir_intrinsic_image_atomic_add:
+   case nir_intrinsic_bindless_image_atomic_add:
      atomic = ir3_ATOMIC_ADD_G(b, ibo, 0, src0, 0, src1, 0);
      break;
    case nir_intrinsic_image_atomic_imin:
    case nir_intrinsic_image_atomic_umin:
+   case nir_intrinsic_bindless_image_atomic_imin:
+   case nir_intrinsic_bindless_image_atomic_umin:
      atomic = ir3_ATOMIC_MIN_G(b, ibo, 0, src0, 0, src1, 0);
      break;
    case nir_intrinsic_image_atomic_imax:
    case nir_intrinsic_image_atomic_umax:
+   case nir_intrinsic_bindless_image_atomic_imax:
+   case nir_intrinsic_bindless_image_atomic_umax:
      atomic = ir3_ATOMIC_MAX_G(b, ibo, 0, src0, 0, src1, 0);
      break;
    case nir_intrinsic_image_atomic_and:
+   case nir_intrinsic_bindless_image_atomic_and:
      atomic = ir3_ATOMIC_AND_G(b, ibo, 0, src0, 0, src1, 0);
      break;
    case nir_intrinsic_image_atomic_or:
+   case nir_intrinsic_bindless_image_atomic_or:
      atomic = ir3_ATOMIC_OR_G(b, ibo, 0, src0, 0, src1, 0);
      break;
    case nir_intrinsic_image_atomic_xor:
+   case nir_intrinsic_bindless_image_atomic_xor:
      atomic = ir3_ATOMIC_XOR_G(b, ibo, 0, src0, 0, src1, 0);
      break;
    case nir_intrinsic_image_atomic_exchange:
+   case nir_intrinsic_bindless_image_atomic_exchange:
      atomic = ir3_ATOMIC_XCHG_G(b, ibo, 0, src0, 0, src1, 0);
      break;
    case nir_intrinsic_image_atomic_comp_swap:
+   case nir_intrinsic_bindless_image_atomic_comp_swap:
      atomic = ir3_ATOMIC_CMPXCHG_G(b, ibo, 0, src0, 0, src1, 0);
      break;
    default:
@@ -309,6 +371,7 @@ emit_intrinsic_atomic_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
    atomic->cat6.typed = true;
    atomic->barrier_class = IR3_BARRIER_IMAGE_W;
    atomic->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W;
+   handle_bindless_cat6(atomic, intr->src[0]);
 
    /* even if nothing consume the result, we can't DCE the instruction: */
    array_insert(b, b->keeps, atomic);
@@ -320,6 +383,7 @@ const struct ir3_context_funcs ir3_a6xx_funcs = {
    .emit_intrinsic_load_ssbo = emit_intrinsic_load_ssbo,
    .emit_intrinsic_store_ssbo = emit_intrinsic_store_ssbo,
    .emit_intrinsic_atomic_ssbo = emit_intrinsic_atomic_ssbo,
+   .emit_intrinsic_load_image = emit_intrinsic_load_image,
    .emit_intrinsic_store_image = emit_intrinsic_store_image,
    .emit_intrinsic_atomic_image = emit_intrinsic_atomic_image,
 };
@@ -721,11 +721,44 @@ emit_alu(struct ir3_context *ctx, nir_alu_instr *alu)
    ir3_put_dst(ctx, &alu->dest.dest);
 }
 
+static void
+emit_intrinsic_load_ubo_ldc(struct ir3_context *ctx, nir_intrinsic_instr *intr,
+      struct ir3_instruction **dst)
+{
+   struct ir3_block *b = ctx->block;
+
+   unsigned ncomp = intr->num_components;
+   struct ir3_instruction *offset = ir3_get_src(ctx, &intr->src[1])[0];
+   struct ir3_instruction *idx = ir3_get_src(ctx, &intr->src[0])[0];
+   struct ir3_instruction *ldc = ir3_LDC(b, idx, 0, offset, 0);
+   ldc->regs[0]->wrmask = MASK(ncomp);
+   ldc->cat6.iim_val = intr->num_components;
+   ldc->cat6.d = 1;
+   ldc->cat6.type = TYPE_U32;
+
+   nir_intrinsic_instr *bindless = ir3_bindless_resource(intr->src[0]);
+   if (bindless) {
+      ldc->flags |= IR3_INSTR_B;
+      ldc->cat6.base = nir_intrinsic_desc_set(bindless);
+      ctx->so->bindless_ubo = true;
+   }
+
+   ir3_split_dest(b, dst, ldc, 0, ncomp);
+}
+
+
 /* handles direct/indirect UBO reads: */
 static void
 emit_intrinsic_load_ubo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
      struct ir3_instruction **dst)
 {
+   if (ir3_bindless_resource(intr->src[0])) {
+      /* TODO: We should be using ldc for non-bindless things on a6xx as
+       * well.
+       */
+      emit_intrinsic_load_ubo_ldc(ctx, intr, dst);
+      return;
+   }
    struct ir3_block *b = ctx->block;
    struct ir3_instruction *base_lo, *base_hi, *addr, *src0, *src1;
    /* UBO addresses are the first driver params, but subtract 2 here to
@@ -1034,23 +1067,102 @@ emit_intrinsic_atomic_shared(struct ir3_context *ctx, nir_intrinsic_instr *intr)
    return atomic;
 }
 
+struct tex_src_info {
+   /* For prefetch */
+   unsigned tex_base, samp_base, tex_idx, samp_idx;
+   /* For normal tex instructions */
+   unsigned base, combined_idx, a1_val, flags;
+   struct ir3_instruction *samp_tex;
+};
+
 /* TODO handle actual indirect/dynamic case.. which is going to be weird
  * to handle with the image_mapping table..
  */
-static struct ir3_instruction *
+static struct tex_src_info
 get_image_samp_tex_src(struct ir3_context *ctx, nir_intrinsic_instr *intr)
 {
-   unsigned slot = nir_src_as_uint(intr->src[0]);
-   unsigned tex_idx = ir3_image_to_tex(&ctx->so->image_mapping, slot);
-   struct ir3_instruction *texture, *sampler;
+   struct ir3_block *b = ctx->block;
+   struct tex_src_info info = { 0 };
+   nir_intrinsic_instr *bindless_tex = ir3_bindless_resource(intr->src[0]);
+   ctx->so->bindless_tex = true;
 
-   texture = create_immed_typed(ctx->block, tex_idx, TYPE_U16);
-   sampler = create_immed_typed(ctx->block, tex_idx, TYPE_U16);
+   if (bindless_tex) {
+      /* Bindless case */
+      info.flags |= IR3_INSTR_B;
 
-   return ir3_create_collect(ctx, (struct ir3_instruction*[]){
-      sampler,
-      texture,
-   }, 2);
+      /* Gather information required to determine which encoding to
+       * choose as well as for prefetch.
+       */
+      info.tex_base = nir_intrinsic_desc_set(bindless_tex);
+      bool tex_const = nir_src_is_const(bindless_tex->src[0]);
+      if (tex_const)
+         info.tex_idx = nir_src_as_uint(bindless_tex->src[0]);
+      info.samp_idx = 0;
+
+      /* Choose encoding. */
+      if (tex_const && info.tex_idx < 256) {
+         if (info.tex_idx < 16) {
+            /* Everything fits within the instruction */
+            info.base = info.tex_base;
+            info.combined_idx = info.samp_idx | (info.tex_idx << 4);
+         } else {
+            info.base = info.tex_base;
+            info.a1_val = info.tex_idx << 3;
+            info.combined_idx = 0;
+            info.flags |= IR3_INSTR_A1EN;
+         }
+         info.samp_tex = NULL;
+      } else {
+         info.flags |= IR3_INSTR_S2EN;
+         info.base = info.tex_base;
+
+         /* Note: the indirect source is now a vec2 instead of hvec2 */
+         struct ir3_instruction *texture, *sampler;
+
+         texture = ir3_get_src(ctx, &intr->src[0])[0];
+         sampler = create_immed(b, 0);
+         info.samp_tex = ir3_create_collect(ctx, (struct ir3_instruction*[]){
+            texture,
+            sampler,
+         }, 2);
+      }
+   } else {
+      info.flags |= IR3_INSTR_S2EN;
+      unsigned slot = nir_src_as_uint(intr->src[0]);
+      unsigned tex_idx = ir3_image_to_tex(&ctx->so->image_mapping, slot);
+      struct ir3_instruction *texture, *sampler;
+
+      texture = create_immed_typed(ctx->block, tex_idx, TYPE_U16);
+      sampler = create_immed_typed(ctx->block, tex_idx, TYPE_U16);
+
+      info.samp_tex = ir3_create_collect(ctx, (struct ir3_instruction*[]){
+         sampler,
+         texture,
+      }, 2);
+   }
+
+   return info;
+}
+
+static struct ir3_instruction *
+emit_sam(struct ir3_context *ctx, opc_t opc, struct tex_src_info info,
+      type_t type, unsigned wrmask, struct ir3_instruction *src0,
+      struct ir3_instruction *src1)
+{
+   struct ir3_instruction *sam, *addr;
+   if (info.flags & IR3_INSTR_A1EN) {
+      addr = ir3_get_addr1(ctx, info.a1_val);
+   }
+   sam = ir3_SAM(ctx->block, opc, type, 0b1111, info.flags,
+         info.samp_tex, src0, src1);
+   if (info.flags & IR3_INSTR_A1EN) {
+      ir3_instr_set_address(sam, addr);
+   }
+   if (info.flags & IR3_INSTR_B) {
+      sam->cat5.tex_base = info.base;
+      sam->cat5.samp = info.combined_idx;
+   }
+   return sam;
 }
 
 /* src[] = { deref, coord, sample_index }. const_index[] = {} */
@@ -1059,7 +1171,7 @@ emit_intrinsic_load_image(struct ir3_context *ctx, nir_intrinsic_instr *intr,
      struct ir3_instruction **dst)
 {
    struct ir3_block *b = ctx->block;
-   struct ir3_instruction *samp_tex = get_image_samp_tex_src(ctx, intr);
+   struct tex_src_info info = get_image_samp_tex_src(ctx, intr);
    struct ir3_instruction *sam;
    struct ir3_instruction * const *src0 = ir3_get_src(ctx, &intr->src[1]);
    struct ir3_instruction *coords[4];
@@ -1073,6 +1185,7 @@ emit_intrinsic_load_image(struct ir3_context *ctx, nir_intrinsic_instr *intr,
      flags &= ~IR3_INSTR_3D;
      flags |= IR3_INSTR_A;
    }
+   info.flags |= flags;
 
    for (unsigned i = 0; i < ncoords; i++)
      coords[i] = src0[i];
@@ -1080,8 +1193,8 @@ emit_intrinsic_load_image(struct ir3_context *ctx, nir_intrinsic_instr *intr,
    if (ncoords == 1)
      coords[ncoords++] = create_immed(b, 0);
 
-   sam = ir3_SAM(b, OPC_ISAM, type, 0b1111, flags,
-         samp_tex, ir3_create_collect(ctx, coords, ncoords), NULL);
+   sam = emit_sam(ctx, OPC_ISAM, info, type, 0b1111,
+         ir3_create_collect(ctx, coords, ncoords), NULL);
 
    sam->barrier_class = IR3_BARRIER_IMAGE_R;
    sam->barrier_conflict = IR3_BARRIER_IMAGE_W;
@@ -1094,15 +1207,15 @@ emit_intrinsic_image_size(struct ir3_context *ctx, nir_intrinsic_instr *intr,
      struct ir3_instruction **dst)
 {
    struct ir3_block *b = ctx->block;
-   struct ir3_instruction *samp_tex = get_image_samp_tex_src(ctx, intr);
+   struct tex_src_info info = get_image_samp_tex_src(ctx, intr);
    struct ir3_instruction *sam, *lod;
    unsigned flags, ncoords = ir3_get_image_coords(intr, &flags);
    type_t dst_type = nir_dest_bit_size(intr->dest) < 32 ?
         TYPE_U16 : TYPE_U32;
 
+   info.flags |= flags;
    lod = create_immed(b, 0);
-   sam = ir3_SAM(b, OPC_GETSIZE, dst_type, 0b1111, flags,
-         samp_tex, lod, NULL);
+   sam = emit_sam(ctx, OPC_GETSIZE, info, dst_type, 0b1111, lod, NULL);
 
    /* Array size actually ends up in .w rather than .z. This doesn't
    * matter for miplevel 0, but for higher mips the value in z is
@@ -1618,25 +1731,45 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
    case nir_intrinsic_image_load:
      emit_intrinsic_load_image(ctx, intr, dst);
      break;
+   case nir_intrinsic_bindless_image_load:
+     /* Bindless uses the IBO state, which doesn't have swizzle filled out,
+      * so using isam doesn't work.
+      *
+      * TODO: can we use isam if we fill out more fields?
+      */
+     ctx->funcs->emit_intrinsic_load_image(ctx, intr, dst);
+     break;
    case nir_intrinsic_image_store:
+   case nir_intrinsic_bindless_image_store:
      if ((ctx->so->type == MESA_SHADER_FRAGMENT) &&
           !ctx->s->info.fs.early_fragment_tests)
        ctx->so->no_earlyz = true;
      ctx->funcs->emit_intrinsic_store_image(ctx, intr);
      break;
    case nir_intrinsic_image_size:
+   case nir_intrinsic_bindless_image_size:
      emit_intrinsic_image_size(ctx, intr, dst);
      break;
    case nir_intrinsic_image_atomic_add:
+   case nir_intrinsic_bindless_image_atomic_add:
    case nir_intrinsic_image_atomic_imin:
+   case nir_intrinsic_bindless_image_atomic_imin:
    case nir_intrinsic_image_atomic_umin:
+   case nir_intrinsic_bindless_image_atomic_umin:
    case nir_intrinsic_image_atomic_imax:
+   case nir_intrinsic_bindless_image_atomic_imax:
    case nir_intrinsic_image_atomic_umax:
+   case nir_intrinsic_bindless_image_atomic_umax:
    case nir_intrinsic_image_atomic_and:
+   case nir_intrinsic_bindless_image_atomic_and:
    case nir_intrinsic_image_atomic_or:
+   case nir_intrinsic_bindless_image_atomic_or:
    case nir_intrinsic_image_atomic_xor:
+   case nir_intrinsic_bindless_image_atomic_xor:
    case nir_intrinsic_image_atomic_exchange:
+   case nir_intrinsic_bindless_image_atomic_exchange:
    case nir_intrinsic_image_atomic_comp_swap:
+   case nir_intrinsic_bindless_image_atomic_comp_swap:
      if ((ctx->so->type == MESA_SHADER_FRAGMENT) &&
           !ctx->s->info.fs.early_fragment_tests)
        ctx->so->no_earlyz = true;
@@ -1812,6 +1945,9 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
    case nir_intrinsic_store_shared_ir3:
      emit_intrinsic_store_shared_ir3(ctx, intr);
      break;
+   case nir_intrinsic_bindless_resource_ir3:
+     dst[0] = ir3_get_src(ctx, &intr->src[0])[0];
+     break;
    default:
      ir3_context_error(ctx, "Unhandled intrinsic type: %s\n",
           nir_intrinsic_infos[intr->intrinsic].name);
@@ -1911,37 +2047,135 @@ tex_info(nir_tex_instr *tex, unsigned *flagsp, unsigned *coordsp)
  * or immediate (in which case it will get lowered later to a non .s2en
  * version of the tex instruction which encode tex/samp as immediates:
  */
-static struct ir3_instruction *
+static struct tex_src_info
 get_tex_samp_tex_src(struct ir3_context *ctx, nir_tex_instr *tex)
 {
-   int texture_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_offset);
-   int sampler_idx = nir_tex_instr_src_index(tex, nir_tex_src_sampler_offset);
+   struct ir3_block *b = ctx->block;
+   struct tex_src_info info = { 0 };
+   int texture_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_handle);
+   int sampler_idx = nir_tex_instr_src_index(tex, nir_tex_src_sampler_handle);
    struct ir3_instruction *texture, *sampler;
 
-   if (texture_idx >= 0) {
-      texture = ir3_get_src(ctx, &tex->src[texture_idx].src)[0];
-      texture = ir3_COV(ctx->block, texture, TYPE_U32, TYPE_U16);
-   } else {
-      /* TODO what to do for dynamic case? I guess we only need the
-       * max index for astc srgb workaround so maybe not a problem
-       * to worry about if we don't enable indirect samplers for
-       * a4xx?
+   if (texture_idx >= 0 || sampler_idx >= 0) {
+      /* Bindless case */
+      info.flags |= IR3_INSTR_B;
+
+      /* Gather information required to determine which encoding to
+       * choose as well as for prefetch.
       */
-      ctx->max_texture_index = MAX2(ctx->max_texture_index, tex->texture_index);
-      texture = create_immed_typed(ctx->block, tex->texture_index, TYPE_U16);
-   }
+      nir_intrinsic_instr *bindless_tex = NULL;
+      bool tex_const;
+      if (texture_idx >= 0) {
+         ctx->so->bindless_tex = true;
+         bindless_tex = ir3_bindless_resource(tex->src[texture_idx].src);
+         assert(bindless_tex);
+         info.tex_base = nir_intrinsic_desc_set(bindless_tex);
+         tex_const = nir_src_is_const(bindless_tex->src[0]);
+         if (tex_const)
+            info.tex_idx = nir_src_as_uint(bindless_tex->src[0]);
+      } else {
+         /* To simplify some of the logic below, assume the index is
+          * constant 0 when it's not enabled.
+          */
+         tex_const = true;
+         info.tex_idx = 0;
+      }
+      nir_intrinsic_instr *bindless_samp = NULL;
+      bool samp_const;
+      if (sampler_idx >= 0) {
+         ctx->so->bindless_samp = true;
+         bindless_samp = ir3_bindless_resource(tex->src[sampler_idx].src);
+         assert(bindless_samp);
+         info.samp_base = nir_intrinsic_desc_set(bindless_samp);
+         samp_const = nir_src_is_const(bindless_samp->src[0]);
+         if (samp_const)
+            info.samp_idx = nir_src_as_uint(bindless_samp->src[0]);
+      } else {
+         samp_const = true;
+         info.samp_idx = 0;
+      }
 
-   if (sampler_idx >= 0) {
-      sampler = ir3_get_src(ctx, &tex->src[sampler_idx].src)[0];
-      sampler = ir3_COV(ctx->block, sampler, TYPE_U32, TYPE_U16);
+      /* Choose encoding. */
+      if (tex_const && samp_const && info.tex_idx < 256 && info.samp_idx < 256) {
+         if (info.tex_idx < 16 && info.samp_idx < 16 &&
+               (!bindless_tex || !bindless_samp || info.tex_base == info.samp_base)) {
+            /* Everything fits within the instruction */
+            info.base = info.tex_base;
+            info.combined_idx = info.samp_idx | (info.tex_idx << 4);
+         } else {
+            info.base = info.tex_base;
+            info.a1_val = info.tex_idx << 3 | info.samp_base;
+            info.combined_idx = info.samp_idx;
+            info.flags |= IR3_INSTR_A1EN;
+         }
+         info.samp_tex = NULL;
+      } else {
+         info.flags |= IR3_INSTR_S2EN;
+         /* In the indirect case, we only use a1.x to store the sampler
+          * base if it differs from the texture base.
+          */
+         if (!bindless_tex || !bindless_samp || info.tex_base == info.samp_base) {
+            info.base = info.tex_base;
+         } else {
+            info.base = info.tex_base;
+            info.a1_val = info.samp_base;
+            info.flags |= IR3_INSTR_A1EN;
+         }
+
+         /* Note: the indirect source is now a vec2 instead of hvec2, and
+          * for some reason the texture and sampler are swapped.
+          */
+         struct ir3_instruction *texture, *sampler;
+
+         if (bindless_tex) {
+            texture = ir3_get_src(ctx, &tex->src[texture_idx].src)[0];
+         } else {
+            texture = create_immed(b, 0);
+         }
+
+         if (bindless_samp) {
+            sampler = ir3_get_src(ctx, &tex->src[sampler_idx].src)[0];
+         } else {
+            sampler = create_immed(b, 0);
+         }
+         info.samp_tex = ir3_create_collect(ctx, (struct ir3_instruction*[]){
+            texture,
+            sampler,
+         }, 2);
+      }
    } else {
-      sampler = create_immed_typed(ctx->block, tex->sampler_index, TYPE_U16);
-   }
+      info.flags |= IR3_INSTR_S2EN;
+      texture_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_offset);
+      sampler_idx = nir_tex_instr_src_index(tex, nir_tex_src_sampler_offset);
+      if (texture_idx >= 0) {
+         texture = ir3_get_src(ctx, &tex->src[texture_idx].src)[0];
+         texture = ir3_COV(ctx->block, texture, TYPE_U32, TYPE_U16);
+      } else {
+         /* TODO what to do for dynamic case? I guess we only need the
+          * max index for astc srgb workaround so maybe not a problem
+          * to worry about if we don't enable indirect samplers for
+          * a4xx?
+          */
+         ctx->max_texture_index = MAX2(ctx->max_texture_index, tex->texture_index);
+         texture = create_immed_typed(ctx->block, tex->texture_index, TYPE_U16);
+         info.tex_idx = tex->texture_index;
+      }
 
-   return ir3_create_collect(ctx, (struct ir3_instruction*[]){
-      sampler,
-      texture,
-   }, 2);
+      if (sampler_idx >= 0) {
+         sampler = ir3_get_src(ctx, &tex->src[sampler_idx].src)[0];
+         sampler = ir3_COV(ctx->block, sampler, TYPE_U32, TYPE_U16);
+      } else {
+         sampler = create_immed_typed(ctx->block, tex->sampler_index, TYPE_U16);
+         info.samp_idx = tex->texture_index;
+      }
+
+      info.samp_tex = ir3_create_collect(ctx, (struct ir3_instruction*[]){
+         sampler,
+         texture,
+      }, 2);
+   }
+
+   return info;
 }
 
 static void
@@ -1951,6 +2185,7 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
    struct ir3_instruction **dst, *sam, *src0[12], *src1[4];
    struct ir3_instruction * const *coord, * const *off, * const *ddx, * const *ddy;
    struct ir3_instruction *lod, *compare, *proj, *sample_index;
+   struct tex_src_info info = { 0 };
    bool has_bias = false, has_lod = false, has_proj = false, has_off = false;
    unsigned i, coords, flags, ncomp;
    unsigned nsrc0 = 0, nsrc1 = 0;
@@ -1999,6 +2234,8 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
        break;
      case nir_tex_src_texture_offset:
      case nir_tex_src_sampler_offset:
+     case nir_tex_src_texture_handle:
+     case nir_tex_src_sampler_handle:
        /* handled in get_tex_samp_src() */
        break;
      default:
@@ -2169,7 +2406,6 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
    if (opc == OPC_GETLOD)
      type = TYPE_S32;
 
-   struct ir3_instruction *samp_tex;
 
    if (tex->op == nir_texop_txf_ms_fb) {
      /* only expect a single txf_ms_fb per shader: */
@@ -2177,14 +2413,15 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
      compile_assert(ctx, ctx->so->type == MESA_SHADER_FRAGMENT);
 
      ctx->so->fb_read = true;
-     samp_tex = ir3_create_collect(ctx, (struct ir3_instruction*[]){
+     info.samp_tex = ir3_create_collect(ctx, (struct ir3_instruction*[]){
        create_immed_typed(ctx->block, ctx->so->num_samp, TYPE_U16),
        create_immed_typed(ctx->block, ctx->so->num_samp, TYPE_U16),
      }, 2);
+     info.flags = IR3_INSTR_S2EN;
 
      ctx->so->num_samp++;
    } else {
-     samp_tex = get_tex_samp_tex_src(ctx, tex);
+     info = get_tex_samp_tex_src(ctx, tex);
    }
 
    struct ir3_instruction *col0 = ir3_create_collect(ctx, src0, nsrc0);
@@ -2200,11 +2437,15 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
      __ssa_src(sam, get_barycentric_pixel(ctx), 0);
      sam->prefetch.input_offset =
           ir3_nir_coord_offset(tex->src[idx].src.ssa);
-     sam->prefetch.tex = tex->texture_index;
-     sam->prefetch.samp = tex->sampler_index;
+     /* make sure not to add irrelevant flags like S2EN */
+     sam->flags = flags | (info.flags & IR3_INSTR_B);
+     sam->prefetch.tex = info.tex_idx;
+     sam->prefetch.samp = info.samp_idx;
+     sam->prefetch.tex_base = info.tex_base;
+     sam->prefetch.samp_base = info.samp_base;
    } else {
-     sam = ir3_SAM(b, opc, type, MASK(ncomp), flags,
-          samp_tex, col0, col1);
+     info.flags |= flags;
+     sam = emit_sam(ctx, opc, info, type, MASK(ncomp), col0, col1);
    }
 
    if ((ctx->astc_srgb & (1 << tex->texture_index)) && !nir_tex_instr_is_query(tex)) {
@@ -2217,8 +2458,8 @@ emit_tex(struct ir3_context *ctx, nir_tex_instr *tex)
      /* we need to sample the alpha separately with a non-ASTC
       * texture state:
       */
-     sam = ir3_SAM(b, opc, type, 0b1000, flags,
-          samp_tex, col0, col1);
+     sam = ir3_SAM(b, opc, type, 0b1000, flags | info.flags,
+          info.samp_tex, col0, col1);
 
      array_insert(ctx->ir, ctx->ir->astc_srgb, sam);
 
@@ -2249,11 +2490,11 @@ emit_tex_info(struct ir3_context *ctx, nir_tex_instr *tex, unsigned idx)
    struct ir3_block *b = ctx->block;
    struct ir3_instruction **dst, *sam;
    type_t dst_type = get_tex_dest_type(tex);
+   struct tex_src_info info = get_tex_samp_tex_src(ctx, tex);
 
    dst = ir3_get_dst(ctx, &tex->dest, 1);
 
-   sam = ir3_SAM(b, OPC_GETINFO, dst_type, 1 << idx, 0,
-         get_tex_samp_tex_src(ctx, tex), NULL, NULL);
+   sam = emit_sam(ctx, OPC_GETINFO, info, dst_type, 1 << idx, NULL, NULL);
 
    /* even though there is only one component, since it ends
    * up in .y/.z/.w rather than .x, we need a split_dest()
@@ -2278,8 +2519,10 @@ emit_tex_txs(struct ir3_context *ctx, nir_tex_instr *tex)
    struct ir3_instruction *lod;
    unsigned flags, coords;
    type_t dst_type = get_tex_dest_type(tex);
+   struct tex_src_info info = get_tex_samp_tex_src(ctx, tex);
 
    tex_info(tex, &flags, &coords);
+   info.flags |= flags;
 
    /* Actually we want the number of dimensions, not coordinates. This
    * distinction only matters for cubes.
@@ -2294,9 +2537,7 @@ emit_tex_txs(struct ir3_context *ctx, nir_tex_instr *tex)
 
    lod = ir3_get_src(ctx, &tex->src[0].src)[0];
 
-   sam = ir3_SAM(b, OPC_GETSIZE, dst_type, 0b1111, flags,
-         get_tex_samp_tex_src(ctx, tex), lod, NULL);
-
+   sam = emit_sam(ctx, OPC_GETSIZE, info, dst_type, 0b1111, lod, NULL);
    ir3_split_dest(b, dst, sam, 0, 4);
 
    /* Array size actually ends up in .w rather than .z. This doesn't
@@ -3192,10 +3433,19 @@ collect_tex_prefetches(struct ir3_context *ctx, struct ir3 *ir)
               &ctx->so->sampler_prefetch[idx];
         idx++;
 
-        fetch->cmd = IR3_SAMPLER_PREFETCH_CMD;
+        if (instr->flags & IR3_INSTR_B) {
+           fetch->cmd = IR3_SAMPLER_BINDLESS_PREFETCH_CMD;
+           /* In bindless mode, the index is actually the base */
+           fetch->tex_id = instr->prefetch.tex_base;
+           fetch->samp_id = instr->prefetch.samp_base;
+           fetch->tex_bindless_id = instr->prefetch.tex;
+           fetch->samp_bindless_id = instr->prefetch.samp;
+        } else {
+           fetch->cmd = IR3_SAMPLER_PREFETCH_CMD;
+           fetch->tex_id = instr->prefetch.tex;
+           fetch->samp_id = instr->prefetch.samp;
+        }
        fetch->wrmask = instr->regs[0]->wrmask;
-        fetch->tex_id = instr->prefetch.tex;
-        fetch->samp_id = instr->prefetch.samp;
        fetch->dst = instr->regs[0]->num;
        fetch->src = instr->prefetch.input_offset;
 
@@ -155,6 +155,8 @@ struct ir3_context_funcs {
       struct ir3_instruction **dst);
    void (*emit_intrinsic_store_ssbo)(struct ir3_context *ctx, nir_intrinsic_instr *intr);
    struct ir3_instruction * (*emit_intrinsic_atomic_ssbo)(struct ir3_context *ctx, nir_intrinsic_instr *intr);
+   void (*emit_intrinsic_load_image)(struct ir3_context *ctx, nir_intrinsic_instr *intr,
+         struct ir3_instruction **dst);
    void (*emit_intrinsic_store_image)(struct ir3_context *ctx, nir_intrinsic_instr *intr);
    struct ir3_instruction * (*emit_intrinsic_atomic_image)(struct ir3_context *ctx, nir_intrinsic_instr *intr);
 };
@@ -713,12 +713,14 @@ instr_cp(struct ir3_cp_ctx *ctx, struct ir3_instruction *instr)
      }
    }
 
-   /* Handle converting a sam.s2en (taking samp/tex idx params via
-    * register) into a normal sam (encoding immediate samp/tex idx)
-    * if they are immediate. This saves some instructions and regs
-    * in the common case where we know samp/tex at compile time:
+   /* Handle converting a sam.s2en (taking samp/tex idx params via register)
+    * into a normal sam (encoding immediate samp/tex idx) if they are
+    * immediate. This saves some instructions and regs in the common case
+    * where we know samp/tex at compile time. This needs to be done in the
+    * frontend for bindless tex, though, so don't replicate it here.
    */
    if (is_tex(instr) && (instr->flags & IR3_INSTR_S2EN) &&
+         !(instr->flags & IR3_INSTR_B) &&
         !(ir3_shader_debug & IR3_DBG_FORCES2EN)) {
      /* The first src will be a collect, if both of it's
      * two sources are mov from imm, then we can
@@ -64,4 +64,20 @@ uint32_t ir3_link_geometry_stages(const struct ir3_shader_variant *producer,
      const struct ir3_shader_variant *consumer,
      uint32_t *locs);
 
+static inline nir_intrinsic_instr *
+ir3_bindless_resource(nir_src src)
+{
+   if (!src.is_ssa)
+      return NULL;
+
+   if (src.ssa->parent_instr->type != nir_instr_type_intrinsic)
+      return NULL;
+
+   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(src.ssa->parent_instr);
+   if (intrin->intrinsic != nir_intrinsic_bindless_resource_ir3)
+      return NULL;
+
+   return intrin;
+}
+
 #endif /* IR3_NIR_H_ */
@@ -135,11 +135,30 @@ lower_tex_prefetch_block(nir_block *block)
           has_src(tex, nir_tex_src_sampler_offset))
        continue;
 
+     /* Disallow indirect or large bindless handles */
+     int idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_handle);
+     if (idx >= 0) {
+        nir_intrinsic_instr *bindless =
+              ir3_bindless_resource(tex->src[idx].src);
+        if (!nir_src_is_const(bindless->src[0]) ||
+              nir_src_as_uint(bindless->src[0]) >= (1 << 16))
+           continue;
+     }
+
+     idx = nir_tex_instr_src_index(tex, nir_tex_src_sampler_handle);
+     if (idx >= 0) {
+        nir_intrinsic_instr *bindless =
+              ir3_bindless_resource(tex->src[idx].src);
+        if (!nir_src_is_const(bindless->src[0]) ||
+              nir_src_as_uint(bindless->src[0]) >= (1 << 16))
+           continue;
+     }
+
      /* only prefetch for simple 2d tex fetch case */
      if (tex->sampler_dim != GLSL_SAMPLER_DIM_2D || tex->is_array)
        continue;
 
-     int idx = nir_tex_instr_src_index(tex, nir_tex_src_coord);
+     idx = nir_tex_instr_src_index(tex, nir_tex_src_coord);
      /* First source should be the sampling coordinate. */
      nir_tex_src *coord = &tex->src[idx];
      debug_assert(coord->src.is_ssa);
@@ -122,6 +122,12 @@ static void print_instr_name(struct ir3_instruction *instr, bool flags)
      printf(".p");
    if (instr->flags & IR3_INSTR_S)
      printf(".s");
+   if (instr->flags & IR3_INSTR_A1EN)
+     printf(".a1en");
+   if (instr->flags & IR3_INSTR_B) {
+     printf(".base%d",
+           is_tex(instr) ? instr->cat5.tex_base : instr->cat6.base);
+   }
    if (instr->flags & IR3_INSTR_S2EN)
      printf(".s2en");
 }
@@ -210,8 +216,18 @@ print_instr(struct ir3_instruction *instr, int lvl)
      print_reg_name(reg);
    }
 
-   if (is_tex(instr) && !(instr->flags & IR3_INSTR_S2EN))
-     printf(", s#%d, t#%d", instr->cat5.samp, instr->cat5.tex);
+   if (is_tex(instr) && !(instr->flags & IR3_INSTR_S2EN)) {
+     if (!!(instr->flags & IR3_INSTR_B)) {
+        if (!!(instr->flags & IR3_INSTR_A1EN)) {
+           printf(", s#%d", instr->cat5.samp);
+        } else {
+           printf(", s#%d, t#%d", instr->cat5.samp & 0xf,
+                 instr->cat5.samp >> 4);
+        }
+     } else {
+        printf(", s#%d, t#%d", instr->cat5.samp, instr->cat5.tex);
+     }
+   }
 
    if (instr->address) {
      printf(", address=_");
@@ -204,6 +204,7 @@ struct ir3_stream_output_info {
  * encode the return type (in 3 bits) but it hasn't been verified yet.
  */
 #define IR3_SAMPLER_PREFETCH_CMD 0x4
+#define IR3_SAMPLER_BINDLESS_PREFETCH_CMD 0x6
 
 /**
  * Stream output for texture sampling pre-dispatches.
@@ -212,6 +213,8 @@ struct ir3_sampler_prefetch {
    uint8_t src;
    uint8_t samp_id;
    uint8_t tex_id;
+   uint16_t samp_bindless_id;
+   uint16_t tex_bindless_id;
    uint8_t dst;
    uint8_t wrmask;
    uint8_t half_precision;
@@ -563,6 +566,12 @@ struct ir3_shader_variant {
    /* do we have one or more SSBO instructions: */
    bool has_ssbo;
 
+   /* Which bindless resources are used, for filling out sp_xs_config */
+   bool bindless_tex;
+   bool bindless_samp;
+   bool bindless_ibo;
+   bool bindless_ubo;
+
    /* do we need derivatives: */
    bool need_pixlod;
 