v3d: Add support for shader_image_load_store.
This is only exposed on V3D 4.1+, because 3.3 lacks the TMU write operations for images. (To do GLES 3.1 there, you would have to lower image access to SSBO load/stores, which is a problem to solve later.)
@@ -22,6 +22,7 @@
  */
 
 #include "v3d_compiler.h"
+#include "nir_deref.h"
 
 /* We don't do any address packing. */
 #define __gen_user_data void
@@ -51,14 +52,19 @@ vir_WRTMUC(struct v3d_compile *c, enum quniform_contents contents, uint32_t data
         inst->src[0] = vir_uniform(c, contents, data);
 }
 
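+/* Default TMU config parameter state, hoisted to file scope so that both
+ * the texture and image paths can skip the P1/P2 WRTMUC writes when the
+ * packed state matches the defaults.
+ */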
+static const struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked_default = {
+        .per_pixel_mask_enable = true,
+};
+
+static const struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked_default = {
+        .op = V3D_TMU_OP_REGULAR,
+};
+
 void
 v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
 {
         unsigned unit = instr->texture_index;
         int tmu_writes = 0;
-        static const struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked_default = {
-                .op = V3D_TMU_OP_REGULAR,
-        };
 
         struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
         };
@@ -229,3 +235,173 @@ v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
                 ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
         }
 }
+
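+/* size/align callback for nir_deref_instr_get_const_offset() that makes
+ * every element count as 1, so deref offsets below are measured in
+ * array-element units rather than bytes.
+ */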
+static void
+type_size_align_1(const struct glsl_type *type, unsigned *size, unsigned *align)
+{
+        *size = 1;
+        *align = 1;
+}
+
+void
+v3d40_vir_emit_image_load_store(struct v3d_compile *c,
+                                nir_intrinsic_instr *instr)
+{
+        nir_variable *var = nir_intrinsic_get_var(instr, 0);
+        const struct glsl_type *sampler_type = glsl_without_array(var->type);
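+        /* Image unit: the variable's driver_location plus any constant
+         * array deref offset (type_size_align_1 makes that offset count
+         * whole elements).
+         */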
+        unsigned unit = (var->data.driver_location +
+                         nir_deref_instr_get_const_offset(nir_src_as_deref(instr->src[0]),
+                                                          type_size_align_1));
+        int tmu_writes = 0;
+
+        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
+        };
+
+        struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
+                .per_pixel_mask_enable = true,
+                .output_type_32_bit = v3d_gl_format_is_return_32(var->data.image.format),
+        };
+
+        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = { 0 };
+
+        /* XXX perf: We should turn add/sub of 1 to inc/dec.  Perhaps NIR
+         * wants to have support for inc/dec?
+         */
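+        /* Note: the V3D TMU op encodings are shared between write and read
+         * ops, so each enum name lists both meanings; only the WRITE_* half
+         * applies here (e.g. V3D_TMU_OP_WRITE_AND_READ_INC is a plain AND
+         * for an atomic write).
+         */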
+        switch (instr->intrinsic) {
+        case nir_intrinsic_image_deref_load:
+        case nir_intrinsic_image_deref_store:
+                p2_unpacked.op = V3D_TMU_OP_REGULAR;
+                break;
+        case nir_intrinsic_image_deref_atomic_add:
+                p2_unpacked.op = V3D_TMU_OP_WRITE_ADD_READ_PREFETCH;
+                break;
+        case nir_intrinsic_image_deref_atomic_min:
+                p2_unpacked.op = V3D_TMU_OP_WRITE_UMIN_FULL_L1_CLEAR;
+                break;
+        case nir_intrinsic_image_deref_atomic_max:
+                p2_unpacked.op = V3D_TMU_OP_WRITE_UMAX;
+                break;
+        case nir_intrinsic_image_deref_atomic_and:
+                p2_unpacked.op = V3D_TMU_OP_WRITE_AND_READ_INC;
+                break;
+        case nir_intrinsic_image_deref_atomic_or:
+                p2_unpacked.op = V3D_TMU_OP_WRITE_OR_READ_DEC;
+                break;
+        case nir_intrinsic_image_deref_atomic_xor:
+                p2_unpacked.op = V3D_TMU_OP_WRITE_XOR_READ_NOT;
+                break;
+        case nir_intrinsic_image_deref_atomic_exchange:
+                p2_unpacked.op = V3D_TMU_OP_WRITE_XCHG_READ_FLUSH;
+                break;
+        case nir_intrinsic_image_deref_atomic_comp_swap:
+                p2_unpacked.op = V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH;
+                break;
+        default:
+                unreachable("unknown image intrinsic");
+        }
+
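+        /* Write the Y (TMUT) and Z (TMUR) coordinates now; the X coordinate
+         * goes to TMUSF last, below, since the S write is what fires off the
+         * TMU operation.
+         */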
+        bool is_1d = false;
+        switch (glsl_get_sampler_dim(sampler_type)) {
+        case GLSL_SAMPLER_DIM_1D:
+                is_1d = true;
+                break;
+        case GLSL_SAMPLER_DIM_BUF:
+                break;
+        case GLSL_SAMPLER_DIM_2D:
+        case GLSL_SAMPLER_DIM_RECT:
+                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
+                              ntq_get_src(c, instr->src[1], 1), &tmu_writes);
+                break;
+        case GLSL_SAMPLER_DIM_3D:
+        case GLSL_SAMPLER_DIM_CUBE:
+                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
+                              ntq_get_src(c, instr->src[1], 1), &tmu_writes);
+                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
+                              ntq_get_src(c, instr->src[1], 2), &tmu_writes);
+                break;
+        default:
+                unreachable("bad image sampler dim");
+        }
+
+        if (glsl_sampler_type_is_array(sampler_type)) {
+                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
+                              ntq_get_src(c, instr->src[1],
+                                          is_1d ? 1 : 2), &tmu_writes);
+        }
+
+        /* Limit the number of channels returned to both how many the NIR
+         * instruction writes and how many the instruction could produce.
+         */
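+        /* With 16-bit returns, two channels are packed into each 32-bit
+         * LDTMU result, so e.g. a vec4 load needs only two return words.
+         */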
+        uint32_t instr_return_channels = nir_intrinsic_dest_components(instr);
+        if (!p1_unpacked.output_type_32_bit)
+                instr_return_channels = (instr_return_channels + 1) / 2;
+
+        p0_unpacked.return_words_of_texture_data =
+                (1 << instr_return_channels) - 1;
+
+        uint32_t p0_packed;
+        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
+                                          (uint8_t *)&p0_packed,
+                                          &p0_unpacked);
+
+        uint32_t p1_packed;
+        V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
+                                          (uint8_t *)&p1_packed,
+                                          &p1_unpacked);
+
+        uint32_t p2_packed;
+        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
+                                          (uint8_t *)&p2_packed,
+                                          &p2_unpacked);
+
+        /* Load unit number into the high bits of the texture or sampler
+         * address field, which will be used by the driver to decide which
+         * texture to put in the actual address field.
+         */
+        p0_packed |= unit << 24;
+
+        vir_WRTMUC(c, QUNIFORM_IMAGE_TMU_CONFIG_P0, p0_packed);
+        if (memcmp(&p1_unpacked, &p1_unpacked_default, sizeof(p1_unpacked)) != 0)
+                vir_WRTMUC(c, QUNIFORM_CONSTANT, p1_packed);
+        if (memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)) != 0)
+                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);
+
+        /* Emit the data writes for atomics or image store. */
+        if (instr->intrinsic != nir_intrinsic_image_deref_load) {
+                /* Vector for stores, or first atomic argument */
+                struct qreg src[4];
+                for (int i = 0; i < nir_intrinsic_src_components(instr, 3); i++) {
+                        src[i] = ntq_get_src(c, instr->src[3], i);
+                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD, src[i],
+                                      &tmu_writes);
+                }
+
+                /* Second atomic argument */
+                if (instr->intrinsic ==
+                    nir_intrinsic_image_deref_atomic_comp_swap) {
+                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD,
+                                      ntq_get_src(c, instr->src[4], 0),
+                                      &tmu_writes);
+                }
+        }
+
+        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, ntq_get_src(c, instr->src[1], 0),
+                      &tmu_writes);
+
+        vir_emit_thrsw(c);
+
+        /* The input FIFO has 16 slots across all threads, so make sure we
+         * don't overfill our allocation.
+         */
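+        /* e.g. in 4-way threaded mode each thread gets 16 / 4 = 4 slots, so
+         * 5+ TMU writes force a drop to 2 threads, and 9+ to 1 thread.
+         */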
+        while (tmu_writes > 16 / c->threads)
+                c->threads /= 2;
+
+        for (int i = 0; i < 4; i++) {
+                if (p0_unpacked.return_words_of_texture_data & (1 << i))
+                        ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
+        }
+
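+        /* An image store has no return words to LDTMU, so emit a TMUWT
+         * instead to close out the TMU write sequence.
+         */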
+        if (nir_intrinsic_dest_components(instr) == 0)
+                vir_TMUWT(c);
+}
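For context beyond this hunk: these intrinsics reach the new helper through the compiler's NIR intrinsic dispatch. A minimal sketch of such a caller follows; the function name ntq_emit_intrinsic and its signature are assumptions for illustration, not part of this diff.

/* Hypothetical dispatch in the NIR->VIR intrinsic handler (sketch only;
 * the case list mirrors the switch in v3d40_vir_emit_image_load_store).
 */
static void
ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
{
        switch (instr->intrinsic) {
        case nir_intrinsic_image_deref_load:
        case nir_intrinsic_image_deref_store:
        case nir_intrinsic_image_deref_atomic_add:
        case nir_intrinsic_image_deref_atomic_min:
        case nir_intrinsic_image_deref_atomic_max:
        case nir_intrinsic_image_deref_atomic_and:
        case nir_intrinsic_image_deref_atomic_or:
        case nir_intrinsic_image_deref_atomic_xor:
        case nir_intrinsic_image_deref_atomic_exchange:
        case nir_intrinsic_image_deref_atomic_comp_swap:
                v3d40_vir_emit_image_load_store(c, instr);
                break;
        default:
                /* ... all other intrinsics ... */
                break;
        }
}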