v3d: Add support for shader_image_load_store.
This is only exposed on V3D 4.1+, because 3.3 lacks the TMU write operations for images (to do GLES 3.1 there, you would have to lower image access to SSBO load/stores, which is a problem to solve later).
@@ -38,6 +38,7 @@ BROADCOM_FILES = \
        compiler/v3d33_vpm_setup.c \
        compiler/v3d40_tex.c \
        compiler/v3d_compiler.h \
        compiler/v3d_nir_lower_image_load_store.c \
        compiler/v3d_nir_lower_io.c \
        compiler/v3d_nir_lower_txf_ms.c \
        qpu/qpu_disasm.c \

@@ -36,6 +36,7 @@ libbroadcom_compiler_files = files(
  'v3d33_vpm_setup.c',
  'v3d_compiler.h',
  'v3d_nir_lower_io.c',
  'v3d_nir_lower_image_load_store.c',
  'v3d_nir_lower_txf_ms.c',
)
@@ -1692,6 +1692,32 @@ ntq_emit_ssa_undef(struct v3d_compile *c, nir_ssa_undef_instr *instr)
                qregs[i] = vir_uniform_ui(c, 0);
}

static void
ntq_emit_image_size(struct v3d_compile *c, nir_intrinsic_instr *instr)
{
        assert(instr->intrinsic == nir_intrinsic_image_deref_size);
        nir_variable *var = nir_intrinsic_get_var(instr, 0);
        unsigned image_index = var->data.driver_location;
        const struct glsl_type *sampler_type = glsl_without_array(var->type);
        bool is_array = glsl_sampler_type_is_array(sampler_type);

        ntq_store_dest(c, &instr->dest, 0,
                       vir_uniform(c, QUNIFORM_IMAGE_WIDTH, image_index));
        if (instr->num_components > 1) {
                ntq_store_dest(c, &instr->dest, 1,
                               vir_uniform(c, QUNIFORM_IMAGE_HEIGHT,
                                           image_index));
        }
        if (instr->num_components > 2) {
                ntq_store_dest(c, &instr->dest, 2,
                               vir_uniform(c,
                                           is_array ?
                                           QUNIFORM_IMAGE_ARRAY_SIZE :
                                           QUNIFORM_IMAGE_DEPTH,
                                           image_index));
        }
}

static void
ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
{

@@ -1734,6 +1760,19 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
                ntq_emit_tmu_general(c, instr);
                break;

        case nir_intrinsic_image_deref_load:
        case nir_intrinsic_image_deref_store:
        case nir_intrinsic_image_deref_atomic_add:
        case nir_intrinsic_image_deref_atomic_min:
        case nir_intrinsic_image_deref_atomic_max:
        case nir_intrinsic_image_deref_atomic_and:
        case nir_intrinsic_image_deref_atomic_or:
        case nir_intrinsic_image_deref_atomic_xor:
        case nir_intrinsic_image_deref_atomic_exchange:
        case nir_intrinsic_image_deref_atomic_comp_swap:
                v3d40_vir_emit_image_load_store(c, instr);
                break;

        case nir_intrinsic_get_buffer_size:
                ntq_store_dest(c, &instr->dest, 0,
                               vir_uniform(c, QUNIFORM_GET_BUFFER_SIZE,

@@ -1807,6 +1846,10 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
                                        offset + instr->num_components);
                break;

        case nir_intrinsic_image_deref_size:
                ntq_emit_image_size(c, instr);
                break;

        case nir_intrinsic_discard:
                if (c->execute.file != QFILE_NULL) {
                        vir_PF(c, c->execute, V3D_QPU_PF_PUSHZ);

@@ -1846,6 +1889,7 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
        case nir_intrinsic_memory_barrier:
        case nir_intrinsic_memory_barrier_atomic_counter:
        case nir_intrinsic_memory_barrier_buffer:
        case nir_intrinsic_memory_barrier_image:
                /* We don't do any instruction scheduling of these NIR
                 * instructions between each other, so we just need to make
                 * sure that the TMU operations before the barrier are flushed

@@ -2066,6 +2110,10 @@ static void
ntq_emit_instr(struct v3d_compile *c, nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_deref:
                /* ignored, will be walked by the intrinsic using it. */
                break;

        case nir_instr_type_alu:
                ntq_emit_alu(c, nir_instr_as_alu(instr));
                break;
@@ -22,6 +22,7 @@
 */

#include "v3d_compiler.h"
#include "nir_deref.h"

/* We don't do any address packing. */
#define __gen_user_data void

@@ -51,14 +52,19 @@ vir_WRTMUC(struct v3d_compile *c, enum quniform_contents contents, uint32_t data
        inst->src[0] = vir_uniform(c, contents, data);
}

static const struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked_default = {
        .per_pixel_mask_enable = true,
};

static const struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked_default = {
        .op = V3D_TMU_OP_REGULAR,
};

void
v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;
        int tmu_writes = 0;
        static const struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked_default = {
                .op = V3D_TMU_OP_REGULAR,
        };

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };
@@ -229,3 +235,173 @@ v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
                        ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
        }
}

static void
type_size_align_1(const struct glsl_type *type, unsigned *size, unsigned *align)
{
        *size = 1;
        *align = 1;
}

void
v3d40_vir_emit_image_load_store(struct v3d_compile *c,
                                nir_intrinsic_instr *instr)
{
        nir_variable *var = nir_intrinsic_get_var(instr, 0);
        const struct glsl_type *sampler_type = glsl_without_array(var->type);
        unsigned unit = (var->data.driver_location +
                         nir_deref_instr_get_const_offset(nir_src_as_deref(instr->src[0]),
                                                          type_size_align_1));
        int tmu_writes = 0;

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                .per_pixel_mask_enable = true,
                .output_type_32_bit = v3d_gl_format_is_return_32(var->data.image.format),
        };

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = { 0 };

        /* XXX perf: We should turn add/sub of 1 to inc/dec.  Perhaps NIR
         * wants to have support for inc/dec?
         */
        switch (instr->intrinsic) {
        case nir_intrinsic_image_deref_load:
        case nir_intrinsic_image_deref_store:
                p2_unpacked.op = V3D_TMU_OP_REGULAR;
                break;
        case nir_intrinsic_image_deref_atomic_add:
                p2_unpacked.op = V3D_TMU_OP_WRITE_ADD_READ_PREFETCH;
                break;
        case nir_intrinsic_image_deref_atomic_min:
                p2_unpacked.op = V3D_TMU_OP_WRITE_UMIN_FULL_L1_CLEAR;
                break;

        case nir_intrinsic_image_deref_atomic_max:
                p2_unpacked.op = V3D_TMU_OP_WRITE_UMAX;
                break;
        case nir_intrinsic_image_deref_atomic_and:
                p2_unpacked.op = V3D_TMU_OP_WRITE_AND_READ_INC;
                break;
        case nir_intrinsic_image_deref_atomic_or:
                p2_unpacked.op = V3D_TMU_OP_WRITE_OR_READ_DEC;
                break;
        case nir_intrinsic_image_deref_atomic_xor:
                p2_unpacked.op = V3D_TMU_OP_WRITE_XOR_READ_NOT;
                break;
        case nir_intrinsic_image_deref_atomic_exchange:
                p2_unpacked.op = V3D_TMU_OP_WRITE_XCHG_READ_FLUSH;
                break;
        case nir_intrinsic_image_deref_atomic_comp_swap:
                p2_unpacked.op = V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH;
                break;
        default:
                unreachable("unknown image intrinsic");
        };
        bool is_1d = false;
        switch (glsl_get_sampler_dim(sampler_type)) {
        case GLSL_SAMPLER_DIM_1D:
                is_1d = true;
                break;
        case GLSL_SAMPLER_DIM_BUF:
                break;
        case GLSL_SAMPLER_DIM_2D:
        case GLSL_SAMPLER_DIM_RECT:
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                              ntq_get_src(c, instr->src[1], 1), &tmu_writes);
                break;
        case GLSL_SAMPLER_DIM_3D:
        case GLSL_SAMPLER_DIM_CUBE:
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                              ntq_get_src(c, instr->src[1], 1), &tmu_writes);
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
                              ntq_get_src(c, instr->src[1], 2), &tmu_writes);
                break;
        default:
                unreachable("bad image sampler dim");
        }

        if (glsl_sampler_type_is_array(sampler_type)) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
                              ntq_get_src(c, instr->src[1],
                                          is_1d ? 1 : 2), &tmu_writes);
        }

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        uint32_t instr_return_channels = nir_intrinsic_dest_components(instr);
        if (!p1_unpacked.output_type_32_bit)
                instr_return_channels = (instr_return_channels + 1) / 2;

        p0_unpacked.return_words_of_texture_data =
                (1 << instr_return_channels) - 1;

        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        uint32_t p1_packed;
        V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                          (uint8_t *)&p1_packed,
                                          &p1_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);

        /* Load unit number into the high bits of the texture or sampler
         * address field, which will be used by the driver to decide which
         * texture to put in the actual address field.
         */
        p0_packed |= unit << 24;

        vir_WRTMUC(c, QUNIFORM_IMAGE_TMU_CONFIG_P0, p0_packed);
        if (memcmp(&p1_unpacked, &p1_unpacked_default, sizeof(p1_unpacked)) != 0)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p1_packed);
        if (memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)) != 0)
                vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);

        /* Emit the data writes for atomics or image store. */
        if (instr->intrinsic != nir_intrinsic_image_deref_load) {
                /* Vector for stores, or first atomic argument */
                struct qreg src[4];
                for (int i = 0; i < nir_intrinsic_src_components(instr, 3); i++) {
                        src[i] = ntq_get_src(c, instr->src[3], i);
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD, src[i],
                                      &tmu_writes);
                }

                /* Second atomic argument */
                if (instr->intrinsic ==
                    nir_intrinsic_image_deref_atomic_comp_swap) {
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD,
                                      ntq_get_src(c, instr->src[4], 0),
                                      &tmu_writes);
                }
        }

        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, ntq_get_src(c, instr->src[1], 0),
                      &tmu_writes);

        vir_emit_thrsw(c);

        /* The input FIFO has 16 slots across all threads, so make sure we
         * don't overfill our allocation.
         */
        while (tmu_writes > 16 / c->threads)
                c->threads /= 2;

        for (int i = 0; i < 4; i++) {
                if (p0_unpacked.return_words_of_texture_data & (1 << i))
                        ntq_store_dest(c, &instr->dest, i, vir_LDTMU(c));
        }

        if (nir_intrinsic_dest_components(instr) == 0)
                vir_TMUWT(c);
}
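
The QUNIFORM_IMAGE_TMU_CONFIG_P0 value built above carries two things in one 32-bit uniform: the image unit in bits 31:24 (the "p0_packed |= unit << 24" line) and the packed TMU_CONFIG_PARAMETER_0 bits in bits 23:0; write_image_tmu_p0() later in this diff splits them apart again. A minimal sketch of that convention (illustration only, not part of the patch; the helper names are made up):

#include <stdint.h>

/* Illustrative only: mirrors "p0_packed |= unit << 24" on the compiler side
 * and "data >> 24" / "data & 0x00ffffff" in write_image_tmu_p0(). */
static inline uint32_t
image_p0_uniform_pack(uint32_t unit, uint32_t p0_packed)
{
        return (unit << 24) | (p0_packed & 0x00ffffff);
}

static inline void
image_p0_uniform_unpack(uint32_t data, uint32_t *unit, uint32_t *p0)
{
        *unit = data >> 24;
        *p0 = data & 0x00ffffff;
}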

@@ -230,6 +230,8 @@ enum quniform_contents {
        QUNIFORM_TMU_CONFIG_P0,
        QUNIFORM_TMU_CONFIG_P1,

        QUNIFORM_IMAGE_TMU_CONFIG_P0,

        QUNIFORM_TEXTURE_FIRST_LEVEL,

        QUNIFORM_TEXTURE_WIDTH,

@@ -249,6 +251,12 @@ enum quniform_contents {
        /* Returns the size of the SSBO given by the data value. */
        QUNIFORM_GET_BUFFER_SIZE,

        /* Sizes (in pixels) of a shader image given by the data value. */
        QUNIFORM_IMAGE_WIDTH,
        QUNIFORM_IMAGE_HEIGHT,
        QUNIFORM_IMAGE_DEPTH,
        QUNIFORM_IMAGE_ARRAY_SIZE,

        QUNIFORM_ALPHA_REF,

        /**

@@ -792,12 +800,15 @@ bool vir_opt_vpm(struct v3d_compile *c);
void v3d_nir_lower_blend(nir_shader *s, struct v3d_compile *c);
void v3d_nir_lower_io(nir_shader *s, struct v3d_compile *c);
void v3d_nir_lower_txf_ms(nir_shader *s, struct v3d_compile *c);
void v3d_nir_lower_image_load_store(nir_shader *s);
void vir_lower_uniforms(struct v3d_compile *c);

void v3d33_vir_vpm_read_setup(struct v3d_compile *c, int num_components);
void v3d33_vir_vpm_write_setup(struct v3d_compile *c);
void v3d33_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr);
void v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr);
void v3d40_vir_emit_image_load_store(struct v3d_compile *c,
                                     nir_intrinsic_instr *instr);

void v3d_vir_to_qpu(struct v3d_compile *c, struct qpu_reg *temp_registers);
uint32_t v3d_qpu_schedule_instructions(struct v3d_compile *c);

@@ -805,6 +816,8 @@ void qpu_validate(struct v3d_compile *c);
struct qpu_reg *v3d_register_allocate(struct v3d_compile *c, bool *spilled);
bool vir_init_reg_sets(struct v3d_compiler *compiler);

bool v3d_gl_format_is_return_32(GLenum format);

void vir_PF(struct v3d_compile *c, struct qreg src, enum v3d_qpu_pf pf);

static inline bool
src/broadcom/compiler/v3d_nir_lower_image_load_store.c (new file, 390 lines)
@@ -0,0 +1,390 @@
/*
 * Copyright © 2018 Intel Corporation
 * Copyright © 2018 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "v3d_compiler.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_format_convert.h"

/** @file v3d_nir_lower_image_load_store.c
 *
 * Performs any necessary lowering of GL_ARB_shader_image_load_store
 * operations.
 *
 * On V3D 4.x, we just need to do format conversion for stores such that the
 * GPU can effectively memcpy the arguments (in increments of 32-bit words)
 * into the texel.  Loads are the same as texturing, where we may need to
 * unpack from 16-bit ints or floats.
 *
 * On V3D 3.x, to implement image load store we would need to do manual tiling
 * calculations and load/store using the TMU general memory access path.
 */

bool
v3d_gl_format_is_return_32(GLenum format)
{
        switch (format) {
        case GL_R8:
        case GL_R8_SNORM:
        case GL_R8UI:
        case GL_R8I:
        case GL_RG8:
        case GL_RG8_SNORM:
        case GL_RG8UI:
        case GL_RG8I:
        case GL_RGBA8:
        case GL_RGBA8_SNORM:
        case GL_RGBA8UI:
        case GL_RGBA8I:
        case GL_R11F_G11F_B10F:
        case GL_RGB10_A2:
        case GL_RGB10_A2UI:
        case GL_R16F:
        case GL_R16UI:
        case GL_R16I:
        case GL_RG16F:
        case GL_RG16UI:
        case GL_RG16I:
        case GL_RGBA16F:
        case GL_RGBA16UI:
        case GL_RGBA16I:
                return false;
        case GL_R16:
        case GL_R16_SNORM:
        case GL_RG16:
        case GL_RG16_SNORM:
        case GL_RGBA16:
        case GL_RGBA16_SNORM:
        case GL_R32F:
        case GL_R32UI:
        case GL_R32I:
        case GL_RG32F:
        case GL_RG32UI:
        case GL_RG32I:
        case GL_RGBA32F:
        case GL_RGBA32UI:
        case GL_RGBA32I:
                return true;
        default:
                unreachable("Invalid image format");
        }
}
/* Packs a 32-bit vector of colors in the range [0, (1 << bits[i]) - 1] to a
 * 32-bit SSA value, with as many channels as necessary to store all the bits
 */
static nir_ssa_def *
pack_bits(nir_builder *b, nir_ssa_def *color, const unsigned *bits,
          int num_components, bool mask)
{
        nir_ssa_def *results[4];
        int offset = 0;
        for (int i = 0; i < num_components; i++) {
                nir_ssa_def *chan = nir_channel(b, color, i);

                /* Channels being stored shouldn't cross a 32-bit boundary. */
                assert((offset & ~31) == ((offset + bits[i] - 1) & ~31));

                if (mask) {
                        chan = nir_iand(b, chan,
                                        nir_imm_int(b, (1 << bits[i]) - 1));
                }

                if (offset % 32 == 0) {
                        results[offset / 32] = chan;
                } else {
                        results[offset / 32] =
                                nir_ior(b, results[offset / 32],
                                        nir_ishl(b, chan,
                                                 nir_imm_int(b, offset % 32)));
                }
                offset += bits[i];
        }

        return nir_vec(b, results, DIV_ROUND_UP(offset, 32));
}

static nir_ssa_def *
pack_unorm(nir_builder *b, nir_ssa_def *color, const unsigned *bits,
           int num_components)
{
        color = nir_channels(b, color, (1 << num_components) - 1);
        color = nir_format_float_to_unorm(b, color, bits);
        return pack_bits(b, color, bits, color->num_components, false);
}

static nir_ssa_def *
pack_snorm(nir_builder *b, nir_ssa_def *color, const unsigned *bits,
           int num_components)
{
        color = nir_channels(b, color, (1 << num_components) - 1);
        color = nir_format_float_to_snorm(b, color, bits);
        return pack_bits(b, color, bits, color->num_components, true);
}

static nir_ssa_def *
pack_uint(nir_builder *b, nir_ssa_def *color, const unsigned *bits,
          int num_components)
{
        color = nir_channels(b, color, (1 << num_components) - 1);
        color = nir_format_clamp_uint(b, color, bits);
        return pack_bits(b, color, bits, num_components, false);
}
static nir_ssa_def *
pack_sint(nir_builder *b, nir_ssa_def *color, const unsigned *bits,
          int num_components)
{
        color = nir_channels(b, color, (1 << num_components) - 1);
        color = nir_format_clamp_sint(b, color, bits);
        return pack_bits(b, color, bits, num_components, true);
}
static nir_ssa_def *
pack_half(nir_builder *b, nir_ssa_def *color, const unsigned *bits,
          int num_components)
{
        color = nir_channels(b, color, (1 << num_components) - 1);
        color = nir_format_float_to_half(b, color);
        return pack_bits(b, color, bits, color->num_components, false);
}
static void
v3d_nir_lower_image_store(nir_builder *b, nir_intrinsic_instr *instr)
{
        nir_variable *var = nir_intrinsic_get_var(instr, 0);
        GLenum format = var->data.image.format;
        static const unsigned bits_8[4] = {8, 8, 8, 8};
        static const unsigned bits_16[4] = {16, 16, 16, 16};
        static const unsigned bits_1010102[4] = {10, 10, 10, 2};

        b->cursor = nir_before_instr(&instr->instr);

        nir_ssa_def *unformatted = nir_ssa_for_src(b, instr->src[3], 4);
        nir_ssa_def *formatted = NULL;
        switch (format) {
        case GL_RGBA32F:
        case GL_RGBA32UI:
        case GL_RGBA32I:
                /* For 4-component 32-bit components, there's no packing to be
                 * done.
                 */
                return;

        case GL_R32F:
        case GL_R32UI:
        case GL_R32I:
                /* For other 32-bit components, just reduce the size of
                 * the input vector.
                 */
                formatted = nir_channels(b, unformatted, 1);
                break;
        case GL_RG32F:
        case GL_RG32UI:
        case GL_RG32I:
                formatted = nir_channels(b, unformatted, 2);
                break;

        case GL_R8:
                formatted = pack_unorm(b, unformatted, bits_8, 1);
                break;
        case GL_RG8:
                formatted = pack_unorm(b, unformatted, bits_8, 2);
                break;
        case GL_RGBA8:
                formatted = pack_unorm(b, unformatted, bits_8, 4);
                break;

        case GL_R8_SNORM:
                formatted = pack_snorm(b, unformatted, bits_8, 1);
                break;
        case GL_RG8_SNORM:
                formatted = pack_snorm(b, unformatted, bits_8, 2);
                break;
        case GL_RGBA8_SNORM:
                formatted = pack_snorm(b, unformatted, bits_8, 4);
                break;

        case GL_R16:
                formatted = pack_unorm(b, unformatted, bits_16, 1);
                break;
        case GL_RG16:
                formatted = pack_unorm(b, unformatted, bits_16, 2);
                break;
        case GL_RGBA16:
                formatted = pack_unorm(b, unformatted, bits_16, 4);
                break;

        case GL_R16_SNORM:
                formatted = pack_snorm(b, unformatted, bits_16, 1);
                break;
        case GL_RG16_SNORM:
                formatted = pack_snorm(b, unformatted, bits_16, 2);
                break;
        case GL_RGBA16_SNORM:
                formatted = pack_snorm(b, unformatted, bits_16, 4);
                break;

        case GL_R16F:
                formatted = pack_half(b, unformatted, bits_16, 1);
                break;
        case GL_RG16F:
                formatted = pack_half(b, unformatted, bits_16, 2);
                break;
        case GL_RGBA16F:
                formatted = pack_half(b, unformatted, bits_16, 4);
                break;

        case GL_R8UI:
                formatted = pack_uint(b, unformatted, bits_8, 1);
                break;
        case GL_R8I:
                formatted = pack_sint(b, unformatted, bits_8, 1);
                break;
        case GL_RG8UI:
                formatted = pack_uint(b, unformatted, bits_8, 2);
                break;
        case GL_RG8I:
                formatted = pack_sint(b, unformatted, bits_8, 2);
                break;
        case GL_RGBA8UI:
                formatted = pack_uint(b, unformatted, bits_8, 4);
                break;
        case GL_RGBA8I:
                formatted = pack_sint(b, unformatted, bits_8, 4);
                break;

        case GL_R16UI:
                formatted = pack_uint(b, unformatted, bits_16, 1);
                break;
        case GL_R16I:
                formatted = pack_sint(b, unformatted, bits_16, 1);
                break;
        case GL_RG16UI:
                formatted = pack_uint(b, unformatted, bits_16, 2);
                break;
        case GL_RG16I:
                formatted = pack_sint(b, unformatted, bits_16, 2);
                break;
        case GL_RGBA16UI:
                formatted = pack_uint(b, unformatted, bits_16, 4);
                break;
        case GL_RGBA16I:
                formatted = pack_sint(b, unformatted, bits_16, 4);
                break;

        case GL_R11F_G11F_B10F:
                formatted = nir_format_pack_11f11f10f(b, unformatted);
                break;
        case GL_RGB9_E5:
                formatted = nir_format_pack_r9g9b9e5(b, unformatted);
                break;

        case GL_RGB10_A2:
                formatted = pack_unorm(b, unformatted, bits_1010102, 4);
                break;

        case GL_RGB10_A2UI:
                formatted = pack_uint(b, unformatted, bits_1010102, 4);
                break;

        default:
                unreachable("bad format");
        }

        nir_instr_rewrite_src(&instr->instr, &instr->src[3],
                              nir_src_for_ssa(formatted));
        instr->num_components = formatted->num_components;
}
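
As a rough illustration of what the store lowering above produces (editor's sketch with made-up names, not code from the patch): for a GL_RGBA8 image, pack_unorm() followed by pack_bits() turns the vec4 source into a single 32-bit word that the TMU can then memcpy into the texel, roughly equivalent to this CPU-side arithmetic:

#include <math.h>
#include <stdint.h>

/* Hypothetical CPU-side equivalent of pack_unorm(bits_8, 4) + pack_bits():
 * each float channel becomes an 8-bit unorm value OR'd into one word. */
static uint32_t
example_pack_rgba8_unorm(const float rgba[4])
{
        uint32_t word = 0;
        for (int i = 0; i < 4; i++) {
                float clamped = fminf(fmaxf(rgba[i], 0.0f), 1.0f);
                uint32_t chan = (uint32_t)roundf(clamped * 255.0f);
                word |= chan << (i * 8);
        }
        return word;
}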

static void
v3d_nir_lower_image_load(nir_builder *b, nir_intrinsic_instr *instr)
{
        static const unsigned bits16[] = {16, 16, 16, 16};
        nir_variable *var = nir_intrinsic_get_var(instr, 0);
        const struct glsl_type *sampler_type = glsl_without_array(var->type);
        enum glsl_base_type base_type =
                glsl_get_sampler_result_type(sampler_type);

        if (v3d_gl_format_is_return_32(var->data.image.format))
                return;

        b->cursor = nir_after_instr(&instr->instr);

        assert(instr->dest.is_ssa);
        nir_ssa_def *result = &instr->dest.ssa;
        if (base_type == GLSL_TYPE_FLOAT) {
                nir_ssa_def *rg = nir_channel(b, result, 0);
                nir_ssa_def *ba = nir_channel(b, result, 1);
                result = nir_vec4(b,
                                  nir_unpack_half_2x16_split_x(b, rg),
                                  nir_unpack_half_2x16_split_y(b, rg),
                                  nir_unpack_half_2x16_split_x(b, ba),
                                  nir_unpack_half_2x16_split_y(b, ba));
        } else if (base_type == GLSL_TYPE_INT) {
                result = nir_format_unpack_sint(b, result, bits16, 4);
        } else {
                assert(base_type == GLSL_TYPE_UINT);
                result = nir_format_unpack_uint(b, result, bits16, 4);
        }

        nir_ssa_def_rewrite_uses_after(&instr->dest.ssa, nir_src_for_ssa(result),
                                       result->parent_instr);
}

void
v3d_nir_lower_image_load_store(nir_shader *s)
{
        nir_foreach_function(function, s) {
                if (!function->impl)
                        continue;

                nir_builder b;
                nir_builder_init(&b, function->impl);

                nir_foreach_block(block, function->impl) {
                        nir_foreach_instr_safe(instr, block) {
                                if (instr->type != nir_instr_type_intrinsic)
                                        continue;

                                nir_intrinsic_instr *intr =
                                        nir_instr_as_intrinsic(instr);

                                switch (intr->intrinsic) {
                                case nir_intrinsic_image_deref_load:
                                        v3d_nir_lower_image_load(&b, intr);
                                        break;
                                case nir_intrinsic_image_deref_store:
                                        v3d_nir_lower_image_store(&b, intr);
                                        break;
                                default:
                                        break;
                                }
                        }
                }

                nir_metadata_preserve(function->impl,
                                      nir_metadata_block_index |
                                      nir_metadata_dominance);
        }
}
@@ -976,6 +976,7 @@ uint64_t *v3d_compile(const struct v3d_compiler *compiler,

        NIR_PASS_V(c->s, v3d_nir_lower_io, c);
        NIR_PASS_V(c->s, v3d_nir_lower_txf_ms, c);
        NIR_PASS_V(c->s, v3d_nir_lower_image_load_store);
        NIR_PASS_V(c->s, nir_lower_idiv);

        v3d_optimize_nir(c->s);

@@ -61,6 +61,12 @@ vir_dump_uniform(enum quniform_contents contents,
                        v3d_tmu_config_data_get_value(data));
                break;

        case QUNIFORM_IMAGE_TMU_CONFIG_P0:
                fprintf(stderr, "img[%d].p0 | 0x%x",
                        v3d_tmu_config_data_get_unit(data),
                        v3d_tmu_config_data_get_value(data));
                break;

        case QUNIFORM_TEXTURE_WIDTH:
                fprintf(stderr, "tex[%d].width", data);
                break;

@@ -77,6 +83,19 @@ vir_dump_uniform(enum quniform_contents contents,
                fprintf(stderr, "tex[%d].levels", data);
                break;

        case QUNIFORM_IMAGE_WIDTH:
                fprintf(stderr, "img[%d].width", data);
                break;
        case QUNIFORM_IMAGE_HEIGHT:
                fprintf(stderr, "img[%d].height", data);
                break;
        case QUNIFORM_IMAGE_DEPTH:
                fprintf(stderr, "img[%d].depth", data);
                break;
        case QUNIFORM_IMAGE_ARRAY_SIZE:
                fprintf(stderr, "img[%d].array_size", data);
                break;

        case QUNIFORM_UBO_ADDR:
                fprintf(stderr, "ubo[%d]", data);
                break;
@@ -58,6 +58,7 @@ void v3d_job_add_bo(struct v3d_job *job, struct v3d_bo *bo);
#define VC5_DIRTY_ZSA (1 << 2)
#define VC5_DIRTY_FRAGTEX (1 << 3)
#define VC5_DIRTY_VERTTEX (1 << 4)
#define VC5_DIRTY_SHADER_IMAGE (1 << 5)

#define VC5_DIRTY_BLEND_COLOR (1 << 7)
#define VC5_DIRTY_STENCIL_REF (1 << 8)

@@ -222,6 +223,18 @@ enum v3d_ez_state {
        VC5_EZ_DISABLED,
};

struct v3d_image_view {
        struct pipe_image_view base;
        /* V3D 4.x texture shader state struct */
        struct pipe_resource *tex_state;
        uint32_t tex_state_offset;
};

struct v3d_shaderimg_stateobj {
        struct v3d_image_view si[PIPE_MAX_SHADER_IMAGES];
        uint32_t enabled_mask;
};

/**
 * A complete bin/render job.
 *

@@ -440,6 +453,7 @@ struct v3d_context {
        struct pipe_clip_state clip;
        struct pipe_viewport_state viewport;
        struct v3d_ssbo_stateobj ssbo[PIPE_SHADER_TYPES];
        struct v3d_shaderimg_stateobj shaderimg[PIPE_SHADER_TYPES];
        struct v3d_constbuf_stateobj constbuf[PIPE_SHADER_TYPES];
        struct v3d_texture_stateobj tex[PIPE_SHADER_TYPES];
        struct v3d_vertexbuf_stateobj vertexbuf;
@@ -240,6 +240,8 @@ static int
v3d_screen_get_shader_param(struct pipe_screen *pscreen, unsigned shader,
                            enum pipe_shader_cap param)
{
        struct v3d_screen *screen = v3d_screen(pscreen);

        if (shader != PIPE_SHADER_VERTEX &&
            shader != PIPE_SHADER_FRAGMENT) {
                return 0;

@@ -298,12 +300,17 @@ v3d_screen_get_shader_param(struct pipe_screen *pscreen, unsigned shader,
                return 1;
        case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
        case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
        case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
                return VC5_MAX_TEXTURE_SAMPLERS;

        case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
                return PIPE_MAX_SHADER_BUFFERS;

        case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
                if (screen->devinfo.ver < 41)
                        return 0;
                else
                        return PIPE_MAX_SHADER_IMAGES;

        case PIPE_SHADER_CAP_PREFERRED_IR:
                return PIPE_SHADER_IR_NIR;
        case PIPE_SHADER_CAP_SUPPORTED_IRS:
@@ -71,6 +71,30 @@ get_texture_size(struct v3d_texture_stateobj *texstate,
        }
}

static uint32_t
get_image_size(struct v3d_shaderimg_stateobj *shaderimg,
               enum quniform_contents contents,
               uint32_t data)
{
        struct v3d_image_view *image = &shaderimg->si[data];

        switch (contents) {
        case QUNIFORM_IMAGE_WIDTH:
                return u_minify(image->base.resource->width0,
                                image->base.u.tex.level);
        case QUNIFORM_IMAGE_HEIGHT:
                return u_minify(image->base.resource->height0,
                                image->base.u.tex.level);
        case QUNIFORM_IMAGE_DEPTH:
                return u_minify(image->base.resource->depth0,
                                image->base.u.tex.level);
        case QUNIFORM_IMAGE_ARRAY_SIZE:
                return image->base.resource->array_size;
        default:
                unreachable("Bad texture size field");
        }
}

static struct v3d_bo *
v3d_upload_ubo(struct v3d_context *v3d,
               struct v3d_compiled_shader *shader,

@@ -158,6 +182,27 @@ write_tmu_p0(struct v3d_job *job,
        v3d_job_add_bo(job, rsc->bo);
}

static void
write_image_tmu_p0(struct v3d_job *job,
                   struct v3d_cl_out **uniforms,
                   struct v3d_shaderimg_stateobj *img,
                   uint32_t data)
{
        /* Extract the image unit from the top bits, and the compiler's
         * packed p0 from the bottom.
         */
        uint32_t unit = data >> 24;
        uint32_t p0 = data & 0x00ffffff;

        struct v3d_image_view *iview = &img->si[unit];
        struct v3d_resource *rsc = v3d_resource(iview->base.resource);

        cl_aligned_reloc(&job->indirect, uniforms,
                         v3d_resource(iview->tex_state)->bo,
                         iview->tex_state_offset | p0);
        v3d_job_add_bo(job, rsc->bo);
}

/** Writes the V3D 4.x TMU configuration parameter 1. */
static void
write_tmu_p1(struct v3d_job *job,

@@ -232,6 +277,11 @@ v3d_write_uniforms(struct v3d_context *v3d, struct v3d_compiled_shader *shader,
                        write_tmu_p1(job, &uniforms, texstate, data);
                        break;

                case QUNIFORM_IMAGE_TMU_CONFIG_P0:
                        write_image_tmu_p0(job, &uniforms,
                                           &v3d->shaderimg[stage], data);
                        break;

                case QUNIFORM_TEXTURE_CONFIG_P1:
                        write_texture_p1(job, &uniforms, texstate,
                                         data);

@@ -256,6 +306,16 @@ v3d_write_uniforms(struct v3d_context *v3d, struct v3d_compiled_shader *shader,
                                                        data));
                        break;

                case QUNIFORM_IMAGE_WIDTH:
                case QUNIFORM_IMAGE_HEIGHT:
                case QUNIFORM_IMAGE_DEPTH:
                case QUNIFORM_IMAGE_ARRAY_SIZE:
                        cl_aligned_u32(&uniforms,
                                       get_image_size(&v3d->shaderimg[stage],
                                                      uinfo->contents[i],
                                                      data));
                        break;

                case QUNIFORM_ALPHA_REF:
                        cl_aligned_f(&uniforms,
                                     v3d->zsa->base.alpha.ref_value);

@@ -382,6 +442,14 @@ v3d_set_shader_uniform_dirty_flags(struct v3d_compiled_shader *shader)
                        dirty |= VC5_DIRTY_SSBO;
                        break;

                case QUNIFORM_IMAGE_TMU_CONFIG_P0:
                case QUNIFORM_IMAGE_WIDTH:
                case QUNIFORM_IMAGE_HEIGHT:
                case QUNIFORM_IMAGE_DEPTH:
                case QUNIFORM_IMAGE_ARRAY_SIZE:
                        dirty |= VC5_DIRTY_SHADER_IMAGE;
                        break;

                case QUNIFORM_ALPHA_REF:
                        dirty |= VC5_DIRTY_ZSA;
                        break;

@@ -148,6 +148,13 @@ v3d_predraw_check_stage_inputs(struct pipe_context *pctx,
                if (cb->buffer)
                        v3d_flush_jobs_writing_resource(v3d, cb->buffer);
        }

        /* Flush writes to our image views */
        foreach_bit(i, v3d->shaderimg[s].enabled_mask) {
                struct v3d_image_view *view = &v3d->shaderimg[s].si[i];

                v3d_flush_jobs_writing_resource(v3d, view->base.resource);
        }
}

static void

@@ -487,6 +494,12 @@ v3d_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
                                               v3d->ssbo[s].sb[i].buffer);
                        job->tmu_dirty_rcl = true;
                }

                foreach_bit(i, v3d->shaderimg[s].enabled_mask) {
                        v3d_job_add_write_resource(job,
                                                   v3d->shaderimg[s].si[i].base.resource);
                        job->tmu_dirty_rcl = true;
                }
        }

        /* Get space to emit our draw call into the BCL, using a branch to
@@ -884,7 +884,7 @@ v3d_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *prsc,
                        tex.srgb = false;
                } else {
                        tex.texture_type = v3d_get_tex_format(&screen->devinfo,
                                                              cso->format);
                                                              cso->format);
                }
        };

@@ -1033,6 +1033,97 @@ v3d_set_shader_buffers(struct pipe_context *pctx,
        v3d->dirty |= VC5_DIRTY_SSBO;
}

static void
v3d_create_image_view_texture_shader_state(struct v3d_context *v3d,
                                           struct v3d_shaderimg_stateobj *so,
                                           int img)
{
#if V3D_VERSION >= 40
        struct v3d_image_view *iview = &so->si[img];

        void *map;
        u_upload_alloc(v3d->uploader, 0, cl_packet_length(TEXTURE_SHADER_STATE),
                       32,
                       &iview->tex_state_offset,
                       &iview->tex_state,
                       &map);

        struct pipe_resource *prsc = iview->base.resource;

        v3dx_pack(map, TEXTURE_SHADER_STATE, tex) {
                v3d_setup_texture_shader_state(&tex, prsc,
                                               iview->base.u.tex.level,
                                               iview->base.u.tex.level,
                                               iview->base.u.tex.first_layer,
                                               iview->base.u.tex.last_layer);

                tex.swizzle_r = translate_swizzle(PIPE_SWIZZLE_X);
                tex.swizzle_g = translate_swizzle(PIPE_SWIZZLE_Y);
                tex.swizzle_b = translate_swizzle(PIPE_SWIZZLE_Z);
                tex.swizzle_a = translate_swizzle(PIPE_SWIZZLE_W);

                tex.texture_type = v3d_get_tex_format(&v3d->screen->devinfo,
                                                      iview->base.format);
        };
#else /* V3D_VERSION < 40 */
        /* V3D 3.x doesn't support shader image load/store operations on
         * textures, so it would get lowered in the shader to general memory
         * accesses.
         */
#endif
}
static void
v3d_set_shader_images(struct pipe_context *pctx,
                      enum pipe_shader_type shader,
                      unsigned start, unsigned count,
                      const struct pipe_image_view *images)
{
        struct v3d_context *v3d = v3d_context(pctx);
        struct v3d_shaderimg_stateobj *so = &v3d->shaderimg[shader];

        if (images) {
                for (unsigned i = 0; i < count; i++) {
                        unsigned n = i + start;
                        struct v3d_image_view *iview = &so->si[n];

                        if ((iview->base.resource == images[i].resource) &&
                            (iview->base.format == images[i].format) &&
                            (iview->base.access == images[i].access) &&
                            !memcmp(&iview->base.u, &images[i].u,
                                    sizeof(iview->base.u)))
                                continue;

                        util_copy_image_view(&iview->base, &images[i]);

                        if (iview->base.resource) {
                                so->enabled_mask |= 1 << n;
                                v3d_create_image_view_texture_shader_state(v3d,
                                                                           so,
                                                                           n);
                        } else {
                                so->enabled_mask &= ~(1 << n);
                                pipe_resource_reference(&iview->tex_state, NULL);
                        }
                }
        } else {
                for (unsigned i = 0; i < count; i++) {
                        unsigned n = i + start;
                        struct v3d_image_view *iview = &so->si[n];

                        pipe_resource_reference(&iview->base.resource, NULL);
                        pipe_resource_reference(&iview->tex_state, NULL);
                }

                if (count == 32)
                        so->enabled_mask = 0;
                else
                        so->enabled_mask &= ~(((1 << count) - 1) << start);
        }

        v3d->dirty |= VC5_DIRTY_SHADER_IMAGE;
}

void
v3dX(state_init)(struct pipe_context *pctx)
{

@@ -1073,6 +1164,7 @@ v3dX(state_init)(struct pipe_context *pctx)
        pctx->set_sampler_views = v3d_set_sampler_views;

        pctx->set_shader_buffers = v3d_set_shader_buffers;
        pctx->set_shader_images = v3d_set_shader_images;

        pctx->create_stream_output_target = v3d_create_stream_output_target;
        pctx->stream_output_target_destroy = v3d_stream_output_target_destroy;