v3d: Add Compute Shader support
Now that the UAPI has landed, add the pipe_context function for dispatching compute shaders. This is the last major feature for GLES 3.1, though it's not enabled quite yet.

committed by Jose Maria Casanova Crespo
parent 2d8b51ea4d
commit 66e2d3b69f
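
For orientation before the diff, a rough, hedged sketch (not code from this commit) of the Gallium interface being implemented: compute dispatch reaches a driver through the pipe_context::launch_grid hook, which receives the workgroup ("block") size and grid dimensions in a struct pipe_grid_info. The example_* names below are hypothetical; only pipe_context, pipe_grid_info, and launch_grid come from Gallium.

#include "pipe/p_context.h"
#include "pipe/p_state.h"

/* Hypothetical driver hook: translate the grid info into a hardware
 * dispatch.  A real driver packs these values into its kernel submit
 * ioctl, as the v3d code below does for the CSD queue.
 */
static void
example_launch_grid(struct pipe_context *pctx,
                    const struct pipe_grid_info *info)
{
        /* info->block[] is the workgroup size from the shader's layout
         * declaration; info->grid[] is the workgroup count, unless
         * info->indirect supplies the counts from a GPU buffer.
         */
        unsigned wg_size = info->block[0] * info->block[1] * info->block[2];
        unsigned wg_count = info->grid[0] * info->grid[1] * info->grid[2];

        (void)wg_size;
        (void)wg_count;
        /* ...fill in and submit the hardware dispatch descriptor here... */
}

/* Hypothetical context-init helper: drivers expose compute by setting
 * the launch_grid hook, typically only when the kernel reports support.
 */
static void
example_init_compute(struct pipe_context *pctx)
{
        pctx->launch_grid = example_launch_grid;
}

The diff below wires up exactly this hook for v3d (v3d_launch_grid), gated on the kernel advertising DRM_V3D_PARAM_SUPPORTS_CSD.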
@@ -625,11 +625,14 @@ v3d_transform_feedback_enabled(struct v3d_context *v3d)
 
 void v3d_set_shader_uniform_dirty_flags(struct v3d_compiled_shader *shader);
 struct v3d_cl_reloc v3d_write_uniforms(struct v3d_context *v3d,
+                                       struct v3d_job *job,
                                        struct v3d_compiled_shader *shader,
                                        enum pipe_shader_type stage);
 
 void v3d_flush(struct pipe_context *pctx);
 void v3d_job_init(struct v3d_context *v3d);
+struct v3d_job *v3d_job_create(struct v3d_context *v3d);
+void v3d_job_free(struct v3d_context *v3d, struct v3d_job *job);
 struct v3d_job *v3d_get_job(struct v3d_context *v3d,
                             struct pipe_surface **cbufs,
                             struct pipe_surface *zsbuf);
@@ -37,7 +37,7 @@
 #include "util/set.h"
 #include "broadcom/clif/clif_dump.h"
 
-static void
+void
 v3d_job_free(struct v3d_context *v3d, struct v3d_job *job)
 {
         set_foreach(job->bos, entry) {
@@ -85,7 +85,7 @@ v3d_job_free(struct v3d_context *v3d, struct v3d_job *job)
         ralloc_free(job);
 }
 
-static struct v3d_job *
+struct v3d_job *
 v3d_job_create(struct v3d_context *v3d)
 {
         struct v3d_job *job = rzalloc(v3d, struct v3d_job);
@@ -669,7 +669,7 @@ v3d_screen_create(int fd, const struct pipe_screen_config *config,
 
         slab_create_parent(&screen->transfer_pool, sizeof(struct v3d_transfer), 16);
 
-        screen->has_csd = false; /* until the UABI is enabled. */
+        screen->has_csd = v3d_has_feature(screen, DRM_V3D_PARAM_SUPPORTS_CSD);
 
         v3d_fence_init(screen);
 
@@ -515,6 +515,28 @@ v3d_simulator_submit_tfu_ioctl(int fd, struct drm_v3d_submit_tfu *args)
         return ret;
 }
 
+static int
+v3d_simulator_submit_csd_ioctl(int fd, struct drm_v3d_submit_csd *args)
+{
+        struct v3d_simulator_file *file = v3d_get_simulator_file_for_fd(fd);
+        uint32_t *bo_handles = (uint32_t *)(uintptr_t)args->bo_handles;
+        int ret;
+
+        for (int i = 0; i < args->bo_handle_count; i++)
+                v3d_simulator_copy_in_handle(file, bo_handles[i]);
+
+        if (sim_state.ver >= 41)
+                ret = v3d41_simulator_submit_csd_ioctl(sim_state.v3d, args,
+                                                       file->gmp->ofs);
+        else
+                ret = -1;
+
+        for (int i = 0; i < args->bo_handle_count; i++)
+                v3d_simulator_copy_out_handle(file, bo_handles[i]);
+
+        return ret;
+}
+
 int
 v3d_simulator_ioctl(int fd, unsigned long request, void *args)
 {
@@ -545,6 +567,9 @@ v3d_simulator_ioctl(int fd, unsigned long request, void *args)
         case DRM_IOCTL_V3D_SUBMIT_TFU:
                 return v3d_simulator_submit_tfu_ioctl(fd, args);
 
+        case DRM_IOCTL_V3D_SUBMIT_CSD:
+                return v3d_simulator_submit_csd_ioctl(fd, args);
+
         case DRM_IOCTL_GEM_OPEN:
         case DRM_IOCTL_GEM_FLINK:
                 return drmIoctl(fd, request, args);
@@ -206,13 +206,13 @@ write_tmu_p1(struct v3d_job *job,
 }
 
 struct v3d_cl_reloc
-v3d_write_uniforms(struct v3d_context *v3d, struct v3d_compiled_shader *shader,
+v3d_write_uniforms(struct v3d_context *v3d, struct v3d_job *job,
+                   struct v3d_compiled_shader *shader,
                    enum pipe_shader_type stage)
 {
         struct v3d_constbuf_stateobj *cb = &v3d->constbuf[stage];
         struct v3d_texture_stateobj *texstate = &v3d->tex[stage];
         struct v3d_uniform_list *uinfo = &shader->prog_data.base->uniforms;
-        struct v3d_job *job = v3d->job;
         const uint32_t *gallium_uniforms = cb->cb[0].user_buffer;
 
         /* We always need to return some space for uniforms, because the HW
@@ -44,6 +44,9 @@ void v3dX(simulator_submit_cl_ioctl)(struct v3d_hw *v3d,
                                      uint32_t gmp_offset);
 int v3dX(simulator_submit_tfu_ioctl)(struct v3d_hw *v3d,
                                      struct drm_v3d_submit_tfu *args);
+int v3dX(simulator_submit_csd_ioctl)(struct v3d_hw *v3d,
+                                     struct drm_v3d_submit_csd *args,
+                                     uint32_t gmp_offset);
 const struct v3d_format *v3dX(get_format_desc)(enum pipe_format f);
 void v3dX(get_internal_type_bpp_for_output_format)(uint32_t format,
                                                    uint32_t *type,
@@ -340,13 +340,13 @@ v3d_emit_gl_shader_state(struct v3d_context *v3d,
 
         /* Upload the uniforms to the indirect CL first */
         struct v3d_cl_reloc fs_uniforms =
-                v3d_write_uniforms(v3d, v3d->prog.fs,
+                v3d_write_uniforms(v3d, job, v3d->prog.fs,
                                    PIPE_SHADER_FRAGMENT);
         struct v3d_cl_reloc vs_uniforms =
-                v3d_write_uniforms(v3d, v3d->prog.vs,
+                v3d_write_uniforms(v3d, job, v3d->prog.vs,
                                    PIPE_SHADER_VERTEX);
         struct v3d_cl_reloc cs_uniforms =
-                v3d_write_uniforms(v3d, v3d->prog.cs,
+                v3d_write_uniforms(v3d, job, v3d->prog.cs,
                                    PIPE_SHADER_VERTEX);
 
         /* Update the cache dirty flag based on the shader progs data */
@@ -958,6 +958,176 @@ v3d_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
         v3d_flush(pctx);
 }
 
+#if V3D_VERSION >= 41
+#define V3D_CSD_CFG012_WG_COUNT_SHIFT 16
+#define V3D_CSD_CFG012_WG_OFFSET_SHIFT 0
+/* Allow this dispatch to start while the last one is still running. */
+#define V3D_CSD_CFG3_OVERLAP_WITH_PREV (1 << 26)
+/* Maximum supergroup ID. 6 bits. */
+#define V3D_CSD_CFG3_MAX_SG_ID_SHIFT 20
+/* Batches per supergroup minus 1. 8 bits. */
+#define V3D_CSD_CFG3_BATCHES_PER_SG_M1_SHIFT 12
+/* Workgroups per supergroup, 0 means 16 */
+#define V3D_CSD_CFG3_WGS_PER_SG_SHIFT 8
+#define V3D_CSD_CFG3_WG_SIZE_SHIFT 0
+
+#define V3D_CSD_CFG5_PROPAGATE_NANS (1 << 2)
+#define V3D_CSD_CFG5_SINGLE_SEG (1 << 1)
+#define V3D_CSD_CFG5_THREADING (1 << 0)
+
+static void
+v3d_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
+{
+        struct v3d_context *v3d = v3d_context(pctx);
+        struct v3d_screen *screen = v3d->screen;
+
+        v3d_predraw_check_stage_inputs(pctx, PIPE_SHADER_COMPUTE);
+
+        v3d_update_compiled_cs(v3d);
+
+        if (!v3d->prog.compute->resource) {
+                static bool warned = false;
+                if (!warned) {
+                        fprintf(stderr,
+                                "Compute shader failed to compile. "
+                                "Expect corruption.\n");
+                        warned = true;
+                }
+                return;
+        }
+
+        /* Some of the units of scale:
+         *
+         * - Batches of 16 work items (shader invocations) that will be queued
+         *   to the run on a QPU at once.
+         *
+         * - Workgroups composed of work items based on the shader's layout
+         *   declaration.
+         *
+         * - Supergroups of 1-16 workgroups. There can only be 16 supergroups
+         *   running at a time on the core, so we want to keep them large to
+         *   keep the QPUs busy, but a whole supergroup will sync at a barrier
+         *   so we want to keep them small if one is present.
+         */
+        struct drm_v3d_submit_csd submit = { 0 };
+        struct v3d_job *job = v3d_job_create(v3d);
+
+        /* Set up the actual number of workgroups, synchronously mapping the
+         * indirect buffer if necessary to get the dimensions.
+         */
+        if (info->indirect) {
+                struct pipe_transfer *transfer;
+                uint32_t *map = pipe_buffer_map_range(pctx, info->indirect,
+                                                      info->indirect_offset,
+                                                      3 * sizeof(uint32_t),
+                                                      PIPE_TRANSFER_READ,
+                                                      &transfer);
+                memcpy(v3d->compute_num_workgroups, map, 3 * sizeof(uint32_t));
+                pipe_buffer_unmap(pctx, transfer);
+
+                if (v3d->compute_num_workgroups[0] == 0 ||
+                    v3d->compute_num_workgroups[1] == 0 ||
+                    v3d->compute_num_workgroups[2] == 0) {
+                        /* Nothing to dispatch, so skip the draw (CSD can't
+                         * handle 0 workgroups).
+                         */
+                        return;
+                }
+        } else {
+                v3d->compute_num_workgroups[0] = info->grid[0];
+                v3d->compute_num_workgroups[1] = info->grid[1];
+                v3d->compute_num_workgroups[2] = info->grid[2];
+        }
+
+        for (int i = 0; i < 3; i++) {
+                submit.cfg[i] |= (v3d->compute_num_workgroups[i] <<
+                                  V3D_CSD_CFG012_WG_COUNT_SHIFT);
+        }
+
+        perf_debug("CSD only using single WG per SG currently, "
+                   "should increase that when possible.");
+        int wgs_per_sg = 1;
+        int wg_size = info->block[0] * info->block[1] * info->block[2];
+        submit.cfg[3] |= wgs_per_sg << V3D_CSD_CFG3_WGS_PER_SG_SHIFT;
+        submit.cfg[3] |= ((DIV_ROUND_UP(wgs_per_sg * wg_size, 16) - 1) <<
+                          V3D_CSD_CFG3_BATCHES_PER_SG_M1_SHIFT);
+        submit.cfg[3] |= (wg_size & 0xff) << V3D_CSD_CFG3_WG_SIZE_SHIFT;
+
+        int batches_per_wg = DIV_ROUND_UP(wg_size, 16);
+        /* Number of batches the dispatch will invoke (minus 1). */
+        submit.cfg[4] = batches_per_wg * (v3d->compute_num_workgroups[0] *
+                                          v3d->compute_num_workgroups[1] *
+                                          v3d->compute_num_workgroups[2]) - 1;
+
+        /* Make sure we didn't accidentally underflow. */
+        assert(submit.cfg[4] != ~0);
+
+        v3d_job_add_bo(job, v3d_resource(v3d->prog.compute->resource)->bo);
+        submit.cfg[5] = (v3d_resource(v3d->prog.compute->resource)->bo->offset +
+                         v3d->prog.compute->offset);
+        submit.cfg[5] |= V3D_CSD_CFG5_PROPAGATE_NANS;
+        if (v3d->prog.compute->prog_data.base->single_seg)
+                submit.cfg[5] |= V3D_CSD_CFG5_SINGLE_SEG;
+        if (v3d->prog.compute->prog_data.base->threads == 4)
+                submit.cfg[5] |= V3D_CSD_CFG5_THREADING;
+
+        if (v3d->prog.compute->prog_data.compute->shared_size) {
+                v3d->compute_shared_memory =
+                        v3d_bo_alloc(v3d->screen,
+                                     v3d->prog.compute->prog_data.compute->shared_size *
+                                     wgs_per_sg,
+                                     "shared_vars");
+        }
+
+        struct v3d_cl_reloc uniforms = v3d_write_uniforms(v3d, job,
+                                                          v3d->prog.compute,
+                                                          PIPE_SHADER_COMPUTE);
+        v3d_job_add_bo(job, uniforms.bo);
+        submit.cfg[6] = uniforms.bo->offset + uniforms.offset;
+
+        /* Pull some job state that was stored in a SUBMIT_CL struct out to
+         * our SUBMIT_CSD struct
+         */
+        submit.bo_handles = job->submit.bo_handles;
+        submit.bo_handle_count = job->submit.bo_handle_count;
+
+        /* Serialize this in the rest of our command stream. */
+        submit.in_sync = v3d->out_sync;
+        submit.out_sync = v3d->out_sync;
+
+        if (!(V3D_DEBUG & V3D_DEBUG_NORAST)) {
+                int ret = v3d_ioctl(screen->fd, DRM_IOCTL_V3D_SUBMIT_CSD,
+                                    &submit);
+                static bool warned = false;
+                if (ret && !warned) {
+                        fprintf(stderr, "CSD submit call returned %s. "
+                                "Expect corruption.\n", strerror(errno));
+                        warned = true;
+                }
+        }
+
+        v3d_job_free(v3d, job);
+
+        /* Mark SSBOs as being written.. we don't actually know which ones are
+         * read vs written, so just assume the worst
+         */
+        foreach_bit(i, v3d->ssbo[PIPE_SHADER_COMPUTE].enabled_mask) {
+                struct v3d_resource *rsc = v3d_resource(
+                        v3d->ssbo[PIPE_SHADER_COMPUTE].sb[i].buffer);
+                rsc->writes++; /* XXX */
+        }
+
+        foreach_bit(i, v3d->shaderimg[PIPE_SHADER_COMPUTE].enabled_mask) {
+                struct v3d_resource *rsc = v3d_resource(
+                        v3d->shaderimg[PIPE_SHADER_COMPUTE].si[i].base.resource);
+                rsc->writes++;
+        }
+
+        v3d_bo_unreference(&uniforms.bo);
+        v3d_bo_unreference(&v3d->compute_shared_memory);
+}
+#endif
+
 /**
  * Implements gallium's clear() hook (glClear()) by drawing a pair of triangles.
  */
@@ -1134,4 +1304,8 @@ v3dX(draw_init)(struct pipe_context *pctx)
         pctx->clear = v3d_clear;
         pctx->clear_render_target = v3d_clear_render_target;
         pctx->clear_depth_stencil = v3d_clear_depth_stencil;
+#if V3D_VERSION >= 41
+        if (v3d_context(pctx)->screen->has_csd)
+                pctx->launch_grid = v3d_launch_grid;
+#endif
 }
@@ -85,6 +85,29 @@ v3d_invalidate_l2t(struct v3d_hw *v3d)
                   (0 << V3D_CTL_0_L2TCACTL_L2TFLM_LSB));
 }
 
+/* Flushes dirty texture cachelines from the L1 write combiner */
+static void
+v3d_flush_l1td(struct v3d_hw *v3d)
+{
+        V3D_WRITE(V3D_CTL_0_L2TCACTL,
+                  V3D_CTL_0_L2TCACTL_TMUWCF_SET);
+
+        assert(!(V3D_READ(V3D_CTL_0_L2TCACTL) & V3D_CTL_0_L2TCACTL_L2TFLS_SET));
+}
+
+/* Flushes dirty texture L2 cachelines */
+static void
+v3d_flush_l2t(struct v3d_hw *v3d)
+{
+        V3D_WRITE(V3D_CTL_0_L2TFLSTA, 0);
+        V3D_WRITE(V3D_CTL_0_L2TFLEND, ~0);
+        V3D_WRITE(V3D_CTL_0_L2TCACTL,
+                  V3D_CTL_0_L2TCACTL_L2TFLS_SET |
+                  (2 << V3D_CTL_0_L2TCACTL_L2TFLM_LSB));
+
+        assert(!(V3D_READ(V3D_CTL_0_L2TCACTL) & V3D_CTL_0_L2TCACTL_L2TFLS_SET));
+}
+
 /* Invalidates the slice caches. These are read-only caches. */
 static void
 v3d_invalidate_slices(struct v3d_hw *v3d)
@@ -116,6 +139,13 @@ v3d_reload_gmp(struct v3d_hw *v3d)
         }
 }
 
+static UNUSED void
+v3d_flush_caches(struct v3d_hw *v3d)
+{
+        v3d_flush_l1td(v3d);
+        v3d_flush_l2t(v3d);
+}
+
 int
 v3dX(simulator_submit_tfu_ioctl)(struct v3d_hw *v3d,
                                  struct drm_v3d_submit_tfu *args)
@@ -142,6 +172,38 @@ v3dX(simulator_submit_tfu_ioctl)(struct v3d_hw *v3d,
         return 0;
 }
 
+#if V3D_VERSION >= 41
+int
+v3dX(simulator_submit_csd_ioctl)(struct v3d_hw *v3d,
+                                 struct drm_v3d_submit_csd *args,
+                                 uint32_t gmp_ofs)
+{
+        g_gmp_ofs = gmp_ofs;
+        v3d_reload_gmp(v3d);
+
+        v3d_invalidate_caches(v3d);
+
+        V3D_WRITE(V3D_CSD_0_QUEUED_CFG1, args->cfg[1]);
+        V3D_WRITE(V3D_CSD_0_QUEUED_CFG2, args->cfg[2]);
+        V3D_WRITE(V3D_CSD_0_QUEUED_CFG3, args->cfg[3]);
+        V3D_WRITE(V3D_CSD_0_QUEUED_CFG4, args->cfg[4]);
+        V3D_WRITE(V3D_CSD_0_QUEUED_CFG5, args->cfg[5]);
+        V3D_WRITE(V3D_CSD_0_QUEUED_CFG6, args->cfg[6]);
+        /* CFG0 kicks off the job */
+        V3D_WRITE(V3D_CSD_0_QUEUED_CFG0, args->cfg[0]);
+
+        while (V3D_READ(V3D_CSD_0_STATUS) &
+               (V3D_CSD_0_STATUS_HAVE_CURRENT_DISPATCH_SET |
+                V3D_CSD_0_STATUS_HAVE_QUEUED_DISPATCH_SET)) {
+                v3d_hw_tick(v3d);
+        }
+
+        v3d_flush_caches(v3d);
+
+        return 0;
+}
+#endif
+
 int
 v3dX(simulator_get_param_ioctl)(struct v3d_hw *v3d,
                                 struct drm_v3d_get_param *args)
@@ -160,6 +222,9 @@ v3dX(simulator_get_param_ioctl)(struct v3d_hw *v3d,
         case DRM_V3D_PARAM_SUPPORTS_TFU:
                 args->value = 1;
                 return 0;
+        case DRM_V3D_PARAM_SUPPORTS_CSD:
+                args->value = V3D_VERSION >= 41;
+                return 0;
         }
 
         if (args->param < ARRAY_SIZE(reg_map) && reg_map[args->param]) {