freedreno/drm/virtio: Support ring_idx
ring_idx zero is the CPU ring; the others map to the priority level, as each
priority level for a given drm_file on the host kernel side maps to a single
fence timeline.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/16086>
This commit is contained in:
@@ -163,7 +163,6 @@ struct drm_virtgpu_resource_create_blob {
 #define VIRTGPU_BLOB_FLAG_USE_MAPPABLE     0x0001
 #define VIRTGPU_BLOB_FLAG_USE_SHAREABLE    0x0002
 #define VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
 #define VIRTGPU_BLOB_FLAG_USE_INTERNAL     0x0008 /* not-mappable, not-shareable */
 	/* zero is invalid blob_mem */
 	__u32 blob_mem;
 	__u32 blob_flags;
|
@@ -25,6 +25,7 @@
 #include <sys/stat.h>
 #include <sys/types.h>

+#include "util/libsync.h"
 #include "util/u_process.h"

 #include "virtio_priv.h"
@@ -63,6 +64,7 @@ set_context(int fd)
 {
    struct drm_virtgpu_context_set_param params[] = {
          { VIRTGPU_CONTEXT_PARAM_CAPSET_ID, VIRGL_RENDERER_CAPSET_DRM },
+         { VIRTGPU_CONTEXT_PARAM_NUM_RINGS, 64 },
    };
    struct drm_virtgpu_context_init args = {
          .num_params = ARRAY_SIZE(params),
@@ -219,10 +221,13 @@ virtio_alloc_rsp(struct fd_device *dev, struct msm_ccmd_req *req, uint32_t sz)
  * Helper for "execbuf" ioctl.. note that in virtgpu execbuf is just
  * a generic "send commands to host", not necessarily specific to
  * cmdstream execution.
+ *
+ * Note that ring_idx 0 is the "CPU ring", ie. for synchronizing btwn
+ * guest and host CPU.
  */
 int
 virtio_execbuf_fenced(struct fd_device *dev, struct msm_ccmd_req *req,
-                      int in_fence_fd, int *out_fence_fd)
+                      int in_fence_fd, int *out_fence_fd, int ring_idx)
 {
    struct virtio_device *virtio_dev = to_virtio_device(dev);
||||
@@ -232,10 +237,12 @@ virtio_execbuf_fenced(struct fd_device *dev, struct msm_ccmd_req *req,
 #define COND(bool, val) ((bool) ? (val) : 0)
    struct drm_virtgpu_execbuffer eb = {
          .flags = COND(out_fence_fd, VIRTGPU_EXECBUF_FENCE_FD_OUT) |
-                  COND(in_fence_fd != -1, VIRTGPU_EXECBUF_FENCE_FD_IN),
+                  COND(in_fence_fd != -1, VIRTGPU_EXECBUF_FENCE_FD_IN) |
+                  VIRTGPU_EXECBUF_RING_IDX,
          .fence_fd = in_fence_fd,
          .size = req->len,
          .command = VOID2U64(req),
+         .ring_idx = ring_idx,
    };

    int ret = drmIoctl(dev->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
||||
@@ -254,13 +261,17 @@ virtio_execbuf_fenced(struct fd_device *dev, struct msm_ccmd_req *req,
 int
 virtio_execbuf(struct fd_device *dev, struct msm_ccmd_req *req, bool sync)
 {
-   int ret = virtio_execbuf_fenced(dev, req, -1, NULL);
+   int fence_fd;
+   int ret = virtio_execbuf_fenced(dev, req, -1, sync ? &fence_fd : NULL, 0);

    if (ret)
       return ret;

-   if (sync)
+   if (sync) {
+      sync_wait(fence_fd, -1);
+      close(fence_fd);
       virtio_host_sync(dev, req);
+   }

    return 0;
 }
|
@@ -95,11 +95,7 @@ virtio_pipe_get_param(struct fd_pipe *pipe, enum fd_param_id param,
    case FD_TIMESTAMP:
       return query_param(pipe, MSM_PARAM_TIMESTAMP, value);
    case FD_NR_RINGS:
-      /* TODO need to not rely on host egl ctx for fence if
-       * we want to support multiple priority levels
-       */
-      return 1;
-      // return query_param(pipe, MSM_PARAM_NR_RINGS, value);
+      return query_param(pipe, MSM_PARAM_NR_RINGS, value);
    case FD_CTX_FAULTS:
       return query_queue_param(pipe, MSM_SUBMITQUEUE_PARAM_FAULTS, value);
    case FD_GLOBAL_FAULTS:
@@ -138,6 +134,8 @@ out:
 static int
 open_submitqueue(struct fd_pipe *pipe, uint32_t prio)
 {
+   struct virtio_pipe *virtio_pipe = to_virtio_pipe(pipe);
+
    struct drm_msm_submitqueue req = {
          .flags = 0,
          .prio = prio,
||||
@@ -155,7 +153,8 @@ open_submitqueue(struct fd_pipe *pipe, uint32_t prio)
       return ret;
    }

-   to_virtio_pipe(pipe)->queue_id = req.id;
+   virtio_pipe->queue_id = req.id;
+   virtio_pipe->ring_idx = req.prio + 1;

    return 0;
 }
|
@@ -66,6 +66,7 @@ struct virtio_pipe {
    uint64_t gmem_base;
    uint32_t gmem;
    uint32_t queue_id;
+   uint32_t ring_idx;
    struct slab_parent_pool ring_pool;

    /**
/**
|
||||
@@ -113,7 +114,7 @@ struct fd_bo *virtio_bo_from_handle(struct fd_device *dev, uint32_t size,
|
||||
*/
|
||||
void *virtio_alloc_rsp(struct fd_device *dev, struct msm_ccmd_req *hdr, uint32_t sz);
|
||||
int virtio_execbuf_fenced(struct fd_device *dev, struct msm_ccmd_req *req,
|
||||
int in_fence_fd, int *out_fence_fd);
|
||||
int in_fence_fd, int *out_fence_fd, int ring_idx);
|
||||
int virtio_execbuf(struct fd_device *dev, struct msm_ccmd_req *req, bool sync);
|
||||
void virtio_host_sync(struct fd_device *dev, const struct msm_ccmd_req *req);
|
||||
int virtio_simple_ioctl(struct fd_device *dev, unsigned cmd, void *req);
|
||||
|
@@ -166,7 +166,8 @@ flush_submit_list(struct list_head *submit_list)
|
||||
req->flags |= MSM_SUBMIT_NO_IMPLICIT;
|
||||
}
|
||||
|
||||
virtio_execbuf_fenced(dev, &req->hdr, fd_submit->in_fence_fd, out_fence_fd);
|
||||
virtio_execbuf_fenced(dev, &req->hdr, fd_submit->in_fence_fd, out_fence_fd,
|
||||
virtio_pipe->ring_idx);
|
||||
|
||||
free(req);
|
||||
|
||||
|
Reference in New Issue
Block a user