pan/kmod: Add drmIoctl() wrapper pan_kmod_ioctl() with CPU trace
Signed-off-by: Loïc Molinari <loic.molinari@collabora.com>
Co-authored-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Alejandro Piñeiro <apinheiro@igalia.com>
Reviewed-by: Benjamin Lee <benjamin.lee@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/34385>
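The wrapper itself is the small macro added to pan_kmod.h (see the header hunk below); everything else in the diff mechanically routes direct drmIoctl() calls in the Gallium driver, the panfrost/panthor kmod backends, the perf code and PanVK through it, so each ioctl shows up as a named span in CPU traces. The per-call-site change looks like this sketch, lifted from the first hunk below:

    /* Before: the kernel round-trip is invisible in CPU traces. */
    ret = drmIoctl(panfrost_device_fd(dev), DRM_IOCTL_PANTHOR_GROUP_SUBMIT,
                   gsubmit);

    /* After: same ioctl, wrapped in a MESA_TRACE_SCOPE whose label is built
     * from the request name, e.g.
     * "pan_kmod_ioctl op=DRM_IOCTL_PANTHOR_GROUP_SUBMIT". */
    ret = pan_kmod_ioctl(panfrost_device_fd(dev),
                         DRM_IOCTL_PANTHOR_GROUP_SUBMIT, gsubmit);
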
@@ -324,8 +324,8 @@ csf_submit_gsubmit(struct panfrost_context *ctx,
    int ret = 0;
 
    if (!ctx->is_noop) {
-      ret = drmIoctl(panfrost_device_fd(dev), DRM_IOCTL_PANTHOR_GROUP_SUBMIT,
-                     gsubmit);
+      ret = pan_kmod_ioctl(panfrost_device_fd(dev),
+                           DRM_IOCTL_PANTHOR_GROUP_SUBMIT, gsubmit);
    }
 
    if (ret)
@@ -523,8 +523,8 @@ csf_check_ctx_state_and_reinit(struct panfrost_context *ctx)
    };
    int ret;
 
-   ret = drmIoctl(panfrost_device_fd(dev), DRM_IOCTL_PANTHOR_GROUP_GET_STATE,
-                  &state);
+   ret = pan_kmod_ioctl(panfrost_device_fd(dev),
+                        DRM_IOCTL_PANTHOR_GROUP_GET_STATE, &state);
    if (ret) {
       mesa_loge("DRM_IOCTL_PANTHOR_GROUP_GET_STATE failed (err=%d)", errno);
       return;
@@ -1422,7 +1422,8 @@ GENX(csf_init_context)(struct panfrost_context *ctx)
    };
 
    int ret =
-      drmIoctl(panfrost_device_fd(dev), DRM_IOCTL_PANTHOR_GROUP_CREATE, &gc);
+      pan_kmod_ioctl(panfrost_device_fd(dev), DRM_IOCTL_PANTHOR_GROUP_CREATE,
+                     &gc);
 
    if (ret)
       goto err_group_create;
@@ -1442,8 +1443,8 @@ GENX(csf_init_context)(struct panfrost_context *ctx)
       .max_chunks = pan_screen(ctx->base.screen)->csf_tiler_heap.max_chunks,
       .target_in_flight = 65535,
    };
-   ret = drmIoctl(panfrost_device_fd(dev), DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE,
-                  &thc);
+   ret = pan_kmod_ioctl(panfrost_device_fd(dev),
+                        DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE, &thc);
 
    if (ret)
       goto err_tiler_heap;
@@ -1551,10 +1552,11 @@ err_tiler_heap_cs_bo:
 err_tiler_heap_tmp_geom_bo:
    panfrost_bo_unreference(ctx->csf.heap.desc_bo);
 err_tiler_heap_desc_bo:
-   drmIoctl(panfrost_device_fd(dev), DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY,
-            &thd);
+   pan_kmod_ioctl(panfrost_device_fd(dev),
+                  DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY, &thd);
 err_tiler_heap:
-   drmIoctl(panfrost_device_fd(dev), DRM_IOCTL_PANTHOR_GROUP_DESTROY, &gd);
+   pan_kmod_ioctl(panfrost_device_fd(dev), DRM_IOCTL_PANTHOR_GROUP_DESTROY,
+                  &gd);
 err_group_create:
    return -1;
 }
@@ -1576,8 +1578,8 @@ GENX(csf_cleanup_context)(struct panfrost_context *ctx)
                         NULL);
    assert(!ret);
 
-   ret = drmIoctl(panfrost_device_fd(dev), DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY,
-                  &thd);
+   ret = pan_kmod_ioctl(panfrost_device_fd(dev),
+                        DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY, &thd);
    assert(!ret);
 
    struct drm_panthor_group_destroy gd = {
@@ -1585,7 +1587,8 @@ GENX(csf_cleanup_context)(struct panfrost_context *ctx)
    };
 
    ret =
-      drmIoctl(panfrost_device_fd(dev), DRM_IOCTL_PANTHOR_GROUP_DESTROY, &gd);
+      pan_kmod_ioctl(panfrost_device_fd(dev), DRM_IOCTL_PANTHOR_GROUP_DESTROY,
+                     &gd);
    assert(!ret);
 
    panfrost_bo_unreference(ctx->csf.tmp_geom_bo);

@@ -168,7 +168,8 @@ jm_submit_jc(struct panfrost_batch *batch, uint64_t first_job_desc,
    if (ctx->is_noop)
       ret = 0;
    else
-      ret = drmIoctl(panfrost_device_fd(dev), DRM_IOCTL_PANFROST_SUBMIT, &submit);
+      ret = pan_kmod_ioctl(panfrost_device_fd(dev), DRM_IOCTL_PANFROST_SUBMIT,
+                           &submit);
    free(bo_handles);
 
    if (ret)

@@ -31,6 +31,7 @@
 #include "util/simple_mtx.h"
 #include "util/sparse_array.h"
 #include "util/u_atomic.h"
+#include "util/perf/cpu_trace.h"
 
 #include "kmod/panthor_kmod.h"
 
@@ -477,6 +478,12 @@ struct pan_kmod_dev {
    void *user_priv;
 };
 
+#define pan_kmod_ioctl(fd, op, arg) \
+   ({ \
+      MESA_TRACE_SCOPE("pan_kmod_ioctl op=" #op); \
+      drmIoctl(fd, op, arg); \
+   })
+
 struct pan_kmod_dev *
 pan_kmod_dev_create(int fd, uint32_t flags,
                     const struct pan_kmod_allocator *allocator);

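A note on the pan_kmod_ioctl() macro just added above, sketched under the assumption of GNU C statement expressions (which the ({ ... }) syntax relies on): the block evaluates to its last expression, so the macro yields drmIoctl()'s return value unchanged and the ret/errno handling at every converted call site below keeps working; the #op stringification is what names each trace span. A minimal example mirroring those call sites:

    /* Evaluates to drmIoctl()'s return value; errno is still meaningful on
     * failure. The trace span is labelled
     * "pan_kmod_ioctl op=DRM_IOCTL_PANTHOR_DEV_QUERY". */
    int ret = pan_kmod_ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
    if (ret)
       mesa_loge("DRM_IOCTL_PANTHOR_DEV_QUERY failed (err=%d)", errno);
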
@@ -83,7 +83,7 @@ panfrost_query_raw(int fd, enum drm_panfrost_param param, bool required,
    ASSERTED int ret;
 
    get_param.param = param;
-   ret = drmIoctl(fd, DRM_IOCTL_PANFROST_GET_PARAM, &get_param);
+   ret = pan_kmod_ioctl(fd, DRM_IOCTL_PANFROST_GET_PARAM, &get_param);
 
    if (ret) {
       assert(!required);
@@ -251,7 +251,7 @@ panfrost_kmod_bo_alloc(struct pan_kmod_dev *dev,
       .flags = to_panfrost_bo_flags(dev, flags),
    };
 
-   int ret = drmIoctl(dev->fd, DRM_IOCTL_PANFROST_CREATE_BO, &req);
+   int ret = pan_kmod_ioctl(dev->fd, DRM_IOCTL_PANFROST_CREATE_BO, &req);
    if (ret) {
       mesa_loge("DRM_IOCTL_PANFROST_CREATE_BO failed (err=%d)", errno);
       goto err_free_bo;
@@ -286,7 +286,8 @@ panfrost_kmod_bo_import(struct pan_kmod_dev *dev, uint32_t handle, size_t size,
 
    struct drm_panfrost_get_bo_offset get_bo_offset = {.handle = handle, 0};
    int ret =
-      drmIoctl(dev->fd, DRM_IOCTL_PANFROST_GET_BO_OFFSET, &get_bo_offset);
+      pan_kmod_ioctl(dev->fd, DRM_IOCTL_PANFROST_GET_BO_OFFSET,
+                     &get_bo_offset);
    if (ret) {
       mesa_loge("DRM_IOCTL_PANFROST_GET_BO_OFFSET failed (err=%d)", errno);
       goto err_free_bo;
@@ -307,7 +308,8 @@ static off_t
 panfrost_kmod_bo_get_mmap_offset(struct pan_kmod_bo *bo)
 {
    struct drm_panfrost_mmap_bo mmap_bo = {.handle = bo->handle};
-   int ret = drmIoctl(bo->dev->fd, DRM_IOCTL_PANFROST_MMAP_BO, &mmap_bo);
+   int ret = pan_kmod_ioctl(bo->dev->fd, DRM_IOCTL_PANFROST_MMAP_BO,
+                            &mmap_bo);
    if (ret) {
       fprintf(stderr, "DRM_IOCTL_PANFROST_MMAP_BO failed: %m\n");
       assert(0);
@@ -328,7 +330,7 @@ panfrost_kmod_bo_wait(struct pan_kmod_bo *bo, int64_t timeout_ns,
    /* The ioctl returns >= 0 value when the BO we are waiting for is ready
     * -1 otherwise.
     */
-   if (drmIoctl(bo->dev->fd, DRM_IOCTL_PANFROST_WAIT_BO, &req) != -1)
+   if (pan_kmod_ioctl(bo->dev->fd, DRM_IOCTL_PANFROST_WAIT_BO, &req) != -1)
       return true;
 
    assert(errno == ETIMEDOUT || errno == EBUSY);
@@ -343,7 +345,7 @@ panfrost_kmod_bo_make_evictable(struct pan_kmod_bo *bo)
       .madv = PANFROST_MADV_DONTNEED,
    };
 
-   drmIoctl(bo->dev->fd, DRM_IOCTL_PANFROST_MADVISE, &req);
+   pan_kmod_ioctl(bo->dev->fd, DRM_IOCTL_PANFROST_MADVISE, &req);
 }
 
 static bool
@@ -354,7 +356,7 @@ panfrost_kmod_bo_make_unevictable(struct pan_kmod_bo *bo)
       .madv = PANFROST_MADV_WILLNEED,
    };
 
-   if (drmIoctl(bo->dev->fd, DRM_IOCTL_PANFROST_MADVISE, &req) == 0 &&
+   if (pan_kmod_ioctl(bo->dev->fd, DRM_IOCTL_PANFROST_MADVISE, &req) == 0 &&
       req.retained == 0)
       return false;
 

@@ -117,7 +117,7 @@ panthor_kmod_dev_create(int fd, uint32_t flags, drmVersionPtr version,
       .pointer = (uint64_t)(uintptr_t)&panthor_dev->props.gpu,
    };
 
-   int ret = drmIoctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
+   int ret = pan_kmod_ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
    if (ret) {
       mesa_loge("DRM_IOCTL_PANTHOR_DEV_QUERY failed (err=%d)", errno);
       goto err_free_dev;
@@ -129,7 +129,7 @@ panthor_kmod_dev_create(int fd, uint32_t flags, drmVersionPtr version,
       .pointer = (uint64_t)(uintptr_t)&panthor_dev->props.csif,
    };
 
-   ret = drmIoctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
+   ret = pan_kmod_ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
    if (ret) {
       mesa_loge("DRM_IOCTL_PANTHOR_DEV_QUERY failed (err=%d)", errno);
       goto err_free_dev;
@@ -142,7 +142,7 @@ panthor_kmod_dev_create(int fd, uint32_t flags, drmVersionPtr version,
       .pointer = (uint64_t)(uintptr_t)&panthor_dev->props.timestamp,
    };
 
-   ret = drmIoctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
+   ret = pan_kmod_ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
    if (ret) {
       mesa_loge("DRM_IOCTL_PANTHOR_DEV_QUERY failed (err=%d)", errno);
       goto err_free_dev;
@@ -164,7 +164,7 @@ panthor_kmod_dev_create(int fd, uint32_t flags, drmVersionPtr version,
       .pointer = (uint64_t)(uintptr_t)&panthor_dev->props.group_priorities,
    };
 
-   ret = drmIoctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
+   ret = pan_kmod_ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
    if (ret) {
       mesa_loge("DRM_IOCTL_PANTHOR_DEV_QUERY failed (err=%d)", errno);
       goto err_free_dev;
@@ -341,7 +341,7 @@ panthor_kmod_bo_alloc(struct pan_kmod_dev *dev,
       .exclusive_vm_id = panthor_vm ? panthor_vm->base.handle : 0,
    };
 
-   int ret = drmIoctl(dev->fd, DRM_IOCTL_PANTHOR_BO_CREATE, &req);
+   int ret = pan_kmod_ioctl(dev->fd, DRM_IOCTL_PANTHOR_BO_CREATE, &req);
    if (ret) {
       mesa_loge("DRM_IOCTL_PANTHOR_BO_CREATE failed (err=%d)", errno);
       goto err_free_bo;
@@ -438,7 +438,8 @@ panthor_kmod_bo_export(struct pan_kmod_bo *bo, int dmabuf_fd)
       return -1;
    }
 
-   ret = drmIoctl(dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &isync);
+   ret = pan_kmod_ioctl(dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE,
+                        &isync);
    close(isync.fd);
    if (ret) {
       mesa_loge("DMA_BUF_IOCTL_IMPORT_SYNC_FILE failed (err=%d)", errno);
@@ -467,7 +468,8 @@ static off_t
 panthor_kmod_bo_get_mmap_offset(struct pan_kmod_bo *bo)
 {
    struct drm_panthor_bo_mmap_offset req = {.handle = bo->handle};
-   int ret = drmIoctl(bo->dev->fd, DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET, &req);
+   int ret = pan_kmod_ioctl(bo->dev->fd, DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET,
+                            &req);
 
    if (ret) {
       mesa_loge("DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET failed (err=%d)", errno);
@@ -505,7 +507,7 @@ panthor_kmod_bo_wait(struct pan_kmod_bo *bo, int64_t timeout_ns,
       .flags = for_read_only_access ? DMA_BUF_SYNC_READ : DMA_BUF_SYNC_RW,
    };
 
-   ret = drmIoctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &esync);
+   ret = pan_kmod_ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &esync);
    close(dmabuf_fd);
 
    if (ret) {
@@ -582,7 +584,7 @@ panthor_kmod_bo_attach_sync_point(struct pan_kmod_bo *bo, uint32_t sync_handle,
      return -1;
    }
 
-   ret = drmIoctl(dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &isync);
+   ret = pan_kmod_ioctl(dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &isync);
    close(dmabuf_fd);
    close(isync.fd);
    if (ret) {
@@ -647,7 +649,7 @@ panthor_kmod_bo_get_sync_point(struct pan_kmod_bo *bo, uint32_t *sync_handle,
      .flags = for_read_only_access ? DMA_BUF_SYNC_READ : DMA_BUF_SYNC_RW,
    };
 
-   ret = drmIoctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &esync);
+   ret = pan_kmod_ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &esync);
    close(dmabuf_fd);
    if (ret) {
       mesa_loge("DMA_BUF_IOCTL_EXPORT_SYNC_FILE failed (err=%d)", errno);
@@ -717,7 +719,7 @@ panthor_kmod_vm_create(struct pan_kmod_dev *dev, uint32_t flags,
      .user_va_range = user_va_start + user_va_range,
    };
 
-   if (drmIoctl(dev->fd, DRM_IOCTL_PANTHOR_VM_CREATE, &req)) {
+   if (pan_kmod_ioctl(dev->fd, DRM_IOCTL_PANTHOR_VM_CREATE, &req)) {
       mesa_loge("DRM_IOCTL_PANTHOR_VM_CREATE failed (err=%d)", errno);
       goto err_destroy_sync;
    }
@@ -776,7 +778,7 @@ panthor_kmod_vm_destroy(struct pan_kmod_vm *vm)
    struct panthor_kmod_vm *panthor_vm =
      container_of(vm, struct panthor_kmod_vm, base);
    struct drm_panthor_vm_destroy req = {.id = vm->handle};
-   int ret = drmIoctl(vm->dev->fd, DRM_IOCTL_PANTHOR_VM_DESTROY, &req);
+   int ret = pan_kmod_ioctl(vm->dev->fd, DRM_IOCTL_PANTHOR_VM_DESTROY, &req);
    if (ret)
       mesa_loge("DRM_IOCTL_PANTHOR_VM_DESTROY failed (err=%d)", errno);
 
@@ -1041,7 +1043,7 @@ panthor_kmod_vm_bind(struct pan_kmod_vm *vm, enum pan_kmod_vm_op_mode mode,
       }
    }
 
-   ret = drmIoctl(vm->dev->fd, DRM_IOCTL_PANTHOR_VM_BIND, &req);
+   ret = pan_kmod_ioctl(vm->dev->fd, DRM_IOCTL_PANTHOR_VM_BIND, &req);
    if (ret)
       mesa_loge("DRM_IOCTL_PANTHOR_VM_BIND failed (err=%d)", errno);
 
@@ -1097,7 +1099,8 @@ static enum pan_kmod_vm_state
 panthor_kmod_vm_query_state(struct pan_kmod_vm *vm)
 {
    struct drm_panthor_vm_get_state query = {.vm_id = vm->handle};
-   int ret = drmIoctl(vm->dev->fd, DRM_IOCTL_PANTHOR_VM_GET_STATE, &query);
+   int ret = pan_kmod_ioctl(vm->dev->fd, DRM_IOCTL_PANTHOR_VM_GET_STATE,
+                            &query);
 
    if (ret || query.state == DRM_PANTHOR_VM_STATE_UNUSABLE)
       return PAN_KMOD_VM_FAULTY;
@@ -1178,7 +1181,7 @@ panthor_kmod_query_timestamp(const struct pan_kmod_dev *dev)
       .pointer = (uint64_t)(uintptr_t)&timestamp_info,
    };
 
-   int ret = drmIoctl(dev->fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
+   int ret = pan_kmod_ioctl(dev->fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
    if (ret) {
       mesa_loge("DRM_IOCTL_PANTHOR_DEV_QUERY failed (err=%d)", errno);
       return 0;

@@ -115,8 +115,8 @@ static int
 panfrost_perf_query(struct panfrost_perf *perf, uint32_t enable)
 {
    struct drm_panfrost_perfcnt_enable perfcnt_enable = {enable, 0};
-   return drmIoctl(perf->dev->fd, DRM_IOCTL_PANFROST_PERFCNT_ENABLE,
-                   &perfcnt_enable);
+   return pan_kmod_ioctl(perf->dev->fd, DRM_IOCTL_PANFROST_PERFCNT_ENABLE,
+                         &perfcnt_enable);
 }
 
 int
@@ -138,6 +138,6 @@ panfrost_perf_dump(struct panfrost_perf *perf)
    // counter_values
    struct drm_panfrost_perfcnt_dump perfcnt_dump = {
       (uint64_t)(uintptr_t)perf->counter_values};
-   return drmIoctl(perf->dev->fd, DRM_IOCTL_PANFROST_PERFCNT_DUMP,
-                   &perfcnt_dump);
+   return pan_kmod_ioctl(perf->dev->fd, DRM_IOCTL_PANFROST_PERFCNT_DUMP,
+                         &perfcnt_dump);
 }

@@ -463,7 +463,8 @@ init_subqueue(struct panvk_queue *queue, enum panvk_subqueue_id subqueue)
       .queue_submits = DRM_PANTHOR_OBJ_ARRAY(1, &qsubmit),
    };
 
-   int ret = drmIoctl(dev->drm_fd, DRM_IOCTL_PANTHOR_GROUP_SUBMIT, &gsubmit);
+   int ret = pan_kmod_ioctl(dev->drm_fd, DRM_IOCTL_PANTHOR_GROUP_SUBMIT,
+                            &gsubmit);
    if (ret)
       return panvk_errorf(dev->vk.physical, VK_ERROR_INITIALIZATION_FAILED,
                           "Failed to initialized subqueue: %m");
@@ -609,7 +610,7 @@ create_group(struct panvk_queue *queue,
       .vm_id = pan_kmod_vm_handle(dev->kmod.vm),
    };
 
-   int ret = drmIoctl(dev->drm_fd, DRM_IOCTL_PANTHOR_GROUP_CREATE, &gc);
+   int ret = pan_kmod_ioctl(dev->drm_fd, DRM_IOCTL_PANTHOR_GROUP_CREATE, &gc);
    if (ret)
       return panvk_errorf(dev, VK_ERROR_INITIALIZATION_FAILED,
                           "Failed to create a scheduling group");
@@ -627,7 +628,7 @@ destroy_group(struct panvk_queue *queue)
    };
 
    ASSERTED int ret =
-      drmIoctl(dev->drm_fd, DRM_IOCTL_PANTHOR_GROUP_DESTROY, &gd);
+      pan_kmod_ioctl(dev->drm_fd, DRM_IOCTL_PANTHOR_GROUP_DESTROY, &gd);
    assert(!ret);
 }
 
@@ -663,8 +664,8 @@ init_tiler(struct panvk_queue *queue)
       .target_in_flight = 65535,
    };
 
-   int ret =
-      drmIoctl(dev->drm_fd, DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE, &thc);
+   int ret = pan_kmod_ioctl(dev->drm_fd, DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE,
+                            &thc);
    if (ret) {
       result = panvk_errorf(dev, VK_ERROR_INITIALIZATION_FAILED,
                             "Failed to create a tiler heap context");
@@ -698,7 +699,7 @@ cleanup_tiler(struct panvk_queue *queue)
       .handle = tiler_heap->context.handle,
    };
    ASSERTED int ret =
-      drmIoctl(dev->drm_fd, DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY, &thd);
+      pan_kmod_ioctl(dev->drm_fd, DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY, &thd);
    assert(!ret);
 
    panvk_pool_free_mem(&tiler_heap->desc);
@@ -1056,7 +1057,7 @@ panvk_queue_submit_ioctl(struct panvk_queue_submit *submit)
         DRM_PANTHOR_OBJ_ARRAY(submit->qsubmit_count, submit->qsubmits),
    };
 
-   ret = drmIoctl(dev->drm_fd, DRM_IOCTL_PANTHOR_GROUP_SUBMIT, &gsubmit);
+   ret = pan_kmod_ioctl(dev->drm_fd, DRM_IOCTL_PANTHOR_GROUP_SUBMIT, &gsubmit);
    if (ret)
       return vk_queue_set_lost(&queue->vk, "GROUP_SUBMIT: %m");
 
@@ -1298,8 +1299,8 @@ panvk_per_arch(queue_check_status)(struct panvk_queue *queue)
       .group_handle = queue->group_handle,
    };
 
-   int ret =
-      drmIoctl(dev->drm_fd, DRM_IOCTL_PANTHOR_GROUP_GET_STATE, &state);
+   int ret = pan_kmod_ioctl(dev->drm_fd, DRM_IOCTL_PANTHOR_GROUP_GET_STATE,
+                            &state);
    if (!ret && !state.state)
       return VK_SUCCESS;
 

@@ -25,7 +25,8 @@ panvk_per_arch(CreateEvent)(VkDevice _device,
      .flags = 0,
    };
 
-   int ret = drmIoctl(device->drm_fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
+   int ret = pan_kmod_ioctl(device->drm_fd, DRM_IOCTL_SYNCOBJ_CREATE,
+                            &create);
    if (ret)
       return panvk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
 
@@ -46,7 +47,7 @@ panvk_per_arch(DestroyEvent)(VkDevice _device, VkEvent _event,
      return;
 
    struct drm_syncobj_destroy destroy = {.handle = event->syncobj};
-   drmIoctl(device->drm_fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);
+   pan_kmod_ioctl(device->drm_fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);
 
    vk_object_free(&device->vk, pAllocator, event);
 }
@@ -65,7 +66,7 @@ panvk_per_arch(GetEventStatus)(VkDevice _device, VkEvent _event)
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
    };
 
-   int ret = drmIoctl(device->drm_fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
+   int ret = pan_kmod_ioctl(device->drm_fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
    if (ret) {
      if (errno == ETIME)
         signaled = false;
@@ -95,7 +96,7 @@ panvk_per_arch(SetEvent)(VkDevice _device, VkEvent _event)
    * command executes.
    * https://docs.vulkan.org/spec/latest/chapters/cmdbuffers.html#commandbuffers-submission-progress
    */
-   if (drmIoctl(device->drm_fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &objs))
+   if (pan_kmod_ioctl(device->drm_fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &objs))
      return VK_ERROR_DEVICE_LOST;
 
    return VK_SUCCESS;
@@ -111,7 +112,7 @@ panvk_per_arch(ResetEvent)(VkDevice _device, VkEvent _event)
      .handles = (uint64_t)(uintptr_t)&event->syncobj,
      .count_handles = 1};
 
-   if (drmIoctl(device->drm_fd, DRM_IOCTL_SYNCOBJ_RESET, &objs))
+   if (pan_kmod_ioctl(device->drm_fd, DRM_IOCTL_SYNCOBJ_RESET, &objs))
      return VK_ERROR_DEVICE_LOST;
 
    return VK_SUCCESS;

@@ -69,7 +69,7 @@ panvk_queue_submit_batch(struct panvk_queue *queue, struct panvk_batch *batch,
      .jc = batch->vtc_jc.first_job,
    };
 
-   ret = drmIoctl(dev->drm_fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
+   ret = pan_kmod_ioctl(dev->drm_fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
    assert(!ret);
 
    if (debug & (PANVK_DEBUG_TRACE | PANVK_DEBUG_SYNC)) {
@@ -108,7 +108,7 @@ panvk_queue_submit_batch(struct panvk_queue *queue, struct panvk_batch *batch,
       submit.in_sync_count = nr_in_fences;
    }
 
-   ret = drmIoctl(dev->drm_fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
+   ret = pan_kmod_ioctl(dev->drm_fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
    assert(!ret);
    if (debug & (PANVK_DEBUG_TRACE | PANVK_DEBUG_SYNC)) {
       ret = drmSyncobjWait(dev->drm_fd, &submit.out_sync, 1, INT64_MAX, 0,
@@ -146,12 +146,12 @@ panvk_queue_transfer_sync(struct panvk_queue *queue, uint32_t syncobj)
      .fd = -1,
    };
 
-   ret = drmIoctl(dev->drm_fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &handle);
+   ret = pan_kmod_ioctl(dev->drm_fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &handle);
    assert(!ret);
    assert(handle.fd >= 0);
 
    handle.handle = syncobj;
-   ret = drmIoctl(dev->drm_fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &handle);
+   ret = pan_kmod_ioctl(dev->drm_fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &handle);
    assert(!ret);
 
    close(handle.fd);
@@ -197,7 +197,8 @@ panvk_signal_event_syncobjs(struct panvk_queue *queue,
         .handles = (uint64_t)(uintptr_t)&event->syncobj,
         .count_handles = 1};
 
-      int ret = drmIoctl(dev->drm_fd, DRM_IOCTL_SYNCOBJ_RESET, &objs);
+      int ret = pan_kmod_ioctl(dev->drm_fd, DRM_IOCTL_SYNCOBJ_RESET,
+                               &objs);
       assert(!ret);
       break;
    }