nvk: Use nvkmd_mem in nvk_upload_queue

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/30033>
This commit is contained in:
Faith Ekstrand
2024-06-28 19:54:11 -05:00
committed by Marge Bot
parent bf180d2bbf
commit a87ee75737
2 changed files with 70 additions and 70 deletions

View File: src/nouveau/vulkan/nvk_upload_queue.c

@@ -6,6 +6,7 @@
 #include "nvk_upload_queue.h"
 #include "nvk_device.h"
+#include "nvkmd/nvkmd.h"
 #include "vk_alloc.h"
 #include <xf86drm.h>
@@ -15,13 +16,12 @@
 #include "nv_push.h"
 #include "nv_push_cl90b5.h"
 
-#define NVK_UPLOAD_BO_SIZE 64*1024
+#define NVK_UPLOAD_MEM_SIZE 64*1024
 
-struct nvk_upload_bo {
-   struct nouveau_ws_bo *bo;
-   void *map;
+struct nvk_upload_mem {
+   struct nvkmd_mem *mem;
 
-   /** Link in nvk_upload_queue::bos */
+   /** Link in nvk_upload_queue::recycle */
    struct list_head link;
 
    /** Time point at which point this BO will be idle */
@@ -29,37 +29,37 @@ struct nvk_upload_bo {
 };
 
 static VkResult
-nvk_upload_bo_create(struct nvk_device *dev,
-                     struct nvk_upload_bo **bo_out)
+nvk_upload_mem_create(struct nvk_device *dev,
+                      struct nvk_upload_mem **mem_out)
 {
-   struct nvk_upload_bo *bo;
+   struct nvk_upload_mem *mem;
+   VkResult result;
 
-   bo = vk_zalloc(&dev->vk.alloc, sizeof(*bo), 8,
-                  VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
-   if (bo == NULL)
+   mem = vk_zalloc(&dev->vk.alloc, sizeof(*mem), 8,
+                   VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
+   if (mem == NULL)
       return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);
 
-   uint32_t flags = NOUVEAU_WS_BO_GART | NOUVEAU_WS_BO_MAP |
-                    NOUVEAU_WS_BO_NO_SHARE;
-   bo->bo = nouveau_ws_bo_new_mapped(dev->ws_dev, NVK_UPLOAD_BO_SIZE, 0,
-                                     flags, NOUVEAU_WS_BO_WR, &bo->map);
-   if (bo->bo == NULL) {
-      vk_free(&dev->vk.alloc, bo);
-      return vk_error(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+   uint32_t flags = NVKMD_MEM_GART | NVKMD_MEM_CAN_MAP | NVKMD_MEM_NO_SHARE;
+   result = nvkmd_dev_alloc_mapped_mem(dev->nvkmd, &dev->vk.base,
+                                       NVK_UPLOAD_MEM_SIZE, 0,
+                                       flags, NVKMD_MEM_MAP_WR, &mem->mem);
+   if (result != VK_SUCCESS) {
+      vk_free(&dev->vk.alloc, mem);
+      return result;
    }
 
-   *bo_out = bo;
+   *mem_out = mem;
 
    return VK_SUCCESS;
 }
 
 static void
-nvk_upload_bo_destroy(struct nvk_device *dev,
-                      struct nvk_upload_bo *bo)
+nvk_upload_mem_destroy(struct nvk_device *dev,
+                       struct nvk_upload_mem *mem)
 {
-   nouveau_ws_bo_unmap(bo->bo, bo->map);
-   nouveau_ws_bo_destroy(bo->bo);
-   vk_free(&dev->vk.alloc, bo);
+   nvkmd_mem_unref(mem->mem);
+   vk_free(&dev->vk.alloc, mem);
 }
 
 VkResult
@@ -88,7 +88,7 @@ nvk_upload_queue_init(struct nvk_device *dev,
       goto fail_context;
    }
 
-   list_inithead(&queue->bos);
+   list_inithead(&queue->recycle);
 
    return VK_SUCCESS;
@@ -104,11 +104,11 @@ void
 nvk_upload_queue_finish(struct nvk_device *dev,
                         struct nvk_upload_queue *queue)
 {
-   list_for_each_entry_safe(struct nvk_upload_bo, bo, &queue->bos, link)
-      nvk_upload_bo_destroy(dev, bo);
+   list_for_each_entry_safe(struct nvk_upload_mem, mem, &queue->recycle, link)
+      nvk_upload_mem_destroy(dev, mem);
 
-   if (queue->bo != NULL)
-      nvk_upload_bo_destroy(dev, queue->bo);
+   if (queue->mem != NULL)
+      nvk_upload_mem_destroy(dev, queue->mem);
 
    drmSyncobjDestroy(dev->ws_dev->fd, queue->drm.syncobj);
    nouveau_ws_context_destroy(queue->drm.ws_ctx);
@@ -120,7 +120,7 @@ nvk_upload_queue_flush_locked(struct nvk_device *dev,
                               struct nvk_upload_queue *queue,
                               uint64_t *time_point_out)
 {
-   if (queue->bo == NULL || queue->bo_push_start == queue->bo_push_end) {
+   if (queue->mem == NULL || queue->mem_push_start == queue->mem_push_end) {
       if (time_point_out != NULL)
         *time_point_out = queue->last_time_point;
       return VK_SUCCESS;
@@ -131,8 +131,8 @@ nvk_upload_queue_flush_locked(struct nvk_device *dev,
abort(); abort();
struct drm_nouveau_exec_push push = { struct drm_nouveau_exec_push push = {
.va = queue->bo->bo->offset + queue->bo_push_start, .va = queue->mem->mem->va->addr + queue->mem_push_start,
.va_len = queue->bo_push_end - queue->bo_push_start, .va_len = queue->mem_push_end - queue->mem_push_start,
}; };
struct drm_nouveau_sync sig = { struct drm_nouveau_sync sig = {
@@ -160,8 +160,8 @@ nvk_upload_queue_flush_locked(struct nvk_device *dev,
*/ */
queue->last_time_point = time_point; queue->last_time_point = time_point;
queue->bo->idle_time_point = time_point; queue->mem->idle_time_point = time_point;
queue->bo_push_start = queue->bo_push_end; queue->mem_push_start = queue->mem_push_end;
if (time_point_out != NULL) if (time_point_out != NULL)
*time_point_out = time_point; *time_point_out = time_point;
@@ -222,15 +222,15 @@ nvk_upload_queue_sync(struct nvk_device *dev,
 static VkResult
 nvk_upload_queue_reserve(struct nvk_device *dev,
                          struct nvk_upload_queue *queue,
-                         uint32_t min_bo_size)
+                         uint32_t min_mem_size)
 {
    VkResult result;
 
-   assert(min_bo_size <= NVK_UPLOAD_BO_SIZE);
-   assert(queue->bo_push_end <= queue->bo_data_start);
+   assert(min_mem_size <= NVK_UPLOAD_MEM_SIZE);
+   assert(queue->mem_push_end <= queue->mem_data_start);
 
-   if (queue->bo != NULL) {
-      if (queue->bo_data_start - queue->bo_push_end >= min_bo_size)
+   if (queue->mem != NULL) {
+      if (queue->mem_data_start - queue->mem_push_end >= min_mem_size)
          return VK_SUCCESS;
 
       /* Not enough room in the BO. Flush and add it to the recycle list */
@@ -238,17 +238,17 @@ nvk_upload_queue_reserve(struct nvk_device *dev,
       if (result != VK_SUCCESS)
         return result;
 
-      assert(queue->bo_push_start == queue->bo_push_end);
-      list_addtail(&queue->bo->link, &queue->bos);
-      queue->bo = NULL;
+      assert(queue->mem_push_start == queue->mem_push_end);
+      list_addtail(&queue->mem->link, &queue->recycle);
+      queue->mem = NULL;
    }
 
-   assert(queue->bo == NULL);
-   queue->bo_push_start = queue->bo_push_end = 0;
-   queue->bo_data_start = NVK_UPLOAD_BO_SIZE;
+   assert(queue->mem == NULL);
+   queue->mem_push_start = queue->mem_push_end = 0;
+   queue->mem_data_start = NVK_UPLOAD_MEM_SIZE;
 
    /* Try to pop an idle BO off the recycle list */
-   if (!list_is_empty(&queue->bos)) {
+   if (!list_is_empty(&queue->recycle)) {
       uint64_t time_point_passed = 0;
       int err = drmSyncobjQuery(dev->ws_dev->fd, &queue->drm.syncobj,
                                 &time_point_passed, 1);
@@ -257,16 +257,16 @@ nvk_upload_queue_reserve(struct nvk_device *dev,
                          "DRM_IOCTL_SYNCOBJ_QUERY failed: %m");
       }
 
-      struct nvk_upload_bo *bo =
-         list_first_entry(&queue->bos, struct nvk_upload_bo, link);
+      struct nvk_upload_mem *mem =
+         list_first_entry(&queue->recycle, struct nvk_upload_mem, link);
-      if (time_point_passed >= bo->idle_time_point) {
-         list_del(&bo->link);
-         queue->bo = bo;
+      if (time_point_passed >= mem->idle_time_point) {
+         list_del(&mem->link);
+         queue->mem = mem;
          return VK_SUCCESS;
       }
    }
 
-   return nvk_upload_bo_create(dev, &queue->bo);
+   return nvk_upload_mem_create(dev, &queue->mem);
 }
 
 static VkResult
@@ -292,21 +292,21 @@ nvk_upload_queue_upload_locked(struct nvk_device *dev,
       if (result != VK_SUCCESS)
         return result;
 
-      assert(queue->bo != NULL);
-      assert(queue->bo_data_start > queue->bo_push_end);
-      const uint32_t avail = queue->bo_data_start - queue->bo_push_end;
+      assert(queue->mem != NULL);
+      assert(queue->mem_data_start > queue->mem_push_end);
+      const uint32_t avail = queue->mem_data_start - queue->mem_push_end;
       assert(avail >= min_size);
 
       const uint32_t data_size = MIN2(size, avail - cmd_size);
 
-      const uint32_t data_bo_offset = queue->bo_data_start - data_size;
-      assert(queue->bo_push_end + cmd_size <= data_bo_offset);
-      const uint64_t data_addr = queue->bo->bo->offset + data_bo_offset;
-      memcpy(queue->bo->map + data_bo_offset, src, data_size);
-      queue->bo_data_start = data_bo_offset;
+      const uint32_t data_mem_offset = queue->mem_data_start - data_size;
+      assert(queue->mem_push_end + cmd_size <= data_mem_offset);
+      const uint64_t data_addr = queue->mem->mem->va->addr + data_mem_offset;
+      memcpy(queue->mem->mem->map + data_mem_offset, src, data_size);
+      queue->mem_data_start = data_mem_offset;
 
       struct nv_push p;
-      nv_push_init(&p, queue->bo->map + queue->bo_push_end, cmd_size_dw);
+      nv_push_init(&p, queue->mem->mem->map + queue->mem_push_end, cmd_size_dw);
 
       assert(data_size <= (1 << 17));
@@ -329,7 +329,7 @@ nvk_upload_queue_upload_locked(struct nvk_device *dev,
       });
 
       assert(nv_push_dw_count(&p) <= cmd_size_dw);
-      queue->bo_push_end += nv_push_dw_count(&p) * 4;
+      queue->mem_push_end += nv_push_dw_count(&p) * 4;
 
       dst_addr += data_size;
       src += data_size;
@@ -384,7 +384,7 @@ nvk_upload_queue_fill_locked(struct nvk_device *dev,
      assert(width_B * height <= size);
 
      struct nv_push p;
-     nv_push_init(&p, queue->bo->map + queue->bo_push_end, cmd_size_dw);
+     nv_push_init(&p, queue->mem->mem->map + queue->mem_push_end, cmd_size_dw);
 
      P_MTHD(&p, NV90B5, OFFSET_OUT_UPPER);
      P_NV90B5_OFFSET_OUT_UPPER(&p, dst_addr >> 32);
@@ -415,7 +415,7 @@ nvk_upload_queue_fill_locked(struct nvk_device *dev,
      });
 
      assert(nv_push_dw_count(&p) <= cmd_size_dw);
-     queue->bo_push_end += nv_push_dw_count(&p) * 4;
+     queue->mem_push_end += nv_push_dw_count(&p) * 4;
 
      dst_addr += width_B * height;
      size -= width_B * height;

View File: src/nouveau/vulkan/nvk_upload_queue.h

@@ -12,7 +12,7 @@
 struct nouveau_ws_context;
 
 struct nvk_device;
-struct nvk_upload_bo;
+struct nvk_upload_mem;
 
 struct nvk_upload_queue {
    simple_mtx_t mutex;
@@ -24,17 +24,17 @@ struct nvk_upload_queue {
    uint64_t last_time_point;
 
-   struct nvk_upload_bo *bo;
+   struct nvk_upload_mem *mem;
 
    /* We grow the buffer from both ends. Pushbuf data goes at the start of
    * the buffer and upload data at the tail.
    */
-   uint32_t bo_push_start;
-   uint32_t bo_push_end;
-   uint32_t bo_data_start;
+   uint32_t mem_push_start;
+   uint32_t mem_push_end;
+   uint32_t mem_data_start;
 
-   /* BO recycle pool */
-   struct list_head bos;
+   /* list of nvk_upload_mem */
+   struct list_head recycle;
 };
 
 VkResult nvk_upload_queue_init(struct nvk_device *dev,