panvk: Keep tiler_heap and sample_positions BOs at the panvk_device level
We wrap panfrost_device's BOs with a panvk_priv_bo object and store the
result in panvk_device. This way we'll be able to transition to explicit
BO allocation without changing the code of BO users.

Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Constantine Shablya <constantine.shablya@collabora.com>
Reviewed-by: Erik Faye-Lund <erik.faye-lund@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/26698>
Committed by: Marge Bot
Parent: c05104c71f
Commit: ab6a61829c
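
For context, the panvk_priv_bo wrapper the commit message refers to has roughly the shape sketched below. The layout is inferred from the field accesses in the hunks that follow (priv_bo->bo, priv_bo->dev, priv_bo->addr.host, priv_bo->addr.dev); it is a sketch for orientation, not the verbatim Mesa definition.

#include <stdint.h>

struct panvk_device;  /* Vulkan-level device, declared in the panvk headers */
struct pan_kmod_bo;   /* kernel-mode BO handle, declared in the pan_kmod headers */

/* Sketch only: field layout inferred from the accesses in the diff below,
 * not the verbatim Mesa definition of struct panvk_priv_bo. */
struct panvk_priv_bo {
   struct panvk_device *dev; /* owning device */
   struct pan_kmod_bo *bo;   /* kernel BO backing the allocation */
   struct {
      void *host;            /* CPU mapping, taken from panfrost_bo::ptr.cpu */
      uint64_t dev;          /* GPU address, taken from panfrost_bo::ptr.gpu */
   } addr;
};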
@@ -899,6 +899,25 @@ struct panvk_priv_bo *panvk_priv_bo_create(struct panvk_device *dev,
    return priv_bo;
 }
 
+static struct panvk_priv_bo *
+panvk_priv_bo_from_pan_bo(struct panvk_device *dev, struct panfrost_bo *bo,
+                          const struct VkAllocationCallbacks *alloc,
+                          VkSystemAllocationScope scope)
+{
+   struct panvk_priv_bo *priv_bo =
+      vk_zalloc2(&dev->vk.alloc, alloc, sizeof(*priv_bo), 8, scope);
+
+   if (!priv_bo)
+      return NULL;
+
+   panfrost_bo_reference(bo);
+   priv_bo->bo = bo->kmod_bo;
+   priv_bo->dev = dev;
+   priv_bo->addr.host = bo->ptr.cpu;
+   priv_bo->addr.dev = bo->ptr.gpu;
+   return priv_bo;
+}
+
 void
 panvk_priv_bo_destroy(struct panvk_priv_bo *priv_bo,
                       const VkAllocationCallbacks *alloc)
@@ -993,6 +1012,12 @@ panvk_CreateDevice(VkPhysicalDevice physicalDevice,
                          &device->pdev);
    device->kmod.dev = device->pdev.kmod.dev;
    device->kmod.vm = device->pdev.kmod.vm;
+   device->tiler_heap = panvk_priv_bo_from_pan_bo(
+      device, device->pdev.tiler_heap, &device->vk.alloc,
+      VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
+   device->sample_positions = panvk_priv_bo_from_pan_bo(
+      device, device->pdev.sample_positions, &device->vk.alloc,
+      VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
    vk_device_set_drm_fd(&device->vk, device->pdev.kmod.dev->fd);
 
    panvk_arch_dispatch(arch, meta_init, device);
@@ -1038,6 +1063,8 @@ fail:
 
    panvk_arch_dispatch(pan_arch(physical_device->kmod.props.gpu_prod_id),
                        meta_cleanup, device);
+   panvk_priv_bo_destroy(device->tiler_heap, &device->vk.alloc);
+   panvk_priv_bo_destroy(device->sample_positions, &device->vk.alloc);
    panfrost_close_device(&device->pdev);
 
    vk_free(&device->vk.alloc, device);
@@ -1062,6 +1089,8 @@ panvk_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
 
    panvk_arch_dispatch(pan_arch(physical_device->kmod.props.gpu_prod_id),
                        meta_cleanup, device);
+   panvk_priv_bo_destroy(device->tiler_heap, &device->vk.alloc);
+   panvk_priv_bo_destroy(device->sample_positions, &device->vk.alloc);
    panfrost_close_device(&device->pdev);
    vk_free(&device->vk.alloc, device);
 }
@@ -279,6 +279,9 @@ struct panvk_device {
       struct pan_kmod_dev *dev;
    } kmod;
 
+   struct panvk_priv_bo *tiler_heap;
+   struct panvk_priv_bo *sample_positions;
+
    struct panfrost_device pdev;
 
    struct panvk_meta meta;
@@ -142,9 +142,7 @@ panvk_per_arch(cmd_close_batch)(struct panvk_cmd_buffer *cmdbuf)
    GENX(pan_emit_tls)(&batch->tlsinfo, batch->tls.cpu);
 
    if (batch->fb.desc.cpu) {
-      struct panfrost_device *pdev = &cmdbuf->device->pdev;
-
-      fbinfo->sample_positions = pdev->sample_positions->ptr.gpu +
+      fbinfo->sample_positions = cmdbuf->device->sample_positions->addr.dev +
         panfrost_sample_positions_offset(
            pan_sample_pattern(fbinfo->nr_samples));
 
@@ -827,13 +827,11 @@ panvk_per_arch(emit_tiler_context)(const struct panvk_device *dev,
                                    unsigned width, unsigned height,
                                    const struct panfrost_ptr *descs)
 {
-   const struct panfrost_device *pdev = &dev->pdev;
-
    pan_pack(descs->cpu + pan_size(TILER_CONTEXT), TILER_HEAP, cfg) {
-      cfg.size = panfrost_bo_size(pdev->tiler_heap);
-      cfg.base = pdev->tiler_heap->ptr.gpu;
-      cfg.bottom = pdev->tiler_heap->ptr.gpu;
-      cfg.top = pdev->tiler_heap->ptr.gpu + panfrost_bo_size(pdev->tiler_heap);
+      cfg.size = pan_kmod_bo_size(dev->tiler_heap->bo);
+      cfg.base = dev->tiler_heap->addr.dev;
+      cfg.bottom = dev->tiler_heap->addr.dev;
+      cfg.top = cfg.base + cfg.size;
    }
 
    pan_pack(descs->cpu, TILER_CONTEXT, cfg) {
@@ -204,7 +204,7 @@ panvk_per_arch(queue_submit)(struct vk_queue *vk_queue,
                             struct vk_queue_submit *submit)
 {
    struct panvk_queue *queue = container_of(vk_queue, struct panvk_queue, vk);
-   const struct panfrost_device *pdev = &queue->device->pdev;
+   struct panvk_device *dev = queue->device;
 
    unsigned nr_semaphores = submit->wait_count + 1;
    uint32_t semaphores[nr_semaphores];
@@ -261,9 +261,9 @@ panvk_per_arch(queue_submit)(struct vk_queue *vk_queue,
          bos[bo_idx++] = pan_kmod_bo_handle(batch->blit.dst);
 
       if (batch->jc.first_tiler)
-         bos[bo_idx++] = panfrost_bo_handle(pdev->tiler_heap);
+         bos[bo_idx++] = pan_kmod_bo_handle(dev->tiler_heap->bo);
 
-      bos[bo_idx++] = panfrost_bo_handle(pdev->sample_positions);
+      bos[bo_idx++] = pan_kmod_bo_handle(dev->sample_positions->bo);
       assert(bo_idx == nr_bos);
 
       /* Merge identical BO entries. */