panvk: Move panfrost_device and panvk_meta to panvk_device
Each logical device should come with its own GPU VM space, and given the
GPU VM space is attached to the FD in panfrost, we need to move the
panfrost_device to panvk_device. As a result, we also need to move the
meta stuff there, because the meta logic is allocating GPU buffers.

Note that we instantiate a new kmod_dev at the physical device level so
we can query device properties. This device shouldn't be used for any
buffer allocation or GPU submission though.

Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Constantine Shablya <constantine.shablya@collabora.com>
Reviewed-by: Erik Faye-Lund <erik.faye-lund@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/26698>
Committed by: Marge Bot
Parent: ea4dc54d72
Commit: acdcf5c0a1
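The gist of the change, as a condensed sketch of the resulting ownership split
(type and field names are taken from the diff below; all other members are
elided):

struct panvk_physical_device {
   struct vk_physical_device vk;

   /* kmod device used only to query device properties; not meant for
    * buffer allocation or GPU submission. */
   struct {
      struct pan_kmod_dev *dev;
      struct pan_kmod_dev_props props;
   } kmod;
   ...
};

struct panvk_device {
   struct vk_device vk;

   /* Each logical device opens its own FD (a dup() of the physical
    * device FD), so it gets its own GPU VM space. */
   struct panfrost_device pdev;

   /* Meta shaders and pools allocate GPU buffers, so they now live on
    * the logical device as well. */
   struct panvk_meta meta;
   ...
};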
@@ -297,9 +297,7 @@ panvk_physical_device_finish(struct panvk_physical_device *device)
 {
    panvk_wsi_finish(device);
 
-   panvk_arch_dispatch(pan_arch(device->kmod.props.gpu_prod_id), meta_cleanup,
-                       device);
-   panfrost_close_device(&device->pdev);
+   pan_kmod_dev_destroy(device->kmod.dev);
 
    if (device->master_fd != -1)
       close(device->master_fd);
@@ -313,6 +311,25 @@ panvk_destroy_physical_device(struct vk_physical_device *device)
    vk_free(&device->instance->alloc, device);
 }
 
+static void *
+panvk_kmod_zalloc(const struct pan_kmod_allocator *allocator,
+                  size_t size, bool transient)
+{
+   const VkAllocationCallbacks *vkalloc = allocator->priv;
+
+   return vk_zalloc(vkalloc, size, 8,
+                    transient ? VK_SYSTEM_ALLOCATION_SCOPE_COMMAND
+                              : VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+}
+
+static void
+panvk_kmod_free(const struct pan_kmod_allocator *allocator, void *data)
+{
+   const VkAllocationCallbacks *vkalloc = allocator->priv;
+
+   return vk_free(vkalloc, data);
+}
+
 VkResult
 panvk_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                      const VkAllocationCallbacks *pAllocator,
@@ -342,6 +359,12 @@ panvk_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
       return vk_error(NULL, result);
    }
 
+   instance->kmod.allocator = (struct pan_kmod_allocator){
+      .zalloc = panvk_kmod_zalloc,
+      .free = panvk_kmod_free,
+      .priv = &instance->vk.alloc,
+   };
+
    instance->vk.physical_devices.try_create_for_drm =
       panvk_physical_device_try_create;
    instance->vk.physical_devices.destroy = panvk_destroy_physical_device;
@@ -448,14 +471,10 @@ panvk_physical_device_init(struct panvk_physical_device *device,
    }
 
    device->master_fd = master_fd;
-   if (instance->debug_flags & PANVK_DEBUG_TRACE)
-      device->pdev.debug |= PAN_DBG_TRACE;
 
-   device->pdev.debug |= PAN_DBG_NO_CACHE;
-   panfrost_open_device(NULL, fd, &device->pdev);
-   fd = -1;
-
-   pan_kmod_dev_query_props(device->pdev.kmod.dev, &device->kmod.props);
+   device->kmod.dev = pan_kmod_dev_create(fd, PAN_KMOD_DEV_FLAG_OWNS_FD,
+                                          &instance->kmod.allocator);
+   pan_kmod_dev_query_props(device->kmod.dev, &device->kmod.props);
 
    unsigned arch = pan_arch(device->kmod.props.gpu_prod_id);
 
@@ -469,8 +488,6 @@ panvk_physical_device_init(struct panvk_physical_device *device,
       goto fail;
    }
 
-   panvk_arch_dispatch(arch, meta_init, device);
-
    memset(device->name, 0, sizeof(device->name));
    sprintf(device->name, "%s", device->model->name);
 
@@ -487,7 +504,7 @@ panvk_physical_device_init(struct panvk_physical_device *device,
    panvk_get_device_uuid(&device->device_uuid);
 
    device->drm_syncobj_type =
-      vk_drm_syncobj_get_type(panfrost_device_fd(&device->pdev));
+      vk_drm_syncobj_get_type(device->kmod.dev->fd);
    /* We don't support timelines in the uAPI yet and we don't want it getting
    * suddenly turned on by vk_drm_syncobj_get_type() without us adding panvk
    * code for it first.
@@ -507,7 +524,7 @@ panvk_physical_device_init(struct panvk_physical_device *device,
    return VK_SUCCESS;
 
 fail_close_device:
-   panfrost_close_device(&device->pdev);
+   pan_kmod_dev_destroy(device->kmod.dev);
 fail:
    if (fd != -1)
       close(fd);
@@ -849,6 +866,7 @@ panvk_CreateDevice(VkPhysicalDevice physicalDevice,
                    const VkAllocationCallbacks *pAllocator, VkDevice *pDevice)
 {
    VK_FROM_HANDLE(panvk_physical_device, physical_device, physicalDevice);
+   struct panvk_instance *instance = physical_device->instance;
    VkResult result;
    struct panvk_device *device;
 
@@ -913,8 +931,15 @@ panvk_CreateDevice(VkPhysicalDevice physicalDevice,
    device->instance = physical_device->instance;
    device->physical_device = physical_device;
 
-   const struct panfrost_device *pdev = &physical_device->pdev;
-   vk_device_set_drm_fd(&device->vk, panfrost_device_fd(pdev));
+   device->pdev.debug |= PAN_DBG_NO_CACHE;
+   if (instance->debug_flags & PANVK_DEBUG_TRACE)
+      device->pdev.debug |= PAN_DBG_TRACE;
+
+   panfrost_open_device(NULL, dup(physical_device->kmod.dev->fd),
+                        &device->pdev);
+   vk_device_set_drm_fd(&device->vk, device->pdev.kmod.dev->fd);
+
+   panvk_arch_dispatch(arch, meta_init, device);
 
    for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
       const VkDeviceQueueCreateInfo *queue_create =
@@ -953,6 +978,10 @@ fail:
       vk_object_free(&device->vk, NULL, device->queues[i]);
    }
 
+   panvk_arch_dispatch(pan_arch(physical_device->kmod.props.gpu_prod_id),
+                       meta_cleanup, device);
+   panfrost_close_device(&device->pdev);
+
    vk_free(&device->vk.alloc, device);
    return result;
 }
@@ -961,6 +990,7 @@ void
 panvk_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
 {
    VK_FROM_HANDLE(panvk_device, device, _device);
+   struct panvk_physical_device *physical_device = device->physical_device;
 
    if (!device)
       return;
@@ -972,6 +1002,9 @@ panvk_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
       vk_object_free(&device->vk, NULL, device->queues[i]);
    }
 
+   panvk_arch_dispatch(pan_arch(physical_device->kmod.props.gpu_prod_id),
+                       meta_cleanup, device);
+   panfrost_close_device(&device->pdev);
    vk_free(&device->vk.alloc, device);
 }
 
@@ -1086,13 +1119,13 @@ panvk_AllocateMemory(VkDevice _device,
        * reference counting. We need to maintain a per-instance handle-to-bo
        * table and add reference count to panvk_bo.
        */
-      mem->bo = panfrost_bo_import(&device->physical_device->pdev, fd_info->fd);
+      mem->bo = panfrost_bo_import(&device->pdev, fd_info->fd);
       /* take ownership and close the fd */
       close(fd_info->fd);
    } else {
-      mem->bo = panfrost_bo_create(
-         &device->physical_device->pdev, pAllocateInfo->allocationSize,
-         can_be_exported ? PAN_BO_SHAREABLE : 0, "User-requested memory");
+      mem->bo = panfrost_bo_create(&device->pdev, pAllocateInfo->allocationSize,
+                                   can_be_exported ? PAN_BO_SHAREABLE : 0,
+                                   "User-requested memory");
    }
 
    assert(mem->bo);
@@ -75,6 +75,8 @@
 #include "panvk_varyings.h"
 #include "vk_extensions.h"
 
+#include "kmod/pan_kmod.h"
+
 /* Pre-declarations needed for WSI entrypoints */
 struct wl_surface;
 struct wl_display;
@@ -129,6 +131,7 @@ panvk_meta_copy_tex_type(unsigned dim, bool isarray)
 }
 
 struct panvk_meta {
+
    struct panvk_pool bin_pool;
    struct panvk_pool desc_pool;
 
@@ -172,10 +175,8 @@ struct panvk_meta {
 struct panvk_physical_device {
    struct vk_physical_device vk;
 
-   /* The API agnostic device object. */
-   struct panfrost_device pdev;
    struct {
       struct pan_kmod_dev *dev;
       struct pan_kmod_dev_props props;
    } kmod;
 
@@ -196,7 +197,6 @@ struct panvk_physical_device {
    const struct vk_sync_type *sync_types[2];
 
    struct wsi_device wsi_device;
-   struct panvk_meta meta;
 
    int master_fd;
 };
@@ -218,6 +218,10 @@ struct panvk_instance {
    uint32_t api_version;
 
    enum panvk_debug_flags debug_flags;
+
+   struct {
+      struct pan_kmod_allocator allocator;
+   } kmod;
 };
 
 VkResult panvk_wsi_init(struct panvk_physical_device *physical_device);
@@ -245,6 +249,10 @@ struct panvk_queue {
 struct panvk_device {
    struct vk_device vk;
 
+   struct panfrost_device pdev;
+
+   struct panvk_meta meta;
+
    struct vk_device_dispatch_table cmd_dispatch;
 
    struct panvk_instance *instance;
@@ -103,7 +103,7 @@ panvk_per_arch(cmd_close_batch)(struct panvk_cmd_buffer *cmdbuf)
    }
 
    struct panvk_device *dev = cmdbuf->device;
-   struct panfrost_device *pdev = &cmdbuf->device->physical_device->pdev;
+   struct panfrost_device *pdev = &cmdbuf->device->pdev;
 
    list_addtail(&batch->node, &cmdbuf->batches);
 
@@ -142,7 +142,7 @@ panvk_per_arch(cmd_close_batch)(struct panvk_cmd_buffer *cmdbuf)
       GENX(pan_emit_tls)(&batch->tlsinfo, batch->tls.cpu);
 
    if (batch->fb.desc.cpu) {
-      struct panfrost_device *pdev = &cmdbuf->device->physical_device->pdev;
+      struct panfrost_device *pdev = &cmdbuf->device->pdev;
 
       fbinfo->sample_positions = pdev->sample_positions->ptr.gpu +
                                  panfrost_sample_positions_offset(
@@ -1122,14 +1122,14 @@ panvk_create_cmdbuf(struct vk_command_pool *vk_pool,
 
    cmdbuf->device = device;
 
-   panvk_pool_init(&cmdbuf->desc_pool, &device->physical_device->pdev,
+   panvk_pool_init(&cmdbuf->desc_pool, &device->pdev,
                    &pool->desc_bo_pool, 0, 64 * 1024,
                    "Command buffer descriptor pool", true);
-   panvk_pool_init(&cmdbuf->tls_pool, &device->physical_device->pdev,
+   panvk_pool_init(&cmdbuf->tls_pool, &device->pdev,
                    &pool->tls_bo_pool,
                    panvk_debug_adjust_bo_flags(device, PAN_BO_INVISIBLE),
                    64 * 1024, "TLS pool", false);
-   panvk_pool_init(&cmdbuf->varying_pool, &device->physical_device->pdev,
+   panvk_pool_init(&cmdbuf->varying_pool, &device->pdev,
                    &pool->varying_bo_pool,
                    panvk_debug_adjust_bo_flags(device, PAN_BO_INVISIBLE),
                    64 * 1024, "Varyings pool", false);
@@ -827,7 +827,7 @@ panvk_per_arch(emit_tiler_context)(const struct panvk_device *dev,
                                    unsigned width, unsigned height,
                                    const struct panfrost_ptr *descs)
 {
-   const struct panfrost_device *pdev = &dev->physical_device->pdev;
+   const struct panfrost_device *pdev = &dev->pdev;
 
    pan_pack(descs->cpu + pan_size(TILER_CONTEXT), TILER_HEAP, cfg) {
       cfg.size = panfrost_bo_size(pdev->tiler_heap);
@@ -303,7 +303,7 @@ panvk_per_arch(descriptor_set_create)(
 
    if (layout->desc_ubo_size) {
       set->desc_bo =
-         panfrost_bo_create(&device->physical_device->pdev,
+         panfrost_bo_create(&device->pdev,
                             layout->desc_ubo_size, 0, "Descriptor set");
       if (!set->desc_bo)
          goto err_free_set;
@@ -42,7 +42,7 @@ panvk_queue_submit_batch(struct panvk_queue *queue, struct panvk_batch *batch,
 {
    const struct panvk_device *dev = queue->device;
    unsigned debug = dev->physical_device->instance->debug_flags;
-   const struct panfrost_device *pdev = &dev->physical_device->pdev;
+   const struct panfrost_device *pdev = &dev->pdev;
    int ret;
 
    /* Reset the batch if it's already been issued */
@@ -205,7 +205,7 @@ panvk_per_arch(queue_submit)(struct vk_queue *vk_queue,
                              struct vk_queue_submit *submit)
 {
    struct panvk_queue *queue = container_of(vk_queue, struct panvk_queue, vk);
-   const struct panfrost_device *pdev = &queue->device->physical_device->pdev;
+   const struct panfrost_device *pdev = &queue->device->pdev;
 
    unsigned nr_semaphores = submit->wait_count + 1;
    uint32_t semaphores[nr_semaphores];
@@ -113,7 +113,7 @@ panvk_per_arch(CreateImageView)(VkDevice _device,
    };
    panvk_convert_swizzle(&view->vk.swizzle, view->pview.swizzle);
 
-   struct panfrost_device *pdev = &device->physical_device->pdev;
+   struct panfrost_device *pdev = &device->pdev;
 
    if (view->vk.usage &
        (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) {
@@ -185,7 +185,7 @@ panvk_per_arch(CreateBufferView)(VkDevice _device,
 
    view->fmt = vk_format_to_pipe_format(pCreateInfo->format);
 
-   struct panfrost_device *pdev = &device->physical_device->pdev;
+   struct panfrost_device *pdev = &device->pdev;
    mali_ptr address = panvk_buffer_gpu_ptr(buffer, pCreateInfo->offset);
    unsigned size =
       panvk_buffer_range(buffer, pCreateInfo->offset, pCreateInfo->range);
@@ -48,7 +48,7 @@ panvk_per_arch(meta_emit_viewport)(struct pan_pool *pool, uint16_t minx,
 }
 
 void
-panvk_per_arch(meta_init)(struct panvk_physical_device *dev)
+panvk_per_arch(meta_init)(struct panvk_device *dev)
 {
    panvk_pool_init(&dev->meta.bin_pool, &dev->pdev, NULL, PAN_BO_EXECUTE,
                    16 * 1024, "panvk_meta binary pool", false);
@@ -60,7 +60,7 @@ panvk_per_arch(meta_init)(struct panvk_physical_device *dev)
 }
 
 void
-panvk_per_arch(meta_cleanup)(struct panvk_physical_device *dev)
+panvk_per_arch(meta_cleanup)(struct panvk_device *dev)
 {
    panvk_per_arch(meta_blit_cleanup)(dev);
    panvk_pool_cleanup(&dev->meta.desc_pool);
@@ -29,18 +29,18 @@
 #error "no arch"
 #endif
 
-void panvk_per_arch(meta_init)(struct panvk_physical_device *dev);
+void panvk_per_arch(meta_init)(struct panvk_device *dev);
 
-void panvk_per_arch(meta_cleanup)(struct panvk_physical_device *dev);
+void panvk_per_arch(meta_cleanup)(struct panvk_device *dev);
 
 mali_ptr panvk_per_arch(meta_emit_viewport)(struct pan_pool *pool,
                                             uint16_t minx, uint16_t miny,
                                             uint16_t maxx, uint16_t maxy);
 
-void panvk_per_arch(meta_clear_init)(struct panvk_physical_device *dev);
+void panvk_per_arch(meta_clear_init)(struct panvk_device *dev);
 
-void panvk_per_arch(meta_blit_init)(struct panvk_physical_device *dev);
+void panvk_per_arch(meta_blit_init)(struct panvk_device *dev);
 
-void panvk_per_arch(meta_blit_cleanup)(struct panvk_physical_device *dev);
+void panvk_per_arch(meta_blit_cleanup)(struct panvk_device *dev);
 
-void panvk_per_arch(meta_copy_init)(struct panvk_physical_device *dev);
+void panvk_per_arch(meta_copy_init)(struct panvk_device *dev);
 
@@ -34,7 +34,7 @@ panvk_meta_blit(struct panvk_cmd_buffer *cmdbuf,
                 const struct panvk_image *src_img,
                 const struct panvk_image *dst_img)
 {
-   struct panfrost_device *pdev = &cmdbuf->device->physical_device->pdev;
+   struct panfrost_device *pdev = &cmdbuf->device->pdev;
    struct pan_fb_info *fbinfo = &cmdbuf->state.fb.info;
    struct pan_blit_context ctx;
    struct pan_image_view views[2] = {
@@ -225,7 +225,7 @@ panvk_per_arch(CmdResolveImage2)(VkCommandBuffer commandBuffer,
 }
 
 void
-panvk_per_arch(meta_blit_init)(struct panvk_physical_device *dev)
+panvk_per_arch(meta_blit_init)(struct panvk_device *dev)
 {
    panvk_pool_init(&dev->meta.blitter.bin_pool, &dev->pdev, NULL,
                    PAN_BO_EXECUTE, 16 * 1024, "panvk_meta blitter binary pool",
@@ -233,14 +233,15 @@ panvk_per_arch(meta_blit_init)(struct panvk_physical_device *dev)
    panvk_pool_init(&dev->meta.blitter.desc_pool, &dev->pdev, NULL, 0, 16 * 1024,
                    "panvk_meta blitter descriptor pool", false);
    pan_blend_shader_cache_init(&dev->pdev.blend_shaders,
-                               dev->kmod.props.gpu_prod_id);
+                               dev->physical_device->kmod.props.gpu_prod_id);
    GENX(pan_blitter_cache_init)
-   (&dev->pdev.blitter, dev->kmod.props.gpu_prod_id, &dev->pdev.blend_shaders,
-    &dev->meta.blitter.bin_pool.base, &dev->meta.blitter.desc_pool.base);
+   (&dev->pdev.blitter, dev->physical_device->kmod.props.gpu_prod_id,
+    &dev->pdev.blend_shaders, &dev->meta.blitter.bin_pool.base,
+    &dev->meta.blitter.desc_pool.base);
 }
 
 void
-panvk_per_arch(meta_blit_cleanup)(struct panvk_physical_device *dev)
+panvk_per_arch(meta_blit_cleanup)(struct panvk_device *dev)
 {
    GENX(pan_blitter_cache_cleanup)(&dev->pdev.blitter);
    pan_blend_shader_cache_cleanup(&dev->pdev.blend_shaders);
@@ -33,7 +33,7 @@
 #include "vk_format.h"
 
 static mali_ptr
-panvk_meta_clear_color_attachment_shader(struct panvk_physical_device *dev,
+panvk_meta_clear_color_attachment_shader(struct panvk_device *dev,
                                          enum glsl_base_type base_type,
                                          struct pan_shader_info *shader_info)
 {
@@ -53,7 +53,7 @@ panvk_meta_clear_color_attachment_shader(struct panvk_physical_device *dev,
    nir_store_var(&b, out, clear_values, 0xff);
 
    struct panfrost_compile_inputs inputs = {
-      .gpu_id = dev->kmod.props.gpu_prod_id,
+      .gpu_id = dev->physical_device->kmod.props.gpu_prod_id,
       .is_blit = true,
       .no_ubo_to_push = true,
    };
@@ -255,7 +255,7 @@ panvk_meta_clear_attachment(struct panvk_cmd_buffer *cmdbuf,
                             const VkClearValue *clear_value,
                             const VkClearRect *clear_rect)
 {
-   struct panvk_meta *meta = &cmdbuf->device->physical_device->meta;
+   struct panvk_meta *meta = &cmdbuf->device->meta;
    struct panvk_batch *batch = cmdbuf->state.batch;
    const struct panvk_render_pass *pass = cmdbuf->state.pass;
    const struct panvk_render_pass_attachment *att =
@@ -493,7 +493,7 @@ panvk_per_arch(CmdClearAttachments)(VkCommandBuffer commandBuffer,
 }
 
 static void
-panvk_meta_clear_attachment_init(struct panvk_physical_device *dev)
+panvk_meta_clear_attachment_init(struct panvk_device *dev)
 {
    dev->meta.clear_attachment.color[GLSL_TYPE_UINT].shader =
       panvk_meta_clear_color_attachment_shader(
@@ -512,7 +512,7 @@ panvk_meta_clear_attachment_init(struct panvk_physical_device *dev)
 }
 
 void
-panvk_per_arch(meta_clear_init)(struct panvk_physical_device *dev)
+panvk_per_arch(meta_clear_init)(struct panvk_device *dev)
 {
    panvk_meta_clear_attachment_init(dev);
 }
@@ -283,7 +283,7 @@ panvk_meta_copy_to_buf_emit_rsd(struct pan_pool *desc_pool, mali_ptr shader,
 }
 
 static mali_ptr
-panvk_meta_copy_img2img_shader(struct panvk_physical_device *dev,
+panvk_meta_copy_img2img_shader(struct panvk_device *dev,
                                enum pipe_format srcfmt, enum pipe_format dstfmt,
                                unsigned dstmask, unsigned texdim,
                                bool texisarray, bool is_ms,
@@ -416,7 +416,7 @@ panvk_meta_copy_img2img_shader(struct panvk_physical_device *dev,
    nir_store_var(&b, out, texel, 0xff);
 
    struct panfrost_compile_inputs inputs = {
-      .gpu_id = dev->kmod.props.gpu_prod_id,
+      .gpu_id = dev->physical_device->kmod.props.gpu_prod_id,
      .is_blit = true,
      .no_ubo_to_push = true,
    };
@@ -570,7 +570,7 @@ panvk_meta_copy_img2img(struct panvk_cmd_buffer *cmdbuf,
    unsigned ms = dst->pimage.layout.nr_samples > 1 ? 1 : 0;
 
    mali_ptr rsd =
-      cmdbuf->device->physical_device->meta.copy.img2img[ms][texdimidx][fmtidx]
+      cmdbuf->device->meta.copy.img2img[ms][texdimidx][fmtidx]
          .rsd;
 
    struct pan_image_view srcview = {
@@ -692,7 +692,7 @@ panvk_meta_copy_img2img(struct panvk_cmd_buffer *cmdbuf,
 }
 
 static void
-panvk_meta_copy_img2img_init(struct panvk_physical_device *dev, bool is_ms)
+panvk_meta_copy_img2img_init(struct panvk_device *dev, bool is_ms)
 {
    STATIC_ASSERT(ARRAY_SIZE(panvk_meta_copy_img2img_fmts) ==
                  PANVK_META_COPY_IMG2IMG_NUM_FORMATS);
@@ -849,7 +849,7 @@ struct panvk_meta_copy_buf2img_info {
    .range = ~0)
 
 static mali_ptr
-panvk_meta_copy_buf2img_shader(struct panvk_physical_device *dev,
+panvk_meta_copy_buf2img_shader(struct panvk_device *dev,
                                struct panvk_meta_copy_format_info key,
                                struct pan_shader_info *shader_info)
 {
@@ -957,7 +957,7 @@ panvk_meta_copy_buf2img_shader(struct panvk_physical_device *dev,
    nir_store_var(&b, out, texel, 0xff);
 
    struct panfrost_compile_inputs inputs = {
-      .gpu_id = dev->kmod.props.gpu_prod_id,
+      .gpu_id = dev->physical_device->kmod.props.gpu_prod_id,
       .is_blit = true,
       .no_ubo_to_push = true,
    };
@@ -1027,7 +1027,7 @@ panvk_meta_copy_buf2img(struct panvk_cmd_buffer *cmdbuf,
    unsigned fmtidx = panvk_meta_copy_buf2img_format_idx(key);
 
    mali_ptr rsd =
-      cmdbuf->device->physical_device->meta.copy.buf2img[fmtidx].rsd;
+      cmdbuf->device->meta.copy.buf2img[fmtidx].rsd;
 
    const struct vk_image_buffer_layout buflayout =
       vk_image_buffer_copy_layout(&img->vk, region);
|
||||
}
|
||||
|
||||
static void
|
||||
panvk_meta_copy_buf2img_init(struct panvk_physical_device *dev)
|
||||
panvk_meta_copy_buf2img_init(struct panvk_device *dev)
|
||||
{
|
||||
STATIC_ASSERT(ARRAY_SIZE(panvk_meta_copy_buf2img_fmts) ==
|
||||
PANVK_META_COPY_BUF2IMG_NUM_FORMATS);
|
||||
@@ -1237,7 +1237,7 @@ struct panvk_meta_copy_img2buf_info {
|
||||
.range = ~0)
|
||||
|
||||
static mali_ptr
|
||||
panvk_meta_copy_img2buf_shader(struct panvk_physical_device *dev,
|
||||
panvk_meta_copy_img2buf_shader(struct panvk_device *dev,
|
||||
struct panvk_meta_copy_format_info key,
|
||||
unsigned texdim, unsigned texisarray,
|
||||
struct pan_shader_info *shader_info)
|
||||
@@ -1416,7 +1416,7 @@ panvk_meta_copy_img2buf_shader(struct panvk_physical_device *dev,
|
||||
nir_pop_if(&b, NULL);
|
||||
|
||||
struct panfrost_compile_inputs inputs = {
|
||||
.gpu_id = dev->kmod.props.gpu_prod_id,
|
||||
.gpu_id = dev->physical_device->kmod.props.gpu_prod_id,
|
||||
.is_blit = true,
|
||||
.no_ubo_to_push = true,
|
||||
};
|
||||
@@ -1467,7 +1467,7 @@ panvk_meta_copy_img2buf(struct panvk_cmd_buffer *cmdbuf,
    unsigned fmtidx = panvk_meta_copy_img2buf_format_idx(key);
 
    mali_ptr rsd =
-      cmdbuf->device->physical_device->meta.copy.img2buf[texdimidx][fmtidx].rsd;
+      cmdbuf->device->meta.copy.img2buf[texdimidx][fmtidx].rsd;
 
    struct panvk_meta_copy_img2buf_info info = {
       .buf.ptr = panvk_buffer_gpu_ptr(buf, region->bufferOffset),
|
||||
}
|
||||
|
||||
static void
|
||||
panvk_meta_copy_img2buf_init(struct panvk_physical_device *dev)
|
||||
panvk_meta_copy_img2buf_init(struct panvk_device *dev)
|
||||
{
|
||||
STATIC_ASSERT(ARRAY_SIZE(panvk_meta_copy_img2buf_fmts) ==
|
||||
PANVK_META_COPY_IMG2BUF_NUM_FORMATS);
|
||||
@@ -1615,7 +1615,7 @@ struct panvk_meta_copy_buf2buf_info {
|
||||
.range = ~0)
|
||||
|
||||
static mali_ptr
|
||||
panvk_meta_copy_buf2buf_shader(struct panvk_physical_device *dev,
|
||||
panvk_meta_copy_buf2buf_shader(struct panvk_device *dev,
|
||||
unsigned blksz,
|
||||
struct pan_shader_info *shader_info)
|
||||
{
|
||||
@@ -1644,7 +1644,7 @@ panvk_meta_copy_buf2buf_shader(struct panvk_physical_device *dev,
                     (1 << ncomps) - 1);
 
    struct panfrost_compile_inputs inputs = {
-      .gpu_id = dev->kmod.props.gpu_prod_id,
+      .gpu_id = dev->physical_device->kmod.props.gpu_prod_id,
       .is_blit = true,
       .no_ubo_to_push = true,
    };
@@ -1668,7 +1668,7 @@ panvk_meta_copy_buf2buf_shader(struct panvk_physical_device *dev,
 }
 
 static void
-panvk_meta_copy_buf2buf_init(struct panvk_physical_device *dev)
+panvk_meta_copy_buf2buf_init(struct panvk_device *dev)
 {
    for (unsigned i = 0; i < ARRAY_SIZE(dev->meta.copy.buf2buf); i++) {
       struct pan_shader_info shader_info;
@@ -1694,9 +1694,9 @@ panvk_meta_copy_buf2buf(struct panvk_cmd_buffer *cmdbuf,
    unsigned log2blksz = alignment ? alignment - 1 : 4;
 
    assert(log2blksz <
-          ARRAY_SIZE(cmdbuf->device->physical_device->meta.copy.buf2buf));
+          ARRAY_SIZE(cmdbuf->device->meta.copy.buf2buf));
    mali_ptr rsd =
-      cmdbuf->device->physical_device->meta.copy.buf2buf[log2blksz].rsd;
+      cmdbuf->device->meta.copy.buf2buf[log2blksz].rsd;
 
    mali_ptr pushconsts =
       pan_pool_upload_aligned(&cmdbuf->desc_pool.base, &info, sizeof(info), 16);
@@ -1748,7 +1748,7 @@ struct panvk_meta_fill_buf_info {
    .base = offsetof(struct panvk_meta_fill_buf_info, field), .range = ~0)
 
 static mali_ptr
-panvk_meta_fill_buf_shader(struct panvk_physical_device *dev,
+panvk_meta_fill_buf_shader(struct panvk_device *dev,
                            struct pan_shader_info *shader_info)
 {
    struct pan_pool *bin_pool = &dev->meta.bin_pool.base;
@@ -1771,7 +1771,7 @@ panvk_meta_fill_buf_shader(struct panvk_physical_device *dev,
    nir_store_global(&b, ptr, sizeof(uint32_t), val, 1);
 
    struct panfrost_compile_inputs inputs = {
-      .gpu_id = dev->kmod.props.gpu_prod_id,
+      .gpu_id = dev->physical_device->kmod.props.gpu_prod_id,
       .is_blit = true,
       .no_ubo_to_push = true,
    };
@@ -1795,7 +1795,7 @@ panvk_meta_fill_buf_shader(struct panvk_physical_device *dev,
 }
 
 static mali_ptr
-panvk_meta_fill_buf_emit_rsd(struct panvk_physical_device *dev)
+panvk_meta_fill_buf_emit_rsd(struct panvk_device *dev)
 {
    struct pan_pool *desc_pool = &dev->meta.desc_pool.base;
    struct pan_shader_info shader_info;
@@ -1813,7 +1813,7 @@ panvk_meta_fill_buf_emit_rsd(struct panvk_physical_device *dev)
 }
 
 static void
-panvk_meta_fill_buf_init(struct panvk_physical_device *dev)
+panvk_meta_fill_buf_init(struct panvk_device *dev)
 {
    dev->meta.copy.fillbuf.rsd = panvk_meta_fill_buf_emit_rsd(dev);
 }
@@ -1842,7 +1842,7 @@ panvk_meta_fill_buf(struct panvk_cmd_buffer *cmdbuf,
    assert(!(offset & 3) && !(size & 3));
 
    unsigned nwords = size / sizeof(uint32_t);
-   mali_ptr rsd = cmdbuf->device->physical_device->meta.copy.fillbuf.rsd;
+   mali_ptr rsd = cmdbuf->device->meta.copy.fillbuf.rsd;
 
    mali_ptr pushconsts =
      pan_pool_upload_aligned(&cmdbuf->desc_pool.base, &info, sizeof(info), 16);
@@ -1891,7 +1891,7 @@ panvk_meta_update_buf(struct panvk_cmd_buffer *cmdbuf,
    unsigned log2blksz = ffs(sizeof(uint32_t)) - 1;
 
    mali_ptr rsd =
-      cmdbuf->device->physical_device->meta.copy.buf2buf[log2blksz].rsd;
+      cmdbuf->device->meta.copy.buf2buf[log2blksz].rsd;
 
    mali_ptr pushconsts =
       pan_pool_upload_aligned(&cmdbuf->desc_pool.base, &info, sizeof(info), 16);
|
||||
}
|
||||
|
||||
void
|
||||
panvk_per_arch(meta_copy_init)(struct panvk_physical_device *dev)
|
||||
panvk_per_arch(meta_copy_init)(struct panvk_device *dev)
|
||||
{
|
||||
panvk_meta_copy_img2img_init(dev, false);
|
||||
panvk_meta_copy_img2img_init(dev, true);
|
||||
|
@@ -161,7 +161,7 @@ panvk_pipeline_builder_upload_shaders(struct panvk_pipeline_builder *builder,
       return VK_SUCCESS;
 
    struct panfrost_bo *bin_bo =
-      panfrost_bo_create(&builder->device->physical_device->pdev,
+      panfrost_bo_create(&builder->device->pdev,
                          builder->shader_total_size, PAN_BO_EXECUTE, "Shader");
 
    pipeline->binary_bo = bin_bo;
@@ -184,7 +184,7 @@ static void
 panvk_pipeline_builder_alloc_static_state_bo(
    struct panvk_pipeline_builder *builder, struct panvk_pipeline *pipeline)
 {
-   struct panfrost_device *pdev = &builder->device->physical_device->pdev;
+   struct panfrost_device *pdev = &builder->device->pdev;
    unsigned bo_size = 0;
 
    for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++) {