vk/queue: Embed the queue in and allocate it with the device

Author: Jason Ekstrand
Date:   2015-06-09 12:28:58 -07:00
parent 38f5eef59d
commit 66b00d5e5a
2 changed files with 55 additions and 35 deletions
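In effect, the queue stops being a separately heap-allocated object handed out by anv_GetDeviceQueue and becomes a plain member of struct anv_device, initialized in anv_CreateDevice and torn down in anv_DestroyDevice. Below is a minimal, self-contained sketch of that ownership pattern (not the driver code itself); the names device, queue, device_create and device_get_queue are invented for illustration, and all real device state is elided.

/* Sketch of the pattern this commit adopts: the queue is embedded in the
 * device, set up and torn down together with it, and "getting" the queue
 * just returns a pointer into the device. Illustrative names only. */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct device;

struct queue {
   struct device *device;   /* back-pointer to the owning device */
   uint32_t next_serial;    /* serial the next submitted batch will get */
};

struct device {
   /* ...other device state would live here... */
   struct queue queue;      /* embedded: no separate allocation or lifetime */
};

static void queue_init(struct device *dev, struct queue *q)
{
   q->device = dev;
   q->next_serial = 1;
}

static struct device *device_create(void)
{
   struct device *dev = calloc(1, sizeof(*dev));
   if (dev == NULL)
      return NULL;
   queue_init(dev, &dev->queue);   /* queue set up with the device */
   return dev;
}

static struct queue *device_get_queue(struct device *dev)
{
   return &dev->queue;             /* hand out a pointer, allocate nothing */
}

static void device_destroy(struct device *dev)
{
   /* a queue_finish() counterpart would run here before the free */
   free(dev);
}

int main(void)
{
   struct device *dev = device_create();
   if (dev == NULL)
      return 1;
   struct queue *q = device_get_queue(dev);
   printf("queue next_serial = %u\n", (unsigned) q->next_serial);
   device_destroy(dev);            /* frees the embedded queue too */
   return 0;
}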


@@ -303,6 +303,33 @@ parse_debug_flags(struct anv_device *device)
    }
 }
 
+static VkResult
+anv_queue_init(struct anv_device *device, struct anv_queue *queue)
+{
+   queue->device = device;
+   queue->pool = &device->surface_state_pool;
+   queue->completed_serial = anv_state_pool_alloc(queue->pool, 4, 4);
+   if (queue->completed_serial.map == NULL)
+      return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+
+   *(uint32_t *)queue->completed_serial.map = 0;
+   queue->next_serial = 1;
+
+   return VK_SUCCESS;
+}
+
+static void
+anv_queue_finish(struct anv_queue *queue)
+{
+#ifdef HAVE_VALGRIND
+   /* This gets torn down with the device so we only need to do this if
+    * valgrind is present.
+    */
+   anv_state_pool_free(queue->pool, queue->completed_serial);
+#endif
+}
+
 static void
 anv_device_init_border_colors(struct anv_device *device)
 {
@@ -384,6 +411,8 @@ VkResult anv_CreateDevice(
    pthread_mutex_init(&device->mutex, NULL);
 
+   anv_queue_init(device, &device->queue);
+
    anv_device_init_meta(device);
    anv_device_init_border_colors(device);
@@ -410,6 +439,8 @@ VkResult anv_DestroyDevice(
    anv_compiler_destroy(device->compiler);
 
+   anv_queue_finish(&device->queue);
+
    anv_device_finish_meta(device);
 
 #ifdef HAVE_VALGRIND
@@ -516,23 +547,10 @@ VkResult anv_GetDeviceQueue(
     VkQueue* pQueue)
 {
    struct anv_device *device = (struct anv_device *) _device;
 
-   struct anv_queue *queue;
-
-   /* FIXME: Should allocate these at device create time. */
-
-   queue = anv_device_alloc(device, sizeof(*queue), 8,
-                            VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
-   if (queue == NULL)
-      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
-
-   queue->device = device;
-   queue->pool = &device->surface_state_pool;
-   queue->completed_serial = anv_state_pool_alloc(queue->pool, 4, 4);
-   *(uint32_t *)queue->completed_serial.map = 0;
-   queue->next_serial = 1;
+   assert(queueIndex == 0);
 
-   *pQueue = (VkQueue) queue;
+   *pQueue = (VkQueue) &device->queue;
 
    return VK_SUCCESS;
 }


@@ -313,6 +313,26 @@ struct anv_meta_state {
    } shared;
 };
 
+struct anv_queue {
+   struct anv_device *                          device;
+
+   struct anv_state_pool *                      pool;
+
+   /**
+    * Serial number of the most recently completed batch executed on the
+    * engine.
+    */
+   struct anv_state                             completed_serial;
+
+   /**
+    * The next batch submitted to the engine will be assigned this serial
+    * number.
+    */
+   uint32_t                                     next_serial;
+
+   uint32_t                                     last_collected_serial;
+};
+
 struct anv_device {
    struct anv_instance *                        instance;
    uint32_t                                     chipset_id;
@@ -336,31 +356,13 @@ struct anv_device {
    struct anv_state                             float_border_colors;
    struct anv_state                             uint32_border_colors;
 
+   struct anv_queue                             queue;
+
    struct anv_compiler *                        compiler;
    struct anv_aub_writer *                      aub_writer;
    pthread_mutex_t                              mutex;
 };
 
-struct anv_queue {
-   struct anv_device *                          device;
-
-   struct anv_state_pool *                      pool;
-
-   /**
-    * Serial number of the most recently completed batch executed on the
-    * engine.
-    */
-   struct anv_state                             completed_serial;
-
-   /**
-    * The next batch submitted to the engine will be assigned this serial
-    * number.
-    */
-   uint32_t                                     next_serial;
-
-   uint32_t                                     last_collected_serial;
-};
-
 void *
 anv_device_alloc(struct anv_device *            device,
                  size_t                         size,
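
As an aside, the completed_serial / next_serial comments in the struct anv_queue hunk describe a fence-by-serial scheme: every submission is stamped with next_serial, which then increments, while completed_serial records the highest serial known to have finished (it lives in pool-backed state memory, presumably so it can be written on completion, but that mechanism is outside this diff). The following CPU-only toy model sketches that bookkeeping under those assumptions; the toy_* names are invented for illustration.

/* CPU-only model of the serial bookkeeping implied by the struct anv_queue
 * comments. In the driver, completion would be reported by the hardware,
 * not by a function call; everything here is a stand-in. */

#include <stdint.h>
#include <stdio.h>

struct toy_queue {
   uint32_t completed_serial;   /* highest serial known to have finished */
   uint32_t next_serial;        /* serial the next submission will be given */
};

static void toy_queue_init(struct toy_queue *q)
{
   q->completed_serial = 0;     /* mirrors the "*map = 0" init in the diff */
   q->next_serial = 1;          /* mirrors queue->next_serial = 1 */
}

/* Stamp a submission with a serial and advance the counter. */
static uint32_t toy_submit(struct toy_queue *q)
{
   return q->next_serial++;
}

/* Stand-in for "the batch with this serial has completed". */
static void toy_complete(struct toy_queue *q, uint32_t serial)
{
   if (serial > q->completed_serial)
      q->completed_serial = serial;
}

static int toy_is_done(const struct toy_queue *q, uint32_t serial)
{
   return q->completed_serial >= serial;
}

int main(void)
{
   struct toy_queue q;
   toy_queue_init(&q);

   uint32_t a = toy_submit(&q);   /* gets serial 1 */
   uint32_t b = toy_submit(&q);   /* gets serial 2 */

   toy_complete(&q, a);
   printf("a done: %d, b done: %d\n", toy_is_done(&q, a), toy_is_done(&q, b));
   return 0;
}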