radv: Modify radv_descriptor_set for MSVC legality
Reviewed-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/7793>
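Context for the change (a reading of the diff below, not text from the commit message): the old struct radv_descriptor_set ended in a zero-length array, "struct radeon_winsys_bo *descriptors[0];", and was embedded by value in struct radv_push_descriptor_set and in struct radv_cmd_buffer (meta_push_descriptors). ISO C forbids a struct with a flexible array member from being a member of another struct, and MSVC does not accept the embedded zero-length-array form either, hence the "MSVC legality" wording. The fix moves the fixed-size fields into a new struct radv_descriptor_set_header that is safe to embed. A minimal sketch of the pattern, with made-up names (inner/outer), not RADV code:

/* Illustrative sketch only; "inner"/"outer" are hypothetical names. */
struct inner_header {
   unsigned count;              /* all fixed-size state lives here */
};

struct inner {
   struct inner_header header;  /* header first, so &inner and &inner.header coincide */
   int data[];                  /* flexible tail, only on heap-allocated objects */
};

struct outer {
   struct inner_header i;       /* embedding the header is legal everywhere */
   unsigned capacity;
};

#if 0
/* The pre-patch shape: a struct ending in a (zero-length) flexible array,
 * embedded in another struct. MSVC rejects this, which is what the commit fixes. */
struct inner_old { unsigned count; int data[0]; };
struct outer_old { struct inner_old i; unsigned capacity; };
#endif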
@@ -789,7 +789,7 @@ radv_emit_descriptor_pointers(struct radv_cmd_buffer *cmd_buffer,
          struct radv_descriptor_set *set =
             descriptors_state->sets[start + i];

-         radv_emit_shader_pointer_body(device, cs, set->va, true);
+         radv_emit_shader_pointer_body(device, cs, set->header.va, true);
       }
    }
 }
@@ -2630,16 +2630,17 @@ radv_flush_push_descriptors(struct radv_cmd_buffer *cmd_buffer,
 {
    struct radv_descriptor_state *descriptors_state =
       radv_get_descriptors_state(cmd_buffer, bind_point);
-   struct radv_descriptor_set *set = &descriptors_state->push_set.set;
+   struct radv_descriptor_set *set =
+      (struct radv_descriptor_set *)&descriptors_state->push_set.set;
    unsigned bo_offset;

-   if (!radv_cmd_buffer_upload_data(cmd_buffer, set->size, 32,
-                                    set->mapped_ptr,
+   if (!radv_cmd_buffer_upload_data(cmd_buffer, set->header.size, 32,
+                                    set->header.mapped_ptr,
                                     &bo_offset))
       return;

-   set->va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
-   set->va += bo_offset;
+   set->header.va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
+   set->header.va += bo_offset;
 }

 static void
@@ -2661,7 +2662,7 @@ radv_flush_indirect_descriptor_sets(struct radv_cmd_buffer *cmd_buffer,
       uint64_t set_va = 0;
       struct radv_descriptor_set *set = descriptors_state->sets[i];
       if (descriptors_state->valid & (1u << i))
-         set_va = set->va;
+         set_va = set->header.va;
       uptr[0] = set_va & 0xffffffff;
    }

@@ -4115,13 +4116,13 @@ radv_bind_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
    assert(set);

    if (!cmd_buffer->device->use_global_bo_list) {
-      for (unsigned j = 0; j < set->buffer_count; ++j)
+      for (unsigned j = 0; j < set->header.buffer_count; ++j)
          if (set->descriptors[j])
             radv_cs_add_buffer(ws, cmd_buffer->cs, set->descriptors[j]);
    }

-   if(set->bo)
-      radv_cs_add_buffer(ws, cmd_buffer->cs, set->bo);
+   if(set->header.bo)
+      radv_cs_add_buffer(ws, cmd_buffer->cs, set->header.bo);
 }

 void radv_CmdBindDescriptorSets(
@@ -4158,7 +4159,7 @@ void radv_CmdBindDescriptorSets(
          uint32_t *dst = descriptors_state->dynamic_buffers + idx * 4;
          assert(dyn_idx < dynamicOffsetCount);

-         struct radv_descriptor_range *range = set->dynamic_descriptors + j;
+         struct radv_descriptor_range *range = set->header.dynamic_descriptors + j;

          if (!range->va) {
             memset(dst, 0, 4 * 4);
@@ -4194,18 +4195,18 @@ static bool radv_init_push_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
 {
    struct radv_descriptor_state *descriptors_state =
       radv_get_descriptors_state(cmd_buffer, bind_point);
-   set->size = layout->size;
-   set->layout = layout;
+   set->header.size = layout->size;
+   set->header.layout = layout;

-   if (descriptors_state->push_set.capacity < set->size) {
-      size_t new_size = MAX2(set->size, 1024);
+   if (descriptors_state->push_set.capacity < set->header.size) {
+      size_t new_size = MAX2(set->header.size, 1024);
       new_size = MAX2(new_size, 2 * descriptors_state->push_set.capacity);
       new_size = MIN2(new_size, 96 * MAX_PUSH_DESCRIPTORS);

-      free(set->mapped_ptr);
-      set->mapped_ptr = malloc(new_size);
+      free(set->header.mapped_ptr);
+      set->header.mapped_ptr = malloc(new_size);

-      if (!set->mapped_ptr) {
+      if (!set->header.mapped_ptr) {
         descriptors_state->push_set.capacity = 0;
         cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
         return false;
@@ -4226,22 +4227,23 @@ void radv_meta_push_descriptor_set(
    const VkWriteDescriptorSet* pDescriptorWrites)
 {
    RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
-   struct radv_descriptor_set *push_set = &cmd_buffer->meta_push_descriptors;
+   struct radv_descriptor_set *push_set =
+      (struct radv_descriptor_set *)&cmd_buffer->meta_push_descriptors;
    unsigned bo_offset;

    assert(set == 0);
    assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);

-   push_set->size = layout->set[set].layout->size;
-   push_set->layout = layout->set[set].layout;
+   push_set->header.size = layout->set[set].layout->size;
+   push_set->header.layout = layout->set[set].layout;

-   if (!radv_cmd_buffer_upload_alloc(cmd_buffer, push_set->size, 32,
+   if (!radv_cmd_buffer_upload_alloc(cmd_buffer, push_set->header.size, 32,
                                      &bo_offset,
-                                     (void**) &push_set->mapped_ptr))
+                                     (void**) &push_set->header.mapped_ptr))
       return;

-   push_set->va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
-   push_set->va += bo_offset;
+   push_set->header.va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
+   push_set->header.va += bo_offset;

    radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
                                radv_descriptor_set_to_handle(push_set),
@@ -4262,7 +4264,8 @@ void radv_CmdPushDescriptorSetKHR(
    RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
    struct radv_descriptor_state *descriptors_state =
       radv_get_descriptors_state(cmd_buffer, pipelineBindPoint);
-   struct radv_descriptor_set *push_set = &descriptors_state->push_set.set;
+   struct radv_descriptor_set *push_set =
+      (struct radv_descriptor_set *)&descriptors_state->push_set.set;

    assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);

@@ -4299,7 +4302,8 @@ void radv_CmdPushDescriptorSetWithTemplateKHR(
    RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);
    struct radv_descriptor_state *descriptors_state =
       radv_get_descriptors_state(cmd_buffer, templ->bind_point);
-   struct radv_descriptor_set *push_set = &descriptors_state->push_set.set;
+   struct radv_descriptor_set *push_set =
+      (struct radv_descriptor_set *)&descriptors_state->push_set.set;

    assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);

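The push-descriptor paths above now store a radv_descriptor_set_header (in descriptors_state->push_set.set and in cmd_buffer->meta_push_descriptors) and cast its address to struct radv_descriptor_set * where the rest of the code expects a full set. That cast only holds because header is the first member of the full struct, so both pointers refer to the same address. A standalone sketch of that relationship, using simplified hypothetical types (hdr/full/push_state are not RADV names):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for radv_descriptor_set_header / radv_descriptor_set /
 * radv_push_descriptor_set; only the layout relationship is the point. */
struct hdr {
   unsigned size;
   unsigned buffer_count;
};

struct full {
   struct hdr header;      /* must remain the first member for the cast to hold */
   void *descriptors[];    /* flexible tail, present only on heap-allocated sets */
};

struct push_state {
   struct hdr set;         /* embedded header, like radv_push_descriptor_set::set */
   unsigned capacity;
};

int main(void)
{
   struct push_state ps = { .set = { .size = 64, .buffer_count = 2 }, .capacity = 0 };

   /* Mirrors "(struct radv_descriptor_set *)&descriptors_state->push_set.set":
    * a pointer to a struct and a pointer to its first member share an address,
    * so the header fields are reachable through either type. */
   assert(offsetof(struct full, header) == 0);
   struct full *as_full = (struct full *)&ps.set;
   printf("size seen through the full-set type: %u\n", as_full->header.size);
   return 0;
}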
@@ -199,11 +199,11 @@ radv_dump_descriptor_set(struct radv_device *device,

    if (!set)
       return;
-   layout = set->layout;
+   layout = set->header.layout;

-   for (i = 0; i < set->layout->binding_count; i++) {
+   for (i = 0; i < set->header.layout->binding_count; i++) {
       uint32_t *desc =
-         set->mapped_ptr + layout->binding[i].offset / 4;
+         set->header.mapped_ptr + layout->binding[i].offset / 4;

       switch (layout->binding[i].type) {
       case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
@@ -559,7 +559,7 @@ radv_descriptor_set_create(struct radv_device *device,
       buffer_count = layout->binding[layout->binding_count - 1].buffer_offset +
          *variable_count * stride;
    }
-   unsigned range_offset = sizeof(struct radv_descriptor_set) +
+   unsigned range_offset = sizeof(struct radv_descriptor_set_header) +
       sizeof(struct radeon_winsys_bo *) * buffer_count;
    unsigned mem_size = range_offset +
       sizeof(struct radv_descriptor_range) * layout->dynamic_offset_count;
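The create path carves a single allocation into header, BO-pointer array, and dynamic ranges; after this change the offsets are derived from sizeof(struct radv_descriptor_set_header), since the descriptors[] tail now begins right after the header fields. A simplified sketch of that layout arithmetic, with hypothetical types rather than the RADV ones:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins; the arithmetic mirrors the range_offset/mem_size
 * computation in radv_descriptor_set_create() as changed above. */
struct range { unsigned long long va; unsigned size; };

struct set_header {
   unsigned size;
   unsigned buffer_count;
   struct range *dynamic_descriptors;
};

struct set {
   struct set_header header;
   void *descriptors[];   /* buffer_count pointers follow the header fields */
};

static struct set *set_create(unsigned buffer_count, unsigned dynamic_count)
{
   /* One allocation: header, then the BO-pointer array, then the dynamic ranges. */
   size_t range_offset = sizeof(struct set_header) + sizeof(void *) * buffer_count;
   size_t mem_size = range_offset + sizeof(struct range) * dynamic_count;

   struct set *s = calloc(1, mem_size);
   if (!s)
      return NULL;

   s->header.buffer_count = buffer_count;
   if (dynamic_count)
      s->header.dynamic_descriptors = (struct range *)((char *)s + range_offset);
   return s;
}

int main(void)
{
   struct set *s = set_create(4, 2);
   if (!s)
      return 1;
   printf("BO pointers start at +%zu, dynamic ranges at +%zu\n",
          sizeof(struct set_header),
          sizeof(struct set_header) + 4 * sizeof(void *));
   free(s);
   return 0;
}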
@@ -580,15 +580,15 @@ radv_descriptor_set_create(struct radv_device *device,

    memset(set, 0, mem_size);

-   vk_object_base_init(&device->vk, &set->base,
+   vk_object_base_init(&device->vk, &set->header.base,
                        VK_OBJECT_TYPE_DESCRIPTOR_SET);

    if (layout->dynamic_offset_count) {
-      set->dynamic_descriptors = (struct radv_descriptor_range*)((uint8_t*)set + range_offset);
+      set->header.dynamic_descriptors = (struct radv_descriptor_range*)((uint8_t*)set + range_offset);
    }

-   set->layout = layout;
-   set->buffer_count = buffer_count;
+   set->header.layout = layout;
+   set->header.buffer_count = buffer_count;
    uint32_t layout_size = layout->size;
    if (variable_count) {
       assert(layout->has_variable_descriptors);
@@ -601,7 +601,7 @@ radv_descriptor_set_create(struct radv_device *device,
    }
    layout_size = align_u32(layout_size, 32);
    if (layout_size) {
-      set->size = layout_size;
+      set->header.size = layout_size;

       if (!pool->host_memory_base && pool->entry_count == pool->max_entry_count) {
          vk_free2(&device->vk.alloc, NULL, set);
@@ -612,9 +612,9 @@ radv_descriptor_set_create(struct radv_device *device,
        * time looking for gaps if the app only allocates &
        * resets via the pool. */
       if (pool->current_offset + layout_size <= pool->size) {
-         set->bo = pool->bo;
-         set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + pool->current_offset);
-         set->va = set->bo ? (radv_buffer_get_va(set->bo) + pool->current_offset) : 0;
+         set->header.bo = pool->bo;
+         set->header.mapped_ptr = (uint32_t*)(pool->mapped_ptr + pool->current_offset);
+         set->header.va = pool->bo ? (radv_buffer_get_va(set->header.bo) + pool->current_offset) : 0;
          if (!pool->host_memory_base) {
             pool->entries[pool->entry_count].offset = pool->current_offset;
             pool->entries[pool->entry_count].size = layout_size;
@@ -636,9 +636,9 @@ radv_descriptor_set_create(struct radv_device *device,
             vk_free2(&device->vk.alloc, NULL, set);
             return vk_error(device->instance, VK_ERROR_OUT_OF_POOL_MEMORY);
          }
-         set->bo = pool->bo;
-         set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + offset);
-         set->va = set->bo ? (radv_buffer_get_va(set->bo) + offset) : 0;
+         set->header.bo = pool->bo;
+         set->header.mapped_ptr = (uint32_t*)(pool->mapped_ptr + offset);
+         set->header.va = pool->bo ? (radv_buffer_get_va(set->header.bo) + offset) : 0;
          memmove(&pool->entries[index + 1], &pool->entries[index],
                  sizeof(pool->entries[0]) * (pool->entry_count - index));
          pool->entries[index].offset = offset;
@@ -661,7 +661,7 @@ radv_descriptor_set_create(struct radv_device *device,

          const uint32_t *samplers = (const uint32_t*)((const char*)layout + layout->binding[i].immutable_samplers_offset);
          for (unsigned j = 0; j < layout->binding[i].array_size; ++j) {
-            memcpy(set->mapped_ptr + offset, samplers + 4 * j, 16);
+            memcpy(set->header.mapped_ptr + offset, samplers + 4 * j, 16);
             offset += layout->binding[i].size / 4;
          }

@@ -679,8 +679,8 @@ radv_descriptor_set_destroy(struct radv_device *device,
 {
    assert(!pool->host_memory_base);

-   if (free_bo && set->size && !pool->host_memory_base) {
-      uint32_t offset = (uint8_t*)set->mapped_ptr - pool->mapped_ptr;
+   if (free_bo && set->header.size && !pool->host_memory_base) {
+      uint32_t offset = (uint8_t*)set->header.mapped_ptr - pool->mapped_ptr;
       for (int i = 0; i < pool->entry_count; ++i) {
          if (pool->entries[i].offset == offset) {
             memmove(&pool->entries[i], &pool->entries[i+1],
@@ -690,7 +690,7 @@ radv_descriptor_set_destroy(struct radv_device *device,
          }
       }
    }
-   vk_object_base_finish(&set->base);
+   vk_object_base_finish(&set->header.base);
    vk_free2(&device->vk.alloc, NULL, set);
 }

@@ -1143,8 +1143,8 @@ void radv_update_descriptor_sets(
       RADV_FROM_HANDLE(radv_descriptor_set, set,
                        dstSetOverride ? dstSetOverride : writeset->dstSet);
       const struct radv_descriptor_set_binding_layout *binding_layout =
-         set->layout->binding + writeset->dstBinding;
-      uint32_t *ptr = set->mapped_ptr;
+         set->header.layout->binding + writeset->dstBinding;
+      uint32_t *ptr = set->header.mapped_ptr;
       struct radeon_winsys_bo **buffer_list = set->descriptors;
       /* Immutable samplers are not copied into push descriptors when they are
        * allocated, so if we are writing push descriptors we have to copy the
@@ -1152,7 +1152,7 @@ void radv_update_descriptor_sets(
        */
       const bool copy_immutable_samplers = cmd_buffer &&
          binding_layout->immutable_samplers_offset && !binding_layout->immutable_samplers_equal;
-      const uint32_t *samplers = radv_immutable_samplers(set->layout, binding_layout);
+      const uint32_t *samplers = radv_immutable_samplers(set->header.layout, binding_layout);

       ptr += binding_layout->offset / 4;

@@ -1170,8 +1170,8 @@ void radv_update_descriptor_sets(
          case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
             unsigned idx = writeset->dstArrayElement + j;
             idx += binding_layout->dynamic_offset_offset;
-            assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
-            write_dynamic_buffer_descriptor(device, set->dynamic_descriptors + idx,
+            assert(!(set->header.layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
+            write_dynamic_buffer_descriptor(device, set->header.dynamic_descriptors + idx,
                                             buffer_list, writeset->pBufferInfo + j);
             break;
          }
@@ -1230,11 +1230,11 @@ void radv_update_descriptor_sets(
       RADV_FROM_HANDLE(radv_descriptor_set, dst_set,
                        copyset->dstSet);
       const struct radv_descriptor_set_binding_layout *src_binding_layout =
-         src_set->layout->binding + copyset->srcBinding;
+         src_set->header.layout->binding + copyset->srcBinding;
       const struct radv_descriptor_set_binding_layout *dst_binding_layout =
-         dst_set->layout->binding + copyset->dstBinding;
-      uint32_t *src_ptr = src_set->mapped_ptr;
-      uint32_t *dst_ptr = dst_set->mapped_ptr;
+         dst_set->header.layout->binding + copyset->dstBinding;
+      uint32_t *src_ptr = src_set->header.mapped_ptr;
+      uint32_t *dst_ptr = dst_set->header.mapped_ptr;
       struct radeon_winsys_bo **src_buffer_list = src_set->descriptors;
       struct radeon_winsys_bo **dst_buffer_list = dst_set->descriptors;

@@ -1272,8 +1272,8 @@ void radv_update_descriptor_sets(
             src_idx += src_binding_layout->dynamic_offset_offset;
             dst_idx += dst_binding_layout->dynamic_offset_offset;

-            src_range = src_set->dynamic_descriptors + src_idx;
-            dst_range = dst_set->dynamic_descriptors + dst_idx;
+            src_range = src_set->header.dynamic_descriptors + src_idx;
+            dst_range = dst_set->header.dynamic_descriptors + dst_idx;
             *dst_range = *src_range;
             break;
          }
@@ -1422,7 +1422,7 @@ void radv_update_descriptor_set_with_template(struct radv_device *device,

    for (i = 0; i < templ->entry_count; ++i) {
       struct radeon_winsys_bo **buffer_list = set->descriptors + templ->entry[i].buffer_offset;
-      uint32_t *pDst = set->mapped_ptr + templ->entry[i].dst_offset;
+      uint32_t *pDst = set->header.mapped_ptr + templ->entry[i].dst_offset;
       const uint8_t *pSrc = ((const uint8_t *) pData) + templ->entry[i].src_offset;
       uint32_t j;

@@ -1436,8 +1436,8 @@ void radv_update_descriptor_set_with_template(struct radv_device *device,
          case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
          case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
             const unsigned idx = templ->entry[i].dst_offset + j;
-            assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
-            write_dynamic_buffer_descriptor(device, set->dynamic_descriptors + idx,
+            assert(!(set->header.layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
+            write_dynamic_buffer_descriptor(device, set->header.dynamic_descriptors + idx,
                                             buffer_list, (struct VkDescriptorBufferInfo *) pSrc);
             break;
          }
@@ -905,7 +905,7 @@ struct radv_descriptor_range {
    uint32_t size;
 };

-struct radv_descriptor_set {
+struct radv_descriptor_set_header {
    struct vk_object_base base;
    const struct radv_descriptor_set_layout *layout;
    uint32_t size;
@@ -915,13 +915,17 @@ struct radv_descriptor_set {
    uint64_t va;
    uint32_t *mapped_ptr;
    struct radv_descriptor_range *dynamic_descriptors;
+};

-   struct radeon_winsys_bo *descriptors[0];
+struct radv_descriptor_set {
+   struct radv_descriptor_set_header header;
+
+   struct radeon_winsys_bo *descriptors[];
 };

 struct radv_push_descriptor_set
 {
-   struct radv_descriptor_set set;
+   struct radv_descriptor_set_header set;
    uint32_t capacity;
 };

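With the split above, radv_descriptor_set_header carries every fixed-size field and is embeddable (radv_push_descriptor_set here, radv_cmd_buffer::meta_push_descriptors below), while radv_descriptor_set keeps only the flexible descriptors[] tail and is only ever heap-allocated by radv_descriptor_set_create(). The cast sites earlier in the diff depend on header staying the first member; a compile-time guard one could add for that invariant is sketched here (not part of the commit, names shortened):

#include <stddef.h>

/* Sketch only, not RADV code: shortened copies of the two structs plus a
 * static assertion documenting the invariant the cast sites rely on. */
struct set_header_sketch {
   unsigned size;
   unsigned buffer_count;
};

struct set_sketch {
   struct set_header_sketch header;
   void *descriptors[];
};

/* If header ever stopped being the first member, casting a header pointer to a
 * full-set pointer (as the push-descriptor paths do) would no longer be valid. */
_Static_assert(offsetof(struct set_sketch, header) == 0,
               "header must stay the first member of the descriptor set");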
@@ -1457,7 +1461,7 @@ struct radv_cmd_buffer {

    uint8_t push_constants[MAX_PUSH_CONSTANTS_SIZE];
    VkShaderStageFlags push_constant_stages;
-   struct radv_descriptor_set meta_push_descriptors;
+   struct radv_descriptor_set_header meta_push_descriptors;

    struct radv_descriptor_state descriptors[MAX_BIND_POINTS];
