anv/query: Rework store_query_result

The new version is a nice GPU parallel to cpu_write_query_result and it
nicely handles things like dealing with 32 vs. 64-bit offsets in the
destination buffer.

Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
This commit is contained in:
Jason Ekstrand
2017-03-15 15:29:13 -07:00
parent c773ae88df
commit 149d10d38a

View File

@@ -401,18 +401,31 @@ emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
 }
 
 static void
-store_query_result(struct anv_batch *batch, uint32_t reg,
-                   struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
+gpu_write_query_result(struct anv_batch *batch,
+                       struct anv_buffer *dst_buffer, uint32_t dst_offset,
+                       VkQueryResultFlags flags,
+                       uint32_t value_index, uint32_t reg)
 {
+   if (flags & VK_QUERY_RESULT_64_BIT)
+      dst_offset += value_index * 8;
+   else
+      dst_offset += value_index * 4;
+
    anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
       srm.RegisterAddress  = reg;
-      srm.MemoryAddress    = (struct anv_address) { bo, offset };
+      srm.MemoryAddress    = (struct anv_address) {
+         .bo = dst_buffer->bo,
+         .offset = dst_buffer->offset + dst_offset,
+      };
    }
 
    if (flags & VK_QUERY_RESULT_64_BIT) {
       anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
          srm.RegisterAddress  = reg + 4;
-         srm.MemoryAddress    = (struct anv_address) { bo, offset + 4 };
+         srm.MemoryAddress    = (struct anv_address) {
+            .bo = dst_buffer->bo,
+            .offset = dst_buffer->offset + dst_offset + 4,
+         };
       }
    }
 }
@@ -455,7 +468,6 @@ void genX(CmdCopyQueryPoolResults)(
       }
    }
 
-   dst_offset = buffer->offset + destOffset;
    for (uint32_t i = 0; i < queryCount; i++) {
       slot_offset = (firstQuery + i) * pool->stride;
@@ -463,32 +475,29 @@ void genX(CmdCopyQueryPoolResults)(
       switch (pool->type) {
       case VK_QUERY_TYPE_OCCLUSION:
          compute_query_result(&cmd_buffer->batch, OPERAND_R2,
                               &pool->bo, slot_offset + 8);
+         gpu_write_query_result(&cmd_buffer->batch, buffer, destOffset,
+                                flags, 0, CS_GPR(2));
          break;
 
       case VK_QUERY_TYPE_TIMESTAMP:
          emit_load_alu_reg_u64(&cmd_buffer->batch,
                                CS_GPR(2), &pool->bo, slot_offset + 8);
+         gpu_write_query_result(&cmd_buffer->batch, buffer, destOffset,
+                                flags, 0, CS_GPR(2));
          break;
 
       default:
          unreachable("unhandled query type");
       }
 
-      store_query_result(&cmd_buffer->batch,
-                         CS_GPR(2), buffer->bo, dst_offset, flags);
-
       if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
          emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
                                &pool->bo, slot_offset);
-         if (flags & VK_QUERY_RESULT_64_BIT)
-            store_query_result(&cmd_buffer->batch,
-                               CS_GPR(0), buffer->bo, dst_offset + 8, flags);
-         else
-            store_query_result(&cmd_buffer->batch,
-                               CS_GPR(0), buffer->bo, dst_offset + 4, flags);
+         gpu_write_query_result(&cmd_buffer->batch, buffer, destOffset,
+                                flags, 1, CS_GPR(0));
       }
 
-      dst_offset += destStride;
+      destOffset += destStride;
    }
 }