vk/cmd_buffer: Rename emit_batch_buffer_end to end_batch_buffer
The new name is more generic and doesn't imply that the function emits MI_BATCH_BUFFER_END. While we're at it, move the MI_NOOP padding (rounding the batch up to an even number of dwords) from anv_batch_bo_finish to anv_cmd_buffer_end_batch_buffer.
This commit is contained in:
@@ -249,10 +249,6 @@ anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
|
|||||||
static void
|
static void
|
||||||
anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
|
anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
|
||||||
{
|
{
|
||||||
/* Round batch up to an even number of dwords. */
|
|
||||||
if ((batch->next - batch->start) & 4)
|
|
||||||
anv_batch_emit(batch, GEN8_MI_NOOP);
|
|
||||||
|
|
||||||
assert(batch->start == bbo->bo.map);
|
assert(batch->start == bbo->bo.map);
|
||||||
bbo->length = batch->next - batch->start;
|
bbo->length = batch->next - batch->start;
|
||||||
VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
|
VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
|
||||||
@@ -511,7 +507,7 @@ anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
|
|||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
anv_cmd_buffer_emit_batch_buffer_end(struct anv_cmd_buffer *cmd_buffer)
|
anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
|
||||||
{
|
{
|
||||||
struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
|
struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
|
||||||
struct anv_batch_bo *surface_bbo =
|
struct anv_batch_bo *surface_bbo =
|
||||||
@@ -519,6 +515,10 @@ anv_cmd_buffer_emit_batch_buffer_end(struct anv_cmd_buffer *cmd_buffer)
|
|||||||
|
|
||||||
anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_END);
|
anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_END);
|
||||||
|
|
||||||
|
/* Round batch up to an even number of dwords. */
|
||||||
|
if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
|
||||||
|
anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP);
|
||||||
|
|
||||||
anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
|
anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
|
||||||
|
|
||||||
surface_bbo->length = cmd_buffer->surface_next;
|
surface_bbo->length = cmd_buffer->surface_next;
|
||||||
|
@@ -182,7 +182,7 @@ VkResult anv_EndCommandBuffer(
|
|||||||
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
|
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
|
||||||
struct anv_device *device = cmd_buffer->device;
|
struct anv_device *device = cmd_buffer->device;
|
||||||
|
|
||||||
anv_cmd_buffer_emit_batch_buffer_end(cmd_buffer);
|
anv_cmd_buffer_end_batch_buffer(cmd_buffer);
|
||||||
|
|
||||||
/* The algorithm used to compute the validate list is not threadsafe as
|
/* The algorithm used to compute the validate list is not threadsafe as
|
||||||
* it uses the bo->index field. We have to lock the device around it.
|
* it uses the bo->index field. We have to lock the device around it.
|
||||||
|
@@ -740,7 +740,7 @@ struct anv_cmd_buffer {
|
|||||||
VkResult anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
|
VkResult anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
|
||||||
void anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
|
void anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
|
||||||
void anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
|
void anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
|
||||||
void anv_cmd_buffer_emit_batch_buffer_end(struct anv_cmd_buffer *cmd_buffer);
|
void anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer);
|
||||||
void anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer);
|
void anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer);
|
||||||
|
|
||||||
struct anv_bo *
|
struct anv_bo *
|
||||||
|
Reference in New Issue
Block a user