anv/pipeline: Roll compute_urb_partition into emit_urb_setup

Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Topi Pohjolainen <topi.pohjolainen@intel.com>
Author: Jason Ekstrand
Date: 2016-08-20 06:11:41 -07:00
parent 823ab83432
commit 8cb144bd93
3 changed files with 138 additions and 156 deletions


@@ -805,147 +805,6 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
return VK_SUCCESS;
}
void
anv_compute_urb_partition(struct anv_pipeline *pipeline)
{
const struct gen_device_info *devinfo = &pipeline->device->info;
bool vs_present = pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT;
unsigned vs_size = vs_present ?
get_vs_prog_data(pipeline)->base.urb_entry_size : 1;
unsigned vs_entry_size_bytes = vs_size * 64;
bool gs_present = pipeline->active_stages & VK_SHADER_STAGE_GEOMETRY_BIT;
unsigned gs_size = gs_present ?
get_gs_prog_data(pipeline)->base.urb_entry_size : 1;
unsigned gs_entry_size_bytes = gs_size * 64;
/* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS):
*
* VS Number of URB Entries must be divisible by 8 if the VS URB Entry
* Allocation Size is less than 9 512-bit URB entries.
*
* Similar text exists for GS.
*/
unsigned vs_granularity = (vs_size < 9) ? 8 : 1;
unsigned gs_granularity = (gs_size < 9) ? 8 : 1;
/* URB allocations must be done in 8k chunks. */
unsigned chunk_size_bytes = 8192;
/* Determine the size of the URB in chunks. */
unsigned urb_chunks = pipeline->urb.total_size * 1024 / chunk_size_bytes;
/* Reserve space for push constants */
unsigned push_constant_kb;
if (pipeline->device->info.gen >= 8)
push_constant_kb = 32;
else if (pipeline->device->info.is_haswell)
push_constant_kb = pipeline->device->info.gt == 3 ? 32 : 16;
else
push_constant_kb = 16;
unsigned push_constant_bytes = push_constant_kb * 1024;
unsigned push_constant_chunks =
push_constant_bytes / chunk_size_bytes;
/* Initially, assign each stage the minimum amount of URB space it needs,
* and make a note of how much additional space it "wants" (the amount of
* additional space it could actually make use of).
*/
/* VS has a lower limit on the number of URB entries */
unsigned vs_chunks =
ALIGN(devinfo->urb.min_vs_entries * vs_entry_size_bytes,
chunk_size_bytes) / chunk_size_bytes;
unsigned vs_wants =
ALIGN(devinfo->urb.max_vs_entries * vs_entry_size_bytes,
chunk_size_bytes) / chunk_size_bytes - vs_chunks;
unsigned gs_chunks = 0;
unsigned gs_wants = 0;
if (gs_present) {
/* There are two constraints on the minimum amount of URB space we can
* allocate:
*
* (1) We need room for at least 2 URB entries, since we always operate
* the GS in DUAL_OBJECT mode.
*
* (2) We can't allocate less than nr_gs_entries_granularity.
*/
gs_chunks = ALIGN(MAX2(gs_granularity, 2) * gs_entry_size_bytes,
chunk_size_bytes) / chunk_size_bytes;
gs_wants =
ALIGN(devinfo->urb.max_gs_entries * gs_entry_size_bytes,
chunk_size_bytes) / chunk_size_bytes - gs_chunks;
}
/* There should always be enough URB space to satisfy the minimum
* requirements of each stage.
*/
unsigned total_needs = push_constant_chunks + vs_chunks + gs_chunks;
assert(total_needs <= urb_chunks);
/* Mete out remaining space (if any) in proportion to "wants". */
unsigned total_wants = vs_wants + gs_wants;
unsigned remaining_space = urb_chunks - total_needs;
if (remaining_space > total_wants)
remaining_space = total_wants;
if (remaining_space > 0) {
unsigned vs_additional = (unsigned)
round(vs_wants * (((double) remaining_space) / total_wants));
vs_chunks += vs_additional;
remaining_space -= vs_additional;
gs_chunks += remaining_space;
}
/* Sanity check that we haven't over-allocated. */
assert(push_constant_chunks + vs_chunks + gs_chunks <= urb_chunks);
/* Finally, compute the number of entries that can fit in the space
* allocated to each stage.
*/
unsigned nr_vs_entries = vs_chunks * chunk_size_bytes / vs_entry_size_bytes;
unsigned nr_gs_entries = gs_chunks * chunk_size_bytes / gs_entry_size_bytes;
/* Since we rounded up when computing *_wants, this may be slightly more
* than the maximum allowed amount, so correct for that.
*/
nr_vs_entries = MIN2(nr_vs_entries, devinfo->urb.max_vs_entries);
nr_gs_entries = MIN2(nr_gs_entries, devinfo->urb.max_gs_entries);
/* Ensure that we program a multiple of the granularity. */
nr_vs_entries = ROUND_DOWN_TO(nr_vs_entries, vs_granularity);
nr_gs_entries = ROUND_DOWN_TO(nr_gs_entries, gs_granularity);
/* Finally, sanity check to make sure we have at least the minimum number
* of entries needed for each stage.
*/
assert(nr_vs_entries >= devinfo->urb.min_vs_entries);
if (gs_present)
assert(nr_gs_entries >= 2);
/* Lay out the URB in the following order:
* - push constants
* - VS
* - GS
*/
pipeline->urb.start[MESA_SHADER_VERTEX] = push_constant_chunks;
pipeline->urb.size[MESA_SHADER_VERTEX] = vs_size;
pipeline->urb.entries[MESA_SHADER_VERTEX] = nr_vs_entries;
pipeline->urb.start[MESA_SHADER_GEOMETRY] = push_constant_chunks + vs_chunks;
pipeline->urb.size[MESA_SHADER_GEOMETRY] = gs_size;
pipeline->urb.entries[MESA_SHADER_GEOMETRY] = nr_gs_entries;
pipeline->urb.start[MESA_SHADER_TESS_CTRL] = push_constant_chunks;
pipeline->urb.size[MESA_SHADER_TESS_CTRL] = 1;
pipeline->urb.entries[MESA_SHADER_TESS_CTRL] = 0;
pipeline->urb.start[MESA_SHADER_TESS_EVAL] = push_constant_chunks;
pipeline->urb.size[MESA_SHADER_TESS_EVAL] = 1;
pipeline->urb.entries[MESA_SHADER_TESS_EVAL] = 0;
}
/**
* Copy pipeline state not marked as dynamic.
* Dynamic state is pipeline state which hasn't been provided at pipeline
@@ -1244,7 +1103,6 @@ anv_pipeline_init(struct anv_pipeline *pipeline,
}
anv_pipeline_setup_l3_config(pipeline, false);
anv_compute_urb_partition(pipeline);
const VkPipelineVertexInputStateCreateInfo *vi_info =
pCreateInfo->pVertexInputState;


@@ -1521,9 +1521,6 @@ struct anv_pipeline {
struct anv_shader_bin * shaders[MESA_SHADER_STAGES];
struct {
uint32_t start[MESA_SHADER_GEOMETRY + 1];
uint32_t size[MESA_SHADER_GEOMETRY + 1];
uint32_t entries[MESA_SHADER_GEOMETRY + 1];
const struct gen_l3_config * l3_config;
uint32_t total_size;
} urb;
@@ -1637,9 +1634,6 @@ anv_get_isl_format(const struct gen_device_info *devinfo, VkFormat vk_format,
return anv_get_format(devinfo, vk_format, aspect, tiling).isl_format;
}
void
anv_compute_urb_partition(struct anv_pipeline *pipeline);
void
anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm);


@@ -190,9 +190,123 @@ emit_vertex_input(struct anv_pipeline *pipeline,
static inline void
emit_urb_setup(struct anv_pipeline *pipeline)
{
#if GEN_GEN == 7 && !GEN_IS_HASWELL
struct anv_device *device = pipeline->device;
bool vs_present = pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT;
unsigned vs_size = vs_present ?
get_vs_prog_data(pipeline)->base.urb_entry_size : 1;
unsigned vs_entry_size_bytes = vs_size * 64;
bool gs_present = pipeline->active_stages & VK_SHADER_STAGE_GEOMETRY_BIT;
unsigned gs_size = gs_present ?
get_gs_prog_data(pipeline)->base.urb_entry_size : 1;
unsigned gs_entry_size_bytes = gs_size * 64;
/* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS):
*
* VS Number of URB Entries must be divisible by 8 if the VS URB Entry
* Allocation Size is less than 9 512-bit URB entries.
*
* Similar text exists for GS.
*/
unsigned vs_granularity = (vs_size < 9) ? 8 : 1;
unsigned gs_granularity = (gs_size < 9) ? 8 : 1;
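/* Illustration with hypothetical sizes: vs_size = 2 is less than 9, so
 * vs_granularity = 8 and the entry count must be a multiple of 8, while
 * vs_size = 12 would give vs_granularity = 1 (no rounding required).
 */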
/* URB allocations must be done in 8k chunks. */
unsigned chunk_size_bytes = 8192;
/* Determine the size of the URB in chunks. */
unsigned urb_chunks = pipeline->urb.total_size * 1024 / chunk_size_bytes;
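/* Worked example (assumed URB size): a 128KB URB yields
 * 128 * 1024 / 8192 = 16 chunks; a 256KB URB yields 32.
 */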
/* Reserve space for push constants */
unsigned push_constant_kb;
if (pipeline->device->info.gen >= 8)
push_constant_kb = 32;
else if (pipeline->device->info.is_haswell)
push_constant_kb = pipeline->device->info.gt == 3 ? 32 : 16;
else
push_constant_kb = 16;
unsigned push_constant_bytes = push_constant_kb * 1024;
unsigned push_constant_chunks =
push_constant_bytes / chunk_size_bytes;
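/* Illustration: on Ivy Bridge (gen7, not Haswell), push_constant_kb = 16,
 * so 16 * 1024 / 8192 = 2 chunks of the URB are reserved for push
 * constants.
 */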
/* Initially, assign each stage the minimum amount of URB space it needs,
* and make a note of how much additional space it "wants" (the amount of
* additional space it could actually make use of).
*/
/* VS has a lower limit on the number of URB entries */
unsigned vs_chunks =
ALIGN(device->info.urb.min_vs_entries * vs_entry_size_bytes,
chunk_size_bytes) / chunk_size_bytes;
unsigned vs_wants =
ALIGN(device->info.urb.max_vs_entries * vs_entry_size_bytes,
chunk_size_bytes) / chunk_size_bytes - vs_chunks;
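/* Worked example with hypothetical limits: vs_entry_size_bytes = 128 and
 * min_vs_entries = 32 give ALIGN(4096, 8192) / 8192 = 1 chunk, and
 * max_vs_entries = 512 would then make vs_wants = 65536 / 8192 - 1 = 7.
 */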
unsigned gs_chunks = 0;
unsigned gs_wants = 0;
if (gs_present) {
/* There are two constraints on the minimum amount of URB space we can
* allocate:
*
* (1) We need room for at least 2 URB entries, since we always operate
* the GS in DUAL_OBJECT mode.
*
* (2) We can't allocate less than nr_gs_entries_granularity.
*/
gs_chunks = ALIGN(MAX2(gs_granularity, 2) * gs_entry_size_bytes,
chunk_size_bytes) / chunk_size_bytes;
gs_wants =
ALIGN(device->info.urb.max_gs_entries * gs_entry_size_bytes,
chunk_size_bytes) / chunk_size_bytes - gs_chunks;
}
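/* Illustration: with gs_granularity = 8 the minimum allocation covers
 * MAX2(8, 2) = 8 entries, even though DUAL_OBJECT mode itself only
 * requires 2.
 */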
/* There should always be enough URB space to satisfy the minimum
* requirements of each stage.
*/
unsigned total_needs = push_constant_chunks + vs_chunks + gs_chunks;
assert(total_needs <= urb_chunks);
/* Mete out remaining space (if any) in proportion to "wants". */
unsigned total_wants = vs_wants + gs_wants;
unsigned remaining_space = urb_chunks - total_needs;
if (remaining_space > total_wants)
remaining_space = total_wants;
if (remaining_space > 0) {
unsigned vs_additional = (unsigned)
round(vs_wants * (((double) remaining_space) / total_wants));
vs_chunks += vs_additional;
remaining_space -= vs_additional;
gs_chunks += remaining_space;
}
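/* Worked example: with remaining_space = 8, vs_wants = 6 and gs_wants = 2,
 * total_wants = 8, so the VS gains round(6 * 8.0 / 8) = 6 extra chunks and
 * the GS receives the remaining 2.
 */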
/* Sanity check that we haven't over-allocated. */
assert(push_constant_chunks + vs_chunks + gs_chunks <= urb_chunks);
/* Finally, compute the number of entries that can fit in the space
* allocated to each stage.
*/
unsigned nr_vs_entries = vs_chunks * chunk_size_bytes / vs_entry_size_bytes;
unsigned nr_gs_entries = gs_chunks * chunk_size_bytes / gs_entry_size_bytes;
/* Since we rounded up when computing *_wants, this may be slightly more
* than the maximum allowed amount, so correct for that.
*/
nr_vs_entries = MIN2(nr_vs_entries, device->info.urb.max_vs_entries);
nr_gs_entries = MIN2(nr_gs_entries, device->info.urb.max_gs_entries);
/* Ensure that we program a multiple of the granularity. */
nr_vs_entries = ROUND_DOWN_TO(nr_vs_entries, vs_granularity);
nr_gs_entries = ROUND_DOWN_TO(nr_gs_entries, gs_granularity);
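/* Illustration: nr_vs_entries = 45 with vs_granularity = 8 rounds down to
 * 40, satisfying the divisible-by-8 rule quoted above.
 */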
/* Finally, sanity check to make sure we have at least the minimum number
* of entries needed for each stage.
*/
assert(nr_vs_entries >= device->info.urb.min_vs_entries);
if (gs_present)
assert(nr_gs_entries >= 2);
#if GEN_GEN == 7 && !GEN_IS_HASWELL
/* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
*
* "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth stall
@@ -208,13 +322,29 @@ emit_urb_setup(struct anv_pipeline *pipeline)
}
#endif
for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
anv_batch_emit(&pipeline->batch, GENX(3DSTATE_URB_VS), urb) {
urb._3DCommandSubOpcode = 48 + i;
urb.VSURBStartingAddress = pipeline->urb.start[i];
urb.VSURBEntryAllocationSize = pipeline->urb.size[i] - 1;
urb.VSNumberofURBEntries = pipeline->urb.entries[i];
}
/* Lay out the URB in the following order:
* - push constants
* - VS
* - GS
*/
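/* Illustration (hypothetical chunk counts): with push_constant_chunks = 2
 * and vs_chunks = 7, VSURBStartingAddress below is 2 and
 * GSURBStartingAddress is 9, both in 8KB chunk units.
 */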
anv_batch_emit(&pipeline->batch, GENX(3DSTATE_URB_VS), urb) {
urb.VSURBStartingAddress = push_constant_chunks;
urb.VSURBEntryAllocationSize = vs_size - 1;
urb.VSNumberofURBEntries = nr_vs_entries;
}
anv_batch_emit(&pipeline->batch, GENX(3DSTATE_URB_HS), urb) {
urb.HSURBStartingAddress = push_constant_chunks;
}
anv_batch_emit(&pipeline->batch, GENX(3DSTATE_URB_DS), urb) {
urb.DSURBStartingAddress = push_constant_chunks;
}
anv_batch_emit(&pipeline->batch, GENX(3DSTATE_URB_GS), urb) {
urb.GSURBStartingAddress = push_constant_chunks + vs_chunks;
urb.GSURBEntryAllocationSize = gs_size - 1;
urb.GSNumberofURBEntries = nr_gs_entries;
}
}