/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_compiler.h"
#include "brw_shader.h"
#include "brw_eu.h"
#include "dev/gen_debug.h"
#include "compiler/nir/nir.h"
#include "main/errors.h"
#include "util/debug.h"
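
/* NIR compiler options shared by the scalar and vector (vec4) backends. */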
#define COMMON_OPTIONS \
   .lower_sub = true, \
   .lower_fdiv = true, \
   .lower_scmp = true, \
   .lower_flrp16 = true, \
   .lower_fmod = true, \
   .lower_bitfield_extract = true, \
   .lower_bitfield_insert = true, \
   .lower_uadd_carry = true, \
   .lower_usub_borrow = true, \
   .lower_flrp64 = true, \
   .lower_isign = true, \
   .lower_ldexp = true, \
   .lower_device_index_to_zero = true, \
   .vectorize_io = true, \
   .use_interpolated_input_intrinsics = true, \
   .vertex_id_zero_based = true, \
   .lower_base_vertex = true
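
/* Additional NIR lowering used only by the scalar backend. */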
#define COMMON_SCALAR_OPTIONS \
   .lower_to_scalar = true, \
   .lower_pack_half_2x16 = true, \
   .lower_pack_snorm_2x16 = true, \
   .lower_pack_snorm_4x8 = true, \
   .lower_pack_unorm_2x16 = true, \
   .lower_pack_unorm_4x8 = true, \
   .lower_unpack_half_2x16 = true, \
   .lower_unpack_snorm_2x16 = true, \
   .lower_unpack_snorm_4x8 = true, \
   .lower_unpack_unorm_2x16 = true, \
   .lower_unpack_unorm_4x8 = true, \
   .max_unroll_iterations = 32

static const struct nir_shader_compiler_options scalar_nir_options = {
   COMMON_OPTIONS,
   COMMON_SCALAR_OPTIONS,
};

static const struct nir_shader_compiler_options vector_nir_options = {
   COMMON_OPTIONS,

   /* In the vec4 backend, our dpN instruction replicates its result to all
    * the components of a vec4. We would like NIR to give us replicated fdot
    * instructions because it can optimize better for us.
    */
   .fdot_replicates = true,

   .lower_pack_snorm_2x16 = true,
   .lower_pack_unorm_2x16 = true,
   .lower_unpack_snorm_2x16 = true,
   .lower_unpack_unorm_2x16 = true,
   .lower_extract_byte = true,
   .lower_extract_word = true,
   .intel_vec4 = true,
   .max_unroll_iterations = 32,
};

struct brw_compiler *
brw_compiler_create(void *mem_ctx, const struct gen_device_info *devinfo)
{
   struct brw_compiler *compiler = rzalloc(mem_ctx, struct brw_compiler);

   compiler->devinfo = devinfo;

   brw_fs_alloc_reg_sets(compiler);
   brw_vec4_alloc_reg_set(compiler);
   brw_init_compaction_tables(devinfo);
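
   /* INTEL_PRECISE_TRIG=1 makes the compiler prefer accuracy over
    * performance in trigonometric instructions.
    */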
   compiler->precise_trig = env_var_as_boolean("INTEL_PRECISE_TRIG", false);

   /* TCS threads can be dispatched in SINGLE_PATCH mode (one patch per
    * thread, one channel per patch vertex) or, on Gen9+, in 8_PATCH mode
    * (eight patches per thread, one channel per patch). 8_PATCH keeps all
    * channels busy for small patches and makes gl_InvocationID conditions
    * uniform, but barriers become real and the thread payload can burn up
    * to 32 registers on ICP handles. Gen12+ always uses 8_PATCH; earlier
    * gens default to SINGLE_PATCH unless INTEL_DEBUG=tcs8 opts in for
    * testing and benchmarking.
    */
   compiler->use_tcs_8_patch =
      devinfo->gen >= 12 ||
      (devinfo->gen >= 9 && (INTEL_DEBUG & DEBUG_TCS_EIGHT_PATCH));

   if (devinfo->gen >= 10) {
      /* We don't support vec4 mode on Gen10+. */
      for (int i = MESA_SHADER_VERTEX; i < MESA_SHADER_STAGES; i++)
         compiler->scalar_stage[i] = true;
   } else {
      compiler->scalar_stage[MESA_SHADER_VERTEX] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_VS", true);
      compiler->scalar_stage[MESA_SHADER_TESS_CTRL] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_TCS", true);
      compiler->scalar_stage[MESA_SHADER_TESS_EVAL] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_TES", true);
      compiler->scalar_stage[MESA_SHADER_GEOMETRY] =
         devinfo->gen >= 8 && env_var_as_boolean("INTEL_SCALAR_GS", true);
      compiler->scalar_stage[MESA_SHADER_FRAGMENT] = true;
      compiler->scalar_stage[MESA_SHADER_COMPUTE] = true;
   }
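
   /* 64-bit integer and double-precision operations that we always lower in
    * NIR, regardless of hardware support; without native 64-bit types (or
    * with INTEL_DEBUG=soft64), everything below is lowered as well.
    */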
   nir_lower_int64_options int64_options =
      nir_lower_imul64 |
      nir_lower_isign64 |
      nir_lower_divmod64 |
      nir_lower_imul_high64;
   nir_lower_doubles_options fp64_options =
      nir_lower_drcp |
      nir_lower_dsqrt |
      nir_lower_drsq |
      nir_lower_dtrunc |
      nir_lower_dfloor |
      nir_lower_dceil |
      nir_lower_dfract |
      nir_lower_dround_even |
      nir_lower_dmod |
      nir_lower_dsub |
      nir_lower_ddiv;

   if (!devinfo->has_64bit_types || (INTEL_DEBUG & DEBUG_SOFT64)) {
      int64_options |= nir_lower_mov64 |
                       nir_lower_icmp64 |
                       nir_lower_iadd64 |
                       nir_lower_iabs64 |
                       nir_lower_ineg64 |
                       nir_lower_logic64 |
                       nir_lower_minmax64 |
                       nir_lower_shift64 |
                       nir_lower_extract64;
      fp64_options |= nir_lower_fp64_full_software;
   }

   /* The Bspec's section titled "Instruction_multiply[DevBDW+]" claims that
    * the destination type can be Quadword and the source type Doubleword for
    * Gen8 and Gen9. So, lower 64-bit multiply on the rest of the platforms.
    */
   if (devinfo->gen < 8 || devinfo->gen > 9)
      int64_options |= nir_lower_imul_2x32_64;

   /* We want the GLSL compiler to emit code that uses condition codes */
   for (int i = 0; i < MESA_SHADER_STAGES; i++) {
      compiler->glsl_compiler_options[i].MaxUnrollIterations = 0;
      compiler->glsl_compiler_options[i].MaxIfDepth =
         devinfo->gen < 6 ? 16 : UINT_MAX;

      compiler->glsl_compiler_options[i].EmitNoIndirectInput = true;
      compiler->glsl_compiler_options[i].EmitNoIndirectUniform = false;

      bool is_scalar = compiler->scalar_stage[i];

      compiler->glsl_compiler_options[i].EmitNoIndirectOutput = is_scalar;
      compiler->glsl_compiler_options[i].EmitNoIndirectTemp = is_scalar;
      compiler->glsl_compiler_options[i].OptimizeForAOS = !is_scalar;

      struct nir_shader_compiler_options *nir_options =
         rzalloc(compiler, struct nir_shader_compiler_options);
      if (is_scalar) {
         *nir_options = scalar_nir_options;
      } else {
         *nir_options = vector_nir_options;
      }

      /* Prior to Gen6, there are no three source operations, and Gen11 loses
       * LRP.
       */
      nir_options->lower_ffma = devinfo->gen < 6;
      nir_options->lower_flrp32 = devinfo->gen < 6 || devinfo->gen >= 11;
      nir_options->lower_fpow = devinfo->gen >= 12;

      nir_options->lower_rotate = devinfo->gen < 11;
      nir_options->lower_bitfield_reverse = devinfo->gen < 7;

      nir_options->lower_int64_options = int64_options;
      nir_options->lower_doubles_options = fp64_options;

      nir_options->unify_interfaces = i < MESA_SHADER_FRAGMENT;

      compiler->glsl_compiler_options[i].NirOptions = nir_options;

      compiler->glsl_compiler_options[i].ClampBlockIndicesToArrayBounds = true;
   }

   compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].EmitNoIndirectInput = false;
   compiler->glsl_compiler_options[MESA_SHADER_TESS_EVAL].EmitNoIndirectInput = false;
   compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].EmitNoIndirectOutput = false;

   if (compiler->scalar_stage[MESA_SHADER_GEOMETRY])
      compiler->glsl_compiler_options[MESA_SHADER_GEOMETRY].EmitNoIndirectInput = false;

   return compiler;
}
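
/* Shift a new bit into the low end of a configuration word; e.g. a *val of
 * 0b10 with add=true becomes 0b101.
 */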
static void
insert_u64_bit(uint64_t *val, bool add)
{
   *val = (*val << 1) | !!add;
}
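
/* Pack the compiler configuration that can affect generated code into a
 * 64-bit value: precise_trig, the scalar_stage choices on Gen8-9, and the
 * INTEL_DEBUG bits covered by DEBUG_DISK_CACHE_MASK. Suitable for use in,
 * e.g., a shader disk-cache key.
 */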
uint64_t
brw_get_compiler_config_value(const struct brw_compiler *compiler)
{
   uint64_t config = 0;
   insert_u64_bit(&config, compiler->precise_trig);
   if (compiler->devinfo->gen >= 8 && compiler->devinfo->gen < 10) {
      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_VERTEX]);
      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_TESS_CTRL]);
      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_TESS_EVAL]);
      insert_u64_bit(&config, compiler->scalar_stage[MESA_SHADER_GEOMETRY]);
   }
   uint64_t debug_bits = INTEL_DEBUG;
   uint64_t mask = DEBUG_DISK_CACHE_MASK;
   while (mask != 0) {
      const uint64_t bit = 1ULL << (ffsll(mask) - 1);
      insert_u64_bit(&config, (debug_bits & bit) != 0);
      mask &= ~bit;
   }
   return config;
}
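
/* Sizes of the per-stage prog_data and prog_key structures; the
 * STATIC_ASSERTs keep the tables below indexed in gl_shader_stage order.
 */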
unsigned
brw_prog_data_size(gl_shader_stage stage)
{
   STATIC_ASSERT(MESA_SHADER_VERTEX == 0);
   STATIC_ASSERT(MESA_SHADER_TESS_CTRL == 1);
   STATIC_ASSERT(MESA_SHADER_TESS_EVAL == 2);
   STATIC_ASSERT(MESA_SHADER_GEOMETRY == 3);
   STATIC_ASSERT(MESA_SHADER_FRAGMENT == 4);
   STATIC_ASSERT(MESA_SHADER_COMPUTE == 5);
   static const size_t stage_sizes[] = {
      sizeof(struct brw_vs_prog_data),
      sizeof(struct brw_tcs_prog_data),
      sizeof(struct brw_tes_prog_data),
      sizeof(struct brw_gs_prog_data),
      sizeof(struct brw_wm_prog_data),
      sizeof(struct brw_cs_prog_data),
   };
   assert((int)stage >= 0 && stage < ARRAY_SIZE(stage_sizes));
   return stage_sizes[stage];
}

unsigned
brw_prog_key_size(gl_shader_stage stage)
{
   static const size_t stage_sizes[] = {
      sizeof(struct brw_vs_prog_key),
      sizeof(struct brw_tcs_prog_key),
      sizeof(struct brw_tes_prog_key),
      sizeof(struct brw_gs_prog_key),
      sizeof(struct brw_wm_prog_key),
      sizeof(struct brw_cs_prog_key),
   };
   assert((int)stage >= 0 && stage < ARRAY_SIZE(stage_sizes));
   return stage_sizes[stage];
}