anv: get rid of duplicated values from gen_device_info

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
commit 6b21728c4a
parent 94d0e7dc08
Author: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Date:   2016-09-23 01:04:25 +03:00

6 changed files with 28 additions and 43 deletions
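
The change applies one pattern across the driver: per-stage thread limits that anv_physical_device used to duplicate are now read from gen_device_info directly, through a local devinfo pointer. Below is a minimal, self-contained sketch of that access pattern; it uses simplified stand-in structs and arbitrary example values, not the real driver types.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for gen_device_info and anv_physical_device. */
struct device_info {
   uint32_t max_vs_threads;
   uint32_t max_cs_threads;
};

struct physical_device {
   struct device_info info;   /* single source of truth, no copied fields */
};

int main(void)
{
   /* Arbitrary example limits, not real hardware values. */
   struct physical_device pdev = {
      .info = { .max_vs_threads = 128, .max_cs_threads = 64 },
   };

   /* Callers grab a devinfo pointer and read the limits from it directly,
    * instead of going through duplicated max_*_threads fields. */
   const struct device_info *devinfo = &pdev.info;

   printf("VS threads: %u, CS threads: %u\n",
          devinfo->max_vs_threads, devinfo->max_cs_threads);
   return 0;
}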

View File

@@ -924,8 +924,9 @@ anv_scratch_pool_alloc(struct anv_device *device, struct anv_scratch_pool *pool,
    if (size == 0) {
       /* We own the lock. Allocate a buffer */
-      struct anv_physical_device *physical_device =
+      const struct anv_physical_device *physical_device =
          &device->instance->physicalDevice;
+      const struct gen_device_info *devinfo = &physical_device->info;
       /* WaCSScratchSize:hsw
        *
@@ -944,14 +945,14 @@ anv_scratch_pool_alloc(struct anv_device *device, struct anv_scratch_pool *pool,
        */
       const unsigned subslices = MAX2(physical_device->subslice_total, 1);
       const unsigned scratch_ids_per_subslice =
-         device->info.is_haswell ? 16 * 8 : physical_device->max_cs_threads;
+         device->info.is_haswell ? 16 * 8 : devinfo->max_cs_threads;
       uint32_t max_threads[] = {
-         [MESA_SHADER_VERTEX] = physical_device->max_vs_threads,
-         [MESA_SHADER_TESS_CTRL] = physical_device->max_hs_threads,
-         [MESA_SHADER_TESS_EVAL] = physical_device->max_ds_threads,
-         [MESA_SHADER_GEOMETRY] = physical_device->max_gs_threads,
-         [MESA_SHADER_FRAGMENT] = physical_device->max_wm_threads,
+         [MESA_SHADER_VERTEX] = devinfo->max_vs_threads,
+         [MESA_SHADER_TESS_CTRL] = devinfo->max_hs_threads,
+         [MESA_SHADER_TESS_EVAL] = devinfo->max_ds_threads,
+         [MESA_SHADER_GEOMETRY] = devinfo->max_gs_threads,
+         [MESA_SHADER_FRAGMENT] = devinfo->max_wm_threads,
          [MESA_SHADER_COMPUTE] = scratch_ids_per_subslice * subslices,
       };
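
The compute entry in the table above is not a flat devinfo limit: Haswell sizes scratch for 16 EUs x 8 threads per subslice (the WaCSScratchSize:hsw workaround), other parts use devinfo->max_cs_threads, and either value is then scaled by the subslice count. A small self-contained sketch of that arithmetic follows, with made-up subslice and thread counts purely for illustration.

#include <stdio.h>

#define MAX2(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
   /* Made-up example inputs. */
   int is_haswell = 1;
   unsigned subslice_total = 2;     /* may be 0 when the kernel can't report it */
   unsigned max_cs_threads = 70;    /* example of what devinfo would report */

   const unsigned subslices = MAX2(subslice_total, 1);
   const unsigned scratch_ids_per_subslice =
      is_haswell ? 16 * 8 : max_cs_threads;

   /* 128 * 2 = 256 compute scratch IDs in this example. */
   printf("compute scratch IDs = %u\n", scratch_ids_per_subslice * subslices);
   return 0;
}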

View File

@@ -135,12 +135,6 @@ anv_physical_device_init(struct anv_physical_device *device,
    bool swizzled = anv_gem_get_bit6_swizzle(fd, I915_TILING_X);
-   device->max_vs_threads = device->info.max_vs_threads;
-   device->max_hs_threads = device->info.max_hs_threads;
-   device->max_ds_threads = device->info.max_ds_threads;
-   device->max_gs_threads = device->info.max_gs_threads;
-   device->max_wm_threads = device->info.max_wm_threads;
    /* GENs prior to 8 do not support EU/Subslice info */
    if (device->info.gen >= 8) {
       device->subslice_total = anv_gem_get_param(fd, I915_PARAM_SUBSLICE_TOTAL);
@@ -161,13 +155,11 @@ anv_physical_device_init(struct anv_physical_device *device,
    if (device->info.is_cherryview &&
        device->subslice_total > 0 && device->eu_total > 0) {
       /* Logical CS threads = EUs per subslice * 7 threads per EU */
-      device->max_cs_threads = device->eu_total / device->subslice_total * 7;
+      uint32_t max_cs_threads = device->eu_total / device->subslice_total * 7;
       /* Fuse configurations may give more threads than expected, never less. */
-      if (device->max_cs_threads < device->info.max_cs_threads)
-         device->max_cs_threads = device->info.max_cs_threads;
-   } else {
-      device->max_cs_threads = device->info.max_cs_threads;
+      if (max_cs_threads > device->info.max_cs_threads)
+         device->info.max_cs_threads = max_cs_threads;
    }
    close(fd);
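
For Cherryview the hunk above derives a logical thread count from the fused EU configuration (EUs per subslice x 7 threads per EU) and only ever raises devinfo's max_cs_threads, never lowers it. A small standalone sketch of that adjustment follows, using a hypothetical fuse configuration as input.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
   /* Hypothetical Cherryview fuse configuration; example values only. */
   uint32_t eu_total = 16;
   uint32_t subslice_total = 2;
   uint32_t devinfo_max_cs_threads = 48;   /* example baseline limit */

   if (subslice_total > 0 && eu_total > 0) {
      /* Logical CS threads = EUs per subslice * 7 threads per EU */
      uint32_t max_cs_threads = eu_total / subslice_total * 7;

      /* Fuse configurations may give more threads than expected, never less. */
      if (max_cs_threads > devinfo_max_cs_threads)
         devinfo_max_cs_threads = max_cs_threads;
   }

   /* 16 / 2 * 7 = 56 here, so the limit is raised from 48 to 56. */
   printf("max_cs_threads = %u\n", devinfo_max_cs_threads);
   return 0;
}
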
@@ -537,11 +529,11 @@ void anv_GetPhysicalDeviceProperties(
       .maxFragmentCombinedOutputResources = 8,
       .maxComputeSharedMemorySize = 32768,
       .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
-      .maxComputeWorkGroupInvocations = 16 * pdevice->max_cs_threads,
+      .maxComputeWorkGroupInvocations = 16 * devinfo->max_cs_threads,
       .maxComputeWorkGroupSize = {
-         16 * pdevice->max_cs_threads,
-         16 * pdevice->max_cs_threads,
-         16 * pdevice->max_cs_threads,
+         16 * devinfo->max_cs_threads,
+         16 * devinfo->max_cs_threads,
+         16 * devinfo->max_cs_threads,
       },
       .subPixelPrecisionBits = 4 /* FIXME */,
       .subTexelPrecisionBits = 4 /* FIXME */,

View File

@@ -573,17 +573,6 @@ struct anv_physical_device {
    uint32_t eu_total;
    uint32_t subslice_total;
-   /**
-    * Platform specific constants containing the maximum number of threads
-    * for each pipeline stage.
-    */
-   uint32_t max_vs_threads;
-   uint32_t max_hs_threads;
-   uint32_t max_ds_threads;
-   uint32_t max_gs_threads;
-   uint32_t max_wm_threads;
-   uint32_t max_cs_threads;
    struct anv_wsi_interface * wsi[VK_ICD_WSI_PLATFORM_MAX];
 };

View File

@@ -45,8 +45,9 @@ genX(graphics_pipeline_create)(
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
    ANV_FROM_HANDLE(anv_render_pass, pass, pCreateInfo->renderPass);
-   struct anv_physical_device *physical_device =
+   const struct anv_physical_device *physical_device =
       &device->instance->physicalDevice;
+   const struct gen_device_info *devinfo = &physical_device->info;
    struct anv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];
    struct anv_pipeline *pipeline;
    VkResult result;
@@ -125,7 +126,7 @@ genX(graphics_pipeline_create)(
       vs.VertexURBEntryReadLength = vs_prog_data->base.urb_read_length;
       vs.VertexURBEntryReadOffset = 0;
-      vs.MaximumNumberofThreads = physical_device->max_vs_threads - 1;
+      vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;
       vs.StatisticsEnable = true;
       vs.VSFunctionEnable = true;
    }
@@ -154,7 +155,7 @@ genX(graphics_pipeline_create)(
       gs.DispatchGRFStartRegisterforURBData =
          gs_prog_data->base.base.dispatch_grf_start_reg;
-      gs.MaximumNumberofThreads = physical_device->max_gs_threads - 1;
+      gs.MaximumNumberofThreads = devinfo->max_gs_threads - 1;
       /* This in the next dword on HSW. */
       gs.ControlDataFormat = gs_prog_data->control_data_format;
       gs.ControlDataHeaderSize = gs_prog_data->control_data_header_size_hwords;
@@ -187,7 +188,7 @@ genX(graphics_pipeline_create)(
        * don't at least set the maximum number of threads.
        */
       anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS), ps) {
-         ps.MaximumNumberofThreads = physical_device->max_wm_threads - 1;
+         ps.MaximumNumberofThreads = devinfo->max_wm_threads - 1;
       }
    } else {
       const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
@@ -209,7 +210,7 @@ genX(graphics_pipeline_create)(
            .offset = 0,
         };
         ps.PerThreadScratchSpace = scratch_space(&wm_prog_data->base);
-        ps.MaximumNumberofThreads = physical_device->max_wm_threads - 1;
+        ps.MaximumNumberofThreads = devinfo->max_wm_threads - 1;
         ps.PushConstantEnable = wm_prog_data->base.nr_params > 0;
         ps.AttributeEnable = wm_prog_data->num_varying_inputs > 0;
         ps.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;

View File

@@ -55,8 +55,9 @@ genX(graphics_pipeline_create)(
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
    ANV_FROM_HANDLE(anv_render_pass, pass, pCreateInfo->renderPass);
-   struct anv_physical_device *physical_device =
+   const struct anv_physical_device *physical_device =
       &device->instance->physicalDevice;
+   const struct gen_device_info *devinfo = &physical_device->info;
    struct anv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];
    struct anv_pipeline *pipeline;
    VkResult result;
@@ -144,7 +145,7 @@ genX(graphics_pipeline_create)(
      gs.DispatchGRFStartRegisterForURBData =
         gs_prog_data->base.base.dispatch_grf_start_reg;
-     gs.MaximumNumberofThreads = physical_device->max_gs_threads / 2 - 1;
+     gs.MaximumNumberofThreads = devinfo->max_gs_threads / 2 - 1;
      gs.ControlDataHeaderSize = gs_prog_data->control_data_header_size_hwords;
      gs.DispatchMode = gs_prog_data->base.dispatch_mode;
      gs.StatisticsEnable = true;
@@ -215,7 +216,7 @@ genX(graphics_pipeline_create)(
      vs.VertexURBEntryReadLength = vs_prog_data->base.urb_read_length;
      vs.VertexURBEntryReadOffset = 0;
-     vs.MaximumNumberofThreads = physical_device->max_vs_threads - 1;
+     vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;
      vs.StatisticsEnable = false;
      vs.SIMD8DispatchEnable = pipeline->vs_simd8 != NO_KERNEL;
      vs.VertexCacheDisable = false;

View File

@@ -35,8 +35,9 @@ genX(compute_pipeline_create)(
    VkPipeline* pPipeline)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
-   struct anv_physical_device *physical_device =
+   const struct anv_physical_device *physical_device =
       &device->instance->physicalDevice;
+   const struct gen_device_info *devinfo = &physical_device->info;
    struct anv_pipeline *pipeline;
    VkResult result;
@@ -120,7 +121,7 @@ genX(compute_pipeline_create)(
      vfe.GPGPUMode = true;
 #endif
      vfe.MaximumNumberofThreads =
-        physical_device->max_cs_threads * subslices - 1;
+        devinfo->max_cs_threads * subslices - 1;
      vfe.NumberofURBEntries = GEN_GEN <= 7 ? 0 : 2;
      vfe.ResetGatewayTimer = true;
 #if GEN_GEN <= 8
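
As in the graphics hunks above, MaximumNumberofThreads is written with a "- 1", i.e. as a count-minus-one field, and for the compute VFE the per-subslice devinfo limit is first scaled by the subslice count. A short standalone sketch of that computation follows; the helper name and the numbers are invented for illustration and are not the real GENX packet emission.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the computation in the hunk above:
 * the packet field holds (thread count - 1). */
static uint32_t vfe_max_threads_field(uint32_t max_cs_threads, uint32_t subslices)
{
   return max_cs_threads * subslices - 1;
}

int main(void)
{
   /* Example values only. */
   uint32_t max_cs_threads = 56;   /* per-subslice limit, as the code above uses it */
   uint32_t subslices = 2;

   /* 56 * 2 - 1 = 111 would be written to MaximumNumberofThreads. */
   printf("VFE MaximumNumberofThreads = %u\n",
          vfe_max_threads_field(max_cs_threads, subslices));
   return 0;
}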