turnip: make cmdstream bo's read-only to GPU
This allows earlier faults instead of a silently corrupted cmdstream.
This was already done in freedreno long ago in:
04aff7e4 ("freedreno: make cmdstream bo's read-only to GPU")
Since private memory should be GPU-writable, it is now allocated
separately instead of being suballocated from the now read-only cmdstream.
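As a rough illustration of why private memory can no longer be suballocated, here is a self-contained sketch in generic C (not turnip code): a suballocator whose parent BO is GPU-read-only has to reject GPU-writable ranges, hence the separate pvtmem_bo introduced below.

/* Generic illustration, not turnip code: suballocating from a parent BO.
 * Once the parent (the cmdstream BO) is GPU-read-only, GPU-writable
 * suballocations such as private memory can no longer come from it. */
#include <stdbool.h>
#include <stdint.h>

struct sub_alloc {
   uint64_t iova;   /* GPU address of the carved-out range */
   uint64_t size;
};

static bool
suballoc(uint64_t parent_iova, uint64_t parent_size, bool parent_gpu_readonly,
         uint64_t *next_offset, uint64_t size, bool need_gpu_write,
         struct sub_alloc *out)
{
   if (need_gpu_write && parent_gpu_readonly)
      return false;                       /* GPU writes would fault */
   if (*next_offset + size > parent_size)
      return false;                       /* parent BO exhausted */

   out->iova = parent_iova + *next_offset;
   out->size = size;
   *next_offset += size;
   return true;
}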
Signed-off-by: Danylo Piliaiev <dpiliaiev@igalia.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/10807>

commit 931ad19a18
parent 413e7c6dc8
committed by Marge Bot

@@ -111,7 +111,7 @@ tu_cs_add_bo(struct tu_cs *cs, uint32_t size)
 
    VkResult result =
       tu_bo_init_new(cs->device, new_bo, size * sizeof(uint32_t),
-                     TU_BO_ALLOC_ALLOW_DUMP);
+                     TU_BO_ALLOC_GPU_READ_ONLY | TU_BO_ALLOC_ALLOW_DUMP);
    if (result != VK_SUCCESS) {
       free(new_bo);
       return result;
@@ -2030,24 +2030,29 @@ calc_pvtmem_size(struct tu_device *dev, struct tu_pvtmem_config *config,
    return dev->physical_device->info.num_sp_cores * per_sp_size;
 }
 
-static void
+static VkResult
 tu_setup_pvtmem(struct tu_device *dev,
                 struct tu_pipeline *pipeline,
                 struct tu_pvtmem_config *config,
                 uint32_t pvtmem_bytes, bool per_wave)
 {
-   struct tu_cs_memory memory;
-
    if (!pvtmem_bytes) {
       memset(config, 0, sizeof(*config));
-      return;
+      return VK_SUCCESS;
    }
 
    uint32_t total_size = calc_pvtmem_size(dev, config, pvtmem_bytes);
    config->per_wave = per_wave;
 
-   tu_cs_alloc(&pipeline->cs, total_size / 32, 8, &memory);
-   config->iova = memory.iova;
+   VkResult result =
+      tu_bo_init_new(dev, &pipeline->pvtmem_bo, total_size,
+                     TU_BO_ALLOC_NO_FLAGS);
+   if (result != VK_SUCCESS)
+      return result;
+
+   config->iova = pipeline->pvtmem_bo.iova;
+
+   return result;
 }
 
 
@@ -2846,6 +2851,9 @@ tu_pipeline_finish(struct tu_pipeline *pipeline,
 {
    tu_cs_finish(&pipeline->cs);
 
+   if (pipeline->pvtmem_bo.size)
+      tu_bo_finish(dev, &pipeline->pvtmem_bo);
+
    ralloc_free(pipeline->executables_mem_ctx);
 }
 
@@ -2904,8 +2912,12 @@ tu_pipeline_builder_build(struct tu_pipeline_builder *builder,
       per_wave = false;
    }
 
-   tu_setup_pvtmem(builder->device, *pipeline, &builder->pvtmem,
-                   pvtmem_size, per_wave);
+   result = tu_setup_pvtmem(builder->device, *pipeline, &builder->pvtmem,
+                            pvtmem_size, per_wave);
+   if (result != VK_SUCCESS) {
+      vk_object_free(&builder->device->vk, builder->alloc, *pipeline);
+      return result;
+   }
 
    tu_pipeline_builder_parse_dynamic(builder, *pipeline);
    tu_pipeline_builder_parse_shader_stages(builder, *pipeline);
@@ -1110,6 +1110,9 @@ struct tu_pipeline
 
    struct tu_cs cs;
 
+   /* Separate BO for private memory since it should GPU writable */
+   struct tu_bo pvtmem_bo;
+
    struct tu_pipeline_layout *layout;
 
    bool need_indirect_descriptor_sets;