lavapipe: Implement VK_KHR_ray_tracing_pipeline

Uses the existing ray traversal helpers and function calls handled by
gallivm.

Reviewed-By: Mike Blumenkrantz <michael.blumenkrantz@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/28187>
This commit is contained in:
Konstantin Seurer
2024-03-15 19:07:40 +01:00
committed by Marge Bot
parent 8b71c6d0d1
commit d99e95e033
7 changed files with 1472 additions and 12 deletions

View File

@@ -533,7 +533,7 @@ Khronos extensions that are not part of any Vulkan version:
VK_KHR_push_descriptor DONE (anv, hasvk, lvp, nvk, radv, tu, vn)
VK_KHR_ray_query DONE (anv/gfx12.5+, lvp, radv/gfx10.3+)
VK_KHR_ray_tracing_maintenance1 DONE (anv/gfx12.5+, radv/gfx10.3+)
VK_KHR_ray_tracing_pipeline DONE (anv/gfx12.5+, radv/gfx10.3+)
VK_KHR_ray_tracing_pipeline DONE (anv/gfx12.5+, lvp, radv/gfx10.3+)
VK_KHR_ray_tracing_position_fetch DONE (anv, radv/gfx10.3+)
VK_KHR_shader_clock DONE (anv, hasvk, lvp, nvk, radv, vn)
VK_KHR_shader_expect_assume DONE (anv, dzn, hasvk, lvp, nvk, panvk, pvr, radv, tu, v3dv, vn)

View File

@@ -140,6 +140,7 @@ static const struct vk_device_extension_table lvp_device_extensions_supported =
.KHR_push_descriptor = true,
.KHR_pipeline_library = true,
.KHR_ray_query = true,
.KHR_ray_tracing_pipeline = true,
.KHR_relaxed_block_layout = true,
.KHR_sampler_mirror_clamp_to_edge = true,
.KHR_sampler_ycbcr_conversion = true,
@@ -489,6 +490,13 @@ lvp_get_features(const struct lvp_physical_device *pdevice,
/* VK_KHR_ray_query */
.rayQuery = true,
/* VK_KHR_ray_tracing_pipeline */
.rayTracingPipeline = true,
.rayTracingPipelineShaderGroupHandleCaptureReplay = false,
.rayTracingPipelineShaderGroupHandleCaptureReplayMixed = false,
.rayTracingPipelineTraceRaysIndirect = true,
.rayTraversalPrimitiveCulling = true,
/* VK_EXT_shader_object */
.shaderObject = true,
@@ -1102,6 +1110,18 @@ lvp_get_properties(const struct lvp_physical_device *device, struct vk_propertie
.maxDescriptorSetAccelerationStructures = MAX_DESCRIPTORS,
.maxDescriptorSetUpdateAfterBindAccelerationStructures = MAX_DESCRIPTORS,
.minAccelerationStructureScratchOffsetAlignment = 128,
/* VK_KHR_ray_tracing_pipeline */
.shaderGroupHandleSize = LVP_RAY_TRACING_GROUP_HANDLE_SIZE,
.maxRayRecursionDepth = 31, /* Minimum allowed for DXR. */
.maxShaderGroupStride = 16384, /* dummy */
/* This isn't strictly necessary, but Doom Eternal breaks if the
* alignment is any lower. */
.shaderGroupBaseAlignment = 32,
.shaderGroupHandleCaptureReplaySize = 0,
.maxRayDispatchInvocationCount = 1024 * 1024 * 64,
.shaderGroupHandleAlignment = 16,
.maxRayHitAttributeSize = LVP_RAY_HIT_ATTRIBS_SIZE,
};
/* Vulkan 1.0 */

View File

@@ -107,6 +107,7 @@ struct rendering_state {
struct pipe_draw_info info;
struct pipe_grid_info dispatch_info;
struct pipe_grid_info trace_rays_info;
struct pipe_framebuffer_state framebuffer;
int fb_map[PIPE_MAX_COLOR_BUFS];
bool fb_remapped;
@@ -392,6 +393,9 @@ static void emit_compute_state(struct rendering_state *state)
}
state->compute_shader_dirty = false;
state->pcbuf_dirty[MESA_SHADER_RAYGEN] = true;
state->constbuf_dirty[MESA_SHADER_RAYGEN] = true;
}
static void
@@ -604,6 +608,26 @@ static void handle_compute_pipeline(struct vk_cmd_queue_entry *cmd,
handle_compute_shader(state, &pipeline->shaders[MESA_SHADER_COMPUTE], pipeline->layout);
}
/* Bind a ray tracing pipeline. Lavapipe executes the whole RT pipeline as a
 * single compute-style traversal shader stored in the MESA_SHADER_RAYGEN
 * slot, so binding consists of latching that shader plus its launch block
 * size, and updating the push-constant bookkeeping for the RT stages.
 */
static void handle_ray_tracing_pipeline(struct vk_cmd_queue_entry *cmd,
                                        struct rendering_state *state)
{
   LVP_FROM_HANDLE(lvp_pipeline, pipeline, cmd->u.bind_pipeline.pipeline);

   struct lvp_shader *raygen = &pipeline->shaders[MESA_SHADER_RAYGEN];
   state->shaders[MESA_SHADER_RAYGEN] = raygen;

   /* Push constants only matter if the layout exposes them to an RT stage. */
   if (pipeline->layout->push_constant_stages & LVP_RAY_TRACING_STAGES)
      state->has_pcbuf[MESA_SHADER_RAYGEN] = pipeline->layout->push_constant_size > 0;
   if (!state->has_pcbuf[MESA_SHADER_RAYGEN])
      state->pcbuf_dirty[MESA_SHADER_RAYGEN] = false;

   /* The traversal shader's workgroup size becomes the launch block size. */
   for (unsigned axis = 0; axis < 3; axis++)
      state->trace_rays_info.block[axis] =
         raygen->pipeline_nir->nir->info.workgroup_size[axis];
}
static void
set_viewport_depth_xform(struct rendering_state *state, unsigned idx)
{
@@ -1093,6 +1117,8 @@ static void handle_pipeline(struct vk_cmd_queue_entry *cmd,
pipeline->used = true;
if (pipeline->type == LVP_PIPELINE_COMPUTE) {
handle_compute_pipeline(cmd, state);
} else if (pipeline->type == LVP_PIPELINE_RAY_TRACING) {
handle_ray_tracing_pipeline(cmd, state);
} else if (pipeline->type == LVP_PIPELINE_GRAPHICS) {
handle_graphics_pipeline(pipeline, state);
} else if (pipeline->type == LVP_PIPELINE_EXEC_GRAPH) {
@@ -1232,6 +1258,9 @@ handle_descriptor_sets(VkBindDescriptorSetsInfoKHR *bds, struct rendering_state
if (pipeline_type == LVP_PIPELINE_COMPUTE) {
bool changed = state->const_buffer[MESA_SHADER_COMPUTE][bds->firstSet + i].buffer == state->desc_buffers[bds->firstSet + i];
state->constbuf_dirty[MESA_SHADER_COMPUTE] |= changed;
} else if (pipeline_type == LVP_PIPELINE_RAY_TRACING) {
bool changed = state->const_buffer[MESA_SHADER_RAYGEN][bds->firstSet + i].buffer == state->desc_buffers[bds->firstSet + i];
state->constbuf_dirty[MESA_SHADER_RAYGEN] |= changed;
} else {
lvp_forall_gfx_stage(j) {
bool changed = state->const_buffer[j][bds->firstSet + i].buffer == state->desc_buffers[bds->firstSet + i];
@@ -1257,6 +1286,12 @@ handle_descriptor_sets(VkBindDescriptorSetsInfoKHR *bds, struct rendering_state
continue;
}
if (pipeline_type == LVP_PIPELINE_RAY_TRACING) {
if (set->layout->shader_stages & LVP_RAY_TRACING_STAGES)
handle_set_stage(state, set, pipeline_type, MESA_SHADER_RAYGEN, bds->firstSet + i);
continue;
}
if (set->layout->shader_stages & VK_SHADER_STAGE_VERTEX_BIT)
handle_set_stage(state, set, pipeline_type, MESA_SHADER_VERTEX, bds->firstSet + i);
@@ -2759,6 +2794,7 @@ static void handle_push_constants(struct vk_cmd_queue_entry *cmd,
state->pcbuf_dirty[MESA_SHADER_COMPUTE] |= (stage_flags & VK_SHADER_STAGE_COMPUTE_BIT) > 0;
state->pcbuf_dirty[MESA_SHADER_TASK] |= (stage_flags & VK_SHADER_STAGE_TASK_BIT_EXT) > 0;
state->pcbuf_dirty[MESA_SHADER_MESH] |= (stage_flags & VK_SHADER_STAGE_MESH_BIT_EXT) > 0;
state->pcbuf_dirty[MESA_SHADER_RAYGEN] |= (stage_flags & LVP_RAY_TRACING_STAGES) > 0;
state->inlines_dirty[MESA_SHADER_VERTEX] |= (stage_flags & VK_SHADER_STAGE_VERTEX_BIT) > 0;
state->inlines_dirty[MESA_SHADER_FRAGMENT] |= (stage_flags & VK_SHADER_STAGE_FRAGMENT_BIT) > 0;
state->inlines_dirty[MESA_SHADER_GEOMETRY] |= (stage_flags & VK_SHADER_STAGE_GEOMETRY_BIT) > 0;
@@ -4131,8 +4167,12 @@ bind_db_samplers(struct rendering_state *state, enum lvp_pipeline_type pipeline_
if (!state->desc_buffer_addrs[buffer_index]) {
if (set_layout->immutable_set) {
state->desc_sets[pipeline_type][set] = set_layout->immutable_set;
u_foreach_bit(stage, set_layout->shader_stages)
handle_set_stage_buffer(state, set_layout->immutable_set->bo, 0, vk_to_mesa_shader_stage(1<<stage), set);
if (pipeline_type == LVP_PIPELINE_RAY_TRACING) {
handle_set_stage_buffer(state, set_layout->immutable_set->bo, 0, MESA_SHADER_RAYGEN, set);
} else {
u_foreach_bit(stage, set_layout->shader_stages)
handle_set_stage_buffer(state, set_layout->immutable_set->bo, 0, vk_to_mesa_shader_stage(1<<stage), set);
}
}
return;
}
@@ -4151,8 +4191,12 @@ bind_db_samplers(struct rendering_state *state, enum lvp_pipeline_type pipeline_
struct lp_descriptor *immutable_desc = &bind_layout->immutable_samplers[sampler_index]->desc;
desc[sampler_index].sampler = immutable_desc->sampler;
desc[sampler_index].texture.sampler_index = immutable_desc->texture.sampler_index;
u_foreach_bit(stage, set_layout->shader_stages)
did_update |= BITFIELD_BIT(vk_to_mesa_shader_stage(1<<stage));
if (pipeline_type == LVP_PIPELINE_RAY_TRACING) {
did_update |= BITFIELD_BIT(MESA_SHADER_RAYGEN);
} else {
u_foreach_bit(stage, set_layout->shader_stages)
did_update |= BITFIELD_BIT(vk_to_mesa_shader_stage(1<<stage));
}
}
}
}
@@ -4192,17 +4236,20 @@ handle_descriptor_buffer_offsets(struct vk_cmd_queue_entry *cmd, struct renderin
state->desc_buffer_offsets[pipeline_type][idx].offset = dbo->pOffsets[i];
const struct lvp_descriptor_set_layout *set_layout = get_set_layout(layout, idx);
/* set for all stages */
u_foreach_bit(stage, set_layout->shader_stages) {
gl_shader_stage pstage = vk_to_mesa_shader_stage(1<<stage);
handle_set_stage_buffer(state, state->desc_buffers[dbo->pBufferIndices[i]], dbo->pOffsets[i], pstage, idx);
if (pipeline_type == LVP_PIPELINE_RAY_TRACING) {
handle_set_stage_buffer(state, state->desc_buffers[dbo->pBufferIndices[i]], dbo->pOffsets[i], MESA_SHADER_RAYGEN, idx);
} else {
/* set for all stages */
u_foreach_bit(stage, set_layout->shader_stages) {
gl_shader_stage pstage = vk_to_mesa_shader_stage(1<<stage);
handle_set_stage_buffer(state, state->desc_buffers[dbo->pBufferIndices[i]], dbo->pOffsets[i], pstage, idx);
}
}
bind_db_samplers(state, pipeline_type, idx);
}
}
}
#ifdef VK_ENABLE_BETA_EXTENSIONS
static void *
lvp_push_internal_buffer(struct rendering_state *state, gl_shader_stage stage, uint32_t size)
{
@@ -4223,6 +4270,8 @@ lvp_push_internal_buffer(struct rendering_state *state, gl_shader_stage stage, u
return mem;
}
#ifdef VK_ENABLE_BETA_EXTENSIONS
static void
dispatch_graph(struct rendering_state *state, const VkDispatchGraphInfoAMDX *info, void *scratch)
{
@@ -4417,6 +4466,105 @@ handle_write_acceleration_structures_properties(struct vk_cmd_queue_entry *cmd,
}
}
static void emit_ray_tracing_state(struct rendering_state *state)
{
bool pcbuf_dirty = state->pcbuf_dirty[MESA_SHADER_RAYGEN];
if (pcbuf_dirty)
update_pcbuf(state, MESA_SHADER_COMPUTE, MESA_SHADER_RAYGEN);
if (state->constbuf_dirty[MESA_SHADER_RAYGEN]) {
for (unsigned i = 0; i < state->num_const_bufs[MESA_SHADER_RAYGEN]; i++)
state->pctx->set_constant_buffer(state->pctx, MESA_SHADER_COMPUTE,
i + 1, false, &state->const_buffer[MESA_SHADER_RAYGEN][i]);
state->constbuf_dirty[MESA_SHADER_RAYGEN] = false;
}
state->pctx->bind_compute_state(state->pctx, state->shaders[MESA_SHADER_RAYGEN]->shader_cso);
state->pcbuf_dirty[MESA_SHADER_COMPUTE] = true;
state->constbuf_dirty[MESA_SHADER_COMPUTE] = true;
state->compute_shader_dirty = true;
}
/* vkCmdTraceRaysKHR: launch the bound traversal shader over a
 * width x height x depth ray grid. The SBT addresses/strides and the launch
 * dimensions are packed into a VkTraceRaysIndirectCommand2KHR that is pushed
 * into an internal constant buffer so the traversal shader can read them at
 * runtime (the same layout the indirect path uses).
 */
static void
handle_trace_rays(struct vk_cmd_queue_entry *cmd, struct rendering_state *state)
{
struct vk_cmd_trace_rays_khr *trace = &cmd->u.trace_rays_khr;
/* Bind traversal shader + constants onto the compute bind point. */
emit_ray_tracing_state(state);
/* Scratch space visible to the shader; filled with the unified command. */
VkTraceRaysIndirectCommand2KHR *command = lvp_push_internal_buffer(
state, MESA_SHADER_COMPUTE, sizeof(VkTraceRaysIndirectCommand2KHR));
*command = (VkTraceRaysIndirectCommand2KHR) {
.raygenShaderRecordAddress = trace->raygen_shader_binding_table->deviceAddress,
.raygenShaderRecordSize = trace->raygen_shader_binding_table->size,
.missShaderBindingTableAddress = trace->miss_shader_binding_table->deviceAddress,
.missShaderBindingTableSize = trace->miss_shader_binding_table->size,
.missShaderBindingTableStride = trace->miss_shader_binding_table->stride,
.hitShaderBindingTableAddress = trace->hit_shader_binding_table->deviceAddress,
.hitShaderBindingTableSize = trace->hit_shader_binding_table->size,
.hitShaderBindingTableStride = trace->hit_shader_binding_table->stride,
.callableShaderBindingTableAddress = trace->callable_shader_binding_table->deviceAddress,
.callableShaderBindingTableSize = trace->callable_shader_binding_table->size,
.callableShaderBindingTableStride = trace->callable_shader_binding_table->stride,
.width = trace->width,
.height = trace->height,
.depth = trace->depth,
};
/* Round the launch up to whole workgroups of the shader's block size
 * (block[] was set at pipeline bind time). */
state->trace_rays_info.grid[0] = DIV_ROUND_UP(trace->width, state->trace_rays_info.block[0]);
state->trace_rays_info.grid[1] = DIV_ROUND_UP(trace->height, state->trace_rays_info.block[1]);
state->trace_rays_info.grid[2] = DIV_ROUND_UP(trace->depth, state->trace_rays_info.block[2]);
state->pctx->launch_grid(state->pctx, &state->trace_rays_info);
}
/* vkCmdTraceRaysIndirectKHR: like handle_trace_rays, but the launch
 * dimensions live in a device buffer (VkTraceRaysIndirectCommandKHR at
 * indirect_device_address) while the SBT regions still come from the CPU-side
 * command. Since lavapipe executes on the CPU, the indirect buffer is simply
 * mapped and read directly at execute time.
 */
static void
handle_trace_rays_indirect(struct vk_cmd_queue_entry *cmd, struct rendering_state *state)
{
struct vk_cmd_trace_rays_indirect_khr *trace = &cmd->u.trace_rays_indirect_khr;
emit_ray_tracing_state(state);
/* Resolve the device address back to a buffer + offset, then map it. */
size_t indirect_offset;
VkBuffer _indirect = get_buffer(state, (void *)(uintptr_t)trace->indirect_device_address, &indirect_offset);
VK_FROM_HANDLE(lvp_buffer, indirect, _indirect);
struct pipe_transfer *transfer;
const uint8_t *map = pipe_buffer_map(state->pctx, indirect->bo, PIPE_MAP_READ, &transfer);
map += indirect_offset;
const VkTraceRaysIndirectCommandKHR *src = (const void *)map;
/* Build the unified command the traversal shader consumes: SBT data from
 * the CPU command, dimensions from the mapped indirect buffer. */
VkTraceRaysIndirectCommand2KHR *command = lvp_push_internal_buffer(
state, MESA_SHADER_COMPUTE, sizeof(VkTraceRaysIndirectCommand2KHR));
*command = (VkTraceRaysIndirectCommand2KHR) {
.raygenShaderRecordAddress = trace->raygen_shader_binding_table->deviceAddress,
.raygenShaderRecordSize = trace->raygen_shader_binding_table->size,
.missShaderBindingTableAddress = trace->miss_shader_binding_table->deviceAddress,
.missShaderBindingTableSize = trace->miss_shader_binding_table->size,
.missShaderBindingTableStride = trace->miss_shader_binding_table->stride,
.hitShaderBindingTableAddress = trace->hit_shader_binding_table->deviceAddress,
.hitShaderBindingTableSize = trace->hit_shader_binding_table->size,
.hitShaderBindingTableStride = trace->hit_shader_binding_table->stride,
.callableShaderBindingTableAddress = trace->callable_shader_binding_table->deviceAddress,
.callableShaderBindingTableSize = trace->callable_shader_binding_table->size,
.callableShaderBindingTableStride = trace->callable_shader_binding_table->stride,
.width = src->width,
.height = src->height,
.depth = src->depth,
};
/* All reads from the mapping must happen before the unmap below. */
state->trace_rays_info.grid[0] = DIV_ROUND_UP(src->width, state->trace_rays_info.block[0]);
state->trace_rays_info.grid[1] = DIV_ROUND_UP(src->height, state->trace_rays_info.block[1]);
state->trace_rays_info.grid[2] = DIV_ROUND_UP(src->depth, state->trace_rays_info.block[2]);
state->pctx->buffer_unmap(state->pctx, transfer);
state->pctx->launch_grid(state->pctx, &state->trace_rays_info);
}
void lvp_add_enqueue_cmd_entrypoints(struct vk_device_dispatch_table *disp)
{
struct vk_device_dispatch_table cmd_enqueue_dispatch;
@@ -4564,6 +4712,10 @@ void lvp_add_enqueue_cmd_entrypoints(struct vk_device_dispatch_table *disp)
ENQUEUE_CMD(CmdBuildAccelerationStructuresIndirectKHR)
ENQUEUE_CMD(CmdWriteAccelerationStructuresPropertiesKHR)
ENQUEUE_CMD(CmdSetRayTracingPipelineStackSizeKHR)
ENQUEUE_CMD(CmdTraceRaysIndirectKHR)
ENQUEUE_CMD(CmdTraceRaysKHR)
#undef ENQUEUE_CMD
}
@@ -4947,6 +5099,14 @@ static void lvp_execute_cmd_buffer(struct list_head *cmds,
case VK_CMD_WRITE_ACCELERATION_STRUCTURES_PROPERTIES_KHR:
handle_write_acceleration_structures_properties(cmd, state);
break;
case VK_CMD_SET_RAY_TRACING_PIPELINE_STACK_SIZE_KHR:
break;
case VK_CMD_TRACE_RAYS_INDIRECT_KHR:
handle_trace_rays_indirect(cmd, state);
break;
case VK_CMD_TRACE_RAYS_KHR:
handle_trace_rays(cmd, state);
break;
default:
fprintf(stderr, "Unsupported command %s\n", vk_cmd_queue_type_names[cmd->type]);
unreachable("Unsupported command");

View File

@@ -95,6 +95,14 @@ lvp_pipeline_destroy(struct lvp_device *device, struct lvp_pipeline *pipeline, b
lvp_pipeline_destroy(device, p, locked);
}
if (pipeline->rt.stages) {
for (uint32_t i = 0; i < pipeline->rt.stage_count; i++)
lvp_pipeline_nir_ref(pipeline->rt.stages + i, NULL);
}
free(pipeline->rt.stages);
free(pipeline->rt.groups);
vk_free(&device->vk.alloc, pipeline->state_data);
vk_object_base_finish(&pipeline->base);
vk_free(&device->vk.alloc, pipeline);
@@ -342,6 +350,7 @@ compile_spirv(struct lvp_device *pdevice, const VkPipelineShaderStageCreateInfo
.runtime_descriptor_array = true,
.shader_enqueue = true,
.ray_query = true,
.ray_tracing = true,
},
.ubo_addr_format = nir_address_format_vec2_index_32bit_offset,
.ssbo_addr_format = nir_address_format_vec2_index_32bit_offset,

View File

@@ -127,9 +127,9 @@ void __lvp_finishme(const char *file, int line, const char *format, ...)
return; \
} while (0)
#define LVP_SHADER_STAGES (MESA_SHADER_MESH + 1)
#define LVP_SHADER_STAGES (MESA_SHADER_CALLABLE + 1)
#define LVP_STAGE_MASK BITFIELD_MASK(LVP_SHADER_STAGES)
#define LVP_STAGE_MASK_GFX (BITFIELD_MASK(LVP_SHADER_STAGES) & ~BITFIELD_BIT(MESA_SHADER_COMPUTE))
#define LVP_STAGE_MASK_GFX (BITFIELD_MASK(PIPE_SHADER_MESH_TYPES) & ~BITFIELD_BIT(MESA_SHADER_COMPUTE))
#define lvp_foreach_stage(stage, stage_bits) \
for (gl_shader_stage stage, \
@@ -485,6 +485,7 @@ struct lvp_shader {
/* Kind of pipeline a lvp_pipeline object represents; used to pick the right
 * bind/dispatch path in command execution. */
enum lvp_pipeline_type {
LVP_PIPELINE_GRAPHICS,
LVP_PIPELINE_COMPUTE,
LVP_PIPELINE_RAY_TRACING,
LVP_PIPELINE_EXEC_GRAPH,
LVP_PIPELINE_TYPE_COUNT,
};
@@ -495,6 +496,7 @@ lvp_pipeline_type_from_bind_point(VkPipelineBindPoint bind_point)
switch (bind_point) {
case VK_PIPELINE_BIND_POINT_GRAPHICS: return LVP_PIPELINE_GRAPHICS;
case VK_PIPELINE_BIND_POINT_COMPUTE: return LVP_PIPELINE_COMPUTE;
case VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR: return LVP_PIPELINE_RAY_TRACING;
#ifdef VK_ENABLE_BETA_EXTENSIONS
case VK_PIPELINE_BIND_POINT_EXECUTION_GRAPH_AMDX: return LVP_PIPELINE_EXEC_GRAPH;
#endif
@@ -502,6 +504,10 @@ lvp_pipeline_type_from_bind_point(VkPipelineBindPoint bind_point)
}
}
/* Mask of all six VK_KHR_ray_tracing_pipeline shader stages, for testing
 * whether a stage mask (push constants, descriptor visibility, ...) touches
 * the ray tracing pipeline at all. */
#define LVP_RAY_TRACING_STAGES (VK_SHADER_STAGE_RAYGEN_BIT_KHR | VK_SHADER_STAGE_ANY_HIT_BIT_KHR | \
VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR | VK_SHADER_STAGE_MISS_BIT_KHR | \
VK_SHADER_STAGE_INTERSECTION_BIT_KHR | VK_SHADER_STAGE_CALLABLE_BIT_KHR)
static inline uint32_t
lvp_pipeline_types_from_shader_stages(VkShaderStageFlags stageFlags)
{
@@ -510,6 +516,8 @@ lvp_pipeline_types_from_shader_stages(VkShaderStageFlags stageFlags)
if (stageFlags & MESA_VK_SHADER_STAGE_WORKGRAPH_HACK_BIT_FIXME)
types |= BITFIELD_BIT(LVP_PIPELINE_EXEC_GRAPH);
#endif
if (stageFlags & LVP_RAY_TRACING_STAGES)
types |= BITFIELD_BIT(LVP_PIPELINE_RAY_TRACING);
if (stageFlags & VK_SHADER_STAGE_COMPUTE_BIT)
types |= BITFIELD_BIT(LVP_PIPELINE_COMPUTE);
if (stageFlags & (VK_SHADER_STAGE_ALL_GRAPHICS | VK_SHADER_STAGE_MESH_BIT_EXT | VK_SHADER_STAGE_TASK_BIT_EXT))
@@ -517,6 +525,20 @@ lvp_pipeline_types_from_shader_stages(VkShaderStageFlags stageFlags)
return types;
}
/* Size in bytes of an opaque shader group handle as reported via
 * shaderGroupHandleSize, and the max ray hit attribute size
 * (maxRayHitAttributeSize). */
#define LVP_RAY_TRACING_GROUP_HANDLE_SIZE 32
#define LVP_RAY_HIT_ATTRIBS_SIZE 32
/* Opaque handle handed back for a shader group; only the group index is
 * meaningful to the implementation. */
struct lvp_ray_tracing_group_handle {
uint32_t index;
};
/* Per-group record for a ray tracing pipeline. The *_index fields reference
 * shaders belonging to the group; presumably recursive_index is the
 * raygen/miss/closest-hit (callable-recursion) shader while ahit_index and
 * isec_index are the any-hit and intersection shaders — confirm against
 * lvp_ray_tracing_pipeline.c. */
struct lvp_ray_tracing_group {
struct lvp_ray_tracing_group_handle handle;
uint32_t recursive_index;
uint32_t ahit_index;
uint32_t isec_index;
};
struct lvp_pipeline {
struct vk_object_base base;
struct lvp_device * device;
@@ -544,6 +566,13 @@ struct lvp_pipeline {
uint32_t scratch_size;
} exec_graph;
struct {
struct lvp_pipeline_nir **stages;
struct lvp_ray_tracing_group *groups;
uint32_t stage_count;
uint32_t group_count;
} rt;
unsigned num_groups;
unsigned num_groups_total;
VkPipeline groups[0];

File diff suppressed because it is too large Load Diff

View File

@@ -32,6 +32,7 @@ liblvp_files = files(
'lvp_pipeline.c',
'lvp_pipeline_cache.c',
'lvp_query.c',
'lvp_ray_tracing_pipeline.c',
'lvp_wsi.c')
lvp_deps = []