intel/rt: Add return instructions at the end of ray-tracing shaders

Each callable ray-tracing shader stage has to perform a return
operation at the end.  In the case of raygen shaders, it retires the
bindless thread because the raygen shader is always the root of the call
tree.  In the case of any-hit shaders, the default action is to accept the
hit.  For callable, miss, and closest-hit shaders, it does a return
operation.  The assumption is that the calling shader has placed a
BINDLESS_SHADER_RECORD address for the return in the first QWord of the
callee's scratch space.  The return operation simply loads this value
and calls a btd_spawn intrinsic to jump to it.

Reviewed-by: Caio Marcelo de Oliveira Filho <caio.oliveira@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/7356>
This commit is contained in:
Jason Ekstrand
2020-08-06 13:53:34 -05:00
committed by Marge Bot
parent 49778a7253
commit ca88cd8e5a
7 changed files with 158 additions and 0 deletions

View File

@@ -391,6 +391,7 @@ intrinsic("trace_ray", src_comp=[-1, 1, 1, 1, 1, 1, 3, 1, 3, 1, -1])
# Ray-tracing control intrinsics.
# src[] = { hit_t, hit_kind }
intrinsic("report_ray_intersection", src_comp=[1, 1], dest_comp=1)
intrinsic("ignore_ray_intersection")
intrinsic("accept_ray_intersection") # Not in SPIR-V; useful for lowering
intrinsic("terminate_ray")
# src[] = { sbt_index, payload }
intrinsic("execute_callable", src_comp=[1, -1])

View File

@@ -96,6 +96,7 @@ COMPILER_FILES = \
compiler/brw_nir_lower_mem_access_bit_sizes.c \
compiler/brw_nir_lower_rt_intrinsics.c \
compiler/brw_nir_lower_scoped_barriers.c \
compiler/brw_nir_lower_shader_calls.c \
compiler/brw_nir_opt_peephole_ffma.c \
compiler/brw_nir_rt.h \
compiler/brw_nir_rt.c \

View File

@@ -0,0 +1,98 @@
/*
* Copyright © 2020 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "brw_nir_rt.h"
#include "brw_nir_rt_builder.h"
/** Insert the appropriate return instruction at the end of the shader
 *
 * Each callable ray-tracing stage has to perform some "return" operation at
 * the end of the shader.  Exactly which operation depends on the stage; see
 * the switch statement below.
 */
void
brw_nir_lower_shader_returns(nir_shader *shader)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   /* Reserve scratch space at the start of the shader's per-thread scratch
    * space for the return BINDLESS_SHADER_RECORD address and data payload.
    * When a shader is called, the calling shader will write the return BSR
    * address in this region of the callee's scratch space.
    *
    * We could also put it at the end of the caller's scratch space.  However,
    * doing it this way means that a shader never accesses its caller's
    * scratch space unless given an explicit pointer (such as for ray
    * payloads).  It also makes computing the address easier given that we
    * want to apply an alignment to the scratch offset to ensure we can make
    * alignment assumptions in the called shader.
    *
    * This isn't needed for ray-gen shaders because they end the thread and
    * never return to the calling trampoline shader.
    */
   assert(shader->scratch_size == 0);
   if (shader->info.stage != MESA_SHADER_RAYGEN)
      shader->scratch_size = BRW_BTD_STACK_CALLEE_DATA_SIZE;

   nir_builder b;
   nir_builder_init(&b, impl);

   /* Walk every block that falls through to the end of the shader and insert
    * the stage-appropriate return operation just before any jump.
    */
   set_foreach(impl->end_block->predecessors, block_entry) {
      struct nir_block *block = (void *)block_entry->key;
      b.cursor = nir_after_block_before_jump(block);

      switch (shader->info.stage) {
      case MESA_SHADER_RAYGEN:
         /* A raygen shader is always the root of the shader call tree.  When
          * it ends, we retire the bindless stack ID and no further shaders
          * will be executed.
          */
         brw_nir_btd_retire(&b);
         break;

      case MESA_SHADER_ANY_HIT:
         /* The default action of an any-hit shader is to accept the ray
          * intersection.
          */
         nir_accept_ray_intersection(&b);
         break;

      case MESA_SHADER_CALLABLE:
      case MESA_SHADER_MISS:
      case MESA_SHADER_CLOSEST_HIT:
         /* Callable, miss, and closest-hit shaders don't take any special
          * action at the end.  They simply return back to the previous shader
          * in the call stack.
          */
         brw_nir_btd_return(&b);
         break;

      case MESA_SHADER_INTERSECTION:
         /* Intersection shaders are handled by the combined
          * intersection/any-hit lowering, not here.
          */
         unreachable("TODO");

      default:
         unreachable("Invalid callable shader stage");
      }

      /* NOTE(review): this pass only supports a single end-block
       * predecessor; the assert enforces that and the break exits after
       * handling the one block.
       */
      assert(impl->end_block->predecessors->entries == 1);
      break;
   }

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);
}

View File

@@ -111,6 +111,7 @@ void
/* Lower a raygen shader: insert the end-of-shader thread-retire operation,
 * then lower its ray-tracing scratch accesses.
 */
brw_nir_lower_raygen(nir_shader *nir)
{
   assert(nir->info.stage == MESA_SHADER_RAYGEN);
   NIR_PASS_V(nir, brw_nir_lower_shader_returns);
   lower_rt_scratch(nir);
}
@@ -118,6 +119,7 @@ void
/* Lower an any-hit shader: insert the default accept-intersection return
 * operation, then lower its ray-tracing scratch accesses.
 */
brw_nir_lower_any_hit(nir_shader *nir, const struct gen_device_info *devinfo)
{
   assert(nir->info.stage == MESA_SHADER_ANY_HIT);
   NIR_PASS_V(nir, brw_nir_lower_shader_returns);
   lower_rt_scratch(nir);
}
@@ -125,6 +127,7 @@ void
/* Lower a closest-hit shader: insert the bindless return operation, then
 * lower its ray-tracing scratch accesses.
 */
brw_nir_lower_closest_hit(nir_shader *nir)
{
   assert(nir->info.stage == MESA_SHADER_CLOSEST_HIT);
   NIR_PASS_V(nir, brw_nir_lower_shader_returns);
   lower_rt_scratch(nir);
}
@@ -132,6 +135,7 @@ void
/* Lower a miss shader: insert the bindless return operation, then lower its
 * ray-tracing scratch accesses.
 */
brw_nir_lower_miss(nir_shader *nir)
{
   assert(nir->info.stage == MESA_SHADER_MISS);
   NIR_PASS_V(nir, brw_nir_lower_shader_returns);
   lower_rt_scratch(nir);
}
@@ -139,6 +143,7 @@ void
/* Lower a callable shader: insert the bindless return operation, then lower
 * its ray-tracing scratch accesses.
 */
brw_nir_lower_callable(nir_shader *nir)
{
   assert(nir->info.stage == MESA_SHADER_CALLABLE);
   NIR_PASS_V(nir, brw_nir_lower_shader_returns);
   lower_rt_scratch(nir);
}
@@ -149,5 +154,6 @@ brw_nir_lower_combined_intersection_any_hit(nir_shader *intersection,
{
assert(intersection->info.stage == MESA_SHADER_INTERSECTION);
assert(any_hit == NULL || any_hit->info.stage == MESA_SHADER_ANY_HIT);
NIR_PASS_V(intersection, brw_nir_lower_shader_returns);
lower_rt_scratch(intersection);
}

View File

@@ -41,9 +41,16 @@ void brw_nir_lower_combined_intersection_any_hit(nir_shader *intersection,
const nir_shader *any_hit,
const struct gen_device_info *devinfo);
/* We reserve the first 16B of the stack for callee data pointers */
#define BRW_BTD_STACK_RESUME_BSR_ADDR_OFFSET 0
#define BRW_BTD_STACK_CALL_DATA_PTR_OFFSET 8
#define BRW_BTD_STACK_CALLEE_DATA_SIZE 16
/* We require the stack to be 8B aligned at the start of a shader */
#define BRW_BTD_STACK_ALIGN 8
void brw_nir_lower_shader_returns(nir_shader *shader);
void brw_nir_lower_rt_intrinsics(nir_shader *shader,
const struct gen_device_info *devinfo);

View File

@@ -65,6 +65,50 @@ brw_nir_rt_store_scratch(nir_builder *b, uint32_t offset, unsigned align,
value, write_mask);
}
/** Emit an accept_ray_intersection intrinsic at the builder's cursor */
static inline void
nir_accept_ray_intersection(nir_builder *b)
{
   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->shader,
                                 nir_intrinsic_accept_ray_intersection);
   nir_builder_instr_insert(b, &instr->instr);
}
/** Emit a btd_spawn_intel intrinsic that jumps to the given
 * BINDLESS_SHADER_RECORD address, passing along the global argument pointer.
 */
static inline void
brw_nir_btd_spawn(nir_builder *b, nir_ssa_def *record_addr)
{
   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_btd_spawn_intel);
   instr->src[0] = nir_src_for_ssa(nir_load_btd_global_arg_addr_intel(b));
   instr->src[1] = nir_src_for_ssa(record_addr);
   nir_builder_instr_insert(b, &instr->instr);
}
/** Emit a btd_retire_intel intrinsic, retiring the bindless stack ID */
static inline void
brw_nir_btd_retire(nir_builder *b)
{
   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_btd_retire_intel);
   nir_builder_instr_insert(b, &instr->instr);
}
/** This is a pseudo-op which does a bindless return
 *
 * The caller is expected to have written the return BINDLESS_SHADER_RECORD
 * address into the first QWord of our scratch space.  Load it back out and
 * spawn the resume shader at that address.
 */
static inline void
brw_nir_btd_return(struct nir_builder *b)
{
   assert(b->shader->scratch_size == BRW_BTD_STACK_CALLEE_DATA_SIZE);
   nir_ssa_def *return_bsr_addr =
      brw_nir_rt_load_scratch(b, BRW_BTD_STACK_RESUME_BSR_ADDR_OFFSET,
                              8 /* align */, 1, 64);
   brw_nir_btd_spawn(b, return_bsr_addr);
}
static inline void
assert_def_size(nir_ssa_def *def, unsigned num_components, unsigned bit_size)
{

View File

@@ -87,6 +87,7 @@ libintel_compiler_files = files(
'brw_nir_lower_mem_access_bit_sizes.c',
'brw_nir_lower_rt_intrinsics.c',
'brw_nir_lower_scoped_barriers.c',
'brw_nir_lower_shader_calls.c',
'brw_nir_opt_peephole_ffma.c',
'brw_nir_rt.h',
'brw_nir_rt.c',