diff --git a/src/amd/vulkan/bvh/build_helpers.h b/src/amd/vulkan/bvh/build_helpers.h
index 9865580f66a..9ac17a76748 100644
--- a/src/amd/vulkan/bvh/build_helpers.h
+++ b/src/amd/vulkan/bvh/build_helpers.h
@@ -250,8 +250,6 @@ TYPE(radv_ir_triangle_node, 4);
 TYPE(radv_ir_aabb_node, 4);
 TYPE(radv_ir_instance_node, 8);
 
-#define NULL_NODE_ID 0xFFFFFFFF
-
 uint32_t
 id_to_offset(uint32_t id)
 {
@@ -302,7 +300,7 @@ ir_type_to_bvh_type(uint32_t type)
       return radv_bvh_node_aabb;
    }
    /* unreachable in valid nodes */
-   return NULL_NODE_ID;
+   return RADV_BVH_INVALID_NODE;
 }
 
 AABB
diff --git a/src/amd/vulkan/bvh/bvh.h b/src/amd/vulkan/bvh/bvh.h
index d93af2f2b7e..864b4d0b25c 100644
--- a/src/amd/vulkan/bvh/bvh.h
+++ b/src/amd/vulkan/bvh/bvh.h
@@ -165,5 +165,6 @@ struct radv_bvh_box32_node {
 /* a box32 node */
 
 #define RADV_BVH_ROOT_NODE radv_bvh_node_internal
+#define RADV_BVH_INVALID_NODE 0xffffffffu
 
 #endif
diff --git a/src/amd/vulkan/bvh/converter_internal.comp b/src/amd/vulkan/bvh/converter_internal.comp
index 96ef27f6eff..b3c097cdb39 100644
--- a/src/amd/vulkan/bvh/converter_internal.comp
+++ b/src/amd/vulkan/bvh/converter_internal.comp
@@ -98,11 +98,11 @@ main()
          continue;
 
       uint32_t found_child_count = 0;
-      uint32_t children[4] = {NULL_NODE_ID, NULL_NODE_ID,
-                              NULL_NODE_ID, NULL_NODE_ID};
+      uint32_t children[4] = {RADV_BVH_INVALID_NODE, RADV_BVH_INVALID_NODE,
+                              RADV_BVH_INVALID_NODE, RADV_BVH_INVALID_NODE};
 
       for (uint32_t i = 0; i < 2; ++i)
-         if (src.children[i] != NULL_NODE_ID)
+         if (src.children[i] != RADV_BVH_INVALID_NODE)
            children[found_child_count++] = src.children[i];
 
       while (found_child_count < 4) {
@@ -131,10 +131,10 @@ main()
          uint32_t grandchildren[2] = DEREF(child_node).children;
          uint32_t valid_grandchild_count = 0;
 
-         if (grandchildren[1] != NULL_NODE_ID)
+         if (grandchildren[1] != RADV_BVH_INVALID_NODE)
             ++valid_grandchild_count;
 
-         if (grandchildren[0] != NULL_NODE_ID)
+         if (grandchildren[0] != RADV_BVH_INVALID_NODE)
             ++valid_grandchild_count;
          else
            grandchildren[0] = grandchildren[1];
@@ -200,6 +200,6 @@ main()
       DEREF(header).aabb = src.base.aabb;
       DEREF(header).bvh_offset = args.output_bvh_offset;
 
-      set_parent(RADV_BVH_ROOT_NODE, NULL_NODE_ID);
+      set_parent(RADV_BVH_ROOT_NODE, RADV_BVH_INVALID_NODE);
    }
 }
diff --git a/src/amd/vulkan/bvh/lbvh_internal.comp b/src/amd/vulkan/bvh/lbvh_internal.comp
index 088f5619a34..621bb4b97d6 100644
--- a/src/amd/vulkan/bvh/lbvh_internal.comp
+++ b/src/amd/vulkan/bvh/lbvh_internal.comp
@@ -66,7 +66,7 @@ main(void)
 
       uint32_t child_id = DEREF(INDEX(key_id_pair, args.src_ids, src_index + i)).id;
 
-      if (i < child_count && child_id != NULL_NODE_ID) {
+      if (i < child_count && child_id != RADV_BVH_INVALID_NODE) {
         VOID_REF node = OFFSET(args.bvh, ir_id_to_offset(child_id));
         REF(radv_ir_node) child = REF(radv_ir_node)(node);
         bounds = load_aabb(child);
@@ -75,7 +75,7 @@ main(void)
         total_bounds.max = max(total_bounds.max, bounds.max);
         is_active = true;
       } else {
-        child_id = NULL_NODE_ID;
+        child_id = RADV_BVH_INVALID_NODE;
       }
 
       DEREF(dst_node).children[i] = child_id;
@@ -89,7 +89,7 @@ main(void)
    DEREF(dst_node).in_final_tree = FINAL_TREE_UNKNOWN;
 
    /* An internal node is considered inactive if it has no children. Set the resulting scratch node
-    * id to NULL_NODE_ID for more internal nodes to become inactive.
+    * id to RADV_BVH_INVALID_NODE for more internal nodes to become inactive.
     */
-   DEREF(INDEX(key_id_pair, args.dst_ids, global_id)).id = is_active ? current_id : NULL_NODE_ID;
+   DEREF(INDEX(key_id_pair, args.dst_ids, global_id)).id = is_active ? current_id : RADV_BVH_INVALID_NODE;
 }
diff --git a/src/amd/vulkan/bvh/leaf.comp b/src/amd/vulkan/bvh/leaf.comp
index 5273210efa4..987125e7e41 100644
--- a/src/amd/vulkan/bvh/leaf.comp
+++ b/src/amd/vulkan/bvh/leaf.comp
@@ -308,7 +308,7 @@ main(void)
       is_active = build_instance(bounds, src_ptr, dst_ptr, global_id);
    }
 
-   DEREF(id_ptr).id = is_active ? pack_ir_node_id(dst_offset, node_type) : NULL_NODE_ID;
+   DEREF(id_ptr).id = is_active ? pack_ir_node_id(dst_offset, node_type) : RADV_BVH_INVALID_NODE;
 
    min_float_emulated(INDEX(int32_t, args.bounds, 0), bounds.min.x);
    min_float_emulated(INDEX(int32_t, args.bounds, 1), bounds.min.y);
diff --git a/src/amd/vulkan/bvh/morton.comp b/src/amd/vulkan/bvh/morton.comp
index 578b05eead2..750531bd0ea 100644
--- a/src/amd/vulkan/bvh/morton.comp
+++ b/src/amd/vulkan/bvh/morton.comp
@@ -72,7 +72,7 @@ main(void)
    uint32_t id = DEREF(key_id).id;
    uint32_t key;
 
-   if (id != NULL_NODE_ID) {
+   if (id != RADV_BVH_INVALID_NODE) {
      AABB bounds = load_aabb(REF(radv_ir_node)OFFSET(args.bvh, ir_id_to_offset(id)));
      vec3 center = (bounds.min + bounds.max) * 0.5;
 
diff --git a/src/amd/vulkan/radv_nir_lower_ray_queries.c b/src/amd/vulkan/radv_nir_lower_ray_queries.c
index 6526d528e4d..2177c936cf9 100644
--- a/src/amd/vulkan/radv_nir_lower_ray_queries.c
+++ b/src/amd/vulkan/radv_nir_lower_ray_queries.c
@@ -403,10 +403,10 @@ lower_rq_initialize(nir_builder *b, nir_ssa_def *index, nir_intrinsic_instr *ins
 
    rq_store_var(b, index, vars->trav.stack, nir_imm_int(b, 0), 0x1);
    rq_store_var(b, index, vars->trav.current_node, nir_imm_int(b, RADV_BVH_ROOT_NODE), 0x1);
-   rq_store_var(b, index, vars->trav.previous_node, nir_imm_int(b, -1), 0x1);
-   rq_store_var(b, index, vars->trav.instance_top_node, nir_imm_int(b, -1), 0x1);
-   rq_store_var(b, index, vars->trav.instance_bottom_node,
-                nir_imm_int(b, RADV_BVH_NO_INSTANCE_ROOT), 0x1);
+   rq_store_var(b, index, vars->trav.previous_node, nir_imm_int(b, RADV_BVH_INVALID_NODE), 0x1);
+   rq_store_var(b, index, vars->trav.instance_top_node, nir_imm_int(b, RADV_BVH_INVALID_NODE),
+                0x1);
+   rq_store_var(b, index, vars->trav.instance_bottom_node, nir_imm_int(b, RADV_BVH_NO_INSTANCE_ROOT), 0x1);
 
    rq_store_var(b, index, vars->trav.top_stack, nir_imm_int(b, -1), 1);
    rq_store_var(b, index, vars->trav.stack_base, nir_imm_int(b, 0), 1);
diff --git a/src/amd/vulkan/radv_pipeline_rt.c b/src/amd/vulkan/radv_pipeline_rt.c
index 05194baafe4..df2454ef376 100644
--- a/src/amd/vulkan/radv_pipeline_rt.c
+++ b/src/amd/vulkan/radv_pipeline_rt.c
@@ -1378,10 +1378,9 @@ build_traversal_shader(struct radv_device *device,
    nir_store_var(&b, trav_vars.stack,
                  nir_imul_imm(&b, nir_load_local_invocation_index(&b), sizeof(uint32_t)), 1);
    nir_store_var(&b, trav_vars.stack_base, nir_load_var(&b, trav_vars.stack), 1);
    nir_store_var(&b, trav_vars.current_node, nir_imm_int(&b, RADV_BVH_ROOT_NODE), 0x1);
-   nir_store_var(&b, trav_vars.previous_node, nir_imm_int(&b, -1), 0x1);
-   nir_store_var(&b, trav_vars.instance_top_node, nir_imm_int(&b, -1), 0x1);
-   nir_store_var(&b, trav_vars.instance_bottom_node, nir_imm_int(&b, RADV_BVH_NO_INSTANCE_ROOT),
-                 0x1);
+   nir_store_var(&b, trav_vars.previous_node, nir_imm_int(&b, RADV_BVH_INVALID_NODE), 0x1);
+   nir_store_var(&b, trav_vars.instance_top_node, nir_imm_int(&b, RADV_BVH_INVALID_NODE), 0x1);
+   nir_store_var(&b, trav_vars.instance_bottom_node, nir_imm_int(&b, RADV_BVH_NO_INSTANCE_ROOT), 0x1);
 
    nir_store_var(&b, trav_vars.top_stack, nir_imm_int(&b, -1), 1);
diff --git a/src/amd/vulkan/radv_rt_common.c b/src/amd/vulkan/radv_rt_common.c
index 58ef5933d29..97bea5cc9ce 100644
--- a/src/amd/vulkan/radv_rt_common.c
+++ b/src/amd/vulkan/radv_rt_common.c
@@ -544,7 +544,8 @@ radv_build_ray_traversal(struct radv_device *device, nir_builder *b,
 
    nir_push_loop(b);
    {
-      nir_push_if(b, nir_ieq_imm(b, nir_load_deref(b, args->vars.current_node), -1));
+      nir_push_if(
+         b, nir_ieq_imm(b, nir_load_deref(b, args->vars.current_node), RADV_BVH_INVALID_NODE));
       {
          /* Early exit if we never overflowed the stack, to avoid having to backtrack to
           * the root for no reason. */
@@ -586,7 +587,7 @@ radv_build_ray_traversal(struct radv_device *device, nir_builder *b,
            build_node_to_addr(device, b, nir_load_deref(b, args->vars.bvh_base));
         nir_ssa_def *parent = fetch_parent_node(b, bvh_addr, prev);
 
-        nir_push_if(b, nir_ieq(b, parent, nir_imm_int(b, -1)));
+        nir_push_if(b, nir_ieq(b, parent, nir_imm_int(b, RADV_BVH_INVALID_NODE)));
         {
            nir_store_var(b, incomplete, nir_imm_bool(b, false), 0x1);
            nir_jump(b, nir_jump_break);
@@ -605,13 +606,15 @@ radv_build_ray_traversal(struct radv_device *device, nir_builder *b,
                                 nir_imm_int(b, args->stack_stride * args->stack_entries));
            nir_ssa_def *bvh_node = args->stack_load_cb(b, stack_ptr, args);
            nir_store_deref(b, args->vars.current_node, bvh_node, 0x1);
-           nir_store_deref(b, args->vars.previous_node, nir_imm_int(b, -1), 0x1);
+           nir_store_deref(b, args->vars.previous_node, nir_imm_int(b, RADV_BVH_INVALID_NODE),
+                           0x1);
         }
         nir_pop_if(b, NULL);
      }
      nir_push_else(b, NULL);
      {
-        nir_store_deref(b, args->vars.previous_node, nir_imm_int(b, -1), 0x1);
+        nir_store_deref(b, args->vars.previous_node, nir_imm_int(b, RADV_BVH_INVALID_NODE),
+                        0x1);
      }
      nir_pop_if(b, NULL);
 
@@ -619,7 +622,7 @@ radv_build_ray_traversal(struct radv_device *device, nir_builder *b,
 
      nir_ssa_def *prev_node = nir_load_deref(b, args->vars.previous_node);
      nir_store_deref(b, args->vars.previous_node, bvh_node, 0x1);
-     nir_store_deref(b, args->vars.current_node, nir_imm_int(b, -1), 0x1);
+     nir_store_deref(b, args->vars.current_node, nir_imm_int(b, RADV_BVH_INVALID_NODE), 0x1);
 
      nir_ssa_def *global_bvh_node =
         nir_iadd(b, nir_load_deref(b, args->vars.bvh_base), nir_u2u64(b, bvh_node));
@@ -705,14 +708,14 @@ radv_build_ray_traversal(struct radv_device *device, nir_builder *b,
      }
 
      /* box */
-     nir_push_if(b, nir_ieq_imm(b, prev_node, -1));
+     nir_push_if(b, nir_ieq_imm(b, prev_node, RADV_BVH_INVALID_NODE));
      {
        nir_ssa_def *new_nodes[4];
        for (unsigned i = 0; i < 4; ++i)
           new_nodes[i] = nir_channel(b, result, i);
 
        for (unsigned i = 1; i < 4; ++i)
-          nir_push_if(b, nir_ine_imm(b, new_nodes[i], -1));
+          nir_push_if(b, nir_ine_imm(b, new_nodes[i], RADV_BVH_INVALID_NODE));
 
        for (unsigned i = 4; i-- > 1;) {
           nir_ssa_def *stack = nir_load_deref(b, args->vars.stack);
@@ -736,7 +739,7 @@ radv_build_ray_traversal(struct radv_device *device, nir_builder *b,
      }
      nir_push_else(b, NULL);
      {
-       nir_ssa_def *next = nir_imm_int(b, -1);
+       nir_ssa_def *next = nir_imm_int(b, RADV_BVH_INVALID_NODE);
        for (unsigned i = 0; i < 3; ++i) {
           next = nir_bcsel(b, nir_ieq(b, prev_node, nir_channel(b, result, i)),
                            nir_channel(b, result, i + 1), next);
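
Reviewer note, not part of the patch: `RADV_BVH_INVALID_NODE` is defined as `0xffffffffu`, which is bit-identical to the signed `-1` literals it replaces in the NIR paths (`nir_imm_int(b, -1)` emits the same 32-bit pattern), so the series is a naming and consistency cleanup with no behavioral change. A minimal standalone C sketch of that equivalence, assuming only standard C and a local copy of the define:

    #include <assert.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical standalone copy of the new define from bvh.h. */
    #define RADV_BVH_INVALID_NODE 0xffffffffu

    int
    main(void)
    {
       /* A signed -1 converted to uint32_t wraps to 0xffffffff (C11 6.3.1.3),
        * so node-id comparisons against the named sentinel behave exactly
        * like the old comparisons against -1. */
       uint32_t from_signed = (uint32_t)-1;
       assert(from_signed == RADV_BVH_INVALID_NODE);

       /* The same holds for a 32-bit immediate, mirroring nir_imm_int(b, -1)
        * versus nir_imm_int(b, RADV_BVH_INVALID_NODE). */
       int32_t imm = -1;
       assert((uint32_t)imm == RADV_BVH_INVALID_NODE);

       printf("sentinel ok: 0x%08" PRIx32 "\n", from_signed);
       return 0;
    }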