amd: drop support for LLVM 8

LLVM 8 doesn't support Navi1x, and removing it enables this nice code cleanup.

v2: rebase - mareko

Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com> (v1)
Acked-by: Marek Olšák <marek.olsak@amd.com>
Reviewed-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Reviewed-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/10199>
This commit is contained in:
Samuel Pitoiset
2020-01-30 11:02:00 +01:00
committed by Marge Bot
parent ac2ab5c895
commit 936b58378c
12 changed files with 57 additions and 186 deletions

View File

@@ -1538,7 +1538,9 @@ endif
if with_microsoft_clc
_llvm_version = '>= 10.0.0'
elif with_amd_vk or with_gallium_radeonsi or with_gallium_opencl
elif with_amd_vk or with_gallium_radeonsi
_llvm_version = '>= 9.0.0'
elif with_gallium_opencl
_llvm_version = '>= 8.0.0'
elif with_gallium_swr
_llvm_version = '>= 6.0.0'

View File

@@ -205,10 +205,8 @@ void finish_assembler_test()
/* we could use CLRX for disassembly but that would require it to be
* installed */
if (program->chip_class == GFX10_3 && LLVM_VERSION_MAJOR < 9) {
if (program->chip_class == GFX10_3 && LLVM_VERSION_MAJOR < 11) {
skip_test("LLVM 11 needed for GFX10_3 disassembly");
} else if (program->chip_class == GFX10 && LLVM_VERSION_MAJOR < 9) {
skip_test("LLVM 9 needed for GFX10 disassembly");
} else if (program->chip_class >= GFX8) {
print_asm(program.get(), binary, exec_size / 4u, output);
} else {

View File

@@ -448,7 +448,7 @@ void ac_build_optimization_barrier(struct ac_llvm_context *ctx, LLVMValueRef *pv
LLVMValueRef ac_build_shader_clock(struct ac_llvm_context *ctx, nir_scope scope)
{
const char *subgroup = LLVM_VERSION_MAJOR >= 9 ? "llvm.readcyclecounter" : "llvm.amdgcn.s.memtime";
const char *subgroup = "llvm.readcyclecounter";
const char *name = scope == NIR_SCOPE_DEVICE ? "llvm.amdgcn.s.memrealtime" : subgroup;
LLVMValueRef tmp = ac_build_intrinsic(ctx, name, ctx->i64, NULL, 0, 0);
@@ -462,14 +462,11 @@ LLVMValueRef ac_build_ballot(struct ac_llvm_context *ctx, LLVMValueRef value)
if (LLVMTypeOf(value) == ctx->i1)
value = LLVMBuildZExt(ctx->builder, value, ctx->i32, "");
if (LLVM_VERSION_MAJOR >= 9) {
if (ctx->wave_size == 64)
name = "llvm.amdgcn.icmp.i64.i32";
else
name = "llvm.amdgcn.icmp.i32.i32";
} else {
name = "llvm.amdgcn.icmp.i32";
}
if (ctx->wave_size == 64)
name = "llvm.amdgcn.icmp.i64.i32";
else
name = "llvm.amdgcn.icmp.i32.i32";
LLVMValueRef args[3] = {value, ctx->i32_0, LLVMConstInt(ctx->i32, LLVMIntNE, 0)};
/* We currently have no other way to prevent LLVM from lifting the icmp
@@ -488,14 +485,11 @@ LLVMValueRef ac_get_i1_sgpr_mask(struct ac_llvm_context *ctx, LLVMValueRef value
{
const char *name;
if (LLVM_VERSION_MAJOR >= 9) {
if (ctx->wave_size == 64)
name = "llvm.amdgcn.icmp.i64.i1";
else
name = "llvm.amdgcn.icmp.i32.i1";
} else {
name = "llvm.amdgcn.icmp.i1";
}
if (ctx->wave_size == 64)
name = "llvm.amdgcn.icmp.i64.i1";
else
name = "llvm.amdgcn.icmp.i32.i1";
LLVMValueRef args[3] = {
value,
ctx->i1false,
@@ -1143,8 +1137,7 @@ void ac_build_buffer_store_dword(struct ac_llvm_context *ctx, LLVMValueRef rsrc,
unsigned num_channels, LLVMValueRef voffset, LLVMValueRef soffset,
unsigned inst_offset, unsigned cache_policy)
{
/* Split 3 channel stores, because only LLVM 9+ support 3-channel
* intrinsics. */
/* Split 3 channel stores. */
if (num_channels == 3 && !ac_has_vec3_support(ctx->chip_class, false)) {
LLVMValueRef v[3], v01;
@@ -1348,63 +1341,24 @@ LLVMValueRef ac_build_struct_tbuffer_load(struct ac_llvm_context *ctx, LLVMValue
nfmt, cache_policy, can_speculate, true);
}
LLVMValueRef ac_build_raw_tbuffer_load(struct ac_llvm_context *ctx, LLVMValueRef rsrc,
LLVMValueRef voffset, LLVMValueRef soffset,
LLVMValueRef immoffset, unsigned num_channels, unsigned dfmt,
unsigned nfmt, unsigned cache_policy, bool can_speculate)
{
return ac_build_tbuffer_load(ctx, rsrc, NULL, voffset, soffset, immoffset, num_channels, dfmt,
nfmt, cache_policy, can_speculate, false);
}
LLVMValueRef ac_build_tbuffer_load_short(struct ac_llvm_context *ctx, LLVMValueRef rsrc,
LLVMValueRef voffset, LLVMValueRef soffset,
LLVMValueRef immoffset, unsigned cache_policy)
{
LLVMValueRef res;
voffset = LLVMBuildAdd(ctx->builder, voffset, immoffset, "");
if (LLVM_VERSION_MAJOR >= 9) {
voffset = LLVMBuildAdd(ctx->builder, voffset, immoffset, "");
/* LLVM 9+ supports i8/i16 with struct/raw intrinsics. */
res = ac_build_buffer_load_common(ctx, rsrc, NULL, voffset, soffset, 1, ctx->i16,
cache_policy, false, false, false);
} else {
unsigned dfmt = V_008F0C_BUF_DATA_FORMAT_16;
unsigned nfmt = V_008F0C_BUF_NUM_FORMAT_UINT;
res = ac_build_raw_tbuffer_load(ctx, rsrc, voffset, soffset, immoffset, 1, dfmt, nfmt,
cache_policy, false);
res = LLVMBuildTrunc(ctx->builder, res, ctx->i16, "");
}
return res;
return ac_build_buffer_load_common(ctx, rsrc, NULL, voffset, soffset, 1, ctx->i16,
cache_policy, false, false, false);
}
LLVMValueRef ac_build_tbuffer_load_byte(struct ac_llvm_context *ctx, LLVMValueRef rsrc,
LLVMValueRef voffset, LLVMValueRef soffset,
LLVMValueRef immoffset, unsigned cache_policy)
{
LLVMValueRef res;
voffset = LLVMBuildAdd(ctx->builder, voffset, immoffset, "");
if (LLVM_VERSION_MAJOR >= 9) {
voffset = LLVMBuildAdd(ctx->builder, voffset, immoffset, "");
/* LLVM 9+ supports i8/i16 with struct/raw intrinsics. */
res = ac_build_buffer_load_common(ctx, rsrc, NULL, voffset, soffset, 1, ctx->i8, cache_policy,
false, false, false);
} else {
unsigned dfmt = V_008F0C_BUF_DATA_FORMAT_8;
unsigned nfmt = V_008F0C_BUF_NUM_FORMAT_UINT;
res = ac_build_raw_tbuffer_load(ctx, rsrc, voffset, soffset, immoffset, 1, dfmt, nfmt,
cache_policy, false);
res = LLVMBuildTrunc(ctx->builder, res, ctx->i8, "");
}
return res;
return ac_build_buffer_load_common(ctx, rsrc, NULL, voffset, soffset, 1, ctx->i8, cache_policy,
false, false, false);
}
/**
@@ -1521,8 +1475,6 @@ LLVMValueRef ac_build_opencoded_load_format(struct ac_llvm_context *ctx, unsigne
load_log_size += -log_recombine;
}
assert(load_log_size >= 2 || LLVM_VERSION_MAJOR >= 9);
LLVMValueRef loads[32]; /* up to 32 bytes */
for (unsigned i = 0; i < load_num_channels; ++i) {
tmp =
@@ -1756,19 +1708,8 @@ void ac_build_tbuffer_store_short(struct ac_llvm_context *ctx, LLVMValueRef rsrc
{
vdata = LLVMBuildBitCast(ctx->builder, vdata, ctx->i16, "");
if (LLVM_VERSION_MAJOR >= 9) {
/* LLVM 9+ supports i8/i16 with struct/raw intrinsics. */
ac_build_buffer_store_common(ctx, rsrc, vdata, NULL, voffset, soffset, cache_policy, false,
false);
} else {
unsigned dfmt = V_008F0C_BUF_DATA_FORMAT_16;
unsigned nfmt = V_008F0C_BUF_NUM_FORMAT_UINT;
vdata = LLVMBuildZExt(ctx->builder, vdata, ctx->i32, "");
ac_build_raw_tbuffer_store(ctx, rsrc, vdata, voffset, soffset, ctx->i32_0, 1, dfmt, nfmt,
cache_policy);
}
ac_build_buffer_store_common(ctx, rsrc, vdata, NULL, voffset, soffset, cache_policy, false,
false);
}
void ac_build_tbuffer_store_byte(struct ac_llvm_context *ctx, LLVMValueRef rsrc, LLVMValueRef vdata,
@@ -1776,20 +1717,10 @@ void ac_build_tbuffer_store_byte(struct ac_llvm_context *ctx, LLVMValueRef rsrc,
{
vdata = LLVMBuildBitCast(ctx->builder, vdata, ctx->i8, "");
if (LLVM_VERSION_MAJOR >= 9) {
/* LLVM 9+ supports i8/i16 with struct/raw intrinsics. */
ac_build_buffer_store_common(ctx, rsrc, vdata, NULL, voffset, soffset, cache_policy, false,
false);
} else {
unsigned dfmt = V_008F0C_BUF_DATA_FORMAT_8;
unsigned nfmt = V_008F0C_BUF_NUM_FORMAT_UINT;
vdata = LLVMBuildZExt(ctx->builder, vdata, ctx->i32, "");
ac_build_raw_tbuffer_store(ctx, rsrc, vdata, voffset, soffset, ctx->i32_0, 1, dfmt, nfmt,
cache_policy);
}
ac_build_buffer_store_common(ctx, rsrc, vdata, NULL, voffset, soffset, cache_policy, false,
false);
}
/**
* Set range metadata on an instruction. This can only be used on load and
* call instructions. If you know an instruction can only produce the values

View File

@@ -143,12 +143,8 @@ void ac_enable_global_isel(LLVMTargetMachineRef tm);
static inline bool ac_has_vec3_support(enum chip_class chip, bool use_format)
{
if (chip == GFX6 && !use_format) {
/* GFX6 only supports vec3 with load/store format. */
return false;
}
return LLVM_VERSION_MAJOR >= 9;
/* GFX6 only supports vec3 with load/store format. */
return chip != GFX6 || use_format;
}
#ifdef __cplusplus

View File

@@ -1925,25 +1925,12 @@ static LLVMValueRef visit_atomic_ssbo(struct ac_nir_context *ctx, nir_intrinsic_
}
params[arg_count++] = ac_llvm_extract_elem(&ctx->ac, get_src(ctx, instr->src[2]), 0);
params[arg_count++] = descriptor;
params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
params[arg_count++] = ctx->ac.i32_0; /* soffset */
params[arg_count++] = ctx->ac.i32_0; /* slc */
if (LLVM_VERSION_MAJOR >= 9) {
/* XXX: The new raw/struct atomic intrinsics are buggy with
* LLVM 8, see r358579.
*/
params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
params[arg_count++] = ctx->ac.i32_0; /* soffset */
params[arg_count++] = ctx->ac.i32_0; /* slc */
ac_build_type_name_for_intr(return_type, type, sizeof(type));
snprintf(name, sizeof(name), "llvm.amdgcn.raw.buffer.atomic.%s.%s", op, type);
} else {
params[arg_count++] = ctx->ac.i32_0; /* vindex */
params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
params[arg_count++] = ctx->ac.i1false; /* slc */
assert(return_type == ctx->ac.i32);
snprintf(name, sizeof(name), "llvm.amdgcn.buffer.atomic.%s", op);
}
ac_build_type_name_for_intr(return_type, type, sizeof(type));
snprintf(name, sizeof(name), "llvm.amdgcn.raw.buffer.atomic.%s.%s", op, type);
result = ac_build_intrinsic(&ctx->ac, name, return_type, params, arg_count, 0);
}
@@ -2086,7 +2073,7 @@ static LLVMValueRef visit_global_atomic(struct ac_nir_context *ctx,
LLVMValueRef result;
/* use "singlethread" sync scope to implement relaxed ordering */
const char *sync_scope = LLVM_VERSION_MAJOR >= 9 ? "singlethread-one-as" : "singlethread";
const char *sync_scope = "singlethread-one-as";
LLVMTypeRef ptr_type = LLVMPointerType(LLVMTypeOf(data), AC_ADDR_SPACE_GLOBAL);
@@ -2403,28 +2390,6 @@ static void get_image_coords(struct ac_nir_context *ctx, const nir_intrinsic_ins
}
}
static LLVMValueRef get_image_buffer_descriptor(struct ac_nir_context *ctx,
const nir_intrinsic_instr *instr,
LLVMValueRef dynamic_index, bool write, bool atomic)
{
LLVMValueRef rsrc = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_BUFFER, write);
if (ctx->ac.chip_class == GFX9 && LLVM_VERSION_MAJOR < 9 && atomic) {
LLVMValueRef elem_count =
LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 2, 0), "");
LLVMValueRef stride =
LLVMBuildExtractElement(ctx->ac.builder, rsrc, LLVMConstInt(ctx->ac.i32, 1, 0), "");
stride = LLVMBuildLShr(ctx->ac.builder, stride, LLVMConstInt(ctx->ac.i32, 16, 0), "");
LLVMValueRef new_elem_count = LLVMBuildSelect(
ctx->ac.builder, LLVMBuildICmp(ctx->ac.builder, LLVMIntUGT, elem_count, stride, ""),
elem_count, stride, "");
rsrc = LLVMBuildInsertElement(ctx->ac.builder, rsrc, new_elem_count,
LLVMConstInt(ctx->ac.i32, 2, 0), "");
}
return rsrc;
}
static LLVMValueRef enter_waterfall_image(struct ac_nir_context *ctx,
struct waterfall_context *wctx,
const nir_intrinsic_instr *instr)
@@ -2472,7 +2437,7 @@ static LLVMValueRef visit_image_load(struct ac_nir_context *ctx, const nir_intri
num_channels = num_channels < 4 ? 2 : 4;
LLVMValueRef rsrc, vindex;
rsrc = get_image_buffer_descriptor(ctx, instr, dynamic_index, false, false);
rsrc = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_BUFFER, false);
vindex =
LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]), ctx->ac.i32_0, "");
@@ -2566,7 +2531,7 @@ static void visit_image_store(struct ac_nir_context *ctx, const nir_intrinsic_in
}
if (dim == GLSL_SAMPLER_DIM_BUF) {
LLVMValueRef rsrc = get_image_buffer_descriptor(ctx, instr, dynamic_index, true, false);
LLVMValueRef rsrc = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_BUFFER, true);
unsigned src_channels = ac_get_llvm_num_components(src);
LLVMValueRef vindex;
@@ -2702,30 +2667,22 @@ static LLVMValueRef visit_image_atomic(struct ac_nir_context *ctx, const nir_int
LLVMValueRef result;
if (dim == GLSL_SAMPLER_DIM_BUF) {
params[param_count++] = get_image_buffer_descriptor(ctx, instr, dynamic_index, true, true);
params[param_count++] = get_image_descriptor(ctx, instr, dynamic_index, AC_DESC_BUFFER, true);
params[param_count++] = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
ctx->ac.i32_0, ""); /* vindex */
params[param_count++] = ctx->ac.i32_0; /* voffset */
if (cmpswap && instr->dest.ssa.bit_size == 64) {
result = emit_ssbo_comp_swap_64(ctx, params[2], params[3], params[1], params[0], true);
} else {
if (LLVM_VERSION_MAJOR >= 9) {
/* XXX: The new raw/struct atomic intrinsics are buggy
* with LLVM 8, see r358579.
*/
params[param_count++] = ctx->ac.i32_0; /* soffset */
params[param_count++] = ctx->ac.i32_0; /* slc */
/* XXX: The new raw/struct atomic intrinsics are buggy
* with LLVM 8, see r358579.
*/
params[param_count++] = ctx->ac.i32_0; /* soffset */
params[param_count++] = ctx->ac.i32_0; /* slc */
length = snprintf(intrinsic_name, sizeof(intrinsic_name),
"llvm.amdgcn.struct.buffer.atomic.%s.%s", atomic_name,
instr->dest.ssa.bit_size == 64 ? "i64" : "i32");
} else {
assert(instr->dest.ssa.bit_size == 64);
params[param_count++] = ctx->ac.i1false; /* slc */
length = snprintf(intrinsic_name, sizeof(intrinsic_name), "llvm.amdgcn.buffer.atomic.%s",
atomic_name);
}
length = snprintf(intrinsic_name, sizeof(intrinsic_name),
"llvm.amdgcn.struct.buffer.atomic.%s.%s", atomic_name,
instr->dest.ssa.bit_size == 64 ? "i64" : "i32");
assert(length < sizeof(intrinsic_name));
result = ac_build_intrinsic(&ctx->ac, intrinsic_name, LLVMTypeOf(params[0]), params, param_count, 0);
@@ -3020,7 +2977,7 @@ static LLVMValueRef visit_var_atomic(struct ac_nir_context *ctx, const nir_intri
LLVMValueRef result;
LLVMValueRef src = get_src(ctx, instr->src[src_idx]);
const char *sync_scope = LLVM_VERSION_MAJOR >= 9 ? "workgroup-one-as" : "workgroup";
const char *sync_scope = "workgroup-one-as";
if (instr->intrinsic == nir_intrinsic_shared_atomic_comp_swap) {
LLVMValueRef src1 = get_src(ctx, instr->src[src_idx + 1]);

View File

@@ -403,7 +403,7 @@ radv_physical_device_get_supported_extensions(const struct radv_physical_device
.KHR_sampler_mirror_clamp_to_edge = true,
.KHR_sampler_ycbcr_conversion = true,
.KHR_separate_depth_stencil_layouts = true,
.KHR_shader_atomic_int64 = LLVM_VERSION_MAJOR >= 9 || !device->use_llvm,
.KHR_shader_atomic_int64 = true,
.KHR_shader_clock = true,
.KHR_shader_draw_parameters = true,
.KHR_shader_float16_int8 = true,
@@ -460,7 +460,7 @@ radv_physical_device_get_supported_extensions(const struct radv_physical_device
.EXT_sampler_filter_minmax = true,
.EXT_scalar_block_layout = device->rad_info.chip_class >= GFX7,
.EXT_shader_atomic_float = true,
.EXT_shader_demote_to_helper_invocation = LLVM_VERSION_MAJOR >= 9 || !device->use_llvm,
.EXT_shader_demote_to_helper_invocation = true,
.EXT_shader_image_atomic_int64 = LLVM_VERSION_MAJOR >= 11 || !device->use_llvm,
.EXT_shader_stencil_export = true,
.EXT_shader_subgroup_ballot = true,
@@ -1165,8 +1165,7 @@ radv_get_physical_device_features_1_1(struct radv_physical_device *pdevice,
f->storageBuffer16BitAccess = true;
f->uniformAndStorageBuffer16BitAccess = true;
f->storagePushConstant16 = true;
f->storageInputOutput16 =
pdevice->rad_info.has_packed_math_16bit && (LLVM_VERSION_MAJOR >= 9 || !pdevice->use_llvm);
f->storageInputOutput16 = pdevice->rad_info.has_packed_math_16bit;
f->multiview = true;
f->multiviewGeometryShader = true;
f->multiviewTessellationShader = true;
@@ -1188,8 +1187,8 @@ radv_get_physical_device_features_1_2(struct radv_physical_device *pdevice,
f->storageBuffer8BitAccess = true;
f->uniformAndStorageBuffer8BitAccess = true;
f->storagePushConstant8 = true;
f->shaderBufferInt64Atomics = LLVM_VERSION_MAJOR >= 9 || !pdevice->use_llvm;
f->shaderSharedInt64Atomics = LLVM_VERSION_MAJOR >= 9 || !pdevice->use_llvm;
f->shaderBufferInt64Atomics = true;
f->shaderSharedInt64Atomics = true;
f->shaderFloat16 = pdevice->rad_info.has_packed_math_16bit;
f->shaderInt8 = true;
@@ -1405,7 +1404,7 @@ radv_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT: {
VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT *features =
(VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT *)ext;
features->shaderDemoteToHelperInvocation = LLVM_VERSION_MAJOR >= 9 || !pdevice->use_llvm;
features->shaderDemoteToHelperInvocation = true;
break;
}
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT: {

View File

@@ -2345,7 +2345,7 @@ gfx10_ngg_gs_emit_epilogue_2(struct radv_shader_context *ctx)
LLVMTypeRef gdsptr = LLVMPointerType(ctx->ac.i32, AC_ADDR_SPACE_GDS);
LLVMValueRef gdsbase = LLVMBuildIntToPtr(builder, ctx->ac.i32_0, gdsptr, "");
const char *sync_scope = LLVM_VERSION_MAJOR >= 9 ? "workgroup-one-as" : "workgroup";
const char *sync_scope = "workgroup-one-as";
/* Use a plain GDS atomic to accumulate the number of generated
* primitives.

View File

@@ -55,12 +55,6 @@ do_winsys_init(struct radv_amdgpu_winsys *ws, int fd)
return false;
}
/* LLVM 9.0 is required for GFX10. */
if (ws->info.chip_class == GFX10 && ws->use_llvm && LLVM_VERSION_MAJOR < 9) {
fprintf(stderr, "radv: Navi family support requires LLVM 9 or higher\n");
return false;
}
ws->addrlib = ac_addrlib_create(&ws->info, &ws->info.max_alignment);
if (!ws->addrlib) {
fprintf(stderr, "amdgpu: Cannot create addrlib.\n");

View File

@@ -228,7 +228,7 @@ static int si_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY:
case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY:
return LLVM_VERSION_MAJOR < 9 && !sscreen->info.has_unaligned_shader_loads;
return 0;
case PIPE_CAP_SPARSE_BUFFER_PAGE_SIZE:
/* Gfx8 (Polaris11) hangs, so don't enable this on Gfx8 and older chips. */

View File

@@ -1023,12 +1023,6 @@ static struct pipe_screen *radeonsi_screen_create_impl(struct radeon_winsys *ws,
return NULL;
}
if (sscreen->info.chip_class == GFX10 && LLVM_VERSION_MAJOR < 9) {
fprintf(stderr, "radeonsi: Navi family support requires LLVM 9 or higher\n");
FREE(sscreen);
return NULL;
}
if (sscreen->info.chip_class >= GFX9) {
sscreen->se_tile_repeat = 32 * sscreen->info.max_se;
} else {

View File

@@ -220,7 +220,7 @@ void si_llvm_create_main_func(struct si_shader_context *ctx, bool ngg_cull_shade
if (shader->key.as_ls || ctx->stage == MESA_SHADER_TESS_CTRL) {
if (USE_LDS_SYMBOLS && LLVM_VERSION_MAJOR >= 9) {
if (USE_LDS_SYMBOLS) {
/* The LSHS size is not known until draw time, so we append it
* at the end of whatever LDS use there may be in the rest of
* the shader (currently none, unless LLVM decides to do its

View File

@@ -335,7 +335,7 @@ void si_preload_esgs_ring(struct si_shader_context *ctx)
ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac, buf_ptr, offset);
} else {
if (USE_LDS_SYMBOLS && LLVM_VERSION_MAJOR >= 9) {
if (USE_LDS_SYMBOLS) {
/* Declare the ESGS ring as an explicit LDS symbol. */
si_llvm_declare_esgs_ring(ctx);
} else {