radeonsi: rename radeon_*push_*_sh_reg -> gfx11_*push_*_sh_reg

These helpers will only be used on gfx11.x.

Reviewed-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/26095>
Marek Olšák authored 2023-10-24 00:49:57 -04:00, committed by Marge Bot
parent 4659d0dcc2, commit ac22440859
6 changed files with 81 additions and 81 deletions
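
The renamed helpers buffer (reg, value) pairs that are flushed to the GPU later in packed form; the draw code below notes "Prefer SET_SH_REG_PAIRS_PACKED* on Gfx11+", which is why the helpers are gfx11-only. A minimal sketch of the call-site pattern, mirroring the radeon_set_or_push_gfx_sh_reg wrapper in the first hunk (GFX_VERSION and HAS_SH_PAIRS_PACKED come from the surrounding template code; reg and value are placeholders):

   /* Gfx11 with packed SH pairs: buffer the register write so it can be
    * flushed later in a single SET_SH_REG_PAIRS_PACKED packet; all other
    * paths emit an immediate SET_SH_REG. */
   if (GFX_VERSION >= GFX11 && HAS_SH_PAIRS_PACKED) {
      gfx11_push_gfx_sh_reg(reg, value);   /* was radeon_push_gfx_sh_reg() */
   } else {
      radeon_set_sh_reg_seq(reg, 1);
      radeon_emit(value);
   }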

src/gallium/drivers/radeonsi/si_build_pm4.h

@@ -278,21 +278,21 @@
 } while (0)
 
 /* GFX11 packet building helpers for buffered SH registers. */
-#define radeon_push_gfx_sh_reg(reg, value) \
+#define gfx11_push_gfx_sh_reg(reg, value) \
    gfx11_push_sh_reg(reg, value, gfx)
-#define radeon_push_compute_sh_reg(reg, value) \
+#define gfx11_push_compute_sh_reg(reg, value) \
    gfx11_push_sh_reg(reg, value, compute)
-#define radeon_opt_push_gfx_sh_reg(reg, reg_enum, value) \
+#define gfx11_opt_push_gfx_sh_reg(reg, reg_enum, value) \
    gfx11_opt_push_sh_reg(reg, reg_enum, value, gfx)
-#define radeon_opt_push_compute_sh_reg(reg, reg_enum, value) \
+#define gfx11_opt_push_compute_sh_reg(reg, reg_enum, value) \
    gfx11_opt_push_sh_reg(reg, reg_enum, value, compute)
 #define radeon_set_or_push_gfx_sh_reg(reg, value) do { \
    if (GFX_VERSION >= GFX11 && HAS_SH_PAIRS_PACKED) { \
-      radeon_push_gfx_sh_reg(reg, value); \
+      gfx11_push_gfx_sh_reg(reg, value); \
    } else { \
       radeon_set_sh_reg_seq(reg, 1); \
       radeon_emit(value); \

src/gallium/drivers/radeonsi/si_compute.c

@@ -500,23 +500,23 @@ static bool si_switch_compute_shader(struct si_context *sctx, struct si_compute
                              RADEON_USAGE_READ | RADEON_PRIO_SHADER_BINARY);
 
    if (sctx->screen->info.has_set_sh_pairs_packed) {
-      radeon_push_compute_sh_reg(R_00B830_COMPUTE_PGM_LO, shader_va >> 8);
-      radeon_opt_push_compute_sh_reg(R_00B848_COMPUTE_PGM_RSRC1,
-                                     SI_TRACKED_COMPUTE_PGM_RSRC1, config->rsrc1);
-      radeon_opt_push_compute_sh_reg(R_00B84C_COMPUTE_PGM_RSRC2,
-                                     SI_TRACKED_COMPUTE_PGM_RSRC2, rsrc2);
-      radeon_opt_push_compute_sh_reg(R_00B8A0_COMPUTE_PGM_RSRC3,
-                                     SI_TRACKED_COMPUTE_PGM_RSRC3,
-                                     S_00B8A0_INST_PREF_SIZE(si_get_shader_prefetch_size(shader)));
-      radeon_opt_push_compute_sh_reg(R_00B860_COMPUTE_TMPRING_SIZE,
-                                     SI_TRACKED_COMPUTE_TMPRING_SIZE, tmpring_size);
+      gfx11_push_compute_sh_reg(R_00B830_COMPUTE_PGM_LO, shader_va >> 8);
+      gfx11_opt_push_compute_sh_reg(R_00B848_COMPUTE_PGM_RSRC1,
+                                    SI_TRACKED_COMPUTE_PGM_RSRC1, config->rsrc1);
+      gfx11_opt_push_compute_sh_reg(R_00B84C_COMPUTE_PGM_RSRC2,
+                                    SI_TRACKED_COMPUTE_PGM_RSRC2, rsrc2);
+      gfx11_opt_push_compute_sh_reg(R_00B8A0_COMPUTE_PGM_RSRC3,
+                                    SI_TRACKED_COMPUTE_PGM_RSRC3,
+                                    S_00B8A0_INST_PREF_SIZE(si_get_shader_prefetch_size(shader)));
+      gfx11_opt_push_compute_sh_reg(R_00B860_COMPUTE_TMPRING_SIZE,
+                                    SI_TRACKED_COMPUTE_TMPRING_SIZE, tmpring_size);
       if (shader->scratch_bo) {
-         radeon_opt_push_compute_sh_reg(R_00B840_COMPUTE_DISPATCH_SCRATCH_BASE_LO,
-                                        SI_TRACKED_COMPUTE_DISPATCH_SCRATCH_BASE_LO,
-                                        sctx->compute_scratch_buffer->gpu_address >> 8);
-         radeon_opt_push_compute_sh_reg(R_00B844_COMPUTE_DISPATCH_SCRATCH_BASE_HI,
-                                        SI_TRACKED_COMPUTE_DISPATCH_SCRATCH_BASE_HI,
-                                        sctx->compute_scratch_buffer->gpu_address >> 40);
+         gfx11_opt_push_compute_sh_reg(R_00B840_COMPUTE_DISPATCH_SCRATCH_BASE_LO,
+                                       SI_TRACKED_COMPUTE_DISPATCH_SCRATCH_BASE_LO,
+                                       sctx->compute_scratch_buffer->gpu_address >> 8);
+         gfx11_opt_push_compute_sh_reg(R_00B844_COMPUTE_DISPATCH_SCRATCH_BASE_HI,
+                                       SI_TRACKED_COMPUTE_DISPATCH_SCRATCH_BASE_HI,
+                                       sctx->compute_scratch_buffer->gpu_address >> 40);
       }
    } else {
       radeon_begin(cs);
@@ -741,9 +741,9 @@ static void si_setup_nir_user_data(struct si_context *sctx, const struct pipe_gr
          radeon_begin_again(cs);
       } else {
          if (sctx->screen->info.has_set_sh_pairs_packed) {
-            radeon_push_compute_sh_reg(grid_size_reg, info->grid[0]);
-            radeon_push_compute_sh_reg(grid_size_reg + 4, info->grid[1]);
-            radeon_push_compute_sh_reg(grid_size_reg + 8, info->grid[2]);
+            gfx11_push_compute_sh_reg(grid_size_reg, info->grid[0]);
+            gfx11_push_compute_sh_reg(grid_size_reg + 4, info->grid[1]);
+            gfx11_push_compute_sh_reg(grid_size_reg + 8, info->grid[2]);
          } else {
             radeon_set_sh_reg_seq(grid_size_reg, 3);
             radeon_emit(info->grid[0]);
@@ -757,7 +757,7 @@ static void si_setup_nir_user_data(struct si_context *sctx, const struct pipe_gr
       uint32_t value = info->block[0] | (info->block[1] << 10) | (info->block[2] << 20);
 
       if (sctx->screen->info.has_set_sh_pairs_packed) {
-         radeon_push_compute_sh_reg(block_size_reg, value);
+         gfx11_push_compute_sh_reg(block_size_reg, value);
       } else {
          radeon_set_sh_reg(block_size_reg, value);
       }
@@ -768,7 +768,7 @@ static void si_setup_nir_user_data(struct si_context *sctx, const struct pipe_gr
 
       if (sctx->screen->info.has_set_sh_pairs_packed) {
          for (unsigned i = 0; i < num; i++)
-            radeon_push_compute_sh_reg(cs_user_data_reg + i * 4, sctx->cs_user_data[i]);
+            gfx11_push_compute_sh_reg(cs_user_data_reg + i * 4, sctx->cs_user_data[i]);
       } else {
          radeon_set_sh_reg_seq(cs_user_data_reg, num);
          radeon_emit_array(sctx->cs_user_data, num);
@@ -803,9 +803,9 @@ static void si_emit_dispatch_packets(struct si_context *sctx, const struct pipe_
                                                       threadgroups_per_cu);
 
    if (sctx->screen->info.has_set_sh_pairs_packed) {
-      radeon_opt_push_compute_sh_reg(R_00B854_COMPUTE_RESOURCE_LIMITS,
-                                     SI_TRACKED_COMPUTE_RESOURCE_LIMITS,
-                                     compute_resource_limits);
+      gfx11_opt_push_compute_sh_reg(R_00B854_COMPUTE_RESOURCE_LIMITS,
+                                    SI_TRACKED_COMPUTE_RESOURCE_LIMITS,
+                                    compute_resource_limits);
    } else {
       radeon_opt_set_sh_reg(sctx, R_00B854_COMPUTE_RESOURCE_LIMITS,
                             SI_TRACKED_COMPUTE_RESOURCE_LIMITS,
@@ -845,12 +845,12 @@ static void si_emit_dispatch_packets(struct si_context *sctx, const struct pipe_
    }
 
    if (sctx->screen->info.has_set_sh_pairs_packed) {
-      radeon_opt_push_compute_sh_reg(R_00B81C_COMPUTE_NUM_THREAD_X,
-                                     SI_TRACKED_COMPUTE_NUM_THREAD_X, num_threads[0]);
-      radeon_opt_push_compute_sh_reg(R_00B820_COMPUTE_NUM_THREAD_Y,
-                                     SI_TRACKED_COMPUTE_NUM_THREAD_Y, num_threads[1]);
-      radeon_opt_push_compute_sh_reg(R_00B824_COMPUTE_NUM_THREAD_Z,
-                                     SI_TRACKED_COMPUTE_NUM_THREAD_Z, num_threads[2]);
+      gfx11_opt_push_compute_sh_reg(R_00B81C_COMPUTE_NUM_THREAD_X,
+                                    SI_TRACKED_COMPUTE_NUM_THREAD_X, num_threads[0]);
+      gfx11_opt_push_compute_sh_reg(R_00B820_COMPUTE_NUM_THREAD_Y,
+                                    SI_TRACKED_COMPUTE_NUM_THREAD_Y, num_threads[1]);
+      gfx11_opt_push_compute_sh_reg(R_00B824_COMPUTE_NUM_THREAD_Z,
+                                    SI_TRACKED_COMPUTE_NUM_THREAD_Z, num_threads[2]);
    } else {
       radeon_opt_set_sh_reg3(sctx, R_00B81C_COMPUTE_NUM_THREAD_X,
                              SI_TRACKED_COMPUTE_NUM_THREAD_X,

src/gallium/drivers/radeonsi/si_descriptors.c

@@ -2192,7 +2192,7 @@ void si_shader_change_notify(struct si_context *sctx)
             struct si_descriptors *descs = &sctx->descriptors[i]; \
             unsigned sh_reg = sh_reg_base + descs->shader_userdata_offset; \
 \
-            radeon_push_##type##_sh_reg(sh_reg, descs->gpu_address); \
+            gfx11_push_##type##_sh_reg(sh_reg, descs->gpu_address); \
          } \
       } else { \
          while (mask) { \
@@ -2215,12 +2215,12 @@ static void si_emit_global_shader_pointers(struct si_context *sctx, struct si_de
    radeon_begin(&sctx->gfx_cs);
 
    if (sctx->screen->info.has_set_sh_pairs_packed) {
-      radeon_push_gfx_sh_reg(R_00B030_SPI_SHADER_USER_DATA_PS_0 + descs->shader_userdata_offset,
-                             descs->gpu_address);
-      radeon_push_gfx_sh_reg(R_00B230_SPI_SHADER_USER_DATA_GS_0 + descs->shader_userdata_offset,
-                             descs->gpu_address);
-      radeon_push_gfx_sh_reg(R_00B430_SPI_SHADER_USER_DATA_HS_0 + descs->shader_userdata_offset,
-                             descs->gpu_address);
+      gfx11_push_gfx_sh_reg(R_00B030_SPI_SHADER_USER_DATA_PS_0 + descs->shader_userdata_offset,
+                            descs->gpu_address);
+      gfx11_push_gfx_sh_reg(R_00B230_SPI_SHADER_USER_DATA_GS_0 + descs->shader_userdata_offset,
+                            descs->gpu_address);
+      gfx11_push_gfx_sh_reg(R_00B430_SPI_SHADER_USER_DATA_HS_0 + descs->shader_userdata_offset,
+                            descs->gpu_address);
    } else if (sctx->gfx_level >= GFX11) {
       radeon_emit_one_32bit_pointer(sctx, descs, R_00B030_SPI_SHADER_USER_DATA_PS_0);
       radeon_emit_one_32bit_pointer(sctx, descs, R_00B230_SPI_SHADER_USER_DATA_GS_0);
@@ -2293,9 +2293,9 @@ void si_emit_graphics_shader_pointers(struct si_context *sctx, unsigned index)
    if (sctx->gs_attribute_ring_pointer_dirty) {
       if (sctx->screen->info.has_set_sh_pairs_packed) {
-         radeon_push_gfx_sh_reg(R_00B230_SPI_SHADER_USER_DATA_GS_0 +
-                                GFX9_SGPR_ATTRIBUTE_RING_ADDR * 4,
-                                sctx->screen->attribute_ring->gpu_address);
+         gfx11_push_gfx_sh_reg(R_00B230_SPI_SHADER_USER_DATA_GS_0 +
+                               GFX9_SGPR_ATTRIBUTE_RING_ADDR * 4,
+                               sctx->screen->attribute_ring->gpu_address);
       } else {
          radeon_set_sh_reg(R_00B230_SPI_SHADER_USER_DATA_GS_0 +
                            GFX9_SGPR_ATTRIBUTE_RING_ADDR * 4,
@@ -2341,8 +2341,8 @@ void si_emit_compute_shader_pointers(struct si_context *sctx)
    if (sctx->compute_bindless_pointer_dirty) {
       if (sctx->screen->info.has_set_sh_pairs_packed) {
-         radeon_push_compute_sh_reg(base + sctx->bindless_descriptors.shader_userdata_offset,
-                                    sctx->bindless_descriptors.gpu_address);
+         gfx11_push_compute_sh_reg(base + sctx->bindless_descriptors.shader_userdata_offset,
+                                   sctx->bindless_descriptors.gpu_address);
       } else {
          radeon_emit_one_32bit_pointer(sctx, &sctx->bindless_descriptors, base);
       }

src/gallium/drivers/radeonsi/si_state_draw.cpp

@@ -1423,15 +1423,15 @@ static void si_emit_draw_packets(struct si_context *sctx, const struct pipe_draw
       if (!is_blit) {
          /* Prefer SET_SH_REG_PAIRS_PACKED* on Gfx11+. */
          if (HAS_SH_PAIRS_PACKED) {
-            radeon_opt_push_gfx_sh_reg(sh_base_reg + SI_SGPR_BASE_VERTEX * 4,
-                                       tracked_base_vertex_reg, base_vertex);
+            gfx11_opt_push_gfx_sh_reg(sh_base_reg + SI_SGPR_BASE_VERTEX * 4,
+                                      tracked_base_vertex_reg, base_vertex);
             if (set_draw_id) {
-               radeon_opt_push_gfx_sh_reg(sh_base_reg + SI_SGPR_DRAWID * 4,
-                                          tracked_base_vertex_reg + 1, drawid_base);
+               gfx11_opt_push_gfx_sh_reg(sh_base_reg + SI_SGPR_DRAWID * 4,
+                                         tracked_base_vertex_reg + 1, drawid_base);
             }
             if (set_base_instance) {
-               radeon_opt_push_gfx_sh_reg(sh_base_reg + SI_SGPR_START_INSTANCE * 4,
-                                          tracked_base_vertex_reg + 2, info->start_instance);
+               gfx11_opt_push_gfx_sh_reg(sh_base_reg + SI_SGPR_START_INSTANCE * 4,
+                                         tracked_base_vertex_reg + 2, info->start_instance);
             }
          } else {
            if (set_base_instance) {

src/gallium/drivers/radeonsi/si_state_shaders.cpp

@@ -1209,12 +1209,12 @@ static void gfx10_emit_shader_ngg(struct si_context *sctx, unsigned index)
                              shader->ngg.ge_pc_alloc);
 
    if (sctx->screen->info.has_set_sh_pairs_packed) {
       assert(!sctx->screen->info.uses_kernel_cu_mask);
-      radeon_opt_push_gfx_sh_reg(R_00B21C_SPI_SHADER_PGM_RSRC3_GS,
-                                 SI_TRACKED_SPI_SHADER_PGM_RSRC3_GS,
-                                 shader->gs.spi_shader_pgm_rsrc3_gs);
-      radeon_opt_push_gfx_sh_reg(R_00B204_SPI_SHADER_PGM_RSRC4_GS,
-                                 SI_TRACKED_SPI_SHADER_PGM_RSRC4_GS,
-                                 shader->gs.spi_shader_pgm_rsrc4_gs);
+      gfx11_opt_push_gfx_sh_reg(R_00B21C_SPI_SHADER_PGM_RSRC3_GS,
+                                SI_TRACKED_SPI_SHADER_PGM_RSRC3_GS,
+                                shader->gs.spi_shader_pgm_rsrc3_gs);
+      gfx11_opt_push_gfx_sh_reg(R_00B204_SPI_SHADER_PGM_RSRC4_GS,
+                                SI_TRACKED_SPI_SHADER_PGM_RSRC4_GS,
+                                shader->gs.spi_shader_pgm_rsrc4_gs);
    } else {
       if (sctx->screen->info.uses_kernel_cu_mask) {
          radeon_opt_set_sh_reg_idx(sctx, R_00B21C_SPI_SHADER_PGM_RSRC3_GS,
@@ -4539,24 +4539,24 @@ void si_update_tess_io_layout_state(struct si_context *sctx)
 static void si_emit_tess_io_layout_state(struct si_context *sctx, unsigned index)
 {
    struct radeon_cmdbuf *cs = &sctx->gfx_cs;
-   radeon_begin(cs);
 
    if (!sctx->shader.tes.cso || !sctx->shader.tcs.current)
       return;
 
+   radeon_begin(cs);
    if (sctx->screen->info.has_set_sh_pairs_packed) {
-      radeon_opt_push_gfx_sh_reg(R_00B42C_SPI_SHADER_PGM_RSRC2_HS,
-                                 SI_TRACKED_SPI_SHADER_PGM_RSRC2_HS, sctx->ls_hs_rsrc2);
+      gfx11_opt_push_gfx_sh_reg(R_00B42C_SPI_SHADER_PGM_RSRC2_HS,
+                                SI_TRACKED_SPI_SHADER_PGM_RSRC2_HS, sctx->ls_hs_rsrc2);
 
       /* Set userdata SGPRs for merged LS-HS. */
-      radeon_opt_push_gfx_sh_reg(R_00B430_SPI_SHADER_USER_DATA_HS_0 +
-                                 GFX9_SGPR_TCS_OFFCHIP_LAYOUT * 4,
-                                 SI_TRACKED_SPI_SHADER_USER_DATA_HS__TCS_OFFCHIP_LAYOUT,
-                                 sctx->tcs_offchip_layout);
-      radeon_opt_push_gfx_sh_reg(R_00B430_SPI_SHADER_USER_DATA_HS_0 +
-                                 GFX9_SGPR_TCS_OFFCHIP_ADDR * 4,
-                                 SI_TRACKED_SPI_SHADER_USER_DATA_HS__TCS_OFFCHIP_ADDR,
-                                 sctx->tes_offchip_ring_va_sgpr);
+      gfx11_opt_push_gfx_sh_reg(R_00B430_SPI_SHADER_USER_DATA_HS_0 +
+                                GFX9_SGPR_TCS_OFFCHIP_LAYOUT * 4,
+                                SI_TRACKED_SPI_SHADER_USER_DATA_HS__TCS_OFFCHIP_LAYOUT,
+                                sctx->tcs_offchip_layout);
+      gfx11_opt_push_gfx_sh_reg(R_00B430_SPI_SHADER_USER_DATA_HS_0 +
+                                GFX9_SGPR_TCS_OFFCHIP_ADDR * 4,
+                                SI_TRACKED_SPI_SHADER_USER_DATA_HS__TCS_OFFCHIP_ADDR,
+                                sctx->tes_offchip_ring_va_sgpr);
    } else if (sctx->gfx_level >= GFX9) {
       radeon_opt_set_sh_reg(sctx, R_00B42C_SPI_SHADER_PGM_RSRC2_HS,
                             SI_TRACKED_SPI_SHADER_PGM_RSRC2_HS, sctx->ls_hs_rsrc2);
@@ -4590,16 +4590,16 @@ static void si_emit_tess_io_layout_state(struct si_context *sctx, unsigned index
    assert(tes_sh_base);
 
    /* TES (as ES or VS) reuses the BaseVertex and DrawID user SGPRs that are used when
-    * tessellation is disabled. That's because those user SGPRs are only set in LS
-    * for tessellation.
+    * tessellation is disabled. We can do that because those user SGPRs are only set in LS
+    * for tessellation and are unused in TES.
     */
 
    if (sctx->screen->info.has_set_sh_pairs_packed) {
-      radeon_opt_push_gfx_sh_reg(tes_sh_base + SI_SGPR_TES_OFFCHIP_LAYOUT * 4,
-                                 SI_TRACKED_SPI_SHADER_USER_DATA_ES__BASE_VERTEX,
-                                 sctx->tcs_offchip_layout);
-      radeon_opt_push_gfx_sh_reg(tes_sh_base + SI_SGPR_TES_OFFCHIP_ADDR * 4,
-                                 SI_TRACKED_SPI_SHADER_USER_DATA_ES__DRAWID,
-                                 sctx->tes_offchip_ring_va_sgpr);
+      gfx11_opt_push_gfx_sh_reg(tes_sh_base + SI_SGPR_TES_OFFCHIP_LAYOUT * 4,
+                                SI_TRACKED_SPI_SHADER_USER_DATA_ES__BASE_VERTEX,
+                                sctx->tcs_offchip_layout);
+      gfx11_opt_push_gfx_sh_reg(tes_sh_base + SI_SGPR_TES_OFFCHIP_ADDR * 4,
+                                SI_TRACKED_SPI_SHADER_USER_DATA_ES__DRAWID,
+                                sctx->tes_offchip_ring_va_sgpr);
    } else {
       bool has_gs = sctx->ngg || sctx->shader.gs.cso;

src/gallium/drivers/radeonsi/si_state_viewport.c

@@ -92,9 +92,9 @@ static void si_emit_cull_state(struct si_context *sctx, unsigned index)
                              RADEON_USAGE_READ | RADEON_PRIO_CONST_BUFFER);
 
    if (sctx->screen->info.has_set_sh_pairs_packed) {
-      radeon_push_gfx_sh_reg(R_00B230_SPI_SHADER_USER_DATA_GS_0 +
-                             GFX9_SGPR_SMALL_PRIM_CULL_INFO * 4,
-                             sctx->small_prim_cull_info_address);
+      gfx11_push_gfx_sh_reg(R_00B230_SPI_SHADER_USER_DATA_GS_0 +
+                            GFX9_SGPR_SMALL_PRIM_CULL_INFO * 4,
+                            sctx->small_prim_cull_info_address);
    } else {
       radeon_begin(&sctx->gfx_cs);
       radeon_set_sh_reg(R_00B230_SPI_SHADER_USER_DATA_GS_0 + GFX9_SGPR_SMALL_PRIM_CULL_INFO * 4,