amd: rename RING_* enums to AMD_IP_*

commit 7203723120 (parent ae7e4d7619)
Author: Marek Olšák
Date: 2022-05-05 13:49:29 -04:00
Committed by: Marge Bot
Reviewed-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/16360>

29 changed files with 160 additions and 160 deletions
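The rename is one-to-one; the only name that changes beyond the prefix is the DMA ring, which becomes AMD_IP_SDMA after the SDMA hardware block. The full mapping, as established by the enum change below:

/* RING_GFX       -> AMD_IP_GFX
 * RING_COMPUTE   -> AMD_IP_COMPUTE
 * RING_DMA       -> AMD_IP_SDMA    (renamed after the SDMA block)
 * RING_UVD       -> AMD_IP_UVD
 * RING_VCE       -> AMD_IP_VCE
 * RING_UVD_ENC   -> AMD_IP_UVD_ENC
 * RING_VCN_DEC   -> AMD_IP_VCN_DEC
 * RING_VCN_ENC   -> AMD_IP_VCN_ENC
 * RING_VCN_JPEG  -> AMD_IP_VCN_JPEG
 * NUM_RING_TYPES -> AMD_NUM_IP_TYPES */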

@@ -495,15 +495,15 @@ bool ac_query_gpu_info(int fd, void *dev_p, struct radeon_info *info,
amdgpu_device_handle dev = dev_p;
drmDevicePtr devinfo;
-STATIC_ASSERT(AMDGPU_HW_IP_GFX == RING_GFX);
-STATIC_ASSERT(AMDGPU_HW_IP_COMPUTE == RING_COMPUTE);
-STATIC_ASSERT(AMDGPU_HW_IP_DMA == RING_DMA);
-STATIC_ASSERT(AMDGPU_HW_IP_UVD == RING_UVD);
-STATIC_ASSERT(AMDGPU_HW_IP_VCE == RING_VCE);
-STATIC_ASSERT(AMDGPU_HW_IP_UVD_ENC == RING_UVD_ENC);
-STATIC_ASSERT(AMDGPU_HW_IP_VCN_DEC == RING_VCN_DEC);
-STATIC_ASSERT(AMDGPU_HW_IP_VCN_ENC == RING_VCN_ENC);
-STATIC_ASSERT(AMDGPU_HW_IP_VCN_JPEG == RING_VCN_JPEG);
+STATIC_ASSERT(AMDGPU_HW_IP_GFX == AMD_IP_GFX);
+STATIC_ASSERT(AMDGPU_HW_IP_COMPUTE == AMD_IP_COMPUTE);
+STATIC_ASSERT(AMDGPU_HW_IP_DMA == AMD_IP_SDMA);
+STATIC_ASSERT(AMDGPU_HW_IP_UVD == AMD_IP_UVD);
+STATIC_ASSERT(AMDGPU_HW_IP_VCE == AMD_IP_VCE);
+STATIC_ASSERT(AMDGPU_HW_IP_UVD_ENC == AMD_IP_UVD_ENC);
+STATIC_ASSERT(AMDGPU_HW_IP_VCN_DEC == AMD_IP_VCN_DEC);
+STATIC_ASSERT(AMDGPU_HW_IP_VCN_ENC == AMD_IP_VCN_ENC);
+STATIC_ASSERT(AMDGPU_HW_IP_VCN_JPEG == AMD_IP_VCN_JPEG);
/* Get PCI info. */
r = drmGetDevice2(fd, 0, &devinfo);
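These asserts pin the renamed enum to the kernel's AMDGPU_HW_IP_* numbering, so an amd_ip_type value can be handed to kernel interfaces without a translation table. A minimal sketch of what the asserts license (ac_ip_to_hw_ip is a hypothetical helper, not part of this change):

static inline unsigned ac_ip_to_hw_ip(enum amd_ip_type ip)
{
   /* Valid only while the STATIC_ASSERTs above hold: the two enums
    * share values, so the cast is the entire conversion. */
   return (unsigned)ip;
}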
@@ -983,26 +983,26 @@ bool ac_query_gpu_info(int fd, void *dev_p, struct radeon_info *info,
assert(util_is_power_of_two_or_zero(dma.available_rings + 1));
assert(util_is_power_of_two_or_zero(compute.available_rings + 1));
-info->num_rings[RING_GFX] = util_bitcount(gfx.available_rings);
-info->num_rings[RING_COMPUTE] = util_bitcount(compute.available_rings);
-info->num_rings[RING_DMA] = util_bitcount(dma.available_rings);
-info->num_rings[RING_UVD] = util_bitcount(uvd.available_rings);
-info->num_rings[RING_VCE] = util_bitcount(vce.available_rings);
-info->num_rings[RING_UVD_ENC] = util_bitcount(uvd_enc.available_rings);
-info->num_rings[RING_VCN_DEC] = util_bitcount(vcn_dec.available_rings);
-info->num_rings[RING_VCN_ENC] = util_bitcount(vcn_enc.available_rings);
-info->num_rings[RING_VCN_JPEG] = util_bitcount(vcn_jpeg.available_rings);
+info->num_rings[AMD_IP_GFX] = util_bitcount(gfx.available_rings);
+info->num_rings[AMD_IP_COMPUTE] = util_bitcount(compute.available_rings);
+info->num_rings[AMD_IP_SDMA] = util_bitcount(dma.available_rings);
+info->num_rings[AMD_IP_UVD] = util_bitcount(uvd.available_rings);
+info->num_rings[AMD_IP_VCE] = util_bitcount(vce.available_rings);
+info->num_rings[AMD_IP_UVD_ENC] = util_bitcount(uvd_enc.available_rings);
+info->num_rings[AMD_IP_VCN_DEC] = util_bitcount(vcn_dec.available_rings);
+info->num_rings[AMD_IP_VCN_ENC] = util_bitcount(vcn_enc.available_rings);
+info->num_rings[AMD_IP_VCN_JPEG] = util_bitcount(vcn_jpeg.available_rings);
/* This is "align_mask" copied from the kernel, maximums of all IP versions. */
-info->ib_pad_dw_mask[RING_GFX] = 0xff;
-info->ib_pad_dw_mask[RING_COMPUTE] = 0xff;
-info->ib_pad_dw_mask[RING_DMA] = 0xf;
-info->ib_pad_dw_mask[RING_UVD] = 0xf;
-info->ib_pad_dw_mask[RING_VCE] = 0x3f;
-info->ib_pad_dw_mask[RING_UVD_ENC] = 0x3f;
-info->ib_pad_dw_mask[RING_VCN_DEC] = 0xf;
-info->ib_pad_dw_mask[RING_VCN_ENC] = 0x3f;
-info->ib_pad_dw_mask[RING_VCN_JPEG] = 0xf;
+info->ib_pad_dw_mask[AMD_IP_GFX] = 0xff;
+info->ib_pad_dw_mask[AMD_IP_COMPUTE] = 0xff;
+info->ib_pad_dw_mask[AMD_IP_SDMA] = 0xf;
+info->ib_pad_dw_mask[AMD_IP_UVD] = 0xf;
+info->ib_pad_dw_mask[AMD_IP_VCE] = 0x3f;
+info->ib_pad_dw_mask[AMD_IP_UVD_ENC] = 0x3f;
+info->ib_pad_dw_mask[AMD_IP_VCN_DEC] = 0xf;
+info->ib_pad_dw_mask[AMD_IP_VCN_ENC] = 0x3f;
+info->ib_pad_dw_mask[AMD_IP_VCN_JPEG] = 0xf;
/* The mere presence of CLEAR_STATE in the IB causes random GPU hangs
* on GFX6. Some CLEAR_STATE cause asic hang on radeon kernel, etc.
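Each mask is an alignment mask in dwords: the winsys emits NOPs until the IB size has the masked bits clear, so 0xff pads GFX/compute IBs to a multiple of 256 dwords. A sketch of the idiom, mirroring the padding loops in amdgpu_cs_flush further down (ib and num_dw are illustrative):

while (num_dw & info->ib_pad_dw_mask[AMD_IP_GFX])
   ib[num_dw++] = 0xffff1000; /* type-3 NOP, as the radeon winsys pads GFX IBs */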
@@ -1380,15 +1380,15 @@ void ac_print_gpu_info(struct radeon_info *info, FILE *f)
fprintf(f, "Features:\n");
fprintf(f, " has_graphics = %i\n", info->has_graphics);
fprintf(f, " num_rings[RING_GFX] = %i\n", info->num_rings[RING_GFX]);
fprintf(f, " num_rings[RING_DMA] = %i\n", info->num_rings[RING_DMA]);
fprintf(f, " num_rings[RING_COMPUTE] = %u\n", info->num_rings[RING_COMPUTE]);
fprintf(f, " num_rings[RING_UVD] = %i\n", info->num_rings[RING_UVD]);
fprintf(f, " num_rings[RING_VCE] = %i\n", info->num_rings[RING_VCE]);
fprintf(f, " num_rings[RING_UVD_ENC] = %i\n", info->num_rings[RING_UVD_ENC]);
fprintf(f, " num_rings[RING_VCN_DEC] = %i\n", info->num_rings[RING_VCN_DEC]);
fprintf(f, " num_rings[RING_VCN_ENC] = %i\n", info->num_rings[RING_VCN_ENC]);
fprintf(f, " num_rings[RING_VCN_JPEG] = %i\n", info->num_rings[RING_VCN_JPEG]);
fprintf(f, " num_rings[AMD_IP_GFX] = %i\n", info->num_rings[AMD_IP_GFX]);
fprintf(f, " num_rings[AMD_IP_SDMA] = %i\n", info->num_rings[AMD_IP_SDMA]);
fprintf(f, " num_rings[AMD_IP_COMPUTE] = %u\n", info->num_rings[AMD_IP_COMPUTE]);
fprintf(f, " num_rings[AMD_IP_UVD] = %i\n", info->num_rings[AMD_IP_UVD]);
fprintf(f, " num_rings[AMD_IP_VCE] = %i\n", info->num_rings[AMD_IP_VCE]);
fprintf(f, " num_rings[AMD_IP_UVD_ENC] = %i\n", info->num_rings[AMD_IP_UVD_ENC]);
fprintf(f, " num_rings[AMD_IP_VCN_DEC] = %i\n", info->num_rings[AMD_IP_VCN_DEC]);
fprintf(f, " num_rings[AMD_IP_VCN_ENC] = %i\n", info->num_rings[AMD_IP_VCN_ENC]);
fprintf(f, " num_rings[AMD_IP_VCN_JPEG] = %i\n", info->num_rings[AMD_IP_VCN_JPEG]);
fprintf(f, " has_clear_state = %u\n", info->has_clear_state);
fprintf(f, " has_distributed_tess = %u\n", info->has_distributed_tess);
fprintf(f, " has_dcc_constant_encode = %u\n", info->has_dcc_constant_encode);

@@ -64,8 +64,8 @@ struct radeon_info {
/* Features. */
bool has_graphics; /* false if the chip is compute-only */
-uint32_t num_rings[NUM_RING_TYPES];
-uint32_t ib_pad_dw_mask[NUM_RING_TYPES];
+uint32_t num_rings[AMD_NUM_IP_TYPES];
+uint32_t ib_pad_dw_mask[AMD_NUM_IP_TYPES];
bool has_clear_state;
bool has_distributed_tess;
bool has_dcc_constant_encode;

@@ -158,16 +158,16 @@ enum chip_class
enum amd_ip_type
{
-RING_GFX = 0,
-RING_COMPUTE,
-RING_DMA,
-RING_UVD,
-RING_VCE,
-RING_UVD_ENC,
-RING_VCN_DEC,
-RING_VCN_ENC,
-RING_VCN_JPEG,
-NUM_RING_TYPES,
+AMD_IP_GFX = 0,
+AMD_IP_COMPUTE,
+AMD_IP_SDMA,
+AMD_IP_UVD,
+AMD_IP_VCE,
+AMD_IP_UVD_ENC,
+AMD_IP_VCN_DEC,
+AMD_IP_VCN_ENC,
+AMD_IP_VCN_JPEG,
+AMD_NUM_IP_TYPES,
};
const char *ac_get_family_name(enum radeon_family family);
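With the terminator renamed to AMD_NUM_IP_TYPES, per-IP arrays such as num_rings can be walked generically instead of naming each entry; a small sketch in the spirit of ac_print_gpu_info above (illustrative only):

for (unsigned ip = 0; ip < AMD_NUM_IP_TYPES; ip++)
   fprintf(f, "num_rings[%u] = %u\n", ip, info->num_rings[ip]);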

@@ -362,11 +362,11 @@ radv_queue_family_to_ring(struct radv_physical_device *physical_device,
{
switch (f) {
case RADV_QUEUE_GENERAL:
-return RING_GFX;
+return AMD_IP_GFX;
case RADV_QUEUE_COMPUTE:
-return RING_COMPUTE;
+return AMD_IP_COMPUTE;
case RADV_QUEUE_TRANSFER:
-return RING_DMA;
+return AMD_IP_SDMA;
default:
unreachable("Unknown queue family");
}
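A hypothetical call site, showing how the queue-family-to-IP mapping above is consumed (pdev is illustrative):

enum amd_ip_type ip = radv_queue_family_to_ring(pdev, RADV_QUEUE_TRANSFER);
assert(ip == AMD_IP_SDMA);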
@@ -708,10 +708,10 @@ radv_save_pipeline(struct radv_cmd_buffer *cmd_buffer, struct radv_pipeline *pip
ring = radv_queue_family_to_ring(device->physical_device, cmd_buffer->qf);
switch (ring) {
-case RING_GFX:
+case AMD_IP_GFX:
va += 8;
break;
-case RING_COMPUTE:
+case AMD_IP_COMPUTE:
va += 16;
break;
default:

@@ -512,7 +512,7 @@ static struct radv_pipeline *
radv_get_saved_pipeline(struct radv_device *device, enum amd_ip_type ring)
{
uint64_t *ptr = (uint64_t *)device->trace_id_ptr;
-int offset = ring == RING_GFX ? 1 : 2;
+int offset = ring == AMD_IP_GFX ? 1 : 2;
return *(struct radv_pipeline **)(ptr + offset);
}
@@ -523,7 +523,7 @@ radv_dump_queue_state(struct radv_queue *queue, const char *dump_dir, FILE *f)
enum amd_ip_type ring = radv_queue_ring(queue);
struct radv_pipeline *pipeline;
fprintf(f, "RING_%s:\n", ring == RING_GFX ? "GFX" : "COMPUTE");
fprintf(f, "AMD_IP_%s:\n", ring == AMD_IP_GFX ? "GFX" : "COMPUTE");
pipeline = radv_get_saved_pipeline(queue->device, ring);
if (pipeline) {
@@ -636,7 +636,7 @@ radv_dump_umr_ring(struct radv_queue *queue, FILE *f)
char cmd[128];
/* TODO: Dump compute ring. */
-if (ring != RING_GFX)
+if (ring != AMD_IP_GFX)
return;
sprintf(cmd, "umr -R %s 2>&1",
@@ -654,7 +654,7 @@ radv_dump_umr_waves(struct radv_queue *queue, FILE *f)
char cmd[128];
/* TODO: Dump compute ring. */
-if (ring != RING_GFX)
+if (ring != AMD_IP_GFX)
return;
sprintf(cmd, "umr -O bits,halt_waves -wa %s 2>&1",

@@ -583,7 +583,7 @@ radv_physical_device_init_queue_table(struct radv_physical_device *pdevice)
for (unsigned i = 1; i < RADV_MAX_QUEUE_FAMILIES; i++)
pdevice->vk_queue_to_radv[i] = RADV_MAX_QUEUE_FAMILIES + 1;
-if (pdevice->rad_info.num_rings[RING_COMPUTE] > 0 &&
+if (pdevice->rad_info.num_rings[AMD_IP_COMPUTE] > 0 &&
!(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE)) {
pdevice->vk_queue_to_radv[idx] = RADV_QUEUE_COMPUTE;
idx++;
@@ -2416,7 +2416,7 @@ radv_get_physical_device_queue_family_properties(struct radv_physical_device *pd
{
int num_queue_families = 1;
int idx;
-if (pdevice->rad_info.num_rings[RING_COMPUTE] > 0 &&
+if (pdevice->rad_info.num_rings[AMD_IP_COMPUTE] > 0 &&
!(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE))
num_queue_families++;
@@ -2440,13 +2440,13 @@ radv_get_physical_device_queue_family_properties(struct radv_physical_device *pd
idx++;
}
-if (pdevice->rad_info.num_rings[RING_COMPUTE] > 0 &&
+if (pdevice->rad_info.num_rings[AMD_IP_COMPUTE] > 0 &&
!(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE)) {
if (*pCount > idx) {
*pQueueFamilyProperties[idx] = (VkQueueFamilyProperties){
.queueFlags =
VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT | VK_QUEUE_SPARSE_BINDING_BIT,
-.queueCount = pdevice->rad_info.num_rings[RING_COMPUTE],
+.queueCount = pdevice->rad_info.num_rings[AMD_IP_COMPUTE],
.timestampValidBits = 64,
.minImageTransferGranularity = (VkExtent3D){1, 1, 1},
};

@@ -82,7 +82,7 @@ radv_sdma_v4_v5_copy_image_to_buffer(struct radv_cmd_buffer *cmd_buffer, struct
unsigned copy_height = DIV_ROUND_UP(image->info.height, image->planes[0].surface.blk_h);
bool tmz = false;
-uint32_t ib_pad_dw_mask = cmd_buffer->device->physical_device->rad_info.ib_pad_dw_mask[RING_DMA];
+uint32_t ib_pad_dw_mask = cmd_buffer->device->physical_device->rad_info.ib_pad_dw_mask[AMD_IP_SDMA];
/* Linear -> linear sub-window copy. */
if (image->planes[0].surface.is_linear) {

@@ -66,7 +66,7 @@ radv_emit_wait_for_idle(struct radv_device *device, struct radeon_cmdbuf *cs, in
enum rgp_flush_bits sqtt_flush_bits = 0;
si_cs_emit_cache_flush(
cs, device->physical_device->rad_info.chip_class, NULL, 0,
-family == RING_COMPUTE && device->physical_device->rad_info.chip_class >= GFX7,
+family == AMD_IP_COMPUTE && device->physical_device->rad_info.chip_class >= GFX7,
(family == RADV_QUEUE_COMPUTE
? RADV_CMD_FLAG_CS_PARTIAL_FLUSH
: (RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH)) |

@@ -580,7 +580,7 @@ si_emit_graphics(struct radv_device *device, struct radeon_cmdbuf *cs)
void
cik_create_gfx_config(struct radv_device *device)
{
-struct radeon_cmdbuf *cs = device->ws->cs_create(device->ws, RING_GFX);
+struct radeon_cmdbuf *cs = device->ws->cs_create(device->ws, AMD_IP_GFX);
if (!cs)
return;

@@ -111,11 +111,11 @@ static bool
ring_can_use_ib_bos(const struct radv_amdgpu_winsys *ws,
enum amd_ip_type ip_type)
{
-if (ip_type == RING_UVD ||
-ip_type == RING_VCE ||
-ip_type == RING_UVD_ENC ||
-ip_type == RING_VCN_DEC ||
-ip_type == RING_VCN_ENC)
+if (ip_type == AMD_IP_UVD ||
+ip_type == AMD_IP_VCE ||
+ip_type == AMD_IP_UVD_ENC ||
+ip_type == AMD_IP_VCN_DEC ||
+ip_type == AMD_IP_VCN_ENC)
return false;
return ws->use_ib_bos;
}

@@ -56,8 +56,8 @@ do_winsys_init(struct radv_amdgpu_winsys *ws, int fd)
return false;
}
-ws->info.num_rings[RING_DMA] = MIN2(ws->info.num_rings[RING_DMA], MAX_RINGS_PER_TYPE);
-ws->info.num_rings[RING_COMPUTE] = MIN2(ws->info.num_rings[RING_COMPUTE], MAX_RINGS_PER_TYPE);
+ws->info.num_rings[AMD_IP_SDMA] = MIN2(ws->info.num_rings[AMD_IP_SDMA], MAX_RINGS_PER_TYPE);
+ws->info.num_rings[AMD_IP_COMPUTE] = MIN2(ws->info.num_rings[AMD_IP_COMPUTE], MAX_RINGS_PER_TYPE);
ws->use_ib_bos = ws->info.chip_class >= GFX7;
return true;

@@ -405,7 +405,7 @@ struct pipe_context* r300_create_context(struct pipe_screen* screen,
goto fail;
-if (!rws->cs_create(&r300->cs, r300->ctx, RING_GFX, r300_flush_callback, r300, false))
+if (!rws->cs_create(&r300->cs, r300->ctx, AMD_IP_GFX, r300_flush_callback, r300, false))
goto fail;
if (!r300screen->caps.has_tcl) {

@@ -209,7 +209,7 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen,
goto fail;
}
-ws->cs_create(&rctx->b.gfx.cs, rctx->b.ctx, RING_GFX,
+ws->cs_create(&rctx->b.gfx.cs, rctx->b.ctx, AMD_IP_GFX,
r600_context_gfx_flush, rctx, false);
rctx->b.gfx.flush = r600_context_gfx_flush;

@@ -428,7 +428,7 @@ static void r600_flush_dma_ring(void *ctx, unsigned flags,
*/
rctx->ws->fence_wait(rctx->ws, rctx->last_sdma_fence, 800*1000*1000);
-rctx->check_vm_faults(rctx, &saved, RING_DMA);
+rctx->check_vm_faults(rctx, &saved, AMD_IP_SDMA);
radeon_clear_saved_cs(&saved);
}
}
@@ -636,8 +636,8 @@ bool r600_common_context_init(struct r600_common_context *rctx,
if (!rctx->ctx)
return false;
-if (rscreen->info.num_rings[RING_DMA] && !(rscreen->debug_flags & DBG_NO_ASYNC_DMA)) {
-rctx->ws->cs_create(&rctx->dma.cs, rctx->ctx, RING_DMA,
+if (rscreen->info.num_rings[AMD_IP_SDMA] && !(rscreen->debug_flags & DBG_NO_ASYNC_DMA)) {
+rctx->ws->cs_create(&rctx->dma.cs, rctx->ctx, AMD_IP_SDMA,
r600_flush_dma_ring, rctx, false);
rctx->dma.flush = r600_flush_dma_ring;
}
@@ -1294,8 +1294,8 @@ bool r600_common_screen_init(struct r600_common_screen *rscreen,
printf("r600_has_virtual_memory = %i\n", rscreen->info.r600_has_virtual_memory);
printf("gfx_ib_pad_with_type2 = %i\n", rscreen->info.gfx_ib_pad_with_type2);
printf("uvd_decode = %u\n", rscreen->info.has_video_hw.uvd_decode);
printf("num_rings[RING_DMA] = %i\n", rscreen->info.num_rings[RING_DMA]);
printf("num_rings[RING_COMPUTE] = %u\n", rscreen->info.num_rings[RING_COMPUTE]);
printf("num_rings[AMD_IP_SDMA] = %i\n", rscreen->info.num_rings[AMD_IP_SDMA]);
printf("num_rings[AMD_IP_COMPUTE] = %u\n", rscreen->info.num_rings[AMD_IP_COMPUTE]);
printf("uvd_fw_version = %u\n", rscreen->info.uvd_fw_version);
printf("vce_fw_version = %u\n", rscreen->info.vce_fw_version);
printf("me_fw_version = %i\n", rscreen->info.me_fw_version);

@@ -1091,7 +1091,7 @@ struct pipe_video_codec *ruvd_create_decoder(struct pipe_context *context,
dec->screen = context->screen;
dec->ws = ws;
-if (!ws->cs_create(&dec->cs, rctx->ctx, RING_UVD, NULL, NULL, false)) {
+if (!ws->cs_create(&dec->cs, rctx->ctx, AMD_IP_UVD, NULL, NULL, false)) {
RVID_ERR("Can't get command submission context.\n");
goto error;
}

@@ -432,7 +432,7 @@ struct pipe_video_codec *rvce_create_encoder(struct pipe_context *context,
enc->screen = context->screen;
enc->ws = ws;
-if (!ws->cs_create(&enc->cs, rctx->ctx, RING_VCE, rvce_cs_flush, enc, false)) {
+if (!ws->cs_create(&enc->cs, rctx->ctx, AMD_IP_VCE, rvce_cs_flush, enc, false)) {
RVID_ERR("Can't get command submission context.\n");
goto error;
}

@@ -1269,7 +1269,7 @@ struct pipe_video_codec *si_common_uvd_create_decoder(struct pipe_context *conte
dec->screen = context->screen;
dec->ws = ws;
-if (!ws->cs_create(&dec->cs, sctx->ctx, RING_UVD, NULL, NULL, false)) {
+if (!ws->cs_create(&dec->cs, sctx->ctx, AMD_IP_UVD, NULL, NULL, false)) {
RVID_ERR("Can't get command submission context.\n");
goto error;
}

@@ -295,7 +295,7 @@ struct pipe_video_codec *radeon_uvd_create_encoder(struct pipe_context *context,
enc->screen = context->screen;
enc->ws = ws;
-if (!ws->cs_create(&enc->cs, sctx->ctx, RING_UVD_ENC, radeon_uvd_enc_cs_flush, enc, false)) {
+if (!ws->cs_create(&enc->cs, sctx->ctx, AMD_IP_UVD_ENC, radeon_uvd_enc_cs_flush, enc, false)) {
RVID_ERR("Can't get command submission context.\n");
goto error;
}

@@ -429,7 +429,7 @@ struct pipe_video_codec *si_vce_create_encoder(struct pipe_context *context,
enc->screen = context->screen;
enc->ws = ws;
-if (!ws->cs_create(&enc->cs, sctx->ctx, RING_VCE, rvce_cs_flush, enc, false)) {
+if (!ws->cs_create(&enc->cs, sctx->ctx, AMD_IP_VCE, rvce_cs_flush, enc, false)) {
RVID_ERR("Can't get command submission context.\n");
goto error;
}

@@ -2706,7 +2706,7 @@ struct pipe_video_codec *radeon_create_decoder(struct pipe_context *context,
struct si_context *sctx = (struct si_context *)context;
struct radeon_winsys *ws = sctx->ws;
unsigned width = templ->width, height = templ->height;
-unsigned bs_buf_size, stream_type = 0, ring = RING_VCN_DEC;
+unsigned bs_buf_size, stream_type = 0, ring = AMD_IP_VCN_DEC;
struct radeon_decoder *dec;
int r, i;
@@ -2740,7 +2740,7 @@ struct pipe_video_codec *radeon_create_decoder(struct pipe_context *context,
break;
case PIPE_VIDEO_FORMAT_JPEG:
stream_type = RDECODE_CODEC_JPEG;
-ring = RING_VCN_JPEG;
+ring = AMD_IP_VCN_JPEG;
break;
default:
assert(0);

@@ -548,7 +548,7 @@ struct pipe_video_codec *radeon_create_encoder(struct pipe_context *context,
enc->screen = context->screen;
enc->ws = ws;
-if (!ws->cs_create(&enc->cs, sctx->ctx, RING_VCN_ENC, radeon_enc_cs_flush, enc, false)) {
+if (!ws->cs_create(&enc->cs, sctx->ctx, AMD_IP_VCN_ENC, radeon_enc_cs_flush, enc, false)) {
RVID_ERR("Can't get command submission context.\n");
goto error;
}

@@ -1102,7 +1102,7 @@ void si_check_vm_faults(struct si_context *sctx, struct radeon_saved_cs *saved,
fprintf(f, "Last apitrace call: %u\n\n", sctx->apitrace_call_number);
switch (ring) {
-case RING_GFX: {
+case AMD_IP_GFX: {
struct u_log_context log;
u_log_context_init(&log);

@@ -165,7 +165,7 @@ void si_flush_gfx_cs(struct si_context *ctx, unsigned flags, struct pipe_fence_h
*/
ctx->ws->fence_wait(ctx->ws, ctx->last_gfx_fence, 800 * 1000 * 1000);
-si_check_vm_faults(ctx, &ctx->current_saved_cs->gfx, RING_GFX);
+si_check_vm_faults(ctx, &ctx->current_saved_cs->gfx, AMD_IP_GFX);
}
if (unlikely(ctx->thread_trace &&

@@ -501,7 +501,7 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen, unsign
if (!sctx->ctx)
goto fail;
-ws->cs_create(&sctx->gfx_cs, sctx->ctx, sctx->has_graphics ? RING_GFX : RING_COMPUTE,
+ws->cs_create(&sctx->gfx_cs, sctx->ctx, sctx->has_graphics ? AMD_IP_GFX : AMD_IP_COMPUTE,
(void *)si_flush_gfx_cs, sctx, stop_exec_on_failure);
/* Initialize private allocators. */
@@ -978,7 +978,7 @@ static void si_test_gds_memory_management(struct si_context *sctx, unsigned allo
struct pb_buffer *gds_bo[ARRAY_SIZE(cs)];
for (unsigned i = 0; i < ARRAY_SIZE(cs); i++) {
-ws->cs_create(&cs[i], sctx->ctx, RING_COMPUTE, NULL, NULL, false);
+ws->cs_create(&cs[i], sctx->ctx, AMD_IP_COMPUTE, NULL, NULL, false);
gds_bo[i] = ws->buffer_create(ws, alloc_size, alignment, domain, 0);
assert(gds_bo[i]);
}

@@ -426,7 +426,7 @@ bool si_sdma_copy_image(struct si_context *sctx, struct si_texture *dst, struct
return false;
sctx->sdma_cs = CALLOC_STRUCT(radeon_cmdbuf);
-if (ws->cs_create(sctx->sdma_cs, sctx->ctx, RING_DMA,
+if (ws->cs_create(sctx->sdma_cs, sctx->ctx, AMD_IP_SDMA,
NULL, NULL, true))
return false;
}

@@ -216,7 +216,7 @@ si_emit_thread_trace_start(struct si_context* sctx,
S_030800_INSTANCE_BROADCAST_WRITES(1));
/* Start the thread trace with a different event based on the queue. */
-if (queue_family_index == RING_COMPUTE) {
+if (queue_family_index == AMD_IP_COMPUTE) {
radeon_set_sh_reg(R_00B878_COMPUTE_THREAD_TRACE_ENABLE,
S_00B878_THREAD_TRACE_ENABLE(1));
} else {
@@ -291,7 +291,7 @@ si_emit_thread_trace_stop(struct si_context *sctx,
radeon_begin(cs);
/* Stop the thread trace with a different event based on the queue. */
-if (queue_family_index == RING_COMPUTE) {
+if (queue_family_index == AMD_IP_COMPUTE) {
radeon_set_sh_reg(R_00B878_COMPUTE_THREAD_TRACE_ENABLE,
S_00B878_THREAD_TRACE_ENABLE(0));
} else {
@@ -383,12 +383,12 @@ si_thread_trace_start(struct si_context *sctx, int family, struct radeon_cmdbuf
radeon_begin(cs);
switch (family) {
-case RING_GFX:
+case AMD_IP_GFX:
radeon_emit(PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
radeon_emit(CC0_UPDATE_LOAD_ENABLES(1));
radeon_emit(CC1_UPDATE_SHADOW_ENABLES(1));
break;
-case RING_COMPUTE:
+case AMD_IP_COMPUTE:
radeon_emit(PKT3(PKT3_NOP, 0, 0));
radeon_emit(0);
break;
@@ -437,12 +437,12 @@ si_thread_trace_stop(struct si_context *sctx, int family, struct radeon_cmdbuf *
radeon_begin(cs);
switch (family) {
-case RING_GFX:
+case AMD_IP_GFX:
radeon_emit(PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
radeon_emit(CC0_UPDATE_LOAD_ENABLES(1));
radeon_emit(CC1_UPDATE_SHADOW_ENABLES(1));
break;
-case RING_COMPUTE:
+case AMD_IP_COMPUTE:
radeon_emit(PKT3(PKT3_NOP, 0, 0));
radeon_emit(0);
break;
@@ -487,42 +487,42 @@ si_thread_trace_init_cs(struct si_context *sctx)
{
struct radeon_winsys *ws = sctx->ws;
-/* Thread trace start CS (only handles RING_GFX). */
-sctx->thread_trace->start_cs[RING_GFX] = CALLOC_STRUCT(radeon_cmdbuf);
-if (!ws->cs_create(sctx->thread_trace->start_cs[RING_GFX],
-sctx->ctx, RING_GFX, NULL, NULL, 0)) {
-free(sctx->thread_trace->start_cs[RING_GFX]);
-sctx->thread_trace->start_cs[RING_GFX] = NULL;
+/* Thread trace start CS (only handles AMD_IP_GFX). */
+sctx->thread_trace->start_cs[AMD_IP_GFX] = CALLOC_STRUCT(radeon_cmdbuf);
+if (!ws->cs_create(sctx->thread_trace->start_cs[AMD_IP_GFX],
+sctx->ctx, AMD_IP_GFX, NULL, NULL, 0)) {
+free(sctx->thread_trace->start_cs[AMD_IP_GFX]);
+sctx->thread_trace->start_cs[AMD_IP_GFX] = NULL;
return;
}
-si_thread_trace_start(sctx, RING_GFX, sctx->thread_trace->start_cs[RING_GFX]);
+si_thread_trace_start(sctx, AMD_IP_GFX, sctx->thread_trace->start_cs[AMD_IP_GFX]);
/* Thread trace stop CS. */
-sctx->thread_trace->stop_cs[RING_GFX] = CALLOC_STRUCT(radeon_cmdbuf);
-if (!ws->cs_create(sctx->thread_trace->stop_cs[RING_GFX],
-sctx->ctx, RING_GFX, NULL, NULL, 0)) {
-free(sctx->thread_trace->start_cs[RING_GFX]);
-sctx->thread_trace->start_cs[RING_GFX] = NULL;
-free(sctx->thread_trace->stop_cs[RING_GFX]);
-sctx->thread_trace->stop_cs[RING_GFX] = NULL;
+sctx->thread_trace->stop_cs[AMD_IP_GFX] = CALLOC_STRUCT(radeon_cmdbuf);
+if (!ws->cs_create(sctx->thread_trace->stop_cs[AMD_IP_GFX],
+sctx->ctx, AMD_IP_GFX, NULL, NULL, 0)) {
+free(sctx->thread_trace->start_cs[AMD_IP_GFX]);
+sctx->thread_trace->start_cs[AMD_IP_GFX] = NULL;
+free(sctx->thread_trace->stop_cs[AMD_IP_GFX]);
+sctx->thread_trace->stop_cs[AMD_IP_GFX] = NULL;
return;
}
-si_thread_trace_stop(sctx, RING_GFX, sctx->thread_trace->stop_cs[RING_GFX]);
+si_thread_trace_stop(sctx, AMD_IP_GFX, sctx->thread_trace->stop_cs[AMD_IP_GFX]);
}
static void
si_begin_thread_trace(struct si_context *sctx, struct radeon_cmdbuf *rcs)
{
-struct radeon_cmdbuf *cs = sctx->thread_trace->start_cs[RING_GFX];
+struct radeon_cmdbuf *cs = sctx->thread_trace->start_cs[AMD_IP_GFX];
sctx->ws->cs_flush(cs, 0, NULL);
}
static void
si_end_thread_trace(struct si_context *sctx, struct radeon_cmdbuf *rcs)
{
-struct radeon_cmdbuf *cs = sctx->thread_trace->stop_cs[RING_GFX];
+struct radeon_cmdbuf *cs = sctx->thread_trace->stop_cs[AMD_IP_GFX];
sctx->ws->cs_flush(cs, 0, &sctx->last_sqtt_fence);
}
@@ -662,8 +662,8 @@ si_destroy_thread_trace(struct si_context *sctx)
if (sctx->thread_trace->trigger_file)
free(sctx->thread_trace->trigger_file);
-sscreen->ws->cs_destroy(sctx->thread_trace->start_cs[RING_GFX]);
-sscreen->ws->cs_destroy(sctx->thread_trace->stop_cs[RING_GFX]);
+sscreen->ws->cs_destroy(sctx->thread_trace->start_cs[AMD_IP_GFX]);
+sscreen->ws->cs_destroy(sctx->thread_trace->stop_cs[AMD_IP_GFX]);
struct rgp_pso_correlation *pso_correlation = &sctx->thread_trace->rgp_pso_correlation;
struct rgp_loader_events *loader_events = &sctx->thread_trace->rgp_loader_events;

@@ -717,9 +717,9 @@ static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws,
enum radeon_bo_domain domain;
unsigned flags = RADEON_FLAG_NO_INTERPROCESS_SHARING;
-if (cs->ip_type == RING_GFX ||
-cs->ip_type == RING_COMPUTE ||
-cs->ip_type == RING_DMA) {
+if (cs->ip_type == AMD_IP_GFX ||
+cs->ip_type == AMD_IP_COMPUTE ||
+cs->ip_type == AMD_IP_SDMA) {
domain = ws->info.smart_access_memory ? RADEON_DOMAIN_VRAM : RADEON_DOMAIN_GTT;
flags |= RADEON_FLAG_32BIT | RADEON_FLAG_GTT_WC;
} else {
@@ -832,37 +832,37 @@ static bool amdgpu_init_cs_context(struct amdgpu_winsys *ws,
enum amd_ip_type ip_type)
{
switch (ip_type) {
-case RING_DMA:
+case AMD_IP_SDMA:
cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_DMA;
break;
-case RING_UVD:
+case AMD_IP_UVD:
cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_UVD;
break;
-case RING_UVD_ENC:
+case AMD_IP_UVD_ENC:
cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_UVD_ENC;
break;
-case RING_VCE:
+case AMD_IP_VCE:
cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCE;
break;
-case RING_VCN_DEC:
+case AMD_IP_VCN_DEC:
cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCN_DEC;
break;
-case RING_VCN_ENC:
+case AMD_IP_VCN_ENC:
cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCN_ENC;
break;
-case RING_VCN_JPEG:
+case AMD_IP_VCN_JPEG:
cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCN_JPEG;
break;
-case RING_COMPUTE:
-case RING_GFX:
-cs->ib[IB_MAIN].ip_type = ip_type == RING_GFX ? AMDGPU_HW_IP_GFX :
+case AMD_IP_COMPUTE:
+case AMD_IP_GFX:
+cs->ib[IB_MAIN].ip_type = ip_type == AMD_IP_GFX ? AMDGPU_HW_IP_GFX :
AMDGPU_HW_IP_COMPUTE;
/* The kernel shouldn't invalidate L2 and vL1. The proper place for cache
@@ -955,7 +955,7 @@ amdgpu_cs_create(struct radeon_cmdbuf *rcs,
cs->stop_exec_on_failure = stop_exec_on_failure;
cs->noop = ctx->ws->noop_cs;
cs->has_chaining = ctx->ws->info.chip_class >= GFX7 &&
-(ip_type == RING_GFX || ip_type == RING_COMPUTE);
+(ip_type == AMD_IP_GFX || ip_type == AMD_IP_COMPUTE);
struct amdgpu_cs_fence_info fence_info;
fence_info.handle = cs->ctx->user_fence_bo;
@@ -1194,7 +1194,7 @@ static bool is_noop_fence_dependency(struct amdgpu_cs *acs,
* We always want no dependency between back-to-back gfx IBs, because
* we need the parallelism between IBs for good performance.
*/
-if ((acs->ip_type == RING_GFX ||
+if ((acs->ip_type == AMD_IP_GFX ||
acs->ws->info.num_rings[acs->ip_type] == 1) &&
!amdgpu_fence_is_syncobj(fence) &&
fence->ctx == acs->ctx &&
@@ -1455,7 +1455,7 @@ static void amdgpu_cs_submit_ib(void *job, void *gdata, int thread_index)
}
}
-if (acs->ip_type == RING_GFX)
+if (acs->ip_type == AMD_IP_GFX)
ws->gfx_bo_list_counter += cs->num_real_buffers;
bool noop = false;
@@ -1571,7 +1571,7 @@ static void amdgpu_cs_submit_ib(void *job, void *gdata, int thread_index)
/* Apply RADEON_NOOP. */
if (acs->noop) {
-if (acs->ip_type == RING_GFX) {
+if (acs->ip_type == AMD_IP_GFX) {
/* Reduce the IB size and fill it with NOP to make it like an empty IB. */
unsigned noop_size = MIN2(cs->ib[IB_MAIN].ib_bytes, ws->info.ib_alignment);
@@ -1659,7 +1659,7 @@ static int amdgpu_cs_flush(struct radeon_cmdbuf *rcs,
/* Pad the IB according to the mask. */
switch (cs->ip_type) {
-case RING_DMA:
+case AMD_IP_SDMA:
if (ws->info.chip_class <= GFX6) {
while (rcs->current.cdw & ib_pad_dw_mask)
radeon_emit(rcs, 0xf0000000); /* NOP packet */
@@ -1668,8 +1668,8 @@ static int amdgpu_cs_flush(struct radeon_cmdbuf *rcs,
radeon_emit(rcs, SDMA_NOP_PAD);
}
break;
-case RING_GFX:
-case RING_COMPUTE:
+case AMD_IP_GFX:
+case AMD_IP_COMPUTE:
if (ws->info.gfx_ib_pad_with_type2) {
while (rcs->current.cdw & ib_pad_dw_mask)
radeon_emit(rcs, PKT2_NOP_PAD);
@@ -1677,15 +1677,15 @@ static int amdgpu_cs_flush(struct radeon_cmdbuf *rcs,
while (rcs->current.cdw & ib_pad_dw_mask)
radeon_emit(rcs, PKT3_NOP_PAD);
}
-if (cs->ip_type == RING_GFX)
+if (cs->ip_type == AMD_IP_GFX)
ws->gfx_ib_size_counter += (rcs->prev_dw + rcs->current.cdw) * 4;
break;
-case RING_UVD:
-case RING_UVD_ENC:
+case AMD_IP_UVD:
+case AMD_IP_UVD_ENC:
while (rcs->current.cdw & ib_pad_dw_mask)
radeon_emit(rcs, 0x80000000); /* type2 nop packet */
break;
-case RING_VCN_JPEG:
+case AMD_IP_VCN_JPEG:
if (rcs->current.cdw % 2)
assert(0);
while (rcs->current.cdw & ib_pad_dw_mask) {
@@ -1693,7 +1693,7 @@ static int amdgpu_cs_flush(struct radeon_cmdbuf *rcs,
radeon_emit(rcs, 0x00000000);
}
break;
-case RING_VCN_DEC:
+case AMD_IP_VCN_DEC:
while (rcs->current.cdw & ib_pad_dw_mask)
radeon_emit(rcs, 0x81ff); /* nop packet */
break;
@@ -1768,9 +1768,9 @@ static int amdgpu_cs_flush(struct radeon_cmdbuf *rcs,
rcs->used_gart_kb = 0;
rcs->used_vram_kb = 0;
-if (cs->ip_type == RING_GFX)
+if (cs->ip_type == AMD_IP_GFX)
ws->num_gfx_IBs++;
-else if (cs->ip_type == RING_DMA)
+else if (cs->ip_type == AMD_IP_SDMA)
ws->num_sdma_IBs++;
return error_code;

@@ -274,7 +274,7 @@ static unsigned radeon_lookup_or_add_real_buffer(struct radeon_drm_cs *cs,
* This doesn't have to be done if virtual memory is enabled,
* because there is no offset patching with virtual memory.
*/
-if (cs->ip_type != RING_DMA || cs->ws->info.r600_has_virtual_memory) {
+if (cs->ip_type != AMD_IP_SDMA || cs->ws->info.r600_has_virtual_memory) {
return i;
}
}
@@ -579,7 +579,7 @@ static int radeon_drm_cs_flush(struct radeon_cmdbuf *rcs,
struct radeon_cs_context *tmp;
switch (cs->ip_type) {
-case RING_DMA:
+case AMD_IP_SDMA:
/* pad DMA ring to 8 DWs */
if (cs->ws->info.chip_class <= GFX6) {
while (rcs->current.cdw & 7)
@@ -589,7 +589,7 @@ static int radeon_drm_cs_flush(struct radeon_cmdbuf *rcs,
radeon_emit(rcs, 0x00000000); /* NOP packet */
}
break;
-case RING_GFX:
+case AMD_IP_GFX:
/* pad GFX ring to 8 DWs to meet CP fetch alignment requirements
* r6xx, requires at least 4 dw alignment to avoid a hw bug.
*/
@@ -601,7 +601,7 @@ static int radeon_drm_cs_flush(struct radeon_cmdbuf *rcs,
radeon_emit(rcs, 0xffff1000); /* type3 nop packet */
}
break;
-case RING_UVD:
+case AMD_IP_UVD:
while (rcs->current.cdw & 15)
radeon_emit(rcs, 0x80000000); /* type2 nop packet */
break;
@@ -663,7 +663,7 @@ static int radeon_drm_cs_flush(struct radeon_cmdbuf *rcs,
}
switch (cs->ip_type) {
-case RING_DMA:
+case AMD_IP_SDMA:
cs->cst->flags[0] = 0;
cs->cst->flags[1] = RADEON_CS_RING_DMA;
cs->cst->cs.num_chunks = 3;
@@ -672,21 +672,21 @@ static int radeon_drm_cs_flush(struct radeon_cmdbuf *rcs,
}
break;
-case RING_UVD:
+case AMD_IP_UVD:
cs->cst->flags[0] = 0;
cs->cst->flags[1] = RADEON_CS_RING_UVD;
cs->cst->cs.num_chunks = 3;
break;
-case RING_VCE:
+case AMD_IP_VCE:
cs->cst->flags[0] = 0;
cs->cst->flags[1] = RADEON_CS_RING_VCE;
cs->cst->cs.num_chunks = 3;
break;
default:
-case RING_GFX:
-case RING_COMPUTE:
+case AMD_IP_GFX:
+case AMD_IP_COMPUTE:
cs->cst->flags[0] = RADEON_CS_KEEP_TILING_FLAGS;
cs->cst->flags[1] = RADEON_CS_RING_GFX;
cs->cst->cs.num_chunks = 3;
@@ -699,7 +699,7 @@ static int radeon_drm_cs_flush(struct radeon_cmdbuf *rcs,
cs->cst->flags[0] |= RADEON_CS_END_OF_FRAME;
cs->cst->cs.num_chunks = 3;
}
-if (cs->ip_type == RING_COMPUTE) {
+if (cs->ip_type == AMD_IP_COMPUTE) {
cs->cst->flags[1] = RADEON_CS_RING_COMPUTE;
cs->cst->cs.num_chunks = 3;
}
@@ -724,9 +724,9 @@ static int radeon_drm_cs_flush(struct radeon_cmdbuf *rcs,
rcs->used_vram_kb = 0;
rcs->used_gart_kb = 0;
-if (cs->ip_type == RING_GFX)
+if (cs->ip_type == AMD_IP_GFX)
cs->ws->num_gfx_IBs++;
-else if (cs->ip_type == RING_DMA)
+else if (cs->ip_type == AMD_IP_SDMA)
cs->ws->num_sdma_IBs++;
return 0;
}

@@ -304,12 +304,12 @@ static bool do_winsys_init(struct radeon_drm_winsys *ws)
ws->info.has_dedicated_vram = true;
}
-ws->info.num_rings[RING_GFX] = 1;
+ws->info.num_rings[AMD_IP_GFX] = 1;
/* Check for dma */
-ws->info.num_rings[RING_DMA] = 0;
+ws->info.num_rings[AMD_IP_SDMA] = 0;
/* DMA is disabled on R700. There is IB corruption and hangs. */
if (ws->info.chip_class >= EVERGREEN && ws->info.drm_minor >= 27) {
-ws->info.num_rings[RING_DMA] = 1;
+ws->info.num_rings[AMD_IP_SDMA] = 1;
}
/* Check for UVD and VCE */
@@ -321,7 +321,7 @@ static bool do_winsys_init(struct radeon_drm_winsys *ws)
if (radeon_get_drm_value(ws->fd, RADEON_INFO_RING_WORKING,
"UVD Ring working", &value)) {
ws->info.has_video_hw.uvd_decode = value;
-ws->info.num_rings[RING_UVD] = 1;
+ws->info.num_rings[AMD_IP_UVD] = 1;
}
value = RADEON_CS_RING_VCE;
@@ -331,7 +331,7 @@ static bool do_winsys_init(struct radeon_drm_winsys *ws)
if (radeon_get_drm_value(ws->fd, RADEON_INFO_VCE_FW_VERSION,
"VCE FW version", &value)) {
ws->info.vce_fw_version = value;
-ws->info.num_rings[RING_VCE] = 1;
+ws->info.num_rings[AMD_IP_VCE] = 1;
ws->info.has_video_hw.vce_encode = true;
}
}