pipe: Extend get_feedback with additional metadata
Reviewed-by: Jesse Natalie <jenatali@microsoft.com>
Reviewed-by: Ruijing Dong <ruijing.dong@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/26223>
@@ -257,7 +257,10 @@ trace_video_codec_flush(struct pipe_video_codec *_codec)
 }
 
 static void
-trace_video_codec_get_feedback(struct pipe_video_codec *_codec, void *feedback, unsigned *size)
+trace_video_codec_get_feedback(struct pipe_video_codec *_codec,
+                               void *feedback,
+                               unsigned *size,
+                               struct pipe_enc_feedback_metadata* metadata)
 {
    struct trace_video_codec *tr_vcodec = trace_video_codec(_codec);
    struct pipe_video_codec *codec = tr_vcodec->video_codec;
@@ -268,7 +271,7 @@ trace_video_codec_get_feedback(struct pipe_video_codec *_codec, void *feedback,
    trace_dump_arg(ptr, size);
    trace_dump_call_end();
 
-   codec->get_feedback(codec, feedback, size);
+   codec->get_feedback(codec, feedback, size, metadata);
 }
 
 static int
@@ -2102,7 +2102,7 @@ d3d12_video_encoder_encode_bitstream(struct pipe_video_codec * codec,
 }
 
 void
-d3d12_video_encoder_get_feedback(struct pipe_video_codec *codec, void *feedback, unsigned *size)
+d3d12_video_encoder_get_feedback(struct pipe_video_codec *codec, void *feedback, unsigned *size, struct pipe_enc_feedback_metadata* metadata)
 {
    struct d3d12_video_encoder *pD3D12Enc = (struct d3d12_video_encoder *) codec;
    assert(pD3D12Enc);
@@ -71,7 +71,7 @@ d3d12_video_encoder_encode_bitstream(struct pipe_video_codec * codec,
  * get encoder feedback
  */
 void
-d3d12_video_encoder_get_feedback(struct pipe_video_codec *codec, void *feedback, unsigned *size);
+d3d12_video_encoder_get_feedback(struct pipe_video_codec *codec, void *feedback, unsigned *size, struct pipe_enc_feedback_metadata* metadata);
 
 /**
  * end encoding of the current frame
@@ -352,7 +352,8 @@ static void rvce_end_frame(struct pipe_video_codec *encoder,
 }
 
 static void rvce_get_feedback(struct pipe_video_codec *encoder,
-                              void *feedback, unsigned *size)
+                              void *feedback, unsigned *size,
+                              struct pipe_enc_feedback_metadata* metadata)
 {
    struct rvce_encoder *enc = (struct rvce_encoder*)encoder;
    struct rvid_buffer *fb = feedback;
@@ -257,7 +257,7 @@ static void radeon_uvd_enc_destroy(struct pipe_video_codec *encoder)
 }
 
 static void radeon_uvd_enc_get_feedback(struct pipe_video_codec *encoder, void *feedback,
-                                        unsigned *size)
+                                        unsigned *size, struct pipe_enc_feedback_metadata* metadata)
 {
    struct radeon_uvd_encoder *enc = (struct radeon_uvd_encoder *)encoder;
    struct rvid_buffer *fb = feedback;
@@ -322,7 +322,8 @@ static void rvce_end_frame(struct pipe_video_codec *encoder, struct pipe_video_b
    }
 }
 
-static void rvce_get_feedback(struct pipe_video_codec *encoder, void *feedback, unsigned *size)
+static void rvce_get_feedback(struct pipe_video_codec *encoder, void *feedback, unsigned *size,
+                              struct pipe_enc_feedback_metadata* metadata)
 {
    struct rvce_encoder *enc = (struct rvce_encoder *)encoder;
    struct rvid_buffer *fb = feedback;
@@ -85,7 +85,7 @@ static void get_task_info_param(struct rvce_encoder *enc)
    enc->enc_pic.ti.offset_of_next_task_info = 0xffffffff;
 }
 
-static void get_feedback_buffer_param(struct rvce_encoder *enc)
+static void get_feedback_buffer_param(struct rvce_encoder *enc, struct pipe_enc_feedback_metadata* metadata)
 {
    enc->enc_pic.fb.feedback_ring_size = 0x00000001;
 }
@@ -141,7 +141,7 @@ void si_vce_52_get_param(struct rvce_encoder *enc, struct pipe_h264_enc_picture_
    get_motion_estimation_param(enc, pic);
    get_pic_control_param(enc, pic);
    get_task_info_param(enc);
-   get_feedback_buffer_param(enc);
+   get_feedback_buffer_param(enc, NULL);
    get_vui_param(enc, pic);
    get_config_ext_param(enc);
@@ -1055,7 +1055,7 @@ static void radeon_enc_destroy(struct pipe_video_codec *encoder)
 }
 
 static void radeon_enc_get_feedback(struct pipe_video_codec *encoder, void *feedback,
-                                    unsigned *size)
+                                    unsigned *size, struct pipe_enc_feedback_metadata* metadata)
 {
    struct radeon_encoder *enc = (struct radeon_encoder *)encoder;
    struct rvid_buffer *fb = feedback;
@@ -1144,7 +1144,8 @@ static void virgl_video_flush(struct pipe_video_codec *codec)
 
 static void virgl_video_get_feedback(struct pipe_video_codec *codec,
                                      void *feedback,
-                                     unsigned *size)
+                                     unsigned *size,
+                                     struct pipe_enc_feedback_metadata* metadata)
 {
    struct virgl_video_codec *vcdc = virgl_video_codec(codec);
    struct virgl_context *vctx = vcdc->vctx;
@@ -162,7 +162,7 @@ void vid_enc_BufferEncoded_common(vid_enc_PrivateType * priv, OMX_BUFFERHEADERTY
 
    /* ------------- get size of result ----------------- */
 
-   priv->codec->get_feedback(priv->codec, task->feedback, &size);
+   priv->codec->get_feedback(priv->codec, task->feedback, &size, NULL);
 
    output->nOffset = 0;
    output->nFilledLen = size; /* mark buffer as full */
@@ -195,9 +195,44 @@ VAStatus vlVaMapBuffer2(VADriverContextP ctx, VABufferID buf_id,
          return VA_STATUS_ERROR_INVALID_BUFFER;
 
       if (buf->type == VAEncCodedBufferType) {
-         ((VACodedBufferSegment*)buf->data)->buf = *pbuff;
-         ((VACodedBufferSegment*)buf->data)->size = buf->coded_size;
-         *pbuff = buf->data;
+         VACodedBufferSegment* curr_buf_ptr = (VACodedBufferSegment*) buf->data;
+
+         if ((buf->extended_metadata.present_metadata & PIPE_VIDEO_FEEDBACK_METADATA_TYPE_ENCODE_RESULT) &&
+             (buf->extended_metadata.encode_result & PIPE_VIDEO_FEEDBACK_METADATA_ENCODE_FLAG_FAILED)) {
+            curr_buf_ptr->status = VA_CODED_BUF_STATUS_BAD_BITSTREAM;
+            return VA_STATUS_ERROR_OPERATION_FAILED;
+         }
+
+         if (buf->extended_metadata.encode_result & PIPE_VIDEO_FEEDBACK_METADATA_ENCODE_FLAG_MAX_FRAME_SIZE_OVERFLOW)
+            curr_buf_ptr->status |= VA_CODED_BUF_STATUS_FRAME_SIZE_OVERFLOW;
+
+         if ((buf->extended_metadata.present_metadata & PIPE_VIDEO_FEEDBACK_METADATA_TYPE_CODEC_UNIT_LOCATION) == 0) {
+            curr_buf_ptr->buf = *pbuff;
+            curr_buf_ptr->size = buf->coded_size;
+            *pbuff = buf->data;
+         } else {
+            uint8_t* compressed_bitstream_data = *pbuff;
+            *pbuff = buf->data;
+
+            for (size_t i = 0; i < buf->extended_metadata.codec_unit_metadata_count - 1; i++) {
+               curr_buf_ptr->next = CALLOC(1, sizeof(VACodedBufferSegment));
+               if (!curr_buf_ptr->next)
+                  return VA_STATUS_ERROR_ALLOCATION_FAILED;
+               curr_buf_ptr = curr_buf_ptr->next;
+            }
+            curr_buf_ptr->next = NULL;
+
+            curr_buf_ptr = buf->data;
+            for (size_t i = 0; i < buf->extended_metadata.codec_unit_metadata_count; i++) {
+               curr_buf_ptr->status = VA_CODED_BUF_STATUS_SINGLE_NALU;
+               curr_buf_ptr->size = buf->extended_metadata.codec_unit_metadata[i].size;
+               curr_buf_ptr->buf = compressed_bitstream_data + buf->extended_metadata.codec_unit_metadata[i].offset;
+               if (buf->extended_metadata.codec_unit_metadata[i].flags & PIPE_VIDEO_CODEC_UNIT_LOCATION_FLAG_MAX_SLICE_SIZE_OVERFLOW)
+                  curr_buf_ptr->status |= VA_CODED_BUF_STATUS_SLICE_OVERFLOW_MASK;
+
+               curr_buf_ptr = curr_buf_ptr->next;
+            }
+         }
       }
    } else {
      mtx_unlock(&drv->mutex);
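For context, a VA-API client sees the result of the mapping above as a chain of VACodedBufferSegment entries. A minimal client-side sketch, not part of this change (the helper name and handles are made up; the coded buffer is assumed to be already synced):

#include <stdio.h>
#include <va/va.h>

/* Hypothetical client-side helper: write out every coded unit reported for a
 * mapped coded buffer. */
static void
write_coded_units(VADisplay dpy, VABufferID coded_buf, FILE *out)
{
   VACodedBufferSegment *seg = NULL;

   if (vaMapBuffer(dpy, coded_buf, (void **)&seg) != VA_STATUS_SUCCESS)
      return;

   for (; seg != NULL; seg = (VACodedBufferSegment *)seg->next) {
      /* One segment per codec unit when the driver reports unit locations,
       * otherwise a single segment covering the whole coded frame. */
      fwrite(seg->buf, 1, seg->size, out);
      if (seg->status & VA_CODED_BUF_STATUS_SLICE_OVERFLOW_MASK)
         fprintf(stderr, "encoder reported a slice size overflow\n");
   }

   vaUnmapBuffer(dpy, coded_buf);
}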
@@ -278,7 +313,17 @@ vlVaDestroyBuffer(VADriverContextP ctx, VABufferID buf_id)
       buf->derived_image_buffer->destroy(buf->derived_image_buffer);
    }
 
-   FREE(buf->data);
+   if (buf->type == VAEncCodedBufferType) {
+      VACodedBufferSegment* node = buf->data;
+      while (node) {
+         VACodedBufferSegment* next = (VACodedBufferSegment*) node->next;
+         FREE(node);
+         node = next;
+      }
+   } else {
+      FREE(buf->data);
+   }
+
    FREE(buf);
    handle_table_remove(VL_VA_DRIVER(ctx)->htab, buf_id);
    mtx_unlock(&drv->mutex);
@@ -534,7 +579,7 @@ vlVaSyncBuffer(VADriverContextP ctx, VABufferID buf_id, uint64_t timeout_ns)
          vlVaSurface* surf = handle_table_get(drv->htab, buf->associated_encode_input_surf);
 
          if ((buf->feedback) && (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE)) {
-            context->decoder->get_feedback(context->decoder, buf->feedback, &(buf->coded_size));
+            context->decoder->get_feedback(context->decoder, buf->feedback, &(buf->coded_size), &(buf->extended_metadata));
             buf->feedback = NULL;
             /* Also mark the associated render target (encode source texture) surface as done
                in case they call vaSyncSurface on it to avoid getting the feedback twice*/
@@ -1197,6 +1197,17 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
       context->desc.base.input_full_range = surf->full_range;
       context->desc.base.output_format = surf->encoder_format;
 
+      int driver_metadata_support = drv->pipe->screen->get_video_param(drv->pipe->screen,
+                                                                       context->decoder->profile,
+                                                                       context->decoder->entrypoint,
+                                                                       PIPE_VIDEO_CAP_ENC_SUPPORTS_FEEDBACK_METADATA);
+      if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG4_AVC)
+         context->desc.h264enc.requested_metadata = driver_metadata_support;
+      else if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_HEVC)
+         context->desc.h265enc.requested_metadata = driver_metadata_support;
+      else if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_AV1)
+         context->desc.av1enc.requested_metadata = driver_metadata_support;
+
       context->decoder->begin_frame(context->decoder, context->target, &context->desc.base);
       context->decoder->encode_bitstream(context->decoder, context->target,
                                          coded_buf->derived_surface.resource, &feedback);
@@ -188,7 +188,7 @@ vlVaSyncSurface(VADriverContextP ctx, VASurfaceID render_target)
            }
         }
      }
-     context->decoder->get_feedback(context->decoder, surf->feedback, &(surf->coded_buf->coded_size));
+     context->decoder->get_feedback(context->decoder, surf->feedback, &(surf->coded_buf->coded_size), &(surf->coded_buf->extended_metadata));
      surf->feedback = NULL;
      surf->coded_buf->feedback = NULL;
      surf->coded_buf->associated_encode_input_surf = VA_INVALID_ID;
@@ -321,6 +321,7 @@ typedef struct {
    unsigned int export_refcount;
    VABufferInfo export_state;
    unsigned int coded_size;
+   struct pipe_enc_feedback_metadata extended_metadata;
    struct pipe_video_buffer *derived_image_buffer;
    void *feedback;
    VASurfaceID associated_encode_input_surf;
@@ -118,7 +118,10 @@ struct pipe_video_codec
    /**
    * get encoder feedback
    */
-   void (*get_feedback)(struct pipe_video_codec *codec, void *feedback, unsigned *size);
+   void (*get_feedback)(struct pipe_video_codec *codec,
+                        void *feedback,
+                        unsigned *size,
+                        struct pipe_enc_feedback_metadata* metadata /* opt NULL */);
 
    /**
    * Get decoder fence.
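A minimal sketch of how a front end might use the extended hook; the helper name is hypothetical, and the metadata argument may be NULL when only the coded size is needed, as the OMX and VCE call sites above do:

#include <stdbool.h>
#include "pipe/p_video_codec.h"
#include "pipe/p_video_state.h"

/* Hypothetical front-end helper: fetch the coded size and check whether the
 * driver flagged the frame as failed. Passing NULL instead of &md is valid. */
static unsigned
fetch_encode_result(struct pipe_video_codec *codec, void *feedback, bool *failed)
{
   unsigned coded_size = 0;
   struct pipe_enc_feedback_metadata md = {0};

   codec->get_feedback(codec, feedback, &coded_size, &md);

   *failed = (md.present_metadata & PIPE_VIDEO_FEEDBACK_METADATA_TYPE_ENCODE_RESULT) &&
             (md.encode_result & PIPE_VIDEO_FEEDBACK_METADATA_ENCODE_FLAG_FAILED);
   return coded_size;
}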
@@ -142,6 +142,36 @@ enum pipe_video_cap
    PIPE_VIDEO_CAP_ENC_MAX_TILE_ROWS = 41,
    PIPE_VIDEO_CAP_ENC_MAX_TILE_COLS = 42,
    PIPE_VIDEO_CAP_ENC_INTRA_REFRESH = 43,
+   PIPE_VIDEO_CAP_ENC_SUPPORTS_FEEDBACK_METADATA = 44,
 };
 
+enum pipe_video_feedback_encode_result_flags
+{
+   /* Requires PIPE_VIDEO_FEEDBACK_METADATA_TYPE_ENCODE_RESULT */
+   PIPE_VIDEO_FEEDBACK_METADATA_ENCODE_FLAG_OK = 0x0,
+   PIPE_VIDEO_FEEDBACK_METADATA_ENCODE_FLAG_FAILED = 0x1,
+   /* Requires PIPE_VIDEO_FEEDBACK_METADATA_TYPE_MAX_FRAME_SIZE_OVERFLOW */
+   PIPE_VIDEO_FEEDBACK_METADATA_ENCODE_FLAG_MAX_FRAME_SIZE_OVERFLOW = 0x2,
+};
+
+enum codec_unit_location_flags
+{
+   PIPE_VIDEO_CODEC_UNIT_LOCATION_FLAG_NONE = 0x0,
+   /* Requires PIPE_VIDEO_FEEDBACK_METADATA_TYPE_MAX_SLICE_SIZE_OVERFLOW */
+   PIPE_VIDEO_CODEC_UNIT_LOCATION_FLAG_MAX_SLICE_SIZE_OVERFLOW = 0x1,
+};
+
+/* To be used with PIPE_VIDEO_CAP_ENC_SUPPORTS_FEEDBACK_METADATA
+ * for checking gallium driver support and to indicate the
+ * different metadata types in an encode operation
+ */
+enum pipe_video_feedback_metadata_type
+{
+   PIPE_VIDEO_FEEDBACK_METADATA_TYPE_BITSTREAM_SIZE = 0x0,
+   PIPE_VIDEO_FEEDBACK_METADATA_TYPE_ENCODE_RESULT = 0x1,
+   PIPE_VIDEO_FEEDBACK_METADATA_TYPE_CODEC_UNIT_LOCATION = 0x2,
+   PIPE_VIDEO_FEEDBACK_METADATA_TYPE_MAX_FRAME_SIZE_OVERFLOW = 0x4,
+   PIPE_VIDEO_FEEDBACK_METADATA_TYPE_MAX_SLICE_SIZE_OVERFLOW = 0x8,
+};
+
 enum pipe_video_av1_enc_filter_mode
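On the driver side, support is advertised through the new cap by OR-ing the metadata type bits the encoder can produce; the front end then stores that mask in requested_metadata (as vlVaEndPicture does above). A hypothetical sketch of what a driver's get_video_param() could return for the cap; the exact combination is illustrative, and BITSTREAM_SIZE (0x0) is implicitly always available:

#include "pipe/p_video_enums.h"

/* Hypothetical helper for PIPE_VIDEO_CAP_ENC_SUPPORTS_FEEDBACK_METADATA. */
static int
enc_supported_feedback_metadata(void)
{
   return PIPE_VIDEO_FEEDBACK_METADATA_TYPE_ENCODE_RESULT |
          PIPE_VIDEO_FEEDBACK_METADATA_TYPE_CODEC_UNIT_LOCATION |
          PIPE_VIDEO_FEEDBACK_METADATA_TYPE_MAX_FRAME_SIZE_OVERFLOW |
          PIPE_VIDEO_FEEDBACK_METADATA_TYPE_MAX_SLICE_SIZE_OVERFLOW;
}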
@@ -599,6 +599,9 @@ struct pipe_h264_enc_picture_desc
 
    unsigned num_slice_descriptors;
    struct h264_slice_descriptor slices_descriptors[128];
+
+   bool insert_aud_nalu;
+   enum pipe_video_feedback_metadata_type requested_metadata;
 };
 
 struct pipe_h265_enc_seq_param
@@ -727,6 +730,7 @@ struct pipe_h265_enc_picture_desc
 
    unsigned num_slice_descriptors;
    struct h265_slice_descriptor slices_descriptors[128];
+   enum pipe_video_feedback_metadata_type requested_metadata;
 };
 
 struct pipe_av1_enc_rate_control
@@ -936,6 +940,7 @@ struct pipe_av1_enc_picture_desc
       uint8_t temporal_id;
       uint8_t spatial_id;
    } tg_obu_header;
+   enum pipe_video_feedback_metadata_type requested_metadata;
 };
 
 struct pipe_h265_sps
@@ -1784,6 +1789,35 @@ union pipe_av1_enc_cap_features_ext2 {
    uint32_t value;
 };
 
+struct codec_unit_location_t
+{
+   uint64_t offset;
+   uint64_t size;
+   enum codec_unit_location_flags flags;
+};
+
+struct pipe_enc_feedback_metadata
+{
+   /*
+    * Driver writes the metadata types present in this struct
+    */
+   enum pipe_video_feedback_metadata_type present_metadata;
+
+   /*
+    * Driver writes the result of encoding the associated frame.
+    * Requires PIPE_VIDEO_FEEDBACK_METADATA_TYPE_ENCODE_RESULT
+    */
+   enum pipe_video_feedback_encode_result_flags encode_result;
+
+   /*
+    * Driver fills in with coded headers information
+    * and a number codec_unit_metadata_count of valid entries
+    * Requires PIPE_VIDEO_FEEDBACK_METADATA_TYPE_CODEC_UNIT_LOCATION
+    */
+   struct codec_unit_location_t codec_unit_metadata[256];
+   unsigned codec_unit_metadata_count;
+};
+
 #ifdef __cplusplus
 }
 #endif
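A hypothetical driver-side sketch of filling the new struct at get_feedback() time, splitting the bitstream into a headers unit and a slice unit; the helper name and the offset/size parameters stand in for whatever the hardware actually reports:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include "pipe/p_video_state.h"

/* Hypothetical helper: report two codec units (headers + slice) plus the
 * overall encode result. Real drivers fill this from their HW feedback. */
static void
fill_feedback_metadata(struct pipe_enc_feedback_metadata *md,
                       uint64_t headers_size, uint64_t slice_offset,
                       uint64_t slice_size, bool slice_overflow)
{
   memset(md, 0, sizeof(*md));
   md->present_metadata = PIPE_VIDEO_FEEDBACK_METADATA_TYPE_ENCODE_RESULT |
                          PIPE_VIDEO_FEEDBACK_METADATA_TYPE_CODEC_UNIT_LOCATION;
   md->encode_result = PIPE_VIDEO_FEEDBACK_METADATA_ENCODE_FLAG_OK;

   /* unit 0: coded headers (e.g. SPS/PPS) at the start of the bitstream */
   md->codec_unit_metadata[0].offset = 0;
   md->codec_unit_metadata[0].size = headers_size;
   md->codec_unit_metadata[0].flags = PIPE_VIDEO_CODEC_UNIT_LOCATION_FLAG_NONE;

   /* unit 1: the slice data */
   md->codec_unit_metadata[1].offset = slice_offset;
   md->codec_unit_metadata[1].size = slice_size;
   md->codec_unit_metadata[1].flags = slice_overflow ?
      PIPE_VIDEO_CODEC_UNIT_LOCATION_FLAG_MAX_SLICE_SIZE_OVERFLOW :
      PIPE_VIDEO_CODEC_UNIT_LOCATION_FLAG_NONE;

   md->codec_unit_metadata_count = 2;
}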