diff --git a/debian/patches/0021-add-fixes-for-qsv-vpp-filters.patch b/debian/patches/0021-add-fixes-for-qsv-vpp-filters.patch
index 2db4a730d87..422c2b2e2aa 100644
--- a/debian/patches/0021-add-fixes-for-qsv-vpp-filters.patch
+++ b/debian/patches/0021-add-fixes-for-qsv-vpp-filters.patch
@@ -25,11 +25,32 @@ Index: FFmpeg/libavfilter/qsvvpp.c
          else if (qsv_frame->frame->repeat_pict == 2)
              qsv_frame->surface.Info.PicStruct |= MFX_PICSTRUCT_FRAME_DOUBLING;
          else if (qsv_frame->frame->repeat_pict == 4)
+@@ -1024,6 +1028,20 @@ int ff_qsvvpp_filter_frame(QSVVPPContext
+         out_frame->frame->pts = av_rescale_q(out_frame->surface.Data.TimeStamp,
+                                              default_tb, outlink->time_base);
+ 
++        /* Copy the color side data */
++        if (in_frame->frame->color_primaries != -1)
++            out_frame->frame->color_primaries = in_frame->frame->color_primaries;
++        if (in_frame->frame->color_trc != -1)
++            out_frame->frame->color_trc = in_frame->frame->color_trc;
++        if (in_frame->frame->colorspace != -1)
++            out_frame->frame->colorspace = in_frame->frame->colorspace;
++        if (in_frame->frame->color_range != -1)
++            out_frame->frame->color_range = in_frame->frame->color_range;
++
++        ret = av_frame_copy_side_data(out_frame->frame, in_frame->frame, 0);
++        if (ret < 0)
++            return ret;
++
+         out_frame->queued++;
+         aframe = (QSVAsyncFrame){ sync, out_frame };
+         av_fifo_write(s->async_fifo, &aframe, 1);
 Index: FFmpeg/libavfilter/vf_overlay_qsv.c
 ===================================================================
 --- FFmpeg.orig/libavfilter/vf_overlay_qsv.c
 +++ FFmpeg/libavfilter/vf_overlay_qsv.c
-@@ -228,40 +228,47 @@ static int config_overlay_input(AVFilter
+@@ -228,40 +228,51 @@ static int config_overlay_input(AVFilter
  
  static int process_frame(FFFrameSync *fs)
  {
@@ -69,6 +90,10 @@ Index: FFmpeg/libavfilter/vf_overlay_qsv.c
 +    if (ret < 0 && ret != AVERROR(EAGAIN))
 +        return ret;
 +
++    /* remove all side data of the overlay frame*/
++    if (overlay)
++        av_frame_remove_all_side_data(overlay);
++
 +    /* composite overlay frame */
 +    /* or overwrite main frame again if the overlay frame isn't ready yet */
 +    return ff_qsvvpp_filter_frame(qsv, overlay ? in1 : in0, overlay ? overlay : main);
@@ -102,7 +127,7 @@ Index: FFmpeg/libavfilter/vf_overlay_qsv.c
      return ff_framesync_configure(&s->fs);
  }
  
-@@ -282,12 +289,6 @@ static int config_output(AVFilterLink *o
+@@ -282,12 +293,6 @@ static int config_output(AVFilterLink *o
          return AVERROR(EINVAL);
      } else if (in0->format == AV_PIX_FMT_QSV) {
          AVHWFramesContext *hw_frame0 = (AVHWFramesContext *)in0->hw_frames_ctx->data;
@@ -115,7 +140,7 @@ Index: FFmpeg/libavfilter/vf_overlay_qsv.c
          vpp->qsv_param.out_sw_format = hw_frame0->sw_format;
      }
  
-@@ -369,6 +370,7 @@ static int overlay_qsv_query_formats(AVF
+@@ -369,6 +374,7 @@ static int overlay_qsv_query_formats(AVF
      static const enum AVPixelFormat main_in_fmts[] = {
          AV_PIX_FMT_YUV420P,
          AV_PIX_FMT_NV12,
@@ -123,7 +148,7 @@ Index: FFmpeg/libavfilter/vf_overlay_qsv.c
          AV_PIX_FMT_YUYV422,
          AV_PIX_FMT_RGB32,
          AV_PIX_FMT_QSV,
-@@ -376,6 +378,7 @@ static int overlay_qsv_query_formats(AVF
+@@ -376,6 +382,7 @@ static int overlay_qsv_query_formats(AVF
      };
      static const enum AVPixelFormat out_pix_fmts[] = {
          AV_PIX_FMT_NV12,
@@ -166,7 +191,34 @@ Index: FFmpeg/libavfilter/vf_vpp_qsv.c
  static int vpp_set_frame_ext_params(AVFilterContext *ctx, const AVFrame *in, AVFrame *out, QSVVPPFrameParam *fp)
  {
  #if QSV_ONEVPL
-@@ -494,9 +518,9 @@ static int vpp_set_frame_ext_params(AVFi
+@@ -461,14 +485,19 @@ static int vpp_set_frame_ext_params(AVFi
+ 
+     memset(&clli_conf, 0, sizeof(mfxExtContentLightLevelInfo));
+     sd = av_frame_get_side_data(in, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL);
+-    if (vpp->tonemap && sd) {
+-        AVContentLightMetadata *clm = (AVContentLightMetadata *)sd->data;
++    if (vpp->tonemap) {
++        AVContentLightMetadata *clm = sd ? (AVContentLightMetadata *)sd->data : NULL;
+ 
+-        clli_conf.Header.BufferId = MFX_EXTBUFF_CONTENT_LIGHT_LEVEL_INFO;
+-        clli_conf.Header.BufferSz = sizeof(mfxExtContentLightLevelInfo);
+-        clli_conf.MaxContentLightLevel = FFMIN(clm->MaxCLL, 65535);
+-        clli_conf.MaxPicAverageLightLevel = FFMIN(clm->MaxFALL, 65535);
+-        tm = 1;
++        // Dumped from VP HAL, VPL requires at least one type of the metadata to trigger tone-mapping
++        #define HAL_HDR_DEFAULT_MAXCLL 4000
++        #define HAL_HDR_DEFAULT_MAXFALL 400
++        if (clm || !tm) {
++            clli_conf.Header.BufferId = MFX_EXTBUFF_CONTENT_LIGHT_LEVEL_INFO;
++            clli_conf.Header.BufferSz = sizeof(mfxExtContentLightLevelInfo);
++            clli_conf.MaxContentLightLevel = FFMIN(clm ? clm->MaxCLL : HAL_HDR_DEFAULT_MAXCLL, 65535);
++            clli_conf.MaxPicAverageLightLevel = FFMIN(clm ? clm->MaxFALL : HAL_HDR_DEFAULT_MAXFALL, 65535);
++            tm = 1;
++        }
+     }
+ 
+     if (tm) {
+@@ -494,9 +523,9 @@ static int vpp_set_frame_ext_params(AVFi
      outvsi_conf.Header.BufferId = MFX_EXTBUFF_VIDEO_SIGNAL_INFO_OUT;
      outvsi_conf.Header.BufferSz = sizeof(mfxExtVideoSignalInfo);
      outvsi_conf.VideoFullRange = (out->color_range == AVCOL_RANGE_JPEG);
@@ -179,7 +231,7 @@ Index: FFmpeg/libavfilter/vf_vpp_qsv.c
      outvsi_conf.ColourDescriptionPresent = 1;
  
      if (memcmp(&vpp->invsi_conf, &invsi_conf, sizeof(mfxExtVideoSignalInfo)) ||
-@@ -686,12 +710,24 @@ static int config_output(AVFilterLink *o
+@@ -686,12 +715,24 @@ static int config_output(AVFilterLink *o
      if (inlink->w != outlink->w || inlink->h != outlink->h ||
          in_format != vpp->out_format) {
          if (QSV_RUNTIME_VERSION_ATLEAST(mfx_version, 1, 19)) {
@@ -208,7 +260,7 @@ Index: FFmpeg/libavfilter/vf_vpp_qsv.c
  
              INIT_MFX_EXTBUF(scale_conf, MFX_EXTBUFF_VPP_SCALING);
              SET_MFX_PARAM_FIELD(scale_conf, ScalingMode, mode);
-@@ -880,19 +916,13 @@ static const AVOption vpp_options[] = {
+@@ -880,19 +921,13 @@ static const AVOption vpp_options[] = {
      { "height", "Output video height(0=input video height, -1=keep input video aspect)", OFFSET(oh), AV_OPT_TYPE_STRING, { .str="w*ch/cw" }, 0, 255, .flags = FLAGS },
      { "format", "Output pixel format", OFFSET(output_format_str), AV_OPT_TYPE_STRING, { .str = "same" }, .flags = FLAGS },
      { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = 4 }, 0, INT_MAX, .flags = FLAGS },
@@ -229,7 +281,7 @@ Index: FFmpeg/libavfilter/vf_vpp_qsv.c
      { "rate", "Generate output at frame rate or field rate, available only for deinterlace mode", OFFSET(field_rate), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS, .unit = "rate" },
-@@ -923,8 +953,9 @@ static const AVOption vpp_options[] = {
+@@ -923,8 +958,9 @@ static const AVOption vpp_options[] = {
      { "out_color_transfer", "Output color transfer characteristics", OFFSET(color_transfer_str), AV_OPT_TYPE_STRING, { .str = NULL }, .flags = FLAGS },
  
@@ -240,7 +292,7 @@ Index: FFmpeg/libavfilter/vf_vpp_qsv.c
      { NULL }
  };
  
-@@ -978,19 +1009,14 @@ static const AVOption qsvscale_options[]
+@@ -978,19 +1014,14 @@ static const AVOption qsvscale_options[]
      { "h", "Output video height(0=input video height, -1=keep input video aspect)", OFFSET(oh), AV_OPT_TYPE_STRING, { .str = "ih" }, .flags = FLAGS },
      { "format", "Output pixel format", OFFSET(output_format_str), AV_OPT_TYPE_STRING, { .str = "same" }, .flags = FLAGS },
@@ -262,7 +314,7 @@ Index: FFmpeg/libavfilter/vf_vpp_qsv.c
      { NULL },
  };
  
-@@ -1015,6 +1041,7 @@ static const AVOption qsvdeint_options[]
+@@ -1015,6 +1046,7 @@ static const AVOption qsvdeint_options[]
      { "bob", "bob algorithm", 0, AV_OPT_TYPE_CONST, {.i64 = MFX_DEINTERLACING_BOB}, MFX_DEINTERLACING_BOB, MFX_DEINTERLACING_ADVANCED, FLAGS, .unit = "mode"},
      { "advanced", "Motion adaptive algorithm", 0, AV_OPT_TYPE_CONST, {.i64 = MFX_DEINTERLACING_ADVANCED}, MFX_DEINTERLACING_BOB, MFX_DEINTERLACING_ADVANCED, FLAGS, .unit = "mode"},
@@ -270,3 +322,148 @@ Index: FFmpeg/libavfilter/vf_vpp_qsv.c
      { NULL },
  };
  
+Index: FFmpeg/libavutil/frame.c
+===================================================================
+--- FFmpeg.orig/libavutil/frame.c
++++ FFmpeg/libavutil/frame.c
+@@ -98,6 +98,18 @@ static void remove_side_data(AVFrameSide
+     }
+ }
+ 
++static void remove_all_side_data(AVFrameSideData ***sd, int *nb_side_data)
++{
++    for (int i = *nb_side_data - 1; i >= 0; i--) {
++        AVFrameSideData *entry = ((*sd)[i]);
++
++        free_side_data(&entry);
++
++        ((*sd)[i]) = ((*sd)[*nb_side_data - 1]);
++        (*nb_side_data)--;
++    }
++}
++
+ static void remove_side_data_by_entry(AVFrameSideData ***sd, int *nb_sd,
+                                       const AVFrameSideData *target)
+ {
+@@ -269,6 +281,36 @@ int av_frame_get_buffer(AVFrame *frame,
+     return AVERROR(EINVAL);
+ }
+ 
++int av_frame_copy_side_data(AVFrame* dst, const AVFrame* src, int flags)
++{
++    for (unsigned i = 0; i < src->nb_side_data; i++) {
++        const AVFrameSideData *sd_src = src->side_data[i];
++        AVFrameSideData *sd_dst;
++        if (   sd_src->type == AV_FRAME_DATA_PANSCAN
++            && (src->width != dst->width || src->height != dst->height))
++            continue;
++        if (flags & AV_FRAME_COPY_PROPS_FORCECOPY) {
++            sd_dst = av_frame_new_side_data(dst, sd_src->type,
++                                            sd_src->size);
++            if (!sd_dst) {
++                frame_side_data_wipe(dst);
++                return AVERROR(ENOMEM);
++            }
++            memcpy(sd_dst->data, sd_src->data, sd_src->size);
++        } else {
++            AVBufferRef *ref = av_buffer_ref(sd_src->buf);
++            sd_dst = av_frame_new_side_data_from_buf(dst, sd_src->type, ref);
++            if (!sd_dst) {
++                av_buffer_unref(&ref);
++                frame_side_data_wipe(dst);
++                return AVERROR(ENOMEM);
++            }
++        }
++        av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
++    }
++    return 0;
++}
++
+ static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
+ {
+     int ret;
+@@ -320,31 +362,9 @@ FF_ENABLE_DEPRECATION_WARNINGS
+ 
+     av_dict_copy(&dst->metadata, src->metadata, 0);
+ 
+-    for (int i = 0; i < src->nb_side_data; i++) {
+-        const AVFrameSideData *sd_src = src->side_data[i];
+-        AVFrameSideData *sd_dst;
+-        if (   sd_src->type == AV_FRAME_DATA_PANSCAN
+-            && (src->width != dst->width || src->height != dst->height))
+-            continue;
+-        if (force_copy) {
+-            sd_dst = av_frame_new_side_data(dst, sd_src->type,
+-                                            sd_src->size);
+-            if (!sd_dst) {
+-                frame_side_data_wipe(dst);
+-                return AVERROR(ENOMEM);
+-            }
+-            memcpy(sd_dst->data, sd_src->data, sd_src->size);
+-        } else {
+-            AVBufferRef *ref = av_buffer_ref(sd_src->buf);
+-            sd_dst = av_frame_new_side_data_from_buf(dst, sd_src->type, ref);
+-            if (!sd_dst) {
+-                av_buffer_unref(&ref);
+-                frame_side_data_wipe(dst);
+-                return AVERROR(ENOMEM);
+-            }
+-        }
+-        av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
+-    }
++    if (ret = av_frame_copy_side_data(dst, src,
++            force_copy ? AV_FRAME_COPY_PROPS_FORCECOPY : 0) < 0)
++        return ret;
+ 
+     ret = av_buffer_replace(&dst->opaque_ref, src->opaque_ref);
+     ret |= av_buffer_replace(&dst->private_ref, src->private_ref);
+@@ -906,6 +926,11 @@ void av_frame_remove_side_data(AVFrame *
+     remove_side_data(&frame->side_data, &frame->nb_side_data, type);
+ }
+ 
++void av_frame_remove_all_side_data(AVFrame *frame)
++{
++    remove_all_side_data(&frame->side_data, &frame->nb_side_data);
++}
++
+ const char *av_frame_side_data_name(enum AVFrameSideDataType type)
+ {
+     switch(type) {
+Index: FFmpeg/libavutil/frame.h
+===================================================================
+--- FFmpeg.orig/libavutil/frame.h
++++ FFmpeg/libavutil/frame.h
+@@ -901,6 +901,21 @@ int av_frame_copy(AVFrame *dst, const AV
+  */
+ int av_frame_copy_props(AVFrame *dst, const AVFrame *src);
+ 
++/** Copy actual data buffers instead of references.
++ */
++#define AV_FRAME_COPY_PROPS_FORCECOPY 1
++
++/**
++ * Copy only side-data from src to dst.
++ *
++ * @param dst a frame to which the side data should be copied.
++ * @param src a frame from which to copy the side data.
++ * @param flags flags of type AV_FRAME_COPY_PROPS_*, controlling copy behavior.
++ *
++ * @return >= 0 on success, a negative AVERROR on error.
++ */
++int av_frame_copy_side_data(AVFrame* dst, const AVFrame* src, int flags);
++
+ /**
+  * Get the buffer reference a given data plane is stored in.
+  *
+@@ -953,6 +968,11 @@ AVFrameSideData *av_frame_get_side_data(
+  */
+ void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type);
+ 
++/**
++ * Remove and free all side data in this frame.
++ */
++void av_frame_remove_all_side_data(AVFrame *frame);
++
+ 
+ /**
+  * Flags for frame cropping.