From ef9c7338e5db098acd94e0e222844d690306e1fc Mon Sep 17 00:00:00 2001 From: Ivan Poleshchuk Date: Fri, 1 Apr 2022 21:46:27 +0200 Subject: [PATCH] Netint hardware support --- cmd/transcoding/transcoding.go | 7 ++- ffmpeg/decoder.c | 65 +++++++++++----------- ffmpeg/decoder.h | 2 + ffmpeg/encoder.c | 18 +++++-- ffmpeg/extras.c | 34 ++++++------ ffmpeg/ffmpeg.go | 99 ++++++++++++++++++++++++++++------ ffmpeg/nvidia_test.go | 3 +- ffmpeg/transcoder.c | 12 +++-- ffmpeg/transcoder.h | 5 ++ 9 files changed, 170 insertions(+), 75 deletions(-) mode change 100644 => 100755 ffmpeg/decoder.c mode change 100644 => 100755 ffmpeg/decoder.h mode change 100644 => 100755 ffmpeg/encoder.c mode change 100644 => 100755 ffmpeg/ffmpeg.go mode change 100644 => 100755 ffmpeg/transcoder.c mode change 100644 => 100755 ffmpeg/transcoder.h diff --git a/cmd/transcoding/transcoding.go b/cmd/transcoding/transcoding.go index a04fd627f8..92369dc5f0 100644 --- a/cmd/transcoding/transcoding.go +++ b/cmd/transcoding/transcoding.go @@ -26,12 +26,15 @@ func main() { var err error args := append([]string{os.Args[0]}, flag.Args()...) if len(args) <= 3 { - panic("Usage: [-hevc] [-from dur] [-to dur] ") + panic("Usage: [-hevc] [-from dur] [-to dur] ") } str2accel := func(inp string) (ffmpeg.Acceleration, string) { if inp == "nv" { return ffmpeg.Nvidia, "nv" } + if inp == "nt" { + return ffmpeg.Netint, "nt" + } return ffmpeg.Software, "sw" } str2profs := func(inp string) []ffmpeg.VideoProfile { @@ -72,7 +75,7 @@ func main() { options := profs2opts(profiles) var dev string - if accel == ffmpeg.Nvidia { + if accel != ffmpeg.Software { if len(args) <= 4 { panic("Expected device number") } diff --git a/ffmpeg/decoder.c b/ffmpeg/decoder.c old mode 100644 new mode 100755 index 10e9086553..be4c23f228 --- a/ffmpeg/decoder.c +++ b/ffmpeg/decoder.c @@ -27,6 +27,8 @@ static int send_first_pkt(struct input_ctx *ictx) if (ictx->flushed) return 0; if (!ictx->first_pkt) return lpms_ERR_INPUT_NOKF; + //LPMS_WARN("sending flush packet NOW !"); + int ret = avcodec_send_packet(ictx->vc, ictx->first_pkt); ictx->sentinel_count++; if (ret < 0) { @@ -85,28 +87,30 @@ int process_in(struct input_ctx *ictx, AVFrame *frame, AVPacket *pkt) // with video. If there's a nonzero response type, we know there are no more // video frames, so continue on to audio. - // Flush video decoder. - // To accommodate CUDA, we feed the decoder sentinel (flush) frames, till we + // Flush video decoder + // To accommodate CUDA, we feed the decoder sentinel (flush) frames, till we // get back all sent frames, or we've made SENTINEL_MAX attempts to retrieve // buffered frames with no success. // TODO this is unnecessary for SW decoding! SW process should match audio - if (ictx->vc && !ictx->flushed && ictx->pkt_diff > 0) { - ictx->flushing = 1; - ret = send_first_pkt(ictx); - if (ret < 0) { - ictx->flushed = 1; - return ret; - } - ret = lpms_receive_frame(ictx, ictx->vc, frame); - pkt->stream_index = ictx->vi; - // Keep flushing if we haven't received all frames back but stop after SENTINEL_MAX tries. 
- if (ictx->pkt_diff != 0 && ictx->sentinel_count <= SENTINEL_MAX && (!ret || ret == AVERROR(EAGAIN))) { - return 0; // ignore actual return value and keep flushing - } else { - ictx->flushed = 1; - if (!ret) return ret; - } - } + // last stable with Netint: + //if (ictx->hw_type != AV_HWDEVICE_TYPE_MEDIACODEC) { + if (ictx->vc && !ictx->flushed && ictx->pkt_diff > 0) { + ictx->flushing = 1; + ret = send_first_pkt(ictx); + if (ret < 0) { + ictx->flushed = 1; + return ret; + } + ret = lpms_receive_frame(ictx, ictx->vc, frame); + pkt->stream_index = ictx->vi; + // Keep flushing if we haven't received all frames back but stop after SENTINEL_MAX tries. + if (ictx->pkt_diff != 0 && ictx->sentinel_count <= SENTINEL_MAX && (!ret || ret == AVERROR(EAGAIN))) { + return 0; // ignore actual return value and keep flushing + } else { + ictx->flushed = 1; + } + } + //} // Flush audio decoder. if (ictx->ac) { avcodec_send_packet(ictx->ac, NULL); @@ -165,16 +169,6 @@ static enum AVPixelFormat get_hw_pixfmt(AVCodecContext *vc, const enum AVPixelFo ret = av_hwframe_ctx_init(vc->hw_frames_ctx); if (AVERROR(ENOSYS) == ret) ret = lpms_ERR_INPUT_PIXFMT; // most likely if (ret < 0) LPMS_ERR(pixfmt_cleanup, "Unable to initialize a hardware frame pool"); - -/* -fprintf(stderr, "selected format: hw %s sw %s\n", -av_get_pix_fmt_name(frames->format), av_get_pix_fmt_name(frames->sw_format)); -const enum AVPixelFormat *p; -for (p = pix_fmts; *p != -1; p++) { -fprintf(stderr,"possible format: %s\n", av_get_pix_fmt_name(*p)); -} -*/ - return frames->format; pixfmt_cleanup: @@ -231,6 +225,7 @@ int open_video_decoder(input_params *params, struct input_ctx *ctx) { int ret = 0; AVCodec *codec = NULL; + AVDictionary **opts = NULL; AVFormatContext *ic = ctx->ic; // open video decoder ctx->vi = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, &codec, 0); @@ -253,6 +248,11 @@ int open_video_decoder(input_params *params, struct input_ctx *ctx) ret = lpms_ERR_INPUT_PIXFMT; LPMS_ERR(open_decoder_err, "Non 4:2:0 pixel format detected in input"); } + } else if (params->video.name && strlen(params->video.name) != 0) { + // Try to find user specified decoder by name + AVCodec *c = avcodec_find_decoder_by_name(params->video.name); + if (c) codec = c; + if (params->video.opts) opts = ¶ms->video.opts; } AVCodecContext *vc = avcodec_alloc_context3(codec); if (!vc) LPMS_ERR(open_decoder_err, "Unable to alloc video codec"); @@ -261,16 +261,17 @@ int open_video_decoder(input_params *params, struct input_ctx *ctx) if (ret < 0) LPMS_ERR(open_decoder_err, "Unable to assign video params"); vc->opaque = (void*)ctx; // XXX Could this break if the original device falls out of scope in golang? 
- if (params->hw_type != AV_HWDEVICE_TYPE_NONE) { + if (params->hw_type == AV_HWDEVICE_TYPE_CUDA) { // First set the hw device then set the hw frame ret = av_hwdevice_ctx_create(&ctx->hw_device_ctx, params->hw_type, params->device, NULL, 0); if (ret < 0) LPMS_ERR(open_decoder_err, "Unable to open hardware context for decoding") - ctx->hw_type = params->hw_type; vc->hw_device_ctx = av_buffer_ref(ctx->hw_device_ctx); vc->get_format = get_hw_pixfmt; } + ctx->hw_type = params->hw_type; vc->pkt_timebase = ic->streams[ctx->vi]->time_base; - ret = avcodec_open2(vc, codec, NULL); + av_opt_set(vc->priv_data, "xcoder-params", ctx->xcoderParams, 0); + ret = avcodec_open2(vc, codec, opts); if (ret < 0) LPMS_ERR(open_decoder_err, "Unable to open video decoder"); } diff --git a/ffmpeg/decoder.h b/ffmpeg/decoder.h old mode 100644 new mode 100755 index 9505d9e3ff..44b2623382 --- a/ffmpeg/decoder.h +++ b/ffmpeg/decoder.h @@ -3,6 +3,7 @@ #include #include +#include #include "transcoder.h" struct input_ctx { @@ -16,6 +17,7 @@ struct input_ctx { AVBufferRef *hw_device_ctx; enum AVHWDeviceType hw_type; char *device; + char *xcoderParams; // Decoder flush AVPacket *first_pkt; diff --git a/ffmpeg/encoder.c b/ffmpeg/encoder.c old mode 100644 new mode 100755 index 9b09a959c8..3230d8a374 --- a/ffmpeg/encoder.c +++ b/ffmpeg/encoder.c @@ -155,7 +155,7 @@ void close_output(struct output_ctx *octx) avformat_free_context(octx->oc); octx->oc = NULL; } - if (octx->vc && AV_HWDEVICE_TYPE_NONE == octx->hw_type) avcodec_free_context(&octx->vc); + if (octx->vc && (octx->hw_type == AV_HWDEVICE_TYPE_NONE)) avcodec_free_context(&octx->vc); if (octx->ac) avcodec_free_context(&octx->ac); octx->af.flushed = octx->vf.flushed = 0; octx->af.flushing = octx->vf.flushing = 0; @@ -252,6 +252,11 @@ int open_output(struct output_ctx *octx, struct input_ctx *ictx) } vc->pix_fmt = av_buffersink_get_format(octx->vf.sink_ctx); // XXX select based on encoder + input support if (fmt->flags & AVFMT_GLOBALHEADER) vc->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; + if(strcmp(ictx->xcoderParams,"")!=0){ + av_opt_set(vc->priv_data, "xcoder-params", ictx->xcoderParams, 0); + //printf("xcoder-params %s\n", ictx->xcoderParams); + } + ret = avcodec_open2(vc, codec, &octx->video->opts); if (ret < 0) LPMS_ERR(open_output_err, "Error opening video encoder"); octx->hw_type = ictx->hw_type; @@ -345,14 +350,15 @@ static int encode(AVCodecContext* encoder, AVFrame *frame, struct output_ctx* oc // We don't want to send NULL frames for HW encoding // because that closes the encoder: not something we want - if (AV_HWDEVICE_TYPE_NONE == octx->hw_type || AVMEDIA_TYPE_AUDIO == ost->codecpar->codec_type || frame) { + if (AV_HWDEVICE_TYPE_NONE == octx->hw_type || AV_HWDEVICE_TYPE_MEDIACODEC == octx->hw_type || + AVMEDIA_TYPE_AUDIO == ost->codecpar->codec_type || frame) { ret = avcodec_send_frame(encoder, frame); if (AVERROR_EOF == ret) ; // continue ; drain encoder else if (ret < 0) LPMS_ERR(encode_cleanup, "Error sending frame to encoder"); } if (AVMEDIA_TYPE_VIDEO == ost->codecpar->codec_type && - AV_HWDEVICE_TYPE_CUDA == octx->hw_type && !frame) { + (AV_HWDEVICE_TYPE_CUDA == octx->hw_type) && !frame) { avcodec_flush_buffers(encoder); } @@ -527,6 +533,7 @@ int process_out(struct input_ctx *ictx, struct output_ctx *octx, AVCodecContext frame->pict_type = AV_PICTURE_TYPE_I; octx->next_kf_pts = frame->pts + octx->gop_pts_len; } + if(octx->is_dnn_profile) { ret = getmetadatainf(frame, octx); } else { @@ -540,8 +547,9 @@ int process_out(struct input_ctx *ictx, struct output_ctx 
*octx, AVCodecContext av_frame_unref(frame); // For HW we keep the encoder open so will only get EAGAIN. // Return EOF in place of EAGAIN for to terminate the flush - if (frame == NULL && AV_HWDEVICE_TYPE_NONE != octx->hw_type && - AVERROR(EAGAIN) == ret && !inf) return AVERROR_EOF; + if (frame == NULL && octx->hw_type > AV_HWDEVICE_TYPE_NONE && + AV_HWDEVICE_TYPE_MEDIACODEC != octx->hw_type && + AVERROR(EAGAIN) == ret && !inf) return AVERROR_EOF; if (frame == NULL) return ret; } diff --git a/ffmpeg/extras.c b/ffmpeg/extras.c index bd4a05f12f..ff221b22fa 100644 --- a/ffmpeg/extras.c +++ b/ffmpeg/extras.c @@ -11,7 +11,7 @@ struct match_info { int height; uint64_t bit_rate; int packetcount; //video total packet count - uint64_t timestamp; //XOR sum of avpacket pts + uint64_t timestamp; //XOR sum of avpacket pts int audiosum[4]; //XOR sum of audio data's md5(16 bytes) }; @@ -183,10 +183,10 @@ int lpms_get_codec_info(char *fname, pcodec_info out) return ret; } -// compare two signature files whether those matches or not. -// @param signpath1 full path of the first signature file. -// @param signpath2 full path of the second signature file. -// @return <0: error 0: no matchiing 1: partial matching 2: whole matching. +//// compare two signature files whether those matches or not. +//// @param signpath1 full path of the first signature file. +//// @param signpath2 full path of the second signature file. +//// @return <0: error 0: no matchiing 1: partial matching 2: whole matching. int lpms_compare_sign_bypath(char *signpath1, char *signpath2) { @@ -268,7 +268,7 @@ static int read_packet(void *opaque, uint8_t *buf, int buf_size) return buf_size; } -static int get_matchinfo(void *buffer, int len, struct match_info* info) +static int get_matchinfo(void *buffer, int len, struct match_info* info) { int ret = 0; AVFormatContext* ifmt_ctx = NULL; @@ -292,7 +292,7 @@ static int get_matchinfo(void *buffer, int len, struct match_info* info) LPMS_ERR(clean, "Error allocating buffer"); } - avio_in = avio_alloc_context(avio_ctx_buffer, avio_ctx_buffer_size, 0, &bd, &read_packet, NULL, NULL); + avio_in = avio_alloc_context(avio_ctx_buffer, avio_ctx_buffer_size, 0, &bd, &read_packet, NULL, NULL); if (!avio_ctx_buffer) { ret = AVERROR(ENOMEM); LPMS_ERR(clean, "Error allocating context"); @@ -304,15 +304,15 @@ static int get_matchinfo(void *buffer, int len, struct match_info* info) } ifmt_ctx->pb = avio_in; ifmt_ctx->flags = AVFMT_FLAG_CUSTOM_IO; - + if ((ret = avformat_open_input(&ifmt_ctx, "", NULL, NULL)) < 0) { LPMS_ERR(clean, "Cannot open input video file\n"); } - + if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) { LPMS_ERR(clean, "Cannot find stream information\n"); } - + for (int i = 0; i < ifmt_ctx->nb_streams; i++) { AVStream *stream; stream = ifmt_ctx->streams[i]; @@ -323,10 +323,10 @@ static int get_matchinfo(void *buffer, int len, struct match_info* info) info->bit_rate = in_codecpar->bit_rate; } else if (in_codecpar->codec_type == AVMEDIA_TYPE_AUDIO) { - audioid = i; + audioid = i; } } - packet = av_packet_alloc(); + packet = av_packet_alloc(); if (!packet) LPMS_ERR(clean, "Error allocating packet"); while (1) { ret = av_read_frame(ifmt_ctx, packet); @@ -334,7 +334,7 @@ static int get_matchinfo(void *buffer, int len, struct match_info* info) ret = 0; break; } - else if (ret < 0) { + else if (ret < 0) { LPMS_ERR(clean, "Unable to read input"); } info->packetcount++; @@ -345,8 +345,8 @@ static int get_matchinfo(void *buffer, int len, struct match_info* info) } av_packet_unref(packet); } - 
-clean: + +clean: if(packet) av_packet_free(&packet); /* note: the internal buffer could have changed, and be != avio_ctx_buffer */ @@ -365,7 +365,7 @@ static int get_matchinfo(void *buffer, int len, struct match_info* info) // @return <0: error =0: matching 1: no matching int lpms_compare_video_bybuffer(void *buffer1, int len1, void *buffer2, int len2) { - int ret = 0; + int ret = 0; struct match_info info1, info2; ret = get_matchinfo(buffer1,len1,&info1); @@ -374,7 +374,7 @@ int lpms_compare_video_bybuffer(void *buffer1, int len1, void *buffer2, int len2 ret = get_matchinfo(buffer2,len2,&info2); if(ret < 0) return ret; //compare two matching information - if (info1.width != info2.width || info1.height != info2.height || + if (info1.width != info2.width || info1.height != info2.height || info1.bit_rate != info2.bit_rate || info1.packetcount != info2.packetcount || info1.timestamp != info2.timestamp || memcmp(info1.audiosum, info2.audiosum, 16)) { ret = 1; diff --git a/ffmpeg/ffmpeg.go b/ffmpeg/ffmpeg.go old mode 100644 new mode 100755 index 720704c84b..bc0e8f1fc7 --- a/ffmpeg/ffmpeg.go +++ b/ffmpeg/ffmpeg.go @@ -48,8 +48,16 @@ const ( Software Acceleration = iota Nvidia Amd + Netint ) +var AccelerationNameLookup = map[Acceleration]string { + Software: "SW", + Nvidia: "Nvidia", + Amd: "Amd", + Netint: "Netint", +} + var FfEncoderLookup = map[Acceleration]map[VideoCodec]string{ Software: { H264: "libx264", @@ -61,6 +69,10 @@ var FfEncoderLookup = map[Acceleration]map[VideoCodec]string{ H264: "h264_nvenc", H265: "hevc_nvenc", }, + Netint: { + H264: "h264_ni_enc", + H265: "h265_ni_enc", + }, } type ComponentOptions struct { @@ -76,21 +88,23 @@ type Transcoder struct { } type TranscodeOptionsIn struct { - Fname string - Accel Acceleration - Device string - Transmuxing bool + Fname string + Accel Acceleration + Device string + XcoderParams string + Transmuxing bool } type TranscodeOptions struct { - Oname string - Profile VideoProfile - Detector DetectorProfile - Accel Acceleration - Device string - CalcSign bool - From time.Duration - To time.Duration + Oname string + Profile VideoProfile + Detector DetectorProfile + Accel Acceleration + Device string + CalcSign bool + From time.Duration + To time.Duration + XcoderParams string Muxer ComponentOptions VideoEncoder ComponentOptions @@ -440,6 +454,14 @@ func configEncoder(inOpts *TranscodeOptionsIn, outOpts TranscodeOptions, inDev, } return encoder, "scale_cuda", nil } + case Netint: + switch outOpts.Accel { + case Software, Nvidia: + return "", "", ErrTranscoderDev // XXX don't allow mix-match between NETINT and sw/nv + case Netint: + // Use software scale filter + return encoder, "scale", nil + } } return "", "", ErrTranscoderHw } @@ -449,11 +471,25 @@ func accelDeviceType(accel Acceleration) (C.enum_AVHWDeviceType, error) { return C.AV_HWDEVICE_TYPE_NONE, nil case Nvidia: return C.AV_HWDEVICE_TYPE_CUDA, nil - + case Netint: + return C.AV_HWDEVICE_TYPE_MEDIACODEC, nil } return C.AV_HWDEVICE_TYPE_NONE, ErrTranscoderHw } +func decoderOpts(accel Acceleration) (string, map[string]string, error) { + switch accel { + case Netint: + opts := map[string]string{} + // TODO add any netint decoder opts NI decoder options + // opts["ffmpegflagkey"] = "flagvalue" + return "h264_ni_dec", opts, nil // TODO replace with actual netint decoder name + //return "", opts, nil // TODO replace with actual netint decoder name + default: + return "", map[string]string{}, ErrTranscoderInp + } +} + func Transcode2(input *TranscodeOptionsIn, ps []TranscodeOptions) error { 
_, err := Transcode3(input, ps) return err @@ -478,6 +514,7 @@ func (t *Transcoder) Transcode(input *TranscodeOptionsIn, ps []TranscodeOptions) if err != nil { return nil, err } + fmt.Printf("fname %s\n", input.Fname) for _, p := range ps { if p.From != 0 || p.To != 0 { if p.VideoEncoder.Name == "drop" || p.VideoEncoder.Name == "copy" { @@ -491,7 +528,10 @@ func (t *Transcoder) Transcode(input *TranscodeOptionsIn, ps []TranscodeOptions) } } fname := C.CString(input.Fname) + + xcoderParams := C.CString(input.XcoderParams) defer C.free(unsafe.Pointer(fname)) + defer C.free(unsafe.Pointer(xcoderParams)) if input.Transmuxing { t.started = true } @@ -516,7 +556,9 @@ func (t *Transcoder) Transcode(input *TranscodeOptionsIn, ps []TranscodeOptions) p.Muxer = ComponentOptions{Name: "mpegts"} } oname := C.CString(p.Oname) + xcoderParams := C.CString(p.XcoderParams) defer C.free(unsafe.Pointer(oname)) + defer C.free(unsafe.Pointer(xcoderParams)) param := p.Profile w, h, err := VideoProfileResolution(param) @@ -542,7 +584,7 @@ func (t *Transcoder) Transcode(input *TranscodeOptionsIn, ps []TranscodeOptions) // preserve aspect ratio along the larger dimension when rescaling var filters string filters = fmt.Sprintf("%s='w=if(gte(iw,ih),%d,-2):h=if(lt(iw,ih),%d,-2)'", scale_filter, w, h) - if input.Accel != Software && p.Accel == Software { + if input.Accel == Nvidia && p.Accel == Software { // needed for hw dec -> hw rescale -> sw enc filters = filters + ",hwdownload,format=nv12" } @@ -600,6 +642,10 @@ func (t *Transcoder) Transcode(input *TranscodeOptionsIn, ps []TranscodeOptions) defer C.free(unsafe.Pointer(muxOpts.name)) } // Set video encoder options + // TODO understand how h264 profiles and GOP setting works for + // NETINT encoder, and make sure we change relevant things here + // Any other options for the encoder can also be added here + if len(p.VideoEncoder.Name) <= 0 && len(p.VideoEncoder.Opts) <= 0 { p.VideoEncoder.Opts = map[string]string{ "forced-idr": "1", @@ -707,8 +753,25 @@ func (t *Transcoder) Transcode(input *TranscodeOptionsIn, ps []TranscodeOptions) device = C.CString(input.Device) defer C.free(unsafe.Pointer(device)) } - inp := &C.input_params{fname: fname, hw_type: hw_type, device: device, + inp := &C.input_params{fname: fname, hw_type: hw_type, device: device, xcoderParams: xcoderParams, handle: t.handle} + if input.Accel == Netint { + // Set decoder and AVOpts for NETINT + decoder, opts, err := decoderOpts(input.Accel) + if err != nil { + return nil, err + } + inp.video = C.component_opts{ + name: C.CString(decoder), + opts: newAVOpts(opts), + } + defer C.free(unsafe.Pointer(inp.video.name)) + defer func(param *C.input_params) { + if param.video.opts != nil { + C.av_dict_free(¶m.video.opts) + } + }(inp) + } if input.Transmuxing { inp.transmuxe = 1 } @@ -798,6 +861,12 @@ func InitFFmpegWithLogLevel(level LogLevel) { C.lpms_init(C.enum_LPMSLogLevel(level)) } +func InitFFmpegWithXcoderParams(param string) { + fmt.Println("InitFFmpegWithXcoderParams: ", param) + ts_param := C.CString(param) + C.lpms_init_xcoder_params(ts_param) +} + func InitFFmpeg() { InitFFmpegWithLogLevel(FFLogWarning) } diff --git a/ffmpeg/nvidia_test.go b/ffmpeg/nvidia_test.go index c3f77b8c28..2ada114004 100644 --- a/ffmpeg/nvidia_test.go +++ b/ffmpeg/nvidia_test.go @@ -8,7 +8,8 @@ import ( "testing" ) -func TestNvidia_Transcoding(t *testing.T) { +func +TestNvidia_Transcoding(t *testing.T) { codecsComboTest(t, supportedCodecsCombinations([]Acceleration{Nvidia})) } diff --git a/ffmpeg/transcoder.c 
b/ffmpeg/transcoder.c old mode 100644 new mode 100755 index 012d2be07a..af0536d6ef --- a/ffmpeg/transcoder.c +++ b/ffmpeg/transcoder.c @@ -86,6 +86,11 @@ void lpms_init(enum LPMSLogLevel max_level) av_log_set_level(max_level); } +void lpms_init_xcoder_params(char *ts_params) +{ + +} + // // Transcoder // @@ -120,6 +125,7 @@ int transcode(struct transcode_thread *h, { int ret = 0, i = 0; struct input_ctx *ictx = &h->ictx; + ictx->xcoderParams = inp->xcoderParams; int reopen_decoders = !ictx->transmuxing; struct output_ctx *outputs = h->outputs; int nb_outputs = h->nb_outputs; @@ -161,7 +167,7 @@ int transcode(struct transcode_thread *h, if (reopen_decoders) { // XXX check to see if we can also reuse decoder for sw decoding - if (AV_HWDEVICE_TYPE_CUDA != ictx->hw_type) { + if (ictx->hw_type == AV_HWDEVICE_TYPE_NONE) { ret = open_video_decoder(inp, ictx); if (ret < 0) LPMS_ERR(transcode_cleanup, "Unable to reopen video decoder"); } @@ -390,7 +396,7 @@ int transcode(struct transcode_thread *h, } else if(outputs[i].is_dnn_profile && outputs[i].res->frames > 0) { for (int j = 0; j < MAX_CLASSIFY_SIZE; j++) { - outputs[i].res->probs[j] = outputs[i].res->probs[j] / outputs[i].res->frames; + outputs[i].res->probs[j] = outputs[i].res->probs[j] / outputs[i].res->frames; } } } @@ -416,7 +422,7 @@ int transcode(struct transcode_thread *h, if (ipkt) av_packet_free(&ipkt); // needed for early exits if (ictx->first_pkt) av_packet_free(&ictx->first_pkt); if (ictx->ac) avcodec_free_context(&ictx->ac); - if (ictx->vc && AV_HWDEVICE_TYPE_NONE == ictx->hw_type) avcodec_free_context(&ictx->vc); + if (ictx->vc && (AV_HWDEVICE_TYPE_NONE == ictx->hw_type)) avcodec_free_context(&ictx->vc); for (i = 0; i < nb_outputs; i++) { //send EOF signal to signature filter if(outputs[i].sfilters != NULL && outputs[i].sf.src_ctx != NULL) { diff --git a/ffmpeg/transcoder.h b/ffmpeg/transcoder.h old mode 100644 new mode 100755 index f54385feaf..0668be0e83 --- a/ffmpeg/transcoder.h +++ b/ffmpeg/transcoder.h @@ -51,6 +51,10 @@ typedef struct { // Optional hardware acceleration enum AVHWDeviceType hw_type; char *device; + char *xcoderParams; + + // Optional video decoder + opts + component_opts video; int transmuxe; } input_params; @@ -87,6 +91,7 @@ enum LPMSLogLevel { }; void lpms_init(enum LPMSLogLevel max_level); +void lpms_init_xcoder_params(char *ts_params); int lpms_transcode(input_params *inp, output_params *params, output_results *results, int nb_outputs, output_results *decoded_results); struct transcode_thread* lpms_transcode_new(); struct transcode_thread* lpms_transcode_new_with_dnn(lvpdnn_opts *dnn_opts);
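
---

For context outside the diff, here is a minimal sketch of how a caller might exercise the new Netint path through the Go API introduced above. Only `ffmpeg.Netint`, the `XcoderParams` fields, and `Transcode3` come from this patch; the import path, input/output file names, device index, and the use of the pre-existing `P720p30fps16x9` profile preset are assumptions for illustration.

```go
// netint_example.go — a hedged usage sketch, built against the lpms ffmpeg package.
package main

import (
	"fmt"

	"github.com/livepeer/lpms/ffmpeg" // assumed import path for this repo
)

func main() {
	ffmpeg.InitFFmpeg()

	// Decode on the NETINT device. File name and device index are placeholders.
	in := &ffmpeg.TranscodeOptionsIn{
		Fname:        "in.ts",
		Accel:        ffmpeg.Netint,
		Device:       "0",
		XcoderParams: "", // forwarded verbatim to the decoder/encoder "xcoder-params" option
	}

	// configEncoder rejects mixing Netint with SW/Nvidia outputs,
	// so every rendition here also uses ffmpeg.Netint.
	out := []ffmpeg.TranscodeOptions{{
		Oname:   "out_720p.ts",
		Profile: ffmpeg.P720p30fps16x9, // assumed existing VideoProfile preset
		Accel:   ffmpeg.Netint,
	}}

	if _, err := ffmpeg.Transcode3(in, out); err != nil {
		panic(err)
	}
	fmt.Println("transcode finished")
}
```

With the `cmd/transcoding` change above, the same path should be reachable from the CLI by passing `nt` as the acceleration argument followed by a device number, mirroring the existing `nv` handling (exact argument order follows the tool's usage string).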