diff --git a/android/ffmpeg-kit-android-lib/Doxyfile b/android/ffmpeg-kit-android-lib/Doxyfile index ef4d4f1..05074d6 100644 --- a/android/ffmpeg-kit-android-lib/Doxyfile +++ b/android/ffmpeg-kit-android-lib/Doxyfile @@ -38,7 +38,7 @@ PROJECT_NAME = "FFmpegKit Android API" # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = 6.0 +PROJECT_NUMBER = 6.1.2 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/android/ffmpeg-kit-android-lib/src/main/cpp/android_support.c b/android/ffmpeg-kit-android-lib/src/main/cpp/android_support.c index f0d5c14..72be0c3 100644 --- a/android/ffmpeg-kit-android-lib/src/main/cpp/android_support.c +++ b/android/ffmpeg-kit-android-lib/src/main/cpp/android_support.c @@ -26,52 +26,50 @@ * SUCH DAMAGE. */ -#include #include #include +#include #include #ifdef __cplusplus extern "C" { #endif -// posix_memalign() and memalign() are not available in the NDK __INTRODUCED_IN(17) +// posix_memalign() and memalign() are not available in the NDK +// __INTRODUCED_IN(17) #if __ANDROID_API__ < 17 -int posix_memalign(void** memptr, size_t alignment, size_t size) { - if ((alignment & (alignment - 1)) != 0 || alignment == 0) { - return EINVAL; - } +int posix_memalign(void **memptr, size_t alignment, size_t size) { + if ((alignment & (alignment - 1)) != 0 || alignment == 0) { + return EINVAL; + } - if (alignment % sizeof(void*) != 0) { - return EINVAL; - } + if (alignment % sizeof(void *) != 0) { + return EINVAL; + } - *memptr = memalign(alignment, size); - if (*memptr == NULL) { - return errno; - } + *memptr = memalign(alignment, size); + if (*memptr == NULL) { + return errno; + } - return 0; + return 0; } #endif /* __ANDROID_API__ < 17 */ -// log2() and log2f() are C99 functions, but they're not available in the NDK __INTRODUCED_IN(18) +// log2() and log2f() are C99 functions, but they're not available in the NDK +// __INTRODUCED_IN(18) #if __ANDROID_API__ < 18 -double log2(double x) { - return (log(x) / M_LN2); -} +double log2(double x) { return (log(x) / M_LN2); } -float log2f(float x) { - return (float) log2((double) x); -} +float log2f(float x) { return (float)log2((double)x); } #endif /* __ANDROID_API__ < 18 */ #ifdef __cplusplus -}; /* end of extern "C" */ +}; /* end of extern "C" */ #endif \ No newline at end of file diff --git a/android/ffmpeg-kit-android-lib/src/main/cpp/ffmpeg_context.c b/android/ffmpeg-kit-android-lib/src/main/cpp/ffmpeg_context.c new file mode 100644 index 0000000..655cbc5 --- /dev/null +++ b/android/ffmpeg-kit-android-lib/src/main/cpp/ffmpeg_context.c @@ -0,0 +1,182 @@ +#include "ffmpeg_context.h" + +FFmpegContext *saveFFmpegContext() { + FFmpegContext *context = (FFmpegContext *)av_mallocz(sizeof(FFmpegContext)); + + // cmdutils.c + context->sws_dict = sws_dict; + context->swr_opts = swr_opts; + context->format_opts = format_opts; + context->codec_opts = codec_opts; + context->hide_banner = hide_banner; +#if HAVE_COMMANDLINETOARGVW && defined(_WIN32) + /* Will be leaked on exit */ + context->win32_argv_utf8 = win32_argv_utf8; + context->win32_argc = win32_argc; +#endif + + // ffmpeg.c + context->vstats_file = vstats_file; + context->nb_output_dumped = nb_output_dumped; + context->current_time = current_time; + context->progress_avio = progress_avio; + context->input_files = input_files; + context->nb_input_files = nb_input_files; + context->output_files = 
output_files; + context->nb_output_files = nb_output_files; + context->filtergraphs = filtergraphs; + context->nb_filtergraphs = nb_filtergraphs; +#if HAVE_TERMIOS_H + /* init terminal so that we can grab keys */ + context->oldtty = oldtty; + context->restore_tty = restore_tty; +#endif + context->received_sigterm = received_sigterm; + context->received_nb_signals = received_nb_signals; + context->transcode_init_done = transcode_init_done; + context->ffmpeg_exited = ffmpeg_exited; + context->copy_ts_first_pts = copy_ts_first_pts; + + // ffmpeg_hw.c + context->nb_hw_devices = nb_hw_devices; + context->hw_devices = hw_devices; + + // ffmpeg_mux.c + context->want_sdp = want_sdp; + + // ffmpeg_mux_init.c + context->enc_stats_files = enc_stats_files; + context->nb_enc_stats_files = nb_enc_stats_files; + + // ffmpeg_opt.c + context->filter_hw_device = filter_hw_device; + context->vstats_filename = vstats_filename; + context->sdp_filename = sdp_filename; + context->audio_drift_threshold = audio_drift_threshold; + context->dts_delta_threshold = dts_delta_threshold; + context->dts_error_threshold = dts_error_threshold; + context->video_sync_method = video_sync_method; + context->frame_drop_threshold = frame_drop_threshold; + context->do_benchmark = do_benchmark; + context->do_benchmark_all = do_benchmark_all; + context->do_hex_dump = do_hex_dump; + context->do_pkt_dump = do_pkt_dump; + context->copy_ts = copy_ts; + context->start_at_zero = start_at_zero; + context->copy_tb = copy_tb; + context->debug_ts = debug_ts; + context->exit_on_error = exit_on_error; + context->abort_on_flags = abort_on_flags; + context->print_stats = print_stats; + context->stdin_interaction = stdin_interaction; + context->max_error_rate = max_error_rate; + context->filter_nbthreads = filter_nbthreads; + context->filter_complex_nbthreads = filter_complex_nbthreads; + context->vstats_version = vstats_version; + context->auto_conversion_filters = auto_conversion_filters; + context->stats_period = stats_period; + context->file_overwrite = file_overwrite; + context->no_file_overwrite = no_file_overwrite; +#if FFMPEG_OPT_PSNR + context->do_psnr = do_psnr; +#endif + context->ignore_unknown_streams = ignore_unknown_streams; + context->copy_unknown_streams = copy_unknown_streams; + context->recast_media = recast_media; + + // opt_common.c + context->report_file = report_file; + context->report_file_level = report_file_level; + context->warned_cfg = warned_cfg; + + return context; +} + +void loadFFmpegContext(FFmpegContext *context) { + + // cmdutils.c + sws_dict = context->sws_dict; + swr_opts = context->swr_opts; + format_opts = context->format_opts; + codec_opts = context->codec_opts; + hide_banner = context->hide_banner; +#if HAVE_COMMANDLINETOARGVW && defined(_WIN32) + /* Will be leaked on exit */ + win32_argv_utf8 = context->win32_argv_utf8; + win32_argc = context->win32_argc; +#endif + + // ffmpeg.c + vstats_file = context->vstats_file; + nb_output_dumped = context->nb_output_dumped; + current_time = context->current_time; + progress_avio = context->progress_avio; + input_files = context->input_files; + nb_input_files = context->nb_input_files; + output_files = context->output_files; + nb_output_files = context->nb_output_files; + filtergraphs = context->filtergraphs; + nb_filtergraphs = context->nb_filtergraphs; +#if HAVE_TERMIOS_H + /* init terminal so that we can grab keys */ + oldtty = context->oldtty; + restore_tty = context->restore_tty; +#endif + received_sigterm = context->received_sigterm; + received_nb_signals = 
context->received_nb_signals; + transcode_init_done = context->transcode_init_done; + ffmpeg_exited = context->ffmpeg_exited; + copy_ts_first_pts = context->copy_ts_first_pts; + + // ffmpeg_hw.c + nb_hw_devices = context->nb_hw_devices; + hw_devices = context->hw_devices; + + // ffmpeg_mux.c + want_sdp = context->want_sdp; + + // ffmpeg_mux_init.c + enc_stats_files = context->enc_stats_files; + nb_enc_stats_files = context->nb_enc_stats_files; + + // ffmpeg_opt.c + filter_hw_device = context->filter_hw_device; + vstats_filename = context->vstats_filename; + sdp_filename = context->sdp_filename; + audio_drift_threshold = context->audio_drift_threshold; + dts_delta_threshold = context->dts_delta_threshold; + dts_error_threshold = context->dts_error_threshold; + video_sync_method = context->video_sync_method; + frame_drop_threshold = context->frame_drop_threshold; + do_benchmark = context->do_benchmark; + do_benchmark_all = context->do_benchmark_all; + do_hex_dump = context->do_hex_dump; + do_pkt_dump = context->do_pkt_dump; + copy_ts = context->copy_ts; + start_at_zero = context->start_at_zero; + copy_tb = context->copy_tb; + debug_ts = context->debug_ts; + exit_on_error = context->exit_on_error; + abort_on_flags = context->abort_on_flags; + print_stats = context->print_stats; + stdin_interaction = context->stdin_interaction; + max_error_rate = context->max_error_rate; + filter_nbthreads = context->filter_nbthreads; + filter_complex_nbthreads = context->filter_complex_nbthreads; + vstats_version = context->vstats_version; + auto_conversion_filters = context->auto_conversion_filters; + stats_period = context->stats_period; + file_overwrite = context->file_overwrite; + no_file_overwrite = context->no_file_overwrite; +#if FFMPEG_OPT_PSNR + do_psnr = context->do_psnr; +#endif + ignore_unknown_streams = context->ignore_unknown_streams; + copy_unknown_streams = context->copy_unknown_streams; + recast_media = context->recast_media; + + // opt_common.c + report_file = context->report_file; + report_file_level = context->report_file_level; + warned_cfg = context->warned_cfg; +} diff --git a/android/ffmpeg-kit-android-lib/src/main/cpp/ffmpeg_context.h b/android/ffmpeg-kit-android-lib/src/main/cpp/ffmpeg_context.h new file mode 100644 index 0000000..8ec27ce --- /dev/null +++ b/android/ffmpeg-kit-android-lib/src/main/cpp/ffmpeg_context.h @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2024 ARTHENICA LTD + * + * This file is part of FFmpegKit. + * + * FFmpegKit is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * FFmpegKit is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General License for more details. + * + * You should have received a copy of the GNU Lesser General License + * along with FFmpegKit. If not, see . 
+ */ + +#ifndef FFMPEG_CONTEXT_H +#define FFMPEG_CONTEXT_H + +#if HAVE_TERMIOS_H +#include +#endif + +#include "fftools_ffmpeg.h" +#include "libavformat/avio.h" +#include "libavutil/dict.h" + +extern __thread BenchmarkTimeStamps current_time; +extern __thread struct termios oldtty; +extern __thread int restore_tty; +extern __thread volatile int received_sigterm; +extern __thread volatile int received_nb_signals; +extern __thread atomic_int transcode_init_done; +extern __thread volatile int ffmpeg_exited; +extern __thread int64_t copy_ts_first_pts; +extern __thread int nb_hw_devices; +extern __thread HWDevice **hw_devices; +extern __thread int want_sdp; +extern __thread struct EncStatsFile *enc_stats_files; +extern __thread int nb_enc_stats_files; +extern __thread float audio_drift_threshold; +extern __thread int file_overwrite; +extern __thread int no_file_overwrite; +extern __thread FILE *report_file; +extern __thread int report_file_level; +extern __thread int warned_cfg; + +typedef struct FFmpegContext { + + // cmdutils.c + AVDictionary *sws_dict; + AVDictionary *swr_opts; + AVDictionary *format_opts, *codec_opts; + int hide_banner; +#if HAVE_COMMANDLINETOARGVW && defined(_WIN32) + /* Will be leaked on exit */ + char **win32_argv_utf8; + int win32_argc; +#endif + + // ffmpeg.c + FILE *vstats_file; + unsigned nb_output_dumped; + BenchmarkTimeStamps current_time; + AVIOContext *progress_avio; + InputFile **input_files; + int nb_input_files; + OutputFile **output_files; + int nb_output_files; + FilterGraph **filtergraphs; + int nb_filtergraphs; +#if HAVE_TERMIOS_H + /* init terminal so that we can grab keys */ + struct termios oldtty; + int restore_tty; +#endif + volatile int received_sigterm; + volatile int received_nb_signals; + atomic_int transcode_init_done; + volatile int ffmpeg_exited; + int64_t copy_ts_first_pts; + + // ffmpeg_hw.c + int nb_hw_devices; + HWDevice **hw_devices; + + // ffmpeg_mux.c + int want_sdp; + + // ffmpeg_mux_init.c + EncStatsFile *enc_stats_files; + int nb_enc_stats_files; + + // ffmpeg_opt.c + HWDevice *filter_hw_device; + char *vstats_filename; + char *sdp_filename; + float audio_drift_threshold; + float dts_delta_threshold; + float dts_error_threshold; + enum VideoSyncMethod video_sync_method; + float frame_drop_threshold; + int do_benchmark; + int do_benchmark_all; + int do_hex_dump; + int do_pkt_dump; + int copy_ts; + int start_at_zero; + int copy_tb; + int debug_ts; + int exit_on_error; + int abort_on_flags; + int print_stats; + int stdin_interaction; + float max_error_rate; + char *filter_nbthreads; + int filter_complex_nbthreads; + int vstats_version; + int auto_conversion_filters; + int64_t stats_period; + int file_overwrite; + int no_file_overwrite; +#if FFMPEG_OPT_PSNR + int do_psnr; +#endif + int ignore_unknown_streams; + int copy_unknown_streams; + int recast_media; + + // opt_common.c + FILE *report_file; + int report_file_level; + int warned_cfg; + + void *arg; + +} FFmpegContext; + +FFmpegContext *saveFFmpegContext(); +void loadFFmpegContext(FFmpegContext *context); + +#endif // FFMPEG_CONTEXT_H \ No newline at end of file diff --git a/android/ffmpeg-kit-android-lib/src/main/cpp/ffmpegkit.c b/android/ffmpeg-kit-android-lib/src/main/cpp/ffmpegkit.c index 0d7da37..acd84ae 100644 --- a/android/ffmpeg-kit-android-lib/src/main/cpp/ffmpegkit.c +++ b/android/ffmpeg-kit-android-lib/src/main/cpp/ffmpegkit.c @@ -20,37 +20,37 @@ #include #include -#include #include +#include #include "config.h" +#include "ffmpegkit.h" +#include "ffprobekit.h" +#include 
"fftools_ffmpeg.h" #include "libavcodec/jni.h" #include "libavutil/bprint.h" #include "libavutil/file.h" -#include "fftools_ffmpeg.h" -#include "ffmpegkit.h" -#include "ffprobekit.h" -# define LogType 1 -# define StatisticsType 2 +#define LogType 1 +#define StatisticsType 2 /** Callback data structure */ struct CallbackData { - int type; // 1 (log callback) or 2 (statistics callback) - long sessionId; // session identifier + int type; // 1 (log callback) or 2 (statistics callback) + long sessionId; // session identifier - int logLevel; // log level - AVBPrint logData; // log data + int logLevel; // log level + AVBPrint logData; // log data - int statisticsFrameNumber; // statistics frame number - float statisticsFps; // statistics fps - float statisticsQuality; // statistics quality - int64_t statisticsSize; // statistics size - double statisticsTime; // statistics time - double statisticsBitrate; // statistics bitrate - double statisticsSpeed; // statistics speed + int statisticsFrameNumber; // statistics frame number + float statisticsFps; // statistics fps + float statisticsQuality; // statistics quality + int64_t statisticsSize; // statistics size + double statisticsTime; // statistics time + double statisticsBitrate; // statistics bitrate + double statisticsSpeed; // statistics speed - struct CallbackData *next; + struct CallbackData *next; }; /** Session control variables */ @@ -114,22 +114,41 @@ int configuredLogLevel = AV_LOG_INFO; /** Prototypes of native functions defined by Config class. */ JNINativeMethod configMethods[] = { - {"enableNativeRedirection", "()V", (void*) Java_com_arthenica_ffmpegkit_FFmpegKitConfig_enableNativeRedirection}, - {"disableNativeRedirection", "()V", (void*) Java_com_arthenica_ffmpegkit_FFmpegKitConfig_disableNativeRedirection}, - {"setNativeLogLevel", "(I)V", (void*) Java_com_arthenica_ffmpegkit_FFmpegKitConfig_setNativeLogLevel}, - {"getNativeLogLevel", "()I", (void*) Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativeLogLevel}, - {"getNativeFFmpegVersion", "()Ljava/lang/String;", (void*) Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativeFFmpegVersion}, - {"getNativeVersion", "()Ljava/lang/String;", (void*) Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativeVersion}, - {"getNativePackageName", "()Ljava/lang/String;", (void*) Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativePackageName}, - {"nativeFFmpegExecute", "(J[Ljava/lang/String;)I", (void*) Java_com_arthenica_ffmpegkit_FFmpegKitConfig_nativeFFmpegExecute}, - {"nativeFFmpegCancel", "(J)V", (void*) Java_com_arthenica_ffmpegkit_FFmpegKitConfig_nativeFFmpegCancel}, - {"nativeFFprobeExecute", "(J[Ljava/lang/String;)I", (void*) Java_com_arthenica_ffmpegkit_FFmpegKitConfig_nativeFFprobeExecute}, - {"registerNewNativeFFmpegPipe", "(Ljava/lang/String;)I", (void*) Java_com_arthenica_ffmpegkit_FFmpegKitConfig_registerNewNativeFFmpegPipe}, - {"getNativeBuildDate", "()Ljava/lang/String;", (void*) Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativeBuildDate}, - {"setNativeEnvironmentVariable", "(Ljava/lang/String;Ljava/lang/String;)I", (void*) Java_com_arthenica_ffmpegkit_FFmpegKitConfig_setNativeEnvironmentVariable}, - {"ignoreNativeSignal", "(I)V", (void*) Java_com_arthenica_ffmpegkit_FFmpegKitConfig_ignoreNativeSignal}, - {"messagesInTransmit", "(J)I", (void*) Java_com_arthenica_ffmpegkit_FFmpegKitConfig_messagesInTransmit} -}; + {"enableNativeRedirection", "()V", + (void *) + Java_com_arthenica_ffmpegkit_FFmpegKitConfig_enableNativeRedirection}, + {"disableNativeRedirection", "()V", + 
(void *) + Java_com_arthenica_ffmpegkit_FFmpegKitConfig_disableNativeRedirection}, + {"setNativeLogLevel", "(I)V", + (void *)Java_com_arthenica_ffmpegkit_FFmpegKitConfig_setNativeLogLevel}, + {"getNativeLogLevel", "()I", + (void *)Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativeLogLevel}, + {"getNativeFFmpegVersion", "()Ljava/lang/String;", + (void *) + Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativeFFmpegVersion}, + {"getNativeVersion", "()Ljava/lang/String;", + (void *)Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativeVersion}, + {"getNativePackageName", "()Ljava/lang/String;", + (void *)Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativePackageName}, + {"nativeFFmpegExecute", "(J[Ljava/lang/String;)I", + (void *)Java_com_arthenica_ffmpegkit_FFmpegKitConfig_nativeFFmpegExecute}, + {"nativeFFmpegCancel", "(J)V", + (void *)Java_com_arthenica_ffmpegkit_FFmpegKitConfig_nativeFFmpegCancel}, + {"nativeFFprobeExecute", "(J[Ljava/lang/String;)I", + (void *)Java_com_arthenica_ffmpegkit_FFmpegKitConfig_nativeFFprobeExecute}, + {"registerNewNativeFFmpegPipe", "(Ljava/lang/String;)I", + (void *) + Java_com_arthenica_ffmpegkit_FFmpegKitConfig_registerNewNativeFFmpegPipe}, + {"getNativeBuildDate", "()Ljava/lang/String;", + (void *)Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativeBuildDate}, + {"setNativeEnvironmentVariable", "(Ljava/lang/String;Ljava/lang/String;)I", + (void *) + Java_com_arthenica_ffmpegkit_FFmpegKitConfig_setNativeEnvironmentVariable}, + {"ignoreNativeSignal", "(I)V", + (void *)Java_com_arthenica_ffmpegkit_FFmpegKitConfig_ignoreNativeSignal}, + {"messagesInTransmit", "(J)I", + (void *)Java_com_arthenica_ffmpegkit_FFmpegKitConfig_messagesInTransmit}}; /** Forward declaration for function defined in fftools_ffmpeg.c */ int ffmpeg_execute(int argc, char **argv); @@ -159,42 +178,45 @@ static const char *avutil_log_get_level_str(int level) { } } -static void avutil_log_format_line(void *avcl, int level, const char *fmt, va_list vl, AVBPrint part[4], int *print_prefix) { +static void avutil_log_format_line(void *avcl, int level, const char *fmt, + va_list vl, AVBPrint part[4], + int *print_prefix) { int flags = av_log_get_flags(); - AVClass* avc = avcl ? *(AVClass **) avcl : NULL; - av_bprint_init(part+0, 0, 1); - av_bprint_init(part+1, 0, 1); - av_bprint_init(part+2, 0, 1); - av_bprint_init(part+3, 0, 65536); + AVClass *avc = avcl ? *(AVClass **)avcl : NULL; + av_bprint_init(part + 0, 0, 1); + av_bprint_init(part + 1, 0, 1); + av_bprint_init(part + 2, 0, 1); + av_bprint_init(part + 3, 0, 65536); if (*print_prefix && avc) { if (avc->parent_log_context_offset) { - AVClass** parent = *(AVClass ***) (((uint8_t *) avcl) + - avc->parent_log_context_offset); + AVClass **parent = *(AVClass ***)(((uint8_t *)avcl) + + avc->parent_log_context_offset); if (parent && *parent) { - av_bprintf(part+0, "[%s @ %p] ", - (*parent)->item_name(parent), parent); + av_bprintf(part + 0, "[%s @ %p] ", (*parent)->item_name(parent), + parent); } } - av_bprintf(part+1, "[%s @ %p] ", - avc->item_name(avcl), avcl); + av_bprintf(part + 1, "[%s @ %p] ", avc->item_name(avcl), avcl); } if (*print_prefix && (level > AV_LOG_QUIET) && (flags & AV_LOG_PRINT_LEVEL)) - av_bprintf(part+2, "[%s] ", avutil_log_get_level_str(level)); + av_bprintf(part + 2, "[%s] ", avutil_log_get_level_str(level)); - av_vbprintf(part+3, fmt, vl); + av_vbprintf(part + 3, fmt, vl); - if(*part[0].str || *part[1].str || *part[2].str || *part[3].str) { - char lastc = part[3].len && part[3].len <= part[3].size ? 
part[3].str[part[3].len - 1] : 0; + if (*part[0].str || *part[1].str || *part[2].str || *part[3].str) { + char lastc = part[3].len && part[3].len <= part[3].size + ? part[3].str[part[3].len - 1] + : 0; *print_prefix = lastc == '\n' || lastc == '\r'; } } static void avutil_log_sanitize(uint8_t *line) { - while(*line){ - if(*line < 0x08 || (*line > 0x0D && *line < 0x20)) - *line='?'; + while (*line) { + if (*line < 0x08 || (*line > 0x0D && *line < 0x20)) + *line = '?'; line++; } } @@ -224,22 +246,16 @@ void monitorInit() { pthread_condattr_destroy(&cattributes); } -void mutexUnInit() { - pthread_mutex_destroy(&lockMutex); -} +void mutexUnInit() { pthread_mutex_destroy(&lockMutex); } void monitorUnInit() { pthread_mutex_destroy(&monitorMutex); pthread_cond_destroy(&monitorCondition); } -void mutexLock() { - pthread_mutex_lock(&lockMutex); -} +void mutexLock() { pthread_mutex_lock(&lockMutex); } -void mutexUnlock() { - pthread_mutex_unlock(&lockMutex); -} +void mutexUnlock() { pthread_mutex_unlock(&lockMutex); } void monitorWait(int milliSeconds) { struct timeval tp; @@ -251,10 +267,10 @@ void monitorWait(int milliSeconds) { return; } - ts.tv_sec = tp.tv_sec; + ts.tv_sec = tp.tv_sec; ts.tv_nsec = tp.tv_usec * 1000; ts.tv_sec += milliSeconds / 1000; - ts.tv_nsec += (milliSeconds % 1000)*1000000; + ts.tv_nsec += (milliSeconds % 1000) * 1000000; ts.tv_sec += ts.tv_nsec / 1000000000L; ts.tv_nsec = ts.tv_nsec % 1000000000L; @@ -278,7 +294,8 @@ void monitorNotify() { void logCallbackDataAdd(int level, AVBPrint *data) { // CREATE DATA STRUCT FIRST - struct CallbackData *newData = (struct CallbackData*)av_malloc(sizeof(struct CallbackData)); + struct CallbackData *newData = + (struct CallbackData *)av_malloc(sizeof(struct CallbackData)); newData->type = LogType; newData->sessionId = globalSessionId; newData->logLevel = level; @@ -293,7 +310,8 @@ void logCallbackDataAdd(int level, AVBPrint *data) { callbackDataTail = newData; if (callbackDataHead != NULL) { - LOGE("Dangling callback data head detected. This can cause memory leak."); + LOGE("Dangling callback data head detected. This can cause memory " + "leak."); } else { callbackDataHead = newData; } @@ -308,16 +326,21 @@ void logCallbackDataAdd(int level, AVBPrint *data) { monitorNotify(); - atomic_fetch_add(&sessionInTransitMessageCountMap[globalSessionId % SESSION_MAP_SIZE], 1); + atomic_fetch_add( + &sessionInTransitMessageCountMap[globalSessionId % SESSION_MAP_SIZE], + 1); } /** * Adds statistics data to the end of callback data list. */ -void statisticsCallbackDataAdd(int frameNumber, float fps, float quality, int64_t size, double time, double bitrate, double speed) { +void statisticsCallbackDataAdd(int frameNumber, float fps, float quality, + int64_t size, double time, double bitrate, + double speed) { // CREATE DATA STRUCT FIRST - struct CallbackData *newData = (struct CallbackData*)av_malloc(sizeof(struct CallbackData)); + struct CallbackData *newData = + (struct CallbackData *)av_malloc(sizeof(struct CallbackData)); newData->type = StatisticsType; newData->sessionId = globalSessionId; newData->statisticsFrameNumber = frameNumber; @@ -337,7 +360,8 @@ void statisticsCallbackDataAdd(int frameNumber, float fps, float quality, int64_ callbackDataTail = newData; if (callbackDataHead != NULL) { - LOGE("Dangling callback data head detected. This can cause memory leak."); + LOGE("Dangling callback data head detected. 
This can cause memory " + "leak."); } else { callbackDataHead = newData; } @@ -352,7 +376,9 @@ void statisticsCallbackDataAdd(int frameNumber, float fps, float quality, int64_ monitorNotify(); - atomic_fetch_add(&sessionInTransitMessageCountMap[globalSessionId % SESSION_MAP_SIZE], 1); + atomic_fetch_add( + &sessionInTransitMessageCountMap[globalSessionId % SESSION_MAP_SIZE], + 1); } /** @@ -380,7 +406,9 @@ struct CallbackData *callbackDataRemove() { struct CallbackData *nextHead = currentData->next; if (nextHead == NULL) { if (callbackDataHead != callbackDataTail) { - LOGE("Head and tail callback data pointers do not match for single callback data element. This can cause memory leak."); + LOGE("Head and tail callback data pointers do not match for " + "single callback data element. This can cause memory " + "leak."); } else { callbackDataTail = NULL; } @@ -415,7 +443,8 @@ void cancelSession(long id) { } /** - * Checks whether a cancel request for the given session id exists in the session map. + * Checks whether a cancel request for the given session id exists in the + * session map. * * @param id session id * @return 1 if exists, false otherwise @@ -445,7 +474,8 @@ void resetMessagesInTransmit(long id) { * @param format format string * @param vargs arguments */ -void ffmpegkit_log_callback_function(void *ptr, int level, const char* format, va_list vargs) { +void ffmpegkit_log_callback_function(void *ptr, int level, const char *format, + va_list vargs) { AVBPrint fullLine; AVBPrint part[4]; int print_prefix = 1; @@ -456,7 +486,8 @@ void ffmpegkit_log_callback_function(void *ptr, int level, const char* format, v int activeLogLevel = av_log_get_level(); // AV_LOG_STDERR logs are always redirected - if ((activeLogLevel == AV_LOG_QUIET && level != AV_LOG_STDERR) || (level > activeLogLevel)) { + if ((activeLogLevel == AV_LOG_QUIET && level != AV_LOG_STDERR) || + (level > activeLogLevel)) { return; } @@ -469,16 +500,17 @@ void ffmpegkit_log_callback_function(void *ptr, int level, const char* format, v avutil_log_sanitize(part[3].str); // COMBINE ALL 4 LOG PARTS - av_bprintf(&fullLine, "%s%s%s%s", part[0].str, part[1].str, part[2].str, part[3].str); + av_bprintf(&fullLine, "%s%s%s%s", part[0].str, part[1].str, part[2].str, + part[3].str); if (fullLine.len > 0) { logCallbackDataAdd(level, &fullLine); } av_bprint_finalize(part, NULL); - av_bprint_finalize(part+1, NULL); - av_bprint_finalize(part+2, NULL); - av_bprint_finalize(part+3, NULL); + av_bprint_finalize(part + 1, NULL); + av_bprint_finalize(part + 2, NULL); + av_bprint_finalize(part + 3, NULL); av_bprint_finalize(&fullLine, NULL); } @@ -493,8 +525,12 @@ void ffmpegkit_log_callback_function(void *ptr, int level, const char* format, v * @param bitrate output bit rate in kbits/s * @param speed processing speed = processed duration / operation duration */ -void ffmpegkit_statistics_callback_function(int frameNumber, float fps, float quality, int64_t size, double time, double bitrate, double speed) { - statisticsCallbackDataAdd(frameNumber, fps, quality, size, time, bitrate, speed); +void ffmpegkit_statistics_callback_function(int frameNumber, float fps, + float quality, int64_t size, + double time, double bitrate, + double speed) { + statisticsCallbackDataAdd(frameNumber, fps, quality, size, time, bitrate, + speed); } /** @@ -502,22 +538,26 @@ void ffmpegkit_statistics_callback_function(int frameNumber, float fps, float qu */ void *callbackThreadFunction() { JNIEnv *env; - jint getEnvRc = (*globalVm)->GetEnv(globalVm, (void**) &env, 
JNI_VERSION_1_6); + jint getEnvRc = + (*globalVm)->GetEnv(globalVm, (void **)&env, JNI_VERSION_1_6); if (getEnvRc != JNI_OK) { if (getEnvRc != JNI_EDETACHED) { - LOGE("Callback thread failed to GetEnv for class %s with rc %d.\n", configClassName, getEnvRc); + LOGE("Callback thread failed to GetEnv for class %s with rc %d.\n", + configClassName, getEnvRc); return NULL; } if ((*globalVm)->AttachCurrentThread(globalVm, &env, NULL) != 0) { - LOGE("Callback thread failed to AttachCurrentThread for class %s.\n", configClassName); + LOGE( + "Callback thread failed to AttachCurrentThread for class %s.\n", + configClassName); return NULL; } } LOGD("Async callback block started.\n"); - while(redirectionEnabled) { + while (redirectionEnabled) { struct CallbackData *callbackData = callbackDataRemove(); if (callbackData != NULL) { @@ -527,9 +567,13 @@ void *callbackThreadFunction() { int size = callbackData->logData.len; - jbyteArray byteArray = (jbyteArray) (*env)->NewByteArray(env, size); - (*env)->SetByteArrayRegion(env, byteArray, 0, size, callbackData->logData.str); - (*env)->CallStaticVoidMethod(env, configClass, logMethod, (jlong) callbackData->sessionId, callbackData->logLevel, byteArray); + jbyteArray byteArray = + (jbyteArray)(*env)->NewByteArray(env, size); + (*env)->SetByteArrayRegion(env, byteArray, 0, size, + callbackData->logData.str); + (*env)->CallStaticVoidMethod(env, configClass, logMethod, + (jlong)callbackData->sessionId, + callbackData->logLevel, byteArray); (*env)->DeleteLocalRef(env, byteArray); // CLEAN LOG DATA @@ -539,15 +583,21 @@ void *callbackThreadFunction() { // STATISTICS CALLBACK - (*env)->CallStaticVoidMethod(env, configClass, statisticsMethod, - (jlong) callbackData->sessionId, callbackData->statisticsFrameNumber, - callbackData->statisticsFps, callbackData->statisticsQuality, + (*env)->CallStaticVoidMethod( + env, configClass, statisticsMethod, + (jlong)callbackData->sessionId, + callbackData->statisticsFrameNumber, + callbackData->statisticsFps, + callbackData->statisticsQuality, callbackData->statisticsSize, callbackData->statisticsTime, - callbackData->statisticsBitrate, callbackData->statisticsSpeed); - + callbackData->statisticsBitrate, + callbackData->statisticsSpeed); } - atomic_fetch_sub(&sessionInTransitMessageCountMap[callbackData->sessionId % SESSION_MAP_SIZE], 1); + atomic_fetch_sub( + &sessionInTransitMessageCountMap[callbackData->sessionId % + SESSION_MAP_SIZE], + 1); // CLEAN STRUCT callbackData->next = NULL; @@ -566,57 +616,67 @@ void *callbackThreadFunction() { } /** - * Used by saf protocol; If it is called from a Java thread, we don't need attach/detach. - * However it can be called from other threads as well (as it happens for concat demuxer), - * in that case we perform attach & detach. - * Returns file descriptor created for this SAF id or 0 if an error occurs. + * Used by saf protocol; If it is called from a Java thread, we don't need + * attach/detach. However it can be called from other threads as well (as it + * happens for concat demuxer), in that case we perform attach & detach. Returns + * file descriptor created for this SAF id or 0 if an error occurs. 
*/ int saf_open(int safId) { JNIEnv *env = NULL; bool attached = false; - jint getEnvRc = (*globalVm)->GetEnv(globalVm, (void**) &env, JNI_VERSION_1_6); + jint getEnvRc = + (*globalVm)->GetEnv(globalVm, (void **)&env, JNI_VERSION_1_6); if (getEnvRc != JNI_OK) { if (getEnvRc != JNI_EDETACHED) { - LOGE("saf_open failed to GetEnv for class %s with rc %d.\n", configClassName, getEnvRc); + LOGE("saf_open failed to GetEnv for class %s with rc %d.\n", + configClassName, getEnvRc); return 0; } if ((*globalVm)->AttachCurrentThread(globalVm, &env, NULL) != 0) { - LOGE("saf_open failed to AttachCurrentThread for class %s.\n", configClassName); + LOGE("saf_open failed to AttachCurrentThread for class %s.\n", + configClassName); return 0; } else { - attached = true; + attached = true; } } - int result = (*env)->CallStaticIntMethod(env, configClass, safOpenMethod, safId); - if (attached) (*globalVm)->DetachCurrentThread(globalVm); + int result = + (*env)->CallStaticIntMethod(env, configClass, safOpenMethod, safId); + if (attached) + (*globalVm)->DetachCurrentThread(globalVm); return result; } /** - * Used by saf protocol; If it is called from a Java thread, we don't need attach/detach. - * However it can be called from other threads as well (as it happens for concat demuxer), - * in that case we perform attach & detach. - * Returns 1 if the given file descriptor is closed successfully, 0 if an error occurs. + * Used by saf protocol; If it is called from a Java thread, we don't need + * attach/detach. However it can be called from other threads as well (as it + * happens for concat demuxer), in that case we perform attach & detach. Returns + * 1 if the given file descriptor is closed successfully, 0 if an error occurs. */ int saf_close(int fd) { JNIEnv *env = NULL; bool attached = false; - jint getEnvRc = (*globalVm)->GetEnv(globalVm, (void**) &env, JNI_VERSION_1_6); + jint getEnvRc = + (*globalVm)->GetEnv(globalVm, (void **)&env, JNI_VERSION_1_6); if (getEnvRc != JNI_OK) { if (getEnvRc != JNI_EDETACHED) { - LOGE("saf_close failed to GetEnv for class %s with rc %d.\n", configClassName, getEnvRc); + LOGE("saf_close failed to GetEnv for class %s with rc %d.\n", + configClassName, getEnvRc); return 0; } if ((*globalVm)->AttachCurrentThread(globalVm, &env, NULL) != 0) { - LOGE("saf_close failed to AttachCurrentThread for class %s.\n", configClassName); + LOGE("saf_close failed to AttachCurrentThread for class %s.\n", + configClassName); return 0; } else { - attached = true; + attached = true; } } - int result = (*env)->CallStaticIntMethod(env, configClass, safCloseMethod, fd); - if (attached) (*globalVm)->DetachCurrentThread(globalVm); + int result = + (*env)->CallStaticIntMethod(env, configClass, safCloseMethod, fd); + if (attached) + (*globalVm)->DetachCurrentThread(globalVm); return result; } @@ -653,7 +713,7 @@ static void enableNativeRedirection() { */ jint JNI_OnLoad(JavaVM *vm, void *reserved) { JNIEnv *env; - if ((*vm)->GetEnv(vm, (void**)(&env), JNI_VERSION_1_6) != JNI_OK) { + if ((*vm)->GetEnv(vm, (void **)(&env), JNI_VERSION_1_6) != JNI_OK) { LOGE("OnLoad failed to GetEnv for class %s.\n", configClassName); return JNI_FALSE; } @@ -665,7 +725,8 @@ jint JNI_OnLoad(JavaVM *vm, void *reserved) { } if ((*env)->RegisterNatives(env, localConfigClass, configMethods, 15) < 0) { - LOGE("OnLoad failed to RegisterNatives for class %s.\n", configClassName); + LOGE("OnLoad failed to RegisterNatives for class %s.\n", + configClassName); return JNI_FALSE; } @@ -677,31 +738,37 @@ jint JNI_OnLoad(JavaVM *vm, void 
*reserved) { (*env)->GetJavaVM(env, &globalVm); - logMethod = (*env)->GetStaticMethodID(env, localConfigClass, "log", "(JI[B)V"); + logMethod = + (*env)->GetStaticMethodID(env, localConfigClass, "log", "(JI[B)V"); if (logMethod == NULL) { LOGE("OnLoad thread failed to GetStaticMethodID for %s.\n", "log"); return JNI_FALSE; } - statisticsMethod = (*env)->GetStaticMethodID(env, localConfigClass, "statistics", "(JIFFJDDD)V"); + statisticsMethod = (*env)->GetStaticMethodID(env, localConfigClass, + "statistics", "(JIFFJDDD)V"); if (statisticsMethod == NULL) { - LOGE("OnLoad thread failed to GetStaticMethodID for %s.\n", "statistics"); + LOGE("OnLoad thread failed to GetStaticMethodID for %s.\n", + "statistics"); return JNI_FALSE; } - safOpenMethod = (*env)->GetStaticMethodID(env, localConfigClass, "safOpen", "(I)I"); + safOpenMethod = + (*env)->GetStaticMethodID(env, localConfigClass, "safOpen", "(I)I"); if (safOpenMethod == NULL) { LOGE("OnLoad thread failed to GetStaticMethodID for %s.\n", "safOpen"); return JNI_FALSE; } - safCloseMethod = (*env)->GetStaticMethodID(env, localConfigClass, "safClose", "(I)I"); + safCloseMethod = + (*env)->GetStaticMethodID(env, localConfigClass, "safClose", "(I)I"); if (safCloseMethod == NULL) { LOGE("OnLoad thread failed to GetStaticMethodID for %s.\n", "safClose"); return JNI_FALSE; } - stringConstructor = (*env)->GetMethodID(env, localStringClass, "", "([BLjava/lang/String;)V"); + stringConstructor = (*env)->GetMethodID(env, localStringClass, "", + "([BLjava/lang/String;)V"); if (stringConstructor == NULL) { LOGE("OnLoad thread failed to GetMethodID for %s.\n", ""); return JNI_FALSE; @@ -709,13 +776,13 @@ jint JNI_OnLoad(JavaVM *vm, void *reserved) { av_jni_set_java_vm(vm, NULL); - configClass = (jclass) ((*env)->NewGlobalRef(env, localConfigClass)); - stringClass = (jclass) ((*env)->NewGlobalRef(env, localStringClass)); + configClass = (jclass)((*env)->NewGlobalRef(env, localConfigClass)); + stringClass = (jclass)((*env)->NewGlobalRef(env, localStringClass)); callbackDataHead = NULL; callbackDataTail = NULL; - - for(int i = 0; iNewStringUTF(env, FFMPEG_VERSION); } @@ -808,7 +886,9 @@ JNIEXPORT jstring JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNative * @param object reference to the class on which this method is invoked * @return FFmpegKit version string */ -JNIEXPORT jstring JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativeVersion(JNIEnv *env, jclass object) { +JNIEXPORT jstring JNICALL +Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativeVersion(JNIEnv *env, + jclass object) { return (*env)->NewStringUTF(env, FFMPEG_KIT_VERSION); } @@ -819,7 +899,9 @@ JNIEXPORT jstring JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNative * @param object reference to the class on which this method is invoked * @return native FFmpegKit package name */ -JNIEXPORT jstring JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativePackageName(JNIEnv *env, jclass object) { +JNIEXPORT jstring JNICALL +Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativePackageName( + JNIEnv *env, jclass object) { #ifdef FFMPEG_KIT_PACKAGE return (*env)->NewStringUTF(env, FFMPEG_KIT_PACKAGE); #else @@ -836,7 +918,9 @@ JNIEXPORT jstring JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNative * @param stringArray reference to the object holding FFmpeg command arguments * @return zero on successful execution, non-zero on error */ -JNIEXPORT jint JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_nativeFFmpegExecute(JNIEnv *env, jclass object, jlong 
id, jobjectArray stringArray) { +JNIEXPORT jint JNICALL +Java_com_arthenica_ffmpegkit_FFmpegKitConfig_nativeFFmpegExecute( + JNIEnv *env, jclass object, jlong id, jobjectArray stringArray) { jstring *tempArray = NULL; int argumentCount = 1; char **argv = NULL; @@ -848,30 +932,33 @@ JNIEXPORT jint JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_nativeFFmpeg int programArgumentCount = (*env)->GetArrayLength(env, stringArray); argumentCount = programArgumentCount + 1; - tempArray = (jstring *) av_malloc(sizeof(jstring) * programArgumentCount); + tempArray = + (jstring *)av_malloc(sizeof(jstring) * programArgumentCount); } /* PRESERVE USAGE FORMAT * * ffmpeg */ - argv = (char **)av_malloc(sizeof(char*) * (argumentCount)); + argv = (char **)av_malloc(sizeof(char *) * (argumentCount)); argv[0] = (char *)av_malloc(sizeof(char) * (strlen(LIB_NAME) + 1)); strcpy(argv[0], LIB_NAME); // PREPARE ARRAY ELEMENTS if (stringArray) { for (int i = 0; i < (argumentCount - 1); i++) { - tempArray[i] = (jstring) (*env)->GetObjectArrayElement(env, stringArray, i); + tempArray[i] = + (jstring)(*env)->GetObjectArrayElement(env, stringArray, i); if (tempArray[i] != NULL) { - argv[i + 1] = (char *) (*env)->GetStringUTFChars(env, tempArray[i], 0); + argv[i + 1] = + (char *)(*env)->GetStringUTFChars(env, tempArray[i], 0); } } } // REGISTER THE ID BEFORE STARTING THE SESSION - globalSessionId = (long) id; - addSession((long) id); + globalSessionId = (long)id; + addSession((long)id); resetMessagesInTransmit(globalSessionId); @@ -879,7 +966,7 @@ JNIEXPORT jint JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_nativeFFmpeg int returnCode = ffmpeg_execute(argumentCount, argv); // ALWAYS REMOVE THE ID FROM THE MAP - removeSession((long) id); + removeSession((long)id); // CLEANUP if (tempArray) { @@ -902,7 +989,10 @@ JNIEXPORT jint JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_nativeFFmpeg * @param object reference to the class on which this method is invoked * @param id session id */ -JNIEXPORT void JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_nativeFFmpegCancel(JNIEnv *env, jclass object, jlong id) { +JNIEXPORT void JNICALL +Java_com_arthenica_ffmpegkit_FFmpegKitConfig_nativeFFmpegCancel(JNIEnv *env, + jclass object, + jlong id) { cancel_operation(id); } @@ -914,8 +1004,11 @@ JNIEXPORT void JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_nativeFFmpeg * @param ffmpegPipePath full path of ffmpeg pipe * @return zero on successful creation, non-zero on error */ -JNIEXPORT int JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_registerNewNativeFFmpegPipe(JNIEnv *env, jclass object, jstring ffmpegPipePath) { - const char *ffmpegPipePathString = (*env)->GetStringUTFChars(env, ffmpegPipePath, 0); +JNIEXPORT int JNICALL +Java_com_arthenica_ffmpegkit_FFmpegKitConfig_registerNewNativeFFmpegPipe( + JNIEnv *env, jclass object, jstring ffmpegPipePath) { + const char *ffmpegPipePathString = + (*env)->GetStringUTFChars(env, ffmpegPipePath, 0); return mkfifo(ffmpegPipePathString, S_IRWXU | S_IRWXG | S_IROTH); } @@ -927,7 +1020,9 @@ JNIEXPORT int JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_registerNewNa * @param object reference to the class on which this method is invoked * @return FFmpegKit library build date */ -JNIEXPORT jstring JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativeBuildDate(JNIEnv *env, jclass object) { +JNIEXPORT jstring JNICALL +Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativeBuildDate(JNIEnv *env, + jclass object) { char buildDate[10]; sprintf(buildDate, "%d", 
FFMPEG_KIT_BUILD_DATE); return (*env)->NewStringUTF(env, buildDate); @@ -942,9 +1037,13 @@ JNIEXPORT jstring JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNative * @param variableValue environment variable value * @return zero on success, non-zero on error */ -JNIEXPORT int JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_setNativeEnvironmentVariable(JNIEnv *env, jclass object, jstring variableName, jstring variableValue) { - const char *variableNameString = (*env)->GetStringUTFChars(env, variableName, 0); - const char *variableValueString = (*env)->GetStringUTFChars(env, variableValue, 0); +JNIEXPORT int JNICALL +Java_com_arthenica_ffmpegkit_FFmpegKitConfig_setNativeEnvironmentVariable( + JNIEnv *env, jclass object, jstring variableName, jstring variableValue) { + const char *variableNameString = + (*env)->GetStringUTFChars(env, variableName, 0); + const char *variableValueString = + (*env)->GetStringUTFChars(env, variableValue, 0); int rc = setenv(variableNameString, variableValueString, 1); @@ -954,13 +1053,17 @@ JNIEXPORT int JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_setNativeEnvi } /** - * Registers a new ignored signal. Ignored signals are not handled by the library. + * Registers a new ignored signal. Ignored signals are not handled by the + * library. * * @param env pointer to native method interface * @param object reference to the class on which this method is invoked * @param signum signal number */ -JNIEXPORT void JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_ignoreNativeSignal(JNIEnv *env, jclass object, jint signum) { +JNIEXPORT void JNICALL +Java_com_arthenica_ffmpegkit_FFmpegKitConfig_ignoreNativeSignal(JNIEnv *env, + jclass object, + jint signum) { if (signum == SIGQUIT) { handleSIGQUIT = 0; } else if (signum == SIGINT) { @@ -975,13 +1078,16 @@ JNIEXPORT void JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_ignoreNative } /** - * Returns the number of native messages which are not transmitted to the Java callbacks for the - * given session. + * Returns the number of native messages which are not transmitted to the Java + * callbacks for the given session. * * @param env pointer to native method interface * @param object reference to the class on which this method is invoked * @param id session id */ -JNIEXPORT int JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_messagesInTransmit(JNIEnv *env, jclass object, jlong id) { +JNIEXPORT int JNICALL +Java_com_arthenica_ffmpegkit_FFmpegKitConfig_messagesInTransmit(JNIEnv *env, + jclass object, + jlong id) { return atomic_load(&sessionInTransitMessageCountMap[id % SESSION_MAP_SIZE]); } diff --git a/android/ffmpeg-kit-android-lib/src/main/cpp/ffmpegkit.h b/android/ffmpeg-kit-android-lib/src/main/cpp/ffmpegkit.h index b0dfc9c..f217b45 100644 --- a/android/ffmpeg-kit-android-lib/src/main/cpp/ffmpegkit.h +++ b/android/ffmpeg-kit-android-lib/src/main/cpp/ffmpegkit.h @@ -20,20 +20,22 @@ #ifndef FFMPEG_KIT_H #define FFMPEG_KIT_H -#include #include +#include +#include -#include "libavutil/log.h" #include "libavutil/ffversion.h" +#include "libavutil/log.h" /** Library version string */ -#define FFMPEG_KIT_VERSION "6.0" +#define FFMPEG_KIT_VERSION "6.1.2" /** Defines tag used for Android logging. */ #define LIB_NAME "ffmpeg-kit" /** Verbose Android logging macro. */ -#define LOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, LIB_NAME, __VA_ARGS__) +#define LOGV(...) \ + __android_log_print(ANDROID_LOG_VERBOSE, LIB_NAME, __VA_ARGS__) /** Debug Android logging macro. */ #define LOGD(...) 
__android_log_print(ANDROID_LOG_DEBUG, LIB_NAME, __VA_ARGS__) @@ -52,97 +54,127 @@ * Method: enableNativeRedirection * Signature: ()V */ -JNIEXPORT void JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_enableNativeRedirection(JNIEnv *, jclass); +JNIEXPORT void JNICALL +Java_com_arthenica_ffmpegkit_FFmpegKitConfig_enableNativeRedirection(JNIEnv *, + jclass); /* * Class: com_arthenica_ffmpegkit_FFmpegKitConfig * Method: disableNativeRedirection * Signature: ()V */ -JNIEXPORT void JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_disableNativeRedirection(JNIEnv *, jclass); +JNIEXPORT void JNICALL +Java_com_arthenica_ffmpegkit_FFmpegKitConfig_disableNativeRedirection(JNIEnv *, + jclass); /* * Class: com_arthenica_ffmpegkit_FFmpegKitConfig * Method: setNativeLogLevel * Signature: (I)V */ -JNIEXPORT void JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_setNativeLogLevel(JNIEnv *, jclass, jint); +JNIEXPORT void JNICALL +Java_com_arthenica_ffmpegkit_FFmpegKitConfig_setNativeLogLevel(JNIEnv *, jclass, + jint); /* * Class: com_arthenica_ffmpegkit_FFmpegKitConfig * Method: getNativeLogLevel * Signature: ()I */ -JNIEXPORT jint JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativeLogLevel(JNIEnv *, jclass); +JNIEXPORT jint JNICALL +Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativeLogLevel(JNIEnv *, + jclass); /* * Class: com_arthenica_ffmpegkit_FFmpegKitConfig * Method: getNativeFFmpegVersion * Signature: ()Ljava/lang/String; */ -JNIEXPORT jstring JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativeFFmpegVersion(JNIEnv *, jclass); +JNIEXPORT jstring JNICALL +Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativeFFmpegVersion(JNIEnv *, + jclass); /* * Class: com_arthenica_ffmpegkit_FFmpegKitConfig * Method: getNativeVersion * Signature: ()Ljava/lang/String; */ -JNIEXPORT jstring JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativeVersion(JNIEnv *, jclass); +JNIEXPORT jstring JNICALL +Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativeVersion(JNIEnv *, jclass); /* * Class: com_arthenica_ffmpegkit_FFmpegKitConfig * Method: getNativePackageName * Signature: ()Ljava/lang/String; */ -JNIEXPORT jstring JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativePackageName(JNIEnv *, jclass); +JNIEXPORT jstring JNICALL +Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativePackageName(JNIEnv *, + jclass); /* * Class: com_arthenica_ffmpegkit_FFmpegKitConfig * Method: nativeFFmpegExecute * Signature: (J[Ljava/lang/String;)I */ -JNIEXPORT jint JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_nativeFFmpegExecute(JNIEnv *, jclass, jlong, jobjectArray); +JNIEXPORT jint JNICALL +Java_com_arthenica_ffmpegkit_FFmpegKitConfig_nativeFFmpegExecute(JNIEnv *, + jclass, jlong, + jobjectArray); /* * Class: com_arthenica_ffmpegkit_FFmpegKitConfig * Method: nativeFFmpegCancel * Signature: (J)V */ -JNIEXPORT void JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_nativeFFmpegCancel(JNIEnv *, jclass, jlong); +JNIEXPORT void JNICALL +Java_com_arthenica_ffmpegkit_FFmpegKitConfig_nativeFFmpegCancel(JNIEnv *, + jclass, jlong); /* * Class: com_arthenica_ffmpegkit_FFmpegKitConfig * Method: registerNewNativeFFmpegPipe * Signature: (Ljava/lang/String;)I */ -JNIEXPORT int JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_registerNewNativeFFmpegPipe(JNIEnv *env, jclass object, jstring ffmpegPipePath); +JNIEXPORT int JNICALL +Java_com_arthenica_ffmpegkit_FFmpegKitConfig_registerNewNativeFFmpegPipe( + JNIEnv *env, jclass object, jstring ffmpegPipePath); /* * Class: 
com_arthenica_ffmpegkit_FFmpegKitConfig * Method: getNativeBuildDate * Signature: ()Ljava/lang/String; */ -JNIEXPORT jstring JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativeBuildDate(JNIEnv *env, jclass object); +JNIEXPORT jstring JNICALL +Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativeBuildDate(JNIEnv *env, + jclass object); /** * Class: com_arthenica_ffmpegkit_FFmpegKitConfig * Method: setNativeEnvironmentVariable * Signature: (Ljava/lang/String;Ljava/lang/String;)I */ -JNIEXPORT int JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_setNativeEnvironmentVariable(JNIEnv *env, jclass object, jstring variableName, jstring variableValue); +JNIEXPORT int JNICALL +Java_com_arthenica_ffmpegkit_FFmpegKitConfig_setNativeEnvironmentVariable( + JNIEnv *env, jclass object, jstring variableName, jstring variableValue); /* * Class: com_arthenica_ffmpegkit_FFmpegKitConfig * Method: ignoreNativeSignal * Signature: (I)V */ -JNIEXPORT void JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_ignoreNativeSignal(JNIEnv *env, jclass object, jint signum); +JNIEXPORT void JNICALL +Java_com_arthenica_ffmpegkit_FFmpegKitConfig_ignoreNativeSignal(JNIEnv *env, + jclass object, + jint signum); /* * Class: com_arthenica_ffmpegkit_FFmpegKitConfig * Method: messagesInTransmit * Signature: (J)I */ -JNIEXPORT int JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_messagesInTransmit(JNIEnv *env, jclass object, jlong id); +JNIEXPORT int JNICALL +Java_com_arthenica_ffmpegkit_FFmpegKitConfig_messagesInTransmit(JNIEnv *env, + jclass object, + jlong id); #endif /* FFMPEG_KIT_H */ \ No newline at end of file diff --git a/android/ffmpeg-kit-android-lib/src/main/cpp/ffmpegkit_abidetect.c b/android/ffmpeg-kit-android-lib/src/main/cpp/ffmpegkit_abidetect.c index 698527e..b43a249 100644 --- a/android/ffmpeg-kit-android-lib/src/main/cpp/ffmpegkit_abidetect.c +++ b/android/ffmpeg-kit-android-lib/src/main/cpp/ffmpegkit_abidetect.c @@ -17,20 +17,23 @@ * along with FFmpegKit. If not, see . */ +#include "ffmpegkit_abidetect.h" #include "cpu-features.h" #include "fftools_ffmpeg.h" -#include "ffmpegkit_abidetect.h" /** Full name of the Java class that owns native functions in this file. */ const char *abiDetectClassName = "com/arthenica/ffmpegkit/AbiDetect"; /** Prototypes of native functions defined by this file. */ JNINativeMethod abiDetectMethods[] = { - {"getNativeAbi", "()Ljava/lang/String;", (void*) Java_com_arthenica_ffmpegkit_AbiDetect_getNativeAbi}, - {"getNativeCpuAbi", "()Ljava/lang/String;", (void*) Java_com_arthenica_ffmpegkit_AbiDetect_getNativeCpuAbi}, - {"isNativeLTSBuild", "()Z", (void*) Java_com_arthenica_ffmpegkit_AbiDetect_isNativeLTSBuild}, - {"getNativeBuildConf", "()Ljava/lang/String;", (void*) Java_com_arthenica_ffmpegkit_AbiDetect_getNativeBuildConf} -}; + {"getNativeAbi", "()Ljava/lang/String;", + (void *)Java_com_arthenica_ffmpegkit_AbiDetect_getNativeAbi}, + {"getNativeCpuAbi", "()Ljava/lang/String;", + (void *)Java_com_arthenica_ffmpegkit_AbiDetect_getNativeCpuAbi}, + {"isNativeLTSBuild", "()Z", + (void *)Java_com_arthenica_ffmpegkit_AbiDetect_isNativeLTSBuild}, + {"getNativeBuildConf", "()Ljava/lang/String;", + (void *)Java_com_arthenica_ffmpegkit_AbiDetect_getNativeBuildConf}}; /** * Called when 'abidetect' native library is loaded. 
@@ -41,7 +44,7 @@ JNINativeMethod abiDetectMethods[] = { */ jint JNI_OnLoad(JavaVM *vm, void *reserved) { JNIEnv *env; - if ((*vm)->GetEnv(vm, (void**) &env, JNI_VERSION_1_6) != JNI_OK) { + if ((*vm)->GetEnv(vm, (void **)&env, JNI_VERSION_1_6) != JNI_OK) { LOGE("OnLoad failed to GetEnv for class %s.\n", abiDetectClassName); return JNI_FALSE; } @@ -53,7 +56,8 @@ jint JNI_OnLoad(JavaVM *vm, void *reserved) { } if ((*env)->RegisterNatives(env, abiDetectClass, abiDetectMethods, 4) < 0) { - LOGE("OnLoad failed to RegisterNatives for class %s.\n", abiDetectClassName); + LOGE("OnLoad failed to RegisterNatives for class %s.\n", + abiDetectClassName); return JNI_FALSE; } @@ -67,7 +71,8 @@ jint JNI_OnLoad(JavaVM *vm, void *reserved) { * @param object reference to the class on which this method is invoked * @return loaded ABI name as UTF string */ -JNIEXPORT jstring JNICALL Java_com_arthenica_ffmpegkit_AbiDetect_getNativeAbi(JNIEnv *env, jclass object) { +JNIEXPORT jstring JNICALL Java_com_arthenica_ffmpegkit_AbiDetect_getNativeAbi( + JNIEnv *env, jclass object) { #ifdef FFMPEG_KIT_ARM_V7A return (*env)->NewStringUTF(env, "arm-v7a"); @@ -80,7 +85,6 @@ JNIEXPORT jstring JNICALL Java_com_arthenica_ffmpegkit_AbiDetect_getNativeAbi(JN #else return (*env)->NewStringUTF(env, "unknown"); #endif - } /** @@ -90,7 +94,9 @@ JNIEXPORT jstring JNICALL Java_com_arthenica_ffmpegkit_AbiDetect_getNativeAbi(JN * @param object reference to the class on which this method is invoked * @return ABI name of the running cpu as UTF string */ -JNIEXPORT jstring JNICALL Java_com_arthenica_ffmpegkit_AbiDetect_getNativeCpuAbi(JNIEnv *env, jclass object) { +JNIEXPORT jstring JNICALL +Java_com_arthenica_ffmpegkit_AbiDetect_getNativeCpuAbi(JNIEnv *env, + jclass object) { AndroidCpuFamily family = android_getCpuFamily(); if (family == ANDROID_CPU_FAMILY_ARM) { @@ -124,12 +130,14 @@ JNIEXPORT jstring JNICALL Java_com_arthenica_ffmpegkit_AbiDetect_getNativeCpuAbi * @param object reference to the class on which this method is invoked * @return yes or no */ -JNIEXPORT jboolean JNICALL Java_com_arthenica_ffmpegkit_AbiDetect_isNativeLTSBuild(JNIEnv *env, jclass object) { - #if defined(FFMPEG_KIT_LTS) - return JNI_TRUE; - #else - return JNI_FALSE; - #endif +JNIEXPORT jboolean JNICALL +Java_com_arthenica_ffmpegkit_AbiDetect_isNativeLTSBuild(JNIEnv *env, + jclass object) { +#if defined(FFMPEG_KIT_LTS) + return JNI_TRUE; +#else + return JNI_FALSE; +#endif } /** @@ -139,6 +147,8 @@ JNIEXPORT jboolean JNICALL Java_com_arthenica_ffmpegkit_AbiDetect_isNativeLTSBui * @param object reference to the class on which this method is invoked * @return build configuration string */ -JNIEXPORT jstring JNICALL Java_com_arthenica_ffmpegkit_AbiDetect_getNativeBuildConf(JNIEnv *env, jclass object) { +JNIEXPORT jstring JNICALL +Java_com_arthenica_ffmpegkit_AbiDetect_getNativeBuildConf(JNIEnv *env, + jclass object) { return (*env)->NewStringUTF(env, FFMPEG_CONFIGURATION); } diff --git a/android/ffmpeg-kit-android-lib/src/main/cpp/ffmpegkit_abidetect.h b/android/ffmpeg-kit-android-lib/src/main/cpp/ffmpegkit_abidetect.h index 558ecc5..3e96b2f 100644 --- a/android/ffmpeg-kit-android-lib/src/main/cpp/ffmpegkit_abidetect.h +++ b/android/ffmpeg-kit-android-lib/src/main/cpp/ffmpegkit_abidetect.h @@ -20,8 +20,8 @@ #ifndef FFMPEG_KIT_ABIDETECT_H #define FFMPEG_KIT_ABIDETECT_H -#include #include "ffmpegkit.h" +#include /** Represents armeabi-v7a ABI with NEON support. 
*/ #define ABI_ARMV7A_NEON "armeabi-v7a-neon" @@ -49,27 +49,31 @@ * Method: getNativeAbi * Signature: ()Ljava/lang/String; */ -JNIEXPORT jstring JNICALL Java_com_arthenica_ffmpegkit_AbiDetect_getNativeAbi(JNIEnv *, jclass); +JNIEXPORT jstring JNICALL +Java_com_arthenica_ffmpegkit_AbiDetect_getNativeAbi(JNIEnv *, jclass); /* * Class: com_arthenica_ffmpegkit_AbiDetect * Method: getNativeCpuAbi * Signature: ()Ljava/lang/String; */ -JNIEXPORT jstring JNICALL Java_com_arthenica_ffmpegkit_AbiDetect_getNativeCpuAbi(JNIEnv *, jclass); +JNIEXPORT jstring JNICALL +Java_com_arthenica_ffmpegkit_AbiDetect_getNativeCpuAbi(JNIEnv *, jclass); /** * Class: com_arthenica_ffmpegkit_AbiDetect * Method: isNativeLTSBuild * Signature: ()Z */ -JNIEXPORT jboolean JNICALL Java_com_arthenica_ffmpegkit_AbiDetect_isNativeLTSBuild(JNIEnv *, jclass); +JNIEXPORT jboolean JNICALL +Java_com_arthenica_ffmpegkit_AbiDetect_isNativeLTSBuild(JNIEnv *, jclass); /* * Class: com_arthenica_ffmpegkit_AbiDetect * Method: getNativeBuildConf * Signature: ()Ljava/lang/String; */ -JNIEXPORT jstring JNICALL Java_com_arthenica_ffmpegkit_AbiDetect_getNativeBuildConf(JNIEnv *, jclass); +JNIEXPORT jstring JNICALL +Java_com_arthenica_ffmpegkit_AbiDetect_getNativeBuildConf(JNIEnv *, jclass); #endif /* FFMPEG_KIT_ABIDETECT_H */ diff --git a/android/ffmpeg-kit-android-lib/src/main/cpp/ffmpegkit_exception.h b/android/ffmpeg-kit-android-lib/src/main/cpp/ffmpegkit_exception.h index daf3acc..0a69047 100644 --- a/android/ffmpeg-kit-android-lib/src/main/cpp/ffmpegkit_exception.h +++ b/android/ffmpeg-kit-android-lib/src/main/cpp/ffmpegkit_exception.h @@ -20,8 +20,8 @@ #ifndef FFMPEG_KIT_EXCEPTION_H #define FFMPEG_KIT_EXCEPTION_H -#include #include +#include /** Holds information to implement exception handling. */ extern __thread jmp_buf ex_buf__; diff --git a/android/ffmpeg-kit-android-lib/src/main/cpp/ffprobekit.c b/android/ffmpeg-kit-android-lib/src/main/cpp/ffprobekit.c index 089cefe..4964589 100644 --- a/android/ffmpeg-kit-android-lib/src/main/cpp/ffprobekit.c +++ b/android/ffmpeg-kit-android-lib/src/main/cpp/ffprobekit.c @@ -18,14 +18,14 @@ */ #include -#include #include +#include #include "config.h" +#include "ffmpegkit.h" #include "libavcodec/jni.h" #include "libavutil/bprint.h" #include "libavutil/mem.h" -#include "ffmpegkit.h" /** Forward declaration for function defined in fftools_ffprobe.c */ int ffprobe_execute(int argc, char **argv); @@ -45,7 +45,9 @@ extern void resetMessagesInTransmit(long sessionId); * @param stringArray reference to the object holding FFprobe command arguments * @return zero on successful execution, non-zero on error */ -JNIEXPORT jint JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_nativeFFprobeExecute(JNIEnv *env, jclass object, jlong id, jobjectArray stringArray) { +JNIEXPORT jint JNICALL +Java_com_arthenica_ffmpegkit_FFmpegKitConfig_nativeFFprobeExecute( + JNIEnv *env, jclass object, jlong id, jobjectArray stringArray) { jstring *tempArray = NULL; int argumentCount = 1; char **argv = NULL; @@ -57,30 +59,33 @@ JNIEXPORT jint JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_nativeFFprob int programArgumentCount = (*env)->GetArrayLength(env, stringArray); argumentCount = programArgumentCount + 1; - tempArray = (jstring *) av_malloc(sizeof(jstring) * programArgumentCount); + tempArray = + (jstring *)av_malloc(sizeof(jstring) * programArgumentCount); } /* PRESERVE USAGE FORMAT * * ffprobe */ - argv = (char **)av_malloc(sizeof(char*) * (argumentCount)); + argv = (char **)av_malloc(sizeof(char *) * 
(argumentCount)); argv[0] = (char *)av_malloc(sizeof(char) * (strlen(LIB_NAME) + 1)); strcpy(argv[0], LIB_NAME); // PREPARE ARRAY ELEMENTS if (stringArray) { for (int i = 0; i < (argumentCount - 1); i++) { - tempArray[i] = (jstring) (*env)->GetObjectArrayElement(env, stringArray, i); + tempArray[i] = + (jstring)(*env)->GetObjectArrayElement(env, stringArray, i); if (tempArray[i] != NULL) { - argv[i + 1] = (char *) (*env)->GetStringUTFChars(env, tempArray[i], 0); + argv[i + 1] = + (char *)(*env)->GetStringUTFChars(env, tempArray[i], 0); } } } // REGISTER THE ID BEFORE STARTING THE SESSION - globalSessionId = (long) id; - addSession((long) id); + globalSessionId = (long)id; + addSession((long)id); resetMessagesInTransmit(globalSessionId); @@ -88,7 +93,7 @@ JNIEXPORT jint JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_nativeFFprob int returnCode = ffprobe_execute(argumentCount, argv); // ALWAYS REMOVE THE ID FROM THE MAP - removeSession((long) id); + removeSession((long)id); // CLEANUP if (tempArray) { diff --git a/android/ffmpeg-kit-android-lib/src/main/cpp/ffprobekit.h b/android/ffmpeg-kit-android-lib/src/main/cpp/ffprobekit.h index c01c210..393a69d 100644 --- a/android/ffmpeg-kit-android-lib/src/main/cpp/ffprobekit.h +++ b/android/ffmpeg-kit-android-lib/src/main/cpp/ffprobekit.h @@ -27,6 +27,9 @@ * Method: nativeFFprobeExecute * Signature: (J[Ljava/lang/String;)I */ -JNIEXPORT jint JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_nativeFFprobeExecute(JNIEnv *, jclass, jlong, jobjectArray); +JNIEXPORT jint JNICALL +Java_com_arthenica_ffmpegkit_FFmpegKitConfig_nativeFFprobeExecute(JNIEnv *, + jclass, jlong, + jobjectArray); #endif /* FFPROBE_KIT_H */ \ No newline at end of file diff --git a/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_cmdutils.c b/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_cmdutils.c index 168abab..3c831c0 100644 --- a/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_cmdutils.c +++ b/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_cmdutils.c @@ -2,7 +2,7 @@ * Various utilities for command line tools * Copyright (c) 2000-2003 Fabrice Bellard * Copyright (c) 2018-2022 Taner Sener - * Copyright (c) 2023 ARTHENICA LTD + * Copyright (c) 2023-2024 ARTHENICA LTD * * This file is part of FFmpeg. * @@ -22,12 +22,18 @@ */ /* - * This file is the modified version of cmdutils.c file living in ffmpeg source code under the fftools folder. We - * manually update it each time we depend on a new ffmpeg version. Below you can see the list of changes applied - * by us to develop mobile-ffmpeg and later ffmpeg-kit libraries. + * This file is the modified version of cmdutils.c file living in ffmpeg source + * code under the fftools folder. We manually update it each time we depend on a + * new ffmpeg version. Below you can see the list of changes applied by us to + * develop mobile-ffmpeg and later ffmpeg-kit libraries. 
* * ffmpeg-kit changes by ARTHENICA LTD * + * 11.2024 + * -------------------------------------------------------- + * - FFmpeg 6.1 changes migrated + * - longjmp_value dropped + * * 07.2023 * -------------------------------------------------------- * - FFmpeg 6.0 changes migrated @@ -36,18 +42,21 @@ * * 09.2022 * -------------------------------------------------------- - * - Dropped the prototypes of init_report, ffmpegkit_log_callback_function and report_callback functions + * - Dropped the prototypes of init_report, ffmpegkit_log_callback_function and + * report_callback functions * - volatile dropped from thread local variables * * 01.2020 * -------------------------------------------------------- - * - ffprobe support added (variables used by ffprobe marked with "__thread" specifier, logs with AV_LOG_INFO level - * migrated to use AV_LOG_STDERR) - * - (optindex < argc) validation in parse_options() method updated with (optindex >= argc) check + * - ffprobe support added (variables used by ffprobe marked with "__thread" + * specifier, logs with AV_LOG_INFO level migrated to use AV_LOG_STDERR) + * - (optindex < argc) validation in parse_options() method updated with + * (optindex >= argc) check * * 12.2019 * -------------------------------------------------------- - * - concurrent execution support ("__thread" specifier added to variables used by multiple threads) + * - concurrent execution support ("__thread" specifier added to variables used + * by multiple threads) * - log_callback_report method re-added to fix -report option issues * * 08.2018 @@ -57,49 +66,51 @@ * 07.2018 * -------------------------------------------------------- * - unused headers removed - * - parentheses placed around assignments in conditions to prevent -Wparentheses warning + * - parentheses placed around assignments in conditions to prevent + * -Wparentheses warning * - exit_program updated with longjmp, disabling exit * - longjmp_value added to store exit code - * - (optindex < argc) validation added before accessing argv[optindex] inside split_commandline() - * and parse_options() - * - all av_log_set_callback invocations updated to set ffmpegkit_log_callback_function from ffmpegkit.c + * - (optindex < argc) validation added before accessing argv[optindex] inside + * split_commandline() and parse_options() + * - all av_log_set_callback invocations updated to set + * ffmpegkit_log_callback_function from ffmpegkit.c * - unused log_callback_help method removed * - (idx + 1 < argc) validation added in parse_loglevel() */ -#include -#include -#include #include #include +#include +#include +#include /* Include only the enabled headers since some compilers (namely, Sun Studio) will not omit unused inline functions and create undefined references to libraries that are not being built. 
*/ #include "config.h" +#include "ffmpegkit_exception.h" +#include "fftools_cmdutils.h" +#include "fftools_fopen_utf8.h" +#include "fftools_opt_common.h" #include "libavformat/avformat.h" -#include "libswscale/swscale.h" -#include "libswresample/swresample.h" #include "libavutil/avassert.h" #include "libavutil/avstring.h" #include "libavutil/channel_layout.h" +#include "libavutil/dict.h" #include "libavutil/display.h" +#include "libavutil/eval.h" #include "libavutil/getenv_utf8.h" -#include "libavutil/mathematics.h" #include "libavutil/imgutils.h" #include "libavutil/libm.h" -#include "libavutil/parseutils.h" -#include "libavutil/eval.h" -#include "libavutil/dict.h" +#include "libavutil/mathematics.h" #include "libavutil/opt.h" -#include "fftools_cmdutils.h" -#include "fftools_fopen_utf8.h" -#include "fftools_opt_common.h" -#include "ffmpegkit_exception.h" +#include "libavutil/parseutils.h" +#include "libswresample/swresample.h" +#include "libswscale/swscale.h" #ifdef _WIN32 -#include #include "compat/w32dlfcn.h" +#include #endif __thread char *program_name; @@ -110,53 +121,32 @@ __thread AVDictionary *swr_opts; __thread AVDictionary *format_opts, *codec_opts; __thread int hide_banner = 0; -__thread int longjmp_value = 0; -void uninit_opts(void) -{ +void uninit_opts(void) { av_dict_free(&swr_opts); av_dict_free(&sws_dict); av_dict_free(&format_opts); av_dict_free(&codec_opts); } -void init_dynload(void) -{ +void init_dynload(void) { #if HAVE_SETDLLDIRECTORY && defined(_WIN32) /* Calling SetDllDirectory with the empty string (but not NULL) removes the - * current working directory from the DLL search path as a security pre-caution. */ + * current working directory from the DLL search path as a security + * pre-caution. */ SetDllDirectory(""); #endif } -static __thread void (*program_exit)(int ret); - -void register_exit(void (*cb)(int ret)) -{ - program_exit = cb; -} - -void report_and_exit(int ret) -{ - av_log(NULL, AV_LOG_FATAL, "%s\n", av_err2str(ret)); - exit_program(AVUNERROR(ret)); -} - -void exit_program(int ret) -{ - if (program_exit) - program_exit(ret); - +void exit_program(int ret) { // FFmpegKit - // exit disabled and replaced with longjmp, exit value stored in longjmp_value + // exit disabled and replaced with longjmp, exit value returned by ret // exit(ret); - longjmp_value = ret; longjmp(ex_buf__, ret); } -double parse_number_or_die(const char *context, const char *numstr, int type, - double min, double max) -{ +int parse_number(const char *context, const char *numstr, int type, double min, + double max, double *dst) { char *tail; const char *error; double d = av_strtod(numstr, &tail); @@ -168,28 +158,17 @@ double parse_number_or_die(const char *context, const char *numstr, int type, error = "Expected int64 for %s but found %s\n"; else if (type == OPT_INT && (int)d != d) error = "Expected int for %s but found %s\n"; - else - return d; - av_log(NULL, AV_LOG_FATAL, error, context, numstr, min, max); - exit_program(1); - return 0; -} - -int64_t parse_time_or_die(const char *context, const char *timestr, - int is_duration) -{ - int64_t us; - if (av_parse_time(&us, timestr, is_duration) < 0) { - av_log(NULL, AV_LOG_FATAL, "Invalid %s specification for %s: %s\n", - is_duration ? 
"duration" : "date", context, timestr); - exit_program(1); + else { + *dst = d; + return 0; } - return us; + + av_log(NULL, AV_LOG_FATAL, error, context, numstr, min, max); + return AVERROR(EINVAL); } void show_help_options(const OptionDef *options, const char *msg, int req_flags, - int rej_flags, int alt_flags) -{ + int rej_flags, int alt_flags) { const OptionDef *po; int first; @@ -198,8 +177,7 @@ void show_help_options(const OptionDef *options, const char *msg, int req_flags, char buf[128]; if (((po->flags & req_flags) != req_flags) || - (alt_flags && !(po->flags & alt_flags)) || - (po->flags & rej_flags)) + (alt_flags && !(po->flags & alt_flags)) || (po->flags & rej_flags)) continue; if (first) { @@ -216,8 +194,7 @@ void show_help_options(const OptionDef *options, const char *msg, int req_flags, av_log(NULL, AV_LOG_STDERR, "\n"); } -void show_help_children(const AVClass *class, int flags) -{ +void show_help_children(const AVClass *class, int flags) { void *iter = NULL; const AVClass *child; if (class->option) { @@ -229,8 +206,7 @@ void show_help_children(const AVClass *class, int flags) show_help_children(child, flags); } -static const OptionDef *find_option(const OptionDef *po, const char *name) -{ +static const OptionDef *find_option(const OptionDef *po, const char *name) { while (po->name) { const char *end; if (av_strstart(name, po->name, &end) && (!*end || *end == ':')) @@ -246,7 +222,7 @@ static const OptionDef *find_option(const OptionDef *po, const char *name) #if HAVE_COMMANDLINETOARGVW && defined(_WIN32) #include /* Will be leaked on exit */ -static char** win32_argv_utf8 = NULL; +static char **win32_argv_utf8 = NULL; static int win32_argc = 0; /** @@ -256,8 +232,7 @@ static int win32_argc = 0; * @param argc_ptr Arguments number (including executable) * @param argv_ptr Arguments list. 
*/ -static void prepare_app_arguments(int *argc_ptr, char ***argv_ptr) -{ +static void prepare_app_arguments(int *argc_ptr, char ***argv_ptr) { char *argstr_flat; wchar_t **argv_w; int i, buffsize = 0, offset = 0; @@ -275,11 +250,11 @@ static void prepare_app_arguments(int *argc_ptr, char ***argv_ptr) /* determine the UTF-8 buffer size (including NULL-termination symbols) */ for (i = 0; i < win32_argc; i++) - buffsize += WideCharToMultiByte(CP_UTF8, 0, argv_w[i], -1, - NULL, 0, NULL, NULL); + buffsize += + WideCharToMultiByte(CP_UTF8, 0, argv_w[i], -1, NULL, 0, NULL, NULL); win32_argv_utf8 = av_mallocz(sizeof(char *) * (win32_argc + 1) + buffsize); - argstr_flat = (char *)win32_argv_utf8 + sizeof(char *) * (win32_argc + 1); + argstr_flat = (char *)win32_argv_utf8 + sizeof(char *) * (win32_argc + 1); if (!win32_argv_utf8) { LocalFree(argv_w); return; @@ -287,9 +262,9 @@ static void prepare_app_arguments(int *argc_ptr, char ***argv_ptr) for (i = 0; i < win32_argc; i++) { win32_argv_utf8[i] = &argstr_flat[offset]; - offset += WideCharToMultiByte(CP_UTF8, 0, argv_w[i], -1, - &argstr_flat[offset], - buffsize - offset, NULL, NULL); + offset += + WideCharToMultiByte(CP_UTF8, 0, argv_w[i], -1, &argstr_flat[offset], + buffsize - offset, NULL, NULL); } win32_argv_utf8[i] = NULL; LocalFree(argv_w); @@ -298,20 +273,21 @@ static void prepare_app_arguments(int *argc_ptr, char ***argv_ptr) *argv_ptr = win32_argv_utf8; } #else -static inline void prepare_app_arguments(int *argc_ptr, char ***argv_ptr) -{ +static inline void prepare_app_arguments(int *argc_ptr, char ***argv_ptr) { /* nothing to do */ } #endif /* HAVE_COMMANDLINETOARGVW */ static int write_option(void *optctx, const OptionDef *po, const char *opt, - const char *arg) -{ + const char *arg) { /* new-style options contain an offset into optctx, old-style address of * a global var*/ - void *dst = po->flags & (OPT_OFFSET | OPT_SPEC) ? - (uint8_t *)optctx + po->u.off : po->u.dst_ptr; + void *dst = po->flags & (OPT_OFFSET | OPT_SPEC) + ? (uint8_t *)optctx + po->u.off + : po->u.dst_ptr; int *dstcount; + double num; + int ret; if (po->flags & OPT_SPEC) { SpecifierOpt **so = dst; @@ -319,7 +295,10 @@ static int write_option(void *optctx, const OptionDef *po, const char *opt, char *str; dstcount = (int *)(so + 1); - *so = grow_array(*so, sizeof(**so), dstcount, *dstcount + 1); + ret = grow_array((void **)so, sizeof(**so), dstcount, *dstcount + 1); + if (ret < 0) + return ret; + str = av_strdup(p ? 
p + 1 : ""); if (!str) return AVERROR(ENOMEM); @@ -335,36 +314,57 @@ static int write_option(void *optctx, const OptionDef *po, const char *opt, return AVERROR(ENOMEM); *(char **)dst = str; } else if (po->flags & OPT_BOOL || po->flags & OPT_INT) { - *(int *)dst = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX); + ret = parse_number(opt, arg, OPT_INT64, INT_MIN, INT_MAX, &num); + if (ret < 0) + return ret; + + *(int *)dst = num; } else if (po->flags & OPT_INT64) { - *(int64_t *)dst = parse_number_or_die(opt, arg, OPT_INT64, INT64_MIN, (double)INT64_MAX); + ret = parse_number(opt, arg, OPT_INT64, INT64_MIN, (double)INT64_MAX, + &num); + if (ret < 0) + return ret; + + *(int64_t *)dst = num; } else if (po->flags & OPT_TIME) { - *(int64_t *)dst = parse_time_or_die(opt, arg, 1); + ret = av_parse_time(dst, arg, 1); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Invalid duration for option %s: %s\n", + opt, arg); + return ret; + } } else if (po->flags & OPT_FLOAT) { - *(float *)dst = parse_number_or_die(opt, arg, OPT_FLOAT, -INFINITY, INFINITY); + ret = parse_number(opt, arg, OPT_FLOAT, -INFINITY, INFINITY, &num); + if (ret < 0) + return ret; + + *(float *)dst = num; } else if (po->flags & OPT_DOUBLE) { - *(double *)dst = parse_number_or_die(opt, arg, OPT_DOUBLE, -INFINITY, INFINITY); + ret = parse_number(opt, arg, OPT_DOUBLE, -INFINITY, INFINITY, &num); + if (ret < 0) + return ret; + + *(double *)dst = num; } else if (po->u.func_arg) { int ret = po->u.func_arg(optctx, opt, arg); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, - "Failed to set value '%s' for option '%s': %s\n", - arg, opt, av_err2str(ret)); + "Failed to set value '%s' for option '%s': %s\n", arg, opt, + av_err2str(ret)); return ret; } } if (po->flags & OPT_EXIT) - exit_program(0); + return AVERROR_EXIT; return 0; } int parse_option(void *optctx, const char *opt, const char *arg, - const OptionDef *options) -{ + const OptionDef *options) { static const OptionDef opt_avoptions = { - .name = "AVOption passthrough", - .flags = HAS_ARG, + .name = "AVOption passthrough", + .flags = HAS_ARG, .u.func_arg = opt_default, }; @@ -398,9 +398,8 @@ int parse_option(void *optctx, const char *opt, const char *arg, return !!(po->flags & HAS_ARG); } -void parse_options(void *optctx, int argc, char **argv, const OptionDef *options, - void (*parse_arg_function)(void *, const char*)) -{ +int parse_options(void *optctx, int argc, char **argv, const OptionDef *options, + int (*parse_arg_function)(void *, const char *)) { const char *opt; int optindex, handleoptions = 1, ret; @@ -420,21 +419,26 @@ void parse_options(void *optctx, int argc, char **argv, const OptionDef *options opt++; if (optindex >= argc) { if ((ret = parse_option(optctx, opt, NULL, options)) < 0) - exit_program(1); + return ret; } else { - if ((ret = parse_option(optctx, opt, argv[optindex], options)) < 0) - exit_program(1); + if ((ret = parse_option(optctx, opt, argv[optindex], options)) < + 0) + return ret; } optindex += ret; } else { - if (parse_arg_function) - parse_arg_function(optctx, opt); + if (parse_arg_function) { + ret = parse_arg_function(optctx, opt); + if (ret < 0) + return ret; + } } } + + return 0; } -int parse_optgroup(void *optctx, OptionGroup *g) -{ +int parse_optgroup(void *optctx, OptionGroup *g) { int i, ret; av_log(NULL, AV_LOG_DEBUG, "Parsing a group of options: %s %s.\n", @@ -443,18 +447,19 @@ int parse_optgroup(void *optctx, OptionGroup *g) for (i = 0; i < g->nb_opts; i++) { Option *o = &g->opts[i]; - if (g->group_def->flags && - !(g->group_def->flags & 
o->opt->flags)) { - av_log(NULL, AV_LOG_ERROR, "Option %s (%s) cannot be applied to " + if (g->group_def->flags && !(g->group_def->flags & o->opt->flags)) { + av_log(NULL, AV_LOG_ERROR, + "Option %s (%s) cannot be applied to " "%s %s -- you are trying to apply an input option to an " "output file or vice versa. Move this option before the " - "file it belongs to.\n", o->key, o->opt->help, - g->group_def->name, g->arg); + "file it belongs to.\n", + o->key, o->opt->help, g->group_def->name, g->arg); return AVERROR(EINVAL); } - av_log(NULL, AV_LOG_DEBUG, "Applying option %s (%s) with argument %s.\n", - o->key, o->opt->help, o->val); + av_log(NULL, AV_LOG_DEBUG, + "Applying option %s (%s) with argument %s.\n", o->key, + o->opt->help, o->val); ret = write_option(optctx, o->opt, o->key, o->val); if (ret < 0) @@ -467,8 +472,7 @@ int parse_optgroup(void *optctx, OptionGroup *g) } int locate_option(int argc, char **argv, const OptionDef *options, - const char *optname) -{ + const char *optname) { const OptionDef *po; int i; @@ -483,7 +487,7 @@ int locate_option(int argc, char **argv, const OptionDef *options, po = find_option(options, cur_opt + 2); if ((!po->name && !strcmp(cur_opt, optname)) || - (po->name && !strcmp(optname, po->name))) + (po->name && !strcmp(optname, po->name))) return i; if (!po->name || po->flags & HAS_ARG) @@ -492,8 +496,7 @@ int locate_option(int argc, char **argv, const OptionDef *options, return 0; } -static void dump_argument(FILE *report_file, const char *a) -{ +static void dump_argument(FILE *report_file, const char *a) { const unsigned char *p; for (p = a; *p; p++) @@ -516,8 +519,7 @@ static void dump_argument(FILE *report_file, const char *a) fputc('"', report_file); } -static void check_options(const OptionDef *po) -{ +static void check_options(const OptionDef *po) { while (po->name) { if (po->flags & OPT_PERFILE) av_assert0(po->flags & (OPT_INPUT | OPT_OUTPUT)); @@ -525,8 +527,7 @@ static void check_options(const OptionDef *po) } } -void parse_loglevel(int argc, char **argv, const OptionDef *options) -{ +void parse_loglevel(int argc, char **argv, const OptionDef *options) { int idx = locate_option(argc, argv, options, "loglevel"); char *env; @@ -558,17 +559,18 @@ void parse_loglevel(int argc, char **argv, const OptionDef *options) } static const AVOption *opt_find(void *obj, const char *name, const char *unit, - int opt_flags, int search_flags) -{ + int opt_flags, int search_flags) { const AVOption *o = av_opt_find(obj, name, unit, opt_flags, search_flags); - if(o && !o->flags) + if (o && !o->flags) return NULL; return o; } -#define FLAGS (o->type == AV_OPT_TYPE_FLAGS && (arg[0]=='-' || arg[0]=='+')) ? AV_DICT_APPEND : 0 -int opt_default(void *optctx, const char *opt, const char *arg) -{ +#define FLAGS \ + (o->type == AV_OPT_TYPE_FLAGS && (arg[0] == '-' || arg[0] == '+')) \ + ? 
AV_DICT_APPEND \ + : 0 +int opt_default(void *optctx, const char *opt, const char *arg) { const AVOption *o; int consumed = 0; char opt_stripped[128]; @@ -589,26 +591,30 @@ int opt_default(void *optctx, const char *opt, const char *arg) av_strlcpy(opt_stripped, opt, FFMIN(sizeof(opt_stripped), p - opt + 1)); if ((o = opt_find(&cc, opt_stripped, NULL, 0, - AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) || + AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) || ((opt[0] == 'v' || opt[0] == 'a' || opt[0] == 's') && (o = opt_find(&cc, opt + 1, NULL, 0, AV_OPT_SEARCH_FAKE_OBJ)))) { av_dict_set(&codec_opts, opt, arg, FLAGS); consumed = 1; } if ((o = opt_find(&fc, opt, NULL, 0, - AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) { + AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) { av_dict_set(&format_opts, opt, arg, FLAGS); if (consumed) - av_log(NULL, AV_LOG_VERBOSE, "Routing option %s to both codec and muxer layer\n", opt); + av_log(NULL, AV_LOG_VERBOSE, + "Routing option %s to both codec and muxer layer\n", opt); consumed = 1; } #if CONFIG_SWSCALE - if (!consumed && (o = opt_find(&sc, opt, NULL, 0, - AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) { + if (!consumed && + (o = opt_find(&sc, opt, NULL, 0, + AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) { if (!strcmp(opt, "srcw") || !strcmp(opt, "srch") || !strcmp(opt, "dstw") || !strcmp(opt, "dsth") || !strcmp(opt, "src_format") || !strcmp(opt, "dst_format")) { - av_log(NULL, AV_LOG_ERROR, "Directly using swscale dimensions/format options is not supported, please use the -s or -pix_fmt options\n"); + av_log(NULL, AV_LOG_ERROR, + "Directly using swscale dimensions/format options is not " + "supported, please use the -s or -pix_fmt options\n"); return AVERROR(EINVAL); } av_dict_set(&sws_dict, opt, arg, FLAGS); @@ -617,13 +623,15 @@ int opt_default(void *optctx, const char *opt, const char *arg) } #else if (!consumed && !strcmp(opt, "sws_flags")) { - av_log(NULL, AV_LOG_WARNING, "Ignoring %s %s, due to disabled swscale\n", opt, arg); + av_log(NULL, AV_LOG_WARNING, + "Ignoring %s %s, due to disabled swscale\n", opt, arg); consumed = 1; } #endif #if CONFIG_SWRESAMPLE - if (!consumed && (o=opt_find(&swr_class, opt, NULL, 0, - AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) { + if (!consumed && + (o = opt_find(&swr_class, opt, NULL, 0, + AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) { av_dict_set(&swr_opts, opt, arg, FLAGS); consumed = 1; } @@ -640,8 +648,7 @@ int opt_default(void *optctx, const char *opt, const char *arg) * @return index of the group definition that matched or -1 if none */ static int match_group_separator(const OptionGroupDef *groups, int nb_groups, - const char *opt) -{ + const char *opt) { int i; for (i = 0; i < nb_groups; i++) { @@ -659,68 +666,78 @@ static int match_group_separator(const OptionGroupDef *groups, int nb_groups, * @param group_idx which group definition should this group belong to * @param arg argument of the group delimiting option */ -static void finish_group(OptionParseContext *octx, int group_idx, - const char *arg) -{ +static int finish_group(OptionParseContext *octx, int group_idx, + const char *arg) { OptionGroupList *l = &octx->groups[group_idx]; OptionGroup *g; + int ret; + + ret = GROW_ARRAY(l->groups, l->nb_groups); + if (ret < 0) + return ret; - GROW_ARRAY(l->groups, l->nb_groups); g = &l->groups[l->nb_groups - 1]; - *g = octx->cur_group; - g->arg = arg; - g->group_def = l->group_def; - g->sws_dict = sws_dict; - g->swr_opts = swr_opts; - g->codec_opts = codec_opts; + *g = 
octx->cur_group; + g->arg = arg; + g->group_def = l->group_def; + g->sws_dict = sws_dict; + g->swr_opts = swr_opts; + g->codec_opts = codec_opts; g->format_opts = format_opts; - codec_opts = NULL; + codec_opts = NULL; format_opts = NULL; - sws_dict = NULL; - swr_opts = NULL; + sws_dict = NULL; + swr_opts = NULL; memset(&octx->cur_group, 0, sizeof(octx->cur_group)); + + return ret; } /* * Add an option instance to currently parsed group. */ -static void add_opt(OptionParseContext *octx, const OptionDef *opt, - const char *key, const char *val) -{ +static int add_opt(OptionParseContext *octx, const OptionDef *opt, + const char *key, const char *val) { int global = !(opt->flags & (OPT_PERFILE | OPT_SPEC | OPT_OFFSET)); OptionGroup *g = global ? &octx->global_opts : &octx->cur_group; + int ret; + + ret = GROW_ARRAY(g->opts, g->nb_opts); + if (ret < 0) + return ret; - GROW_ARRAY(g->opts, g->nb_opts); g->opts[g->nb_opts - 1].opt = opt; g->opts[g->nb_opts - 1].key = key; g->opts[g->nb_opts - 1].val = val; + + return 0; } -static void init_parse_context(OptionParseContext *octx, - const OptionGroupDef *groups, int nb_groups) -{ - static const OptionGroupDef global_group = { "global" }; +static int init_parse_context(OptionParseContext *octx, + const OptionGroupDef *groups, int nb_groups) { + static const OptionGroupDef global_group = {"global"}; int i; memset(octx, 0, sizeof(*octx)); octx->nb_groups = nb_groups; - octx->groups = av_calloc(octx->nb_groups, sizeof(*octx->groups)); + octx->groups = av_calloc(octx->nb_groups, sizeof(*octx->groups)); if (!octx->groups) - report_and_exit(AVERROR(ENOMEM)); + return AVERROR(ENOMEM); for (i = 0; i < octx->nb_groups; i++) octx->groups[i].group_def = &groups[i]; octx->global_opts.group_def = &global_group; - octx->global_opts.arg = ""; + octx->global_opts.arg = ""; + + return 0; } -void uninit_parse_context(OptionParseContext *octx) -{ +void uninit_parse_context(OptionParseContext *octx) { int i, j; for (i = 0; i < octx->nb_groups; i++) { @@ -745,16 +762,19 @@ void uninit_parse_context(OptionParseContext *octx) } int split_commandline(OptionParseContext *octx, int argc, char *argv[], - const OptionDef *options, - const OptionGroupDef *groups, int nb_groups) -{ + const OptionDef *options, const OptionGroupDef *groups, + int nb_groups) { + int ret; int optindex = 1; int dashdash = -2; /* perform system-dependent conversions for arguments list */ prepare_app_arguments(&argc, &argv); - init_parse_context(octx, groups, nb_groups); + ret = init_parse_context(octx, groups, nb_groups); + if (ret < 0) + return ret; + av_log(NULL, AV_LOG_DEBUG, "Splitting the commandline.\n"); while (optindex < argc) { @@ -769,27 +789,37 @@ int split_commandline(OptionParseContext *octx, int argc, char *argv[], continue; } /* unnamed group separators, e.g. 
output filename */ - if (opt[0] != '-' || !opt[1] || dashdash+1 == optindex) { - finish_group(octx, 0, opt); + if (opt[0] != '-' || !opt[1] || dashdash + 1 == optindex) { + ret = finish_group(octx, 0, opt); + if (ret < 0) + return ret; + av_log(NULL, AV_LOG_DEBUG, " matched as %s.\n", groups[0].name); continue; } opt++; #define GET_ARG(arg) \ -do { \ - if (optindex < argc) { \ - arg = argv[optindex++]; \ - } else { \ - av_log(NULL, AV_LOG_ERROR, "Missing argument for option '%s'.\n", opt);\ - return AVERROR(EINVAL); \ - } \ -} while (0) + do { \ + if (optindex < argc) { \ + arg = argv[optindex++]; \ + } else { \ + arg = NULL; \ + } \ + if (!arg) { \ + av_log(NULL, AV_LOG_ERROR, "Missing argument for option '%s'.\n", \ + opt); \ + return AVERROR(EINVAL); \ + } \ + } while (0) /* named group separators, e.g. -i */ if ((ret = match_group_separator(groups, nb_groups, opt)) >= 0) { GET_ARG(arg); - finish_group(octx, ret, arg); + ret = finish_group(octx, ret, arg); + if (ret < 0) + return ret; + av_log(NULL, AV_LOG_DEBUG, " matched as %s with argument '%s'.\n", groups[ret].name, arg); continue; @@ -811,9 +841,14 @@ do { \ arg = "1"; } - add_opt(octx, po, opt, arg); - av_log(NULL, AV_LOG_DEBUG, " matched as option '%s' (%s) with " - "argument '%s'.\n", po->name, po->help, arg); + ret = add_opt(octx, po, opt, arg); + if (ret < 0) + return ret; + + av_log(NULL, AV_LOG_DEBUG, + " matched as option '%s' (%s) with " + "argument '%s'.\n", + po->name, po->help, arg); continue; } @@ -821,24 +856,33 @@ do { \ if ((optindex < argc) && argv[optindex]) { ret = opt_default(NULL, opt, argv[optindex]); if (ret >= 0) { - av_log(NULL, AV_LOG_DEBUG, " matched as AVOption '%s' with " - "argument '%s'.\n", opt, argv[optindex]); + av_log(NULL, AV_LOG_DEBUG, + " matched as AVOption '%s' with " + "argument '%s'.\n", + opt, argv[optindex]); optindex++; continue; } else if (ret != AVERROR_OPTION_NOT_FOUND) { - av_log(NULL, AV_LOG_ERROR, "Error parsing option '%s' " - "with argument '%s'.\n", opt, argv[optindex]); + av_log(NULL, AV_LOG_ERROR, + "Error parsing option '%s' " + "with argument '%s'.\n", + opt, argv[optindex]); return ret; } } /* boolean -nofoo options */ if (opt[0] == 'n' && opt[1] == 'o' && - (po = find_option(options, opt + 2)) && - po->name && po->flags & OPT_BOOL) { - add_opt(octx, po, opt, "0"); - av_log(NULL, AV_LOG_DEBUG, " matched as option '%s' (%s) with " - "argument 0.\n", po->name, po->help); + (po = find_option(options, opt + 2)) && po->name && + po->flags & OPT_BOOL) { + ret = add_opt(octx, po, opt, "0"); + if (ret < 0) + return ret; + + av_log(NULL, AV_LOG_DEBUG, + " matched as option '%s' (%s) with " + "argument 0.\n", + po->name, po->help); continue; } @@ -847,7 +891,8 @@ do { \ } if (octx->cur_group.nb_opts || codec_opts || format_opts) - av_log(NULL, AV_LOG_WARNING, "Trailing option(s) found in the " + av_log(NULL, AV_LOG_WARNING, + "Trailing option(s) found in the " "command: may be ignored.\n"); av_log(NULL, AV_LOG_DEBUG, "Finished splitting the commandline.\n"); @@ -855,13 +900,11 @@ do { \ return 0; } -void print_error(const char *filename, int err) -{ +void print_error(const char *filename, int err) { av_log(NULL, AV_LOG_ERROR, "%s: %s\n", filename, av_err2str(err)); } -int read_yesno(void) -{ +int read_yesno(void) { int c = getchar(); int yesno = (av_toupper(c) == 'Y'); @@ -873,8 +916,7 @@ int read_yesno(void) FILE *get_preset_file(char *filename, size_t filename_size, const char *preset_name, int is_path, - const char *codec_name) -{ + const char *codec_name) { FILE *f = NULL; int i; 
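The rewritten helpers above (finish_group(), add_opt(), init_parse_context()) establish the pattern the rest of this patch follows: array growth and allocation now report failure to the caller instead of exiting. A minimal sketch of that call shape, lifted from finish_group() above, with illustrative variable names:

    OptionGroupList *l = &octx->groups[group_idx];
    OptionGroup *g;
    int ret;

    /* grow the group array; propagate AVERROR(ENOMEM)/AVERROR(ERANGE)
       to the caller instead of terminating the process */
    ret = GROW_ARRAY(l->groups, l->nb_groups);
    if (ret < 0)
        return ret;

    g = &l->groups[l->nb_groups - 1];
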
#if HAVE_GETMODULEHANDLE && defined(_WIN32) @@ -882,9 +924,11 @@ FILE *get_preset_file(char *filename, size_t filename_size, #endif char *env_home = getenv_utf8("HOME"); char *env_ffmpeg_datadir = getenv_utf8("FFMPEG_DATADIR"); - const char *base[3] = { env_ffmpeg_datadir, - env_home, /* index=1(HOME) is special: search in a .ffmpeg subfolder */ - FFMPEG_DATADIR, }; + const char *base[3] = { + env_ffmpeg_datadir, + env_home, /* index=1(HOME) is special: search in a .ffmpeg subfolder */ + FFMPEG_DATADIR, + }; if (is_path) { av_strlcpy(filename, preset_name, filename_size); @@ -898,22 +942,22 @@ FILE *get_preset_file(char *filename, size_t filename_size, datadir = NULL; av_free(datadir_w); - if (datadir) - { + if (datadir) { char *ls; for (ls = datadir; *ls; ls++) - if (*ls == '\\') *ls = '/'; + if (*ls == '\\') + *ls = '/'; - if (ls = strrchr(datadir, '/')) - { + if (ls = strrchr(datadir, '/')) { ptrdiff_t datadir_len = ls - datadir; size_t desired_size = datadir_len + strlen("/ffpresets") + 1; - char *new_datadir = av_realloc_array( - datadir, desired_size, sizeof *datadir); + char *new_datadir = + av_realloc_array(datadir, desired_size, sizeof *datadir); if (new_datadir) { datadir = new_datadir; datadir[datadir_len] = 0; - strncat(datadir, "/ffpresets", desired_size - 1 - datadir_len); + strncat(datadir, "/ffpresets", + desired_size - 1 - datadir_len); base[2] = datadir; } } @@ -926,8 +970,7 @@ FILE *get_preset_file(char *filename, size_t filename_size, i != 1 ? "" : "/.ffmpeg", preset_name); f = fopen_utf8(filename, "r"); if (!f && codec_name) { - snprintf(filename, filename_size, - "%s%s/%s-%s.ffpreset", + snprintf(filename, filename_size, "%s%s/%s-%s.ffpreset", base[i], i != 1 ? "" : "/.ffmpeg", codec_name, preset_name); f = fopen_utf8(filename, "r"); @@ -943,40 +986,39 @@ FILE *get_preset_file(char *filename, size_t filename_size, return f; } -int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec) -{ +int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec) { int ret = avformat_match_stream_specifier(s, st, spec); if (ret < 0) av_log(s, AV_LOG_ERROR, "Invalid stream specifier: %s.\n", spec); return ret; } -AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id, - AVFormatContext *s, AVStream *st, const AVCodec *codec) -{ - AVDictionary *ret = NULL; +int filter_codec_opts(const AVDictionary *opts, enum AVCodecID codec_id, + AVFormatContext *s, AVStream *st, const AVCodec *codec, + AVDictionary **dst) { + AVDictionary *ret = NULL; const AVDictionaryEntry *t = NULL; - int flags = s->oformat ? AV_OPT_FLAG_ENCODING_PARAM - : AV_OPT_FLAG_DECODING_PARAM; - char prefix = 0; - const AVClass *cc = avcodec_get_class(); + int flags = + s->oformat ? AV_OPT_FLAG_ENCODING_PARAM : AV_OPT_FLAG_DECODING_PARAM; + char prefix = 0; + const AVClass *cc = avcodec_get_class(); if (!codec) - codec = s->oformat ? avcodec_find_encoder(codec_id) - : avcodec_find_decoder(codec_id); + codec = s->oformat ? 
avcodec_find_encoder(codec_id) + : avcodec_find_decoder(codec_id); switch (st->codecpar->codec_type) { case AVMEDIA_TYPE_VIDEO: - prefix = 'v'; - flags |= AV_OPT_FLAG_VIDEO_PARAM; + prefix = 'v'; + flags |= AV_OPT_FLAG_VIDEO_PARAM; break; case AVMEDIA_TYPE_AUDIO: - prefix = 'a'; - flags |= AV_OPT_FLAG_AUDIO_PARAM; + prefix = 'a'; + flags |= AV_OPT_FLAG_AUDIO_PARAM; break; case AVMEDIA_TYPE_SUBTITLE: - prefix = 's'; - flags |= AV_OPT_FLAG_SUBTITLE_PARAM; + prefix = 's'; + flags |= AV_OPT_FLAG_SUBTITLE_PARAM; break; } @@ -985,12 +1027,16 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id, char *p = strchr(t->key, ':'); /* check stream specification in opt name */ - if (p) - switch (check_stream_specifier(s, st, p + 1)) { - case 1: *p = 0; break; - case 0: continue; - default: exit_program(1); - } + if (p) { + int err = check_stream_specifier(s, st, p + 1); + if (err < 0) { + av_dict_free(&ret); + return err; + } else if (!err) + continue; + + *p = 0; + } if (av_opt_find(&cc, t->key, NULL, flags, AV_OPT_SEARCH_FAKE_OBJ) || !codec || @@ -1006,66 +1052,80 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id, if (p) *p = ':'; } - return ret; + + *dst = ret; + return 0; } -AVDictionary **setup_find_stream_info_opts(AVFormatContext *s, - AVDictionary *codec_opts) -{ - int i; +int setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts, + AVDictionary ***dst) { + int ret; AVDictionary **opts; + *dst = NULL; + if (!s->nb_streams) - return NULL; + return 0; + opts = av_calloc(s->nb_streams, sizeof(*opts)); if (!opts) - report_and_exit(AVERROR(ENOMEM)); - for (i = 0; i < s->nb_streams; i++) - opts[i] = filter_codec_opts(codec_opts, s->streams[i]->codecpar->codec_id, - s, s->streams[i], NULL); - return opts; + return AVERROR(ENOMEM); + + for (int i = 0; i < s->nb_streams; i++) { + ret = filter_codec_opts(codec_opts, s->streams[i]->codecpar->codec_id, + s, s->streams[i], NULL, &opts[i]); + if (ret < 0) + goto fail; + } + *dst = opts; + return 0; +fail: + for (int i = 0; i < s->nb_streams; i++) + av_dict_free(&opts[i]); + av_freep(&opts); + return ret; } -void *grow_array(void *array, int elem_size, int *size, int new_size) -{ +int grow_array(void **array, int elem_size, int *size, int new_size) { if (new_size >= INT_MAX / elem_size) { av_log(NULL, AV_LOG_ERROR, "Array too big.\n"); - exit_program(1); + return AVERROR(ERANGE); } if (*size < new_size) { - uint8_t *tmp = av_realloc_array(array, new_size, elem_size); + uint8_t *tmp = av_realloc_array(*array, new_size, elem_size); if (!tmp) - report_and_exit(AVERROR(ENOMEM)); - memset(tmp + *size*elem_size, 0, (new_size-*size) * elem_size); + return AVERROR(ENOMEM); + memset(tmp + *size * elem_size, 0, (new_size - *size) * elem_size); *size = new_size; - return tmp; + *array = tmp; + return 0; } - return array; + return 0; } -void *allocate_array_elem(void *ptr, size_t elem_size, int *nb_elems) -{ +void *allocate_array_elem(void *ptr, size_t elem_size, int *nb_elems) { void *new_elem; if (!(new_elem = av_mallocz(elem_size)) || av_dynarray_add_nofree(ptr, nb_elems, new_elem) < 0) - report_and_exit(AVERROR(ENOMEM)); + return NULL; return new_elem; } -double get_rotation(int32_t *displaymatrix) -{ +double get_rotation(const int32_t *displaymatrix) { double theta = 0; if (displaymatrix) - theta = -round(av_display_rotation_get((int32_t*) displaymatrix)); + theta = -round(av_display_rotation_get((int32_t *)displaymatrix)); - theta -= 360*floor(theta/360 + 0.9/360); + theta -= 360 * floor(theta 
/ 360 + 0.9 / 360); - if (fabs(theta - 90*round(theta/90)) > 2) - av_log(NULL, AV_LOG_WARNING, "Odd rotation angle.\n" + if (fabs(theta - 90 * round(theta / 90)) > 2) + av_log(NULL, AV_LOG_WARNING, + "Odd rotation angle.\n" "If you want to help, upload a sample " "of this file to https://streams.videolan.org/upload/ " - "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)"); + "and contact the ffmpeg-devel mailing list. " + "(ffmpeg-devel@ffmpeg.org)"); return theta; } diff --git a/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_cmdutils.h b/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_cmdutils.h index b925cf0..b879222 100644 --- a/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_cmdutils.h +++ b/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_cmdutils.h @@ -2,7 +2,7 @@ * Various utilities for command line tools * copyright (c) 2003 Fabrice Bellard * copyright (c) 2018-2022 Taner Sener - * copyright (c) 2023 ARTHENICA LTD + * copyright (c) 2023-2024 ARTHENICA LTD * * This file is part of FFmpeg. * @@ -22,12 +22,17 @@ */ /* - * This file is the modified version of cmdutils.h file living in ffmpeg source code under the fftools folder. We - * manually update it each time we depend on a new ffmpeg version. Below you can see the list of changes applied - * by us to develop mobile-ffmpeg and later ffmpeg-kit libraries. + * This file is the modified version of cmdutils.h file living in ffmpeg source + * code under the fftools folder. We manually update it each time we depend on a + * new ffmpeg version. Below you can see the list of changes applied by us to + * develop mobile-ffmpeg and later ffmpeg-kit libraries. * * ffmpeg-kit changes by ARTHENICA LTD * + * 11.2024 + * -------------------------------------------------------- + * - FFmpeg 6.1 changes migrated + * * 07.2023 * -------------------------------------------------------- * - FFmpeg 6.0 changes migrated @@ -40,12 +45,14 @@ * * 01.2020 * -------------------------------------------------------- - * - ffprobe support added (variables used by ffprobe marked with "__thread" specifier) + * - ffprobe support added (variables used by ffprobe marked with "__thread" + * specifier) * - AV_LOG_STDERR log level added * * 12.2019 * -------------------------------------------------------- - * - concurrent execution support ("__thread" specifier added to variables used by multiple threads) + * - concurrent execution support ("__thread" specifier added to variables used + * by multiple threads) * * 03.2019 * -------------------------------------------------------- @@ -77,9 +84,10 @@ #endif /** - * Defines logs printed to stderr by ffmpeg. They are not filtered and always redirected. + * Defines logs printed to stderr by ffmpeg. They are not filtered and always + * redirected. */ -#define AV_LOG_STDERR -16 +#define AV_LOG_STDERR -16 /** * program name, defined by the program for show_version(). @@ -97,22 +105,6 @@ extern __thread AVDictionary *format_opts, *codec_opts; extern __thread int hide_banner; extern __thread int find_stream_info; -/** - * Register a program-specific cleanup routine. - */ -void register_exit(void (*cb)(int ret)); - -/** - * Reports an error corresponding to the provided - * AVERROR code and calls exit_program() with the - * corresponding POSIX error code. - * @note ret must be an AVERROR-value of a POSIX error code - * (i.e. AVERROR(EFOO) and not AVERROR_FOO). - * library functions can return both, so call this only - * with AVERROR(EFOO) of your own. 
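With register_exit()/report_and_exit() removed and exit_program() reduced to a longjmp into the thread-local ex_buf__ from ffmpegkit_exception.h, the embedding code observes the exit value at a setjmp() site rather than through a process exit. A minimal sketch of that shape, assuming the ffmpeg_execute() entry point from fftools_ffmpeg.c; run_ffmpeg() is an illustrative name and the real session bookkeeping done in ffmpegkit.c is omitted:

    #include <setjmp.h>
    #include "ffmpegkit_exception.h"    /* extern __thread jmp_buf ex_buf__ */

    int ffmpeg_execute(int argc, char **argv);   /* defined in fftools_ffmpeg.c */

    static int run_ffmpeg(int argc, char **argv) {
        int rc = setjmp(ex_buf__);
        if (rc == 0)
            rc = ffmpeg_execute(argc, argv);  /* exit_program() longjmps back here */
        /* note: longjmp(ex_buf__, 0) is delivered as 1 by setjmp() */
        return rc;
    }
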
- */ -void report_and_exit(int ret) av_noreturn; - /** * Wraps exit with a program-specific cleanup routine. */ @@ -129,12 +121,6 @@ void init_dynload(void); */ void uninit_opts(void); -/** - * Trivial log callback. - * Only suitable for opt_help and similar since it lacks prefix handling. - */ -void log_callback_help(void* ptr, int level, const char* fmt, va_list vl); - /** * Fallback for options that are not explicitly handled, these will be * parsed through AVOptions. @@ -148,8 +134,6 @@ int opt_timelimit(void *optctx, const char *opt, const char *arg); /** * Parse a string and return its corresponding value as a double. - * Exit from the application if the string cannot be correctly - * parsed or the corresponding value is invalid. * * @param context the context of the value to be set (e.g. the * corresponding command line option name) @@ -159,64 +143,50 @@ int opt_timelimit(void *optctx, const char *opt, const char *arg); * @param min the minimum valid accepted value * @param max the maximum valid accepted value */ -double parse_number_or_die(const char *context, const char *numstr, int type, - double min, double max); - -/** - * Parse a string specifying a time and return its corresponding - * value as a number of microseconds. Exit from the application if - * the string cannot be correctly parsed. - * - * @param context the context of the value to be set (e.g. the - * corresponding command line option name) - * @param timestr the string to be parsed - * @param is_duration a flag which tells how to interpret timestr, if - * not zero timestr is interpreted as a duration, otherwise as a - * date - * - * @see av_parse_time() - */ -int64_t parse_time_or_die(const char *context, const char *timestr, - int is_duration); +int parse_number(const char *context, const char *numstr, int type, double min, + double max, double *dst); typedef struct SpecifierOpt { - char *specifier; /**< stream/chapter/program/... specifier */ + char *specifier; /**< stream/chapter/program/... specifier */ union { uint8_t *str; - int i; - int64_t i64; + int i; + int64_t i64; uint64_t ui64; - float f; - double dbl; + float f; + double dbl; } u; } SpecifierOpt; typedef struct OptionDef { const char *name; int flags; -#define HAS_ARG 0x0001 -#define OPT_BOOL 0x0002 +#define HAS_ARG 0x0001 +#define OPT_BOOL 0x0002 #define OPT_EXPERT 0x0004 #define OPT_STRING 0x0008 -#define OPT_VIDEO 0x0010 -#define OPT_AUDIO 0x0020 -#define OPT_INT 0x0080 -#define OPT_FLOAT 0x0100 +#define OPT_VIDEO 0x0010 +#define OPT_AUDIO 0x0020 +#define OPT_INT 0x0080 +#define OPT_FLOAT 0x0100 #define OPT_SUBTITLE 0x0200 -#define OPT_INT64 0x0400 -#define OPT_EXIT 0x0800 -#define OPT_DATA 0x1000 -#define OPT_PERFILE 0x2000 /* the option is per-file (currently ffmpeg-only). - implied by OPT_OFFSET or OPT_SPEC */ -#define OPT_OFFSET 0x4000 /* option is specified as an offset in a passed optctx */ -#define OPT_SPEC 0x8000 /* option is to be stored in an array of SpecifierOpt. - Implies OPT_OFFSET. Next element after the offset is - an int containing element count in the array. */ -#define OPT_TIME 0x10000 +#define OPT_INT64 0x0400 +#define OPT_EXIT 0x0800 +#define OPT_DATA 0x1000 +#define OPT_PERFILE \ + 0x2000 /* the option is per-file (currently ffmpeg-only). \ + implied by OPT_OFFSET or OPT_SPEC */ +#define OPT_OFFSET \ + 0x4000 /* option is specified as an offset in a passed optctx */ +#define OPT_SPEC \ + 0x8000 /* option is to be stored in an array of SpecifierOpt. \ + Implies OPT_OFFSET. 
Next element after the offset is \ + an int containing element count in the array. */ +#define OPT_TIME 0x10000 #define OPT_DOUBLE 0x20000 -#define OPT_INPUT 0x40000 +#define OPT_INPUT 0x40000 #define OPT_OUTPUT 0x80000 - union { + union { void *dst_ptr; int (*func_arg)(void *, const char *, const char *); size_t off; @@ -262,13 +232,14 @@ void show_help_default_ffprobe(const char *opt, const char *arg); * argument without a leading option name flag. NULL if such arguments do * not have to be processed. */ -void parse_options(void *optctx, int argc, char **argv, const OptionDef *options, - void (* parse_arg_function)(void *optctx, const char*)); +int parse_options(void *optctx, int argc, char **argv, const OptionDef *options, + int (*parse_arg_function)(void *optctx, const char *)); /** * Parse one given option. * - * @return on success 1 if arg was consumed, 0 otherwise; negative number on error + * @return on success 1 if arg was consumed, 0 otherwise; negative number on + * error */ int parse_option(void *optctx, const char *opt, const char *arg, const OptionDef *options); @@ -279,9 +250,9 @@ int parse_option(void *optctx, const char *opt, const char *arg, * used multiple times. */ typedef struct Option { - const OptionDef *opt; - const char *key; - const char *val; + const OptionDef *opt; + const char *key; + const char *val; } Option; typedef struct OptionGroupDef { @@ -304,7 +275,7 @@ typedef struct OptionGroup { const char *arg; Option *opts; - int nb_opts; + int nb_opts; AVDictionary *codec_opts; AVDictionary *format_opts; @@ -320,14 +291,14 @@ typedef struct OptionGroupList { const OptionGroupDef *group_def; OptionGroup *groups; - int nb_groups; + int nb_groups; } OptionGroupList; typedef struct OptionParseContext { OptionGroup global_opts; OptionGroupList *groups; - int nb_groups; + int nb_groups; /* parsing state */ OptionGroup cur_group; @@ -360,8 +331,8 @@ int parse_optgroup(void *optctx, OptionGroup *g); * same as the order of group definitions. */ int split_commandline(OptionParseContext *octx, int argc, char *argv[], - const OptionDef *options, - const OptionGroupDef *groups, int nb_groups); + const OptionDef *options, const OptionGroupDef *groups, + int nb_groups); /** * Free all allocated memory in an OptionParseContext. @@ -402,10 +373,12 @@ int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec); * @param st A stream from s for which the options should be filtered. * @param codec The particular codec for which the options should be filtered. * If null, the default one is looked up according to the codec id. - * @return a pointer to the created dictionary + * @param dst a pointer to the created dictionary + * @return a non-negative number on success, a negative error code on failure */ -AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id, - AVFormatContext *s, AVStream *st, const AVCodec *codec); +int filter_codec_opts(const AVDictionary *opts, enum AVCodecID codec_id, + AVFormatContext *s, AVStream *st, const AVCodec *codec, + AVDictionary **dst); /** * Setup AVCodecContext options for avformat_find_stream_info(). @@ -414,12 +387,9 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id, * contained in s. * Each dictionary will contain the options from codec_opts which can * be applied to the corresponding stream codec context. - * - * @return pointer to the created array of dictionaries. - * Calls exit() on failure. 
*/ -AVDictionary **setup_find_stream_info_opts(AVFormatContext *s, - AVDictionary *codec_opts); +int setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts, + AVDictionary ***dst); /** * Print an error message to stderr, indicating filename and a human @@ -464,53 +434,49 @@ int read_yesno(void); * preset, may be NULL */ FILE *get_preset_file(char *filename, size_t filename_size, - const char *preset_name, int is_path, const char *codec_name); + const char *preset_name, int is_path, + const char *codec_name); /** * Realloc array to hold new_size elements of elem_size. - * Calls exit() on failure. * - * @param array array to reallocate + * @param array pointer to the array to reallocate, will be updated + * with a new pointer on success * @param elem_size size in bytes of each element * @param size new element count will be written here * @param new_size number of elements to place in reallocated array - * @return reallocated array + * @return a non-negative number on success, a negative error code on failure */ -void *grow_array(void *array, int elem_size, int *size, int new_size); +int grow_array(void **array, int elem_size, int *size, int new_size); /** * Atomically add a new element to an array of pointers, i.e. allocate * a new entry, reallocate the array of pointers and make the new last * member of this array point to the newly allocated buffer. - * Calls exit() on failure. * * @param array array of pointers to reallocate * @param elem_size size of the new element to allocate * @param nb_elems pointer to the number of elements of the array array; * *nb_elems will be incremented by one by this function. - * @return pointer to the newly allocated entry + * @return pointer to the newly allocated entry or NULL on failure */ void *allocate_array_elem(void *array, size_t elem_size, int *nb_elems); -#define GROW_ARRAY(array, nb_elems)\ - array = grow_array(array, sizeof(*array), &nb_elems, nb_elems + 1) - -#define ALLOC_ARRAY_ELEM(array, nb_elems)\ - allocate_array_elem(&array, sizeof(*array[0]), &nb_elems) +#define GROW_ARRAY(array, nb_elems) \ + grow_array((void **)&array, sizeof(*array), &nb_elems, nb_elems + 1) -#define GET_PIX_FMT_NAME(pix_fmt)\ +#define GET_PIX_FMT_NAME(pix_fmt) \ const char *name = av_get_pix_fmt_name(pix_fmt); -#define GET_CODEC_NAME(id)\ - const char *name = avcodec_descriptor_get(id)->name; +#define GET_CODEC_NAME(id) const char *name = avcodec_descriptor_get(id)->name; -#define GET_SAMPLE_FMT_NAME(sample_fmt)\ +#define GET_SAMPLE_FMT_NAME(sample_fmt) \ const char *name = av_get_sample_fmt_name(sample_fmt) -#define GET_SAMPLE_RATE_NAME(rate)\ - char name[16];\ +#define GET_SAMPLE_RATE_NAME(rate) \ + char name[16]; \ snprintf(name, sizeof(name), "%d", rate); -double get_rotation(int32_t *displaymatrix); +double get_rotation(const int32_t *displaymatrix); #endif /* FFTOOLS_CMDUTILS_H */ diff --git a/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffmpeg.c b/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffmpeg.c index 28aa83a..175f14e 100644 --- a/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffmpeg.c +++ b/android/ffmpeg-kit-android-lib/src/main/cpp/fftools_ffmpeg.c @@ -1,7 +1,7 @@ /* * Copyright (c) 2000-2003 Fabrice Bellard * Copyright (c) 2018-2022 Taner Sener - * Copyright (c) 2023 ARTHENICA LTD + * Copyright (c) 2023-2024 ARTHENICA LTD * * This file is part of FFmpeg. * @@ -26,12 +26,18 @@ */ /* - * This file is the modified version of ffmpeg.c file living in ffmpeg source code under the fftools folder. 
We - * manually update it each time we depend on a new ffmpeg version. Below you can see the list of changes applied - * by us to develop mobile-ffmpeg and later ffmpeg-kit libraries. + * This file is the modified version of ffmpeg.c file living in ffmpeg source + * code under the fftools folder. We manually update it each time we depend on a + * new ffmpeg version. Below you can see the list of changes applied by us to + * develop mobile-ffmpeg and later ffmpeg-kit libraries. * * ffmpeg-kit changes by ARTHENICA LTD * + * 11.2024 + * -------------------------------------------------------- + * - FFmpeg 6.1 changes migrated + * - longjmp_value dropped + * * 09.2023 * -------------------------------------------------------- * - forward_report method signature accepts pts to calculate the time @@ -40,9 +46,11 @@ * -------------------------------------------------------- * - FFmpeg 6.0 changes migrated * - cherry-picked commit 7357012bb5205e0d03634aff48fc0167a9248190 - * - vstats_file, received_sigterm and received_nb_signals updated as thread-local + * - vstats_file, received_sigterm and received_nb_signals updated as + * thread-local * - forward_report method signature updated - * - time field in report_callback/forward_report/set_report_callback updated as double + * - time field in report_callback/forward_report/set_report_callback updated as + * double * * mobile-ffmpeg / ffmpeg-kit changes by Taner Sener * @@ -67,22 +75,25 @@ * * 01.2020 * -------------------------------------------------------- - * - ffprobe support (added ffmpeg_ prefix to methods and variables defined for both ffmpeg and ffprobe) + * - ffprobe support (added ffmpeg_ prefix to methods and variables defined for + * both ffmpeg and ffprobe) * * 12.2019 * -------------------------------------------------------- - * - concurrent execution support ("__thread" specifier added to variables used by multiple threads, - * extern signatures of ffmpeg_opt.c methods called by both ffmpeg and ffprobe added, copied options from - * ffmpeg_opt.c and defined them as inline in execute method) + * - concurrent execution support ("__thread" specifier added to variables used + * by multiple threads, extern signatures of ffmpeg_opt.c methods called by both + * ffmpeg and ffprobe added, copied options from ffmpeg_opt.c and defined them + * as inline in execute method) * * 08.2018 * -------------------------------------------------------- * - fftools_ prefix added to file name and parent headers - * - forward_report() method, report_callback function pointer and set_report_callback() setter - * method added to forward stats + * - forward_report() method, report_callback function pointer and + * set_report_callback() setter method added to forward stats * - forward_report() call added from print_report() * - cancel_operation() method added to trigger sigterm_handler - * - (!received_sigterm) validation added inside ifilter_send_eof() to complete cancellation + * - (!received_sigterm) validation added inside ifilter_send_eof() to complete + * cancellation * * 07.2018 * -------------------------------------------------------- @@ -93,17 +104,14 @@ */ #include "config.h" -#include -#include -#include -#include + #include #include #include #include - -#include "ffmpegkit_exception.h" -#include "fftools_opt_common.h" +#include +#include +#include #if HAVE_IO_H #include @@ -112,52 +120,21 @@ #include #endif -#include "libavformat/avformat.h" -#include "libavdevice/avdevice.h" -#include "libswresample/swresample.h" -#include "libavutil/opt.h" 
-#include "libavutil/channel_layout.h" -#include "libavutil/parseutils.h" -#include "libavutil/samplefmt.h" -#include "libavutil/fifo.h" -#include "libavutil/hwcontext.h" -#include "libavutil/internal.h" -#include "libavutil/intreadwrite.h" -#include "libavutil/dict.h" -#include "libavutil/display.h" -#include "libavutil/mathematics.h" -#include "libavutil/pixdesc.h" -#include "libavutil/avstring.h" -#include "libavutil/libm.h" -#include "libavutil/imgutils.h" -#include "libavutil/timestamp.h" -#include "libavutil/bprint.h" -#include "libavutil/time.h" -#include "libavutil/thread.h" -#include "libavutil/threadmessage.h" -#include "libavcodec/mathops.h" -#include "libavformat/os_support.h" - -# include "libavfilter/avfilter.h" -# include "libavfilter/buffersrc.h" -# include "libavfilter/buffersink.h" - #if HAVE_SYS_RESOURCE_H +#include #include #include -#include #elif HAVE_GETPROCESSTIMES #include #endif #if HAVE_GETPROCESSMEMORYINFO -#include #include +#include #endif #if HAVE_SETCONSOLECTRLHANDLER #include #endif - #if HAVE_SYS_SELECT_H #include #endif @@ -171,111 +148,111 @@ #include #endif -#include +#include "libavutil/avassert.h" +#include "libavutil/avstring.h" +#include "libavutil/bprint.h" +#include "libavutil/channel_layout.h" +#include "libavutil/dict.h" +#include "libavutil/display.h" +#include "libavutil/fifo.h" +#include "libavutil/hwcontext.h" +#include "libavutil/imgutils.h" +#include "libavutil/intreadwrite.h" +#include "libavutil/libm.h" +#include "libavutil/mathematics.h" +#include "libavutil/opt.h" +#include "libavutil/parseutils.h" +#include "libavutil/pixdesc.h" +#include "libavutil/samplefmt.h" +#include "libavutil/thread.h" +#include "libavutil/threadmessage.h" +#include "libavutil/time.h" +#include "libavutil/timestamp.h" -#include "fftools_ffmpeg.h" -#include "fftools_cmdutils.h" -#include "fftools_sync_queue.h" +#include "libavcodec/version.h" -#include "libavutil/avassert.h" +#include "libavformat/avformat.h" + +#include "libavdevice/avdevice.h" -static __thread FILE *vstats_file; +#include "libswresample/swresample.h" -// optionally attached as opaque_ref to decoded AVFrames -typedef struct FrameData { - uint64_t idx; - int64_t pts; - AVRational tb; -} FrameData; +#include "ffmpegkit_exception.h" +#include "fftools_cmdutils.h" +#include "fftools_ffmpeg.h" +#include "fftools_opt_common.h" +#include "fftools_sync_queue.h" -typedef struct BenchmarkTimeStamps { - int64_t real_usec; - int64_t user_usec; - int64_t sys_usec; -} BenchmarkTimeStamps; +__thread FILE *vstats_file; -static int trigger_fix_sub_duration_heartbeat(OutputStream *ost, const AVPacket *pkt); static BenchmarkTimeStamps get_benchmark_time_stamps(void); static int64_t getmaxrss(void); -static int ifilter_has_all_input_formats(FilterGraph *fg); __thread int64_t nb_frames_dup = 0; -__thread uint64_t dup_warning = 1000; __thread int64_t nb_frames_drop = 0; -__thread int64_t decode_error_stat[2]; __thread unsigned nb_output_dumped = 0; __thread BenchmarkTimeStamps current_time; __thread AVIOContext *progress_avio = NULL; -__thread InputFile **input_files = NULL; -__thread int nb_input_files = 0; +__thread InputFile **input_files = NULL; +__thread int nb_input_files = 0; -__thread OutputFile **output_files = NULL; -__thread int nb_output_files = 0; +__thread OutputFile **output_files = NULL; +__thread int nb_output_files = 0; __thread FilterGraph **filtergraphs; -__thread int nb_filtergraphs; +__thread int nb_filtergraphs; __thread int64_t last_time = -1; __thread int64_t keyboard_last_time = 0; __thread 
int first_report = 1; -__thread int qp_histogram[52]; -void (*report_callback)(int, float, float, int64_t, double, double, double) = NULL; +void (*report_callback)(int, float, float, int64_t, double, double, + double) = NULL; extern int opt_map(void *optctx, const char *opt, const char *arg); extern int opt_map_channel(void *optctx, const char *opt, const char *arg); -extern int opt_recording_timestamp(void *optctx, const char *opt, const char *arg); +extern int opt_recording_timestamp(void *optctx, const char *opt, + const char *arg); extern int opt_data_frames(void *optctx, const char *opt, const char *arg); extern int opt_progress(void *optctx, const char *opt, const char *arg); extern int opt_target(void *optctx, const char *opt, const char *arg); extern int opt_vsync(void *optctx, const char *opt, const char *arg); +extern int opt_adrift_threshold(void *optctx, const char *opt, const char *arg); extern int opt_abort_on(void *optctx, const char *opt, const char *arg); -extern int opt_stats_period(void *optctx, const char *opt, const char *arg); extern int opt_qscale(void *optctx, const char *opt, const char *arg); extern int opt_profile(void *optctx, const char *opt, const char *arg); +extern int opt_filter_threads(void *optctx, const char *opt, const char *arg); extern int opt_filter_complex(void *optctx, const char *opt, const char *arg); -extern int opt_filter_complex_script(void *optctx, const char *opt, const char *arg); +extern int opt_filter_complex_script(void *optctx, const char *opt, + const char *arg); +extern int opt_stats_period(void *optctx, const char *opt, const char *arg); extern int opt_attach(void *optctx, const char *opt, const char *arg); extern int opt_video_frames(void *optctx, const char *opt, const char *arg); extern int opt_video_codec(void *optctx, const char *opt, const char *arg); -extern int opt_sameq(void *optctx, const char *opt, const char *arg); extern int opt_timecode(void *optctx, const char *opt, const char *arg); -extern int opt_vstats_file(void *optctx, const char *opt, const char *arg); extern int opt_vstats(void *optctx, const char *opt, const char *arg); -extern int opt_video_frames(void *optctx, const char *opt, const char *arg); +extern int opt_vstats_file(void *optctx, const char *opt, const char *arg); +extern int opt_video_filters(void *optctx, const char *opt, const char *arg); extern int opt_old2new(void *optctx, const char *opt, const char *arg); +extern int opt_qphist(void *optctx, const char *opt, const char *arg); extern int opt_streamid(void *optctx, const char *opt, const char *arg); extern int opt_bitrate(void *optctx, const char *opt, const char *arg); extern int show_hwaccels(void *optctx, const char *opt, const char *arg); -extern int opt_video_filters(void *optctx, const char *opt, const char *arg); extern int opt_audio_frames(void *optctx, const char *opt, const char *arg); extern int opt_audio_qscale(void *optctx, const char *opt, const char *arg); extern int opt_audio_codec(void *optctx, const char *opt, const char *arg); -extern int opt_channel_layout(void *optctx, const char *opt, const char *arg); -extern int opt_preset(void *optctx, const char *opt, const char *arg); extern int opt_audio_filters(void *optctx, const char *opt, const char *arg); extern int opt_subtitle_codec(void *optctx, const char *opt, const char *arg); -extern int opt_video_channel(void *optctx, const char *opt, const char *arg); -extern int opt_video_standard(void *optctx, const char *opt, const char *arg); extern int opt_sdp_file(void *optctx, const 
char *opt, const char *arg); -#if CONFIG_VAAPI -extern int opt_vaapi_device(void *optctx, const char *opt, const char *arg); -#endif -#if CONFIG_QSV -extern int opt_qsv_device(void *optctx, const char *opt, const char *arg); -#endif +extern int opt_preset(void *optctx, const char *opt, const char *arg); extern int opt_data_codec(void *optctx, const char *opt, const char *arg); extern int opt_init_hw_device(void *optctx, const char *opt, const char *arg); extern int opt_filter_hw_device(void *optctx, const char *opt, const char *arg); -extern int opt_filter_threads(void *optctx, const char *opt, const char *arg); + extern __thread int file_overwrite; extern __thread int no_file_overwrite; -extern __thread int do_psnr; -extern __thread int ignore_unknown_streams; -extern __thread int copy_unknown_streams; -extern __thread int recast_media; #if HAVE_TERMIOS_H @@ -299,194 +276,55 @@ extern int cancelRequested(long sessionId); This is a temporary solution until libavfilter gets real subtitles support. */ -static int sub2video_get_blank_frame(InputStream *ist) -{ - int ret; - AVFrame *frame = ist->sub2video.frame; - - av_frame_unref(frame); - ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w; - ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h; - ist->sub2video.frame->format = AV_PIX_FMT_RGB32; - if ((ret = av_frame_get_buffer(frame, 0)) < 0) - return ret; - memset(frame->data[0], 0, frame->height * frame->linesize[0]); - return 0; -} - -static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, - AVSubtitleRect *r) -{ - uint32_t *pal, *dst2; - uint8_t *src, *src2; - int x, y; - - if (r->type != SUBTITLE_BITMAP) { - av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n"); - return; - } - if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) { - av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n", - r->x, r->y, r->w, r->h, w, h - ); - return; - } - - dst += r->y * dst_linesize + r->x * 4; - src = r->data[0]; - pal = (uint32_t *)r->data[1]; - for (y = 0; y < r->h; y++) { - dst2 = (uint32_t *)dst; - src2 = src; - for (x = 0; x < r->w; x++) - *(dst2++) = pal[*(src2++)]; - dst += dst_linesize; - src += r->linesize[0]; - } -} - -static void sub2video_push_ref(InputStream *ist, int64_t pts) -{ - AVFrame *frame = ist->sub2video.frame; - int i; - int ret; - - av_assert1(frame->data[0]); - ist->sub2video.last_pts = frame->pts = pts; - for (i = 0; i < ist->nb_filters; i++) { - ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame, - AV_BUFFERSRC_FLAG_KEEP_REF | - AV_BUFFERSRC_FLAG_PUSH); - if (ret != AVERROR_EOF && ret < 0) - av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n", - av_err2str(ret)); - } -} - -void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub) -{ - AVFrame *frame = ist->sub2video.frame; - int8_t *dst; - int dst_linesize; - int num_rects, i; - int64_t pts, end_pts; - - if (!frame) - return; - if (sub) { - pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL, - AV_TIME_BASE_Q, ist->st->time_base); - end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL, - AV_TIME_BASE_Q, ist->st->time_base); - num_rects = sub->num_rects; - } else { - /* If we are initializing the system, utilize current heartbeat - PTS as the start time, and show until the following subpicture - is received. 
Otherwise, utilize the previous subpicture's end time - as the fall-back value. */ - pts = ist->sub2video.initialize ? - heartbeat_pts : ist->sub2video.end_pts; - end_pts = INT64_MAX; - num_rects = 0; - } - if (sub2video_get_blank_frame(ist) < 0) { - av_log(NULL, AV_LOG_ERROR, - "Impossible to get a blank canvas.\n"); - return; - } - dst = frame->data [0]; - dst_linesize = frame->linesize[0]; - for (i = 0; i < num_rects; i++) - sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]); - sub2video_push_ref(ist, pts); - ist->sub2video.end_pts = end_pts; - ist->sub2video.initialize = 0; -} - -static void sub2video_heartbeat(InputStream *ist, int64_t pts) -{ - InputFile *infile = input_files[ist->file_index]; - int i, j, nb_reqs; - int64_t pts2; - +static void sub2video_heartbeat(InputFile *infile, int64_t pts, AVRational tb) { /* When a frame is read from a file, examine all sub2video streams in the same file and send the sub2video frame again. Otherwise, decoded video frames could be accumulating in the filter graph while a filter (possibly overlay) is desperately waiting for a subtitle frame. */ - for (i = 0; i < infile->nb_streams; i++) { - InputStream *ist2 = infile->streams[i]; - if (!ist2->sub2video.frame) - continue; - /* subtitles seem to be usually muxed ahead of other streams; - if not, subtracting a larger time here is necessary */ - pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1; - /* do not send the heartbeat frame if the subtitle is already ahead */ - if (pts2 <= ist2->sub2video.last_pts) - continue; - if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize) - /* if we have hit the end of the current displayed subpicture, - or if we need to initialize the system, update the - overlayed subpicture and its start/end times */ - sub2video_update(ist2, pts2 + 1, NULL); - for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++) - nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter); - if (nb_reqs) - sub2video_push_ref(ist2, pts2); - } -} + for (int i = 0; i < infile->nb_streams; i++) { + InputStream *ist = infile->streams[i]; -static void sub2video_flush(InputStream *ist) -{ - int i; - int ret; + if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE) + continue; - if (ist->sub2video.end_pts < INT64_MAX) - sub2video_update(ist, INT64_MAX, NULL); - for (i = 0; i < ist->nb_filters; i++) { - ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL); - if (ret != AVERROR_EOF && ret < 0) - av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n"); + for (int j = 0; j < ist->nb_filters; j++) + ifilter_sub2video_heartbeat(ist->filters[j], pts, tb); } } /* end of sub2video hack */ -static void term_exit_sigsafe(void) -{ +static void term_exit_sigsafe(void) { #if HAVE_TERMIOS_H - if(restore_tty) - tcsetattr (0, TCSANOW, &oldtty); + if (restore_tty) + tcsetattr(0, TCSANOW, &oldtty); #endif } -void term_exit(void) -{ +void term_exit(void) { av_log(NULL, AV_LOG_QUIET, "%s", ""); term_exit_sigsafe(); } -static volatile int received_sigterm = 0; -static volatile int received_nb_signals = 0; +__thread volatile int received_sigterm = 0; +__thread volatile int received_nb_signals = 0; __thread atomic_int transcode_init_done = ATOMIC_VAR_INIT(0); -__thread int ffmpeg_exited = 0; +__thread volatile int ffmpeg_exited = 0; __thread int main_ffmpeg_return_code = 0; __thread int64_t copy_ts_first_pts = AV_NOPTS_VALUE; -extern __thread int longjmp_value; extern __thread int want_sdp; -struct EncStatsFile; extern __thread struct 
EncStatsFile *enc_stats_files; extern __thread int nb_enc_stats_files; -static void -sigterm_handler(int sig) -{ +static void sigterm_handler(int sig) { // int ret; received_sigterm = sig; received_nb_signals++; term_exit_sigsafe(); // FFmpegKit - Hard Exit Disabled // if(received_nb_signals > 3) { - // ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n", + // ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard + // exiting\n", // strlen("Received > 3 system signals, hard exiting\n")); // if (ret < 0) { /* Do nothing */ }; // exit(123); @@ -494,12 +332,10 @@ sigterm_handler(int sig) } #if HAVE_SETCONSOLECTRLHANDLER -static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType) -{ +static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType) { av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType); - switch (fdwCtrlType) - { + switch (fdwCtrlType) { case CTRL_C_EVENT: case CTRL_BREAK_EVENT: sigterm_handler(SIGINT); @@ -519,31 +355,30 @@ static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType) return TRUE; default: - av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType); + av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", + fdwCtrlType); return FALSE; } } #endif #ifdef __linux__ -#define SIGNAL(sig, func) \ - do { \ - action.sa_handler = func; \ - sigaction(sig, &action, NULL); \ +#define SIGNAL(sig, func) \ + do { \ + action.sa_handler = func; \ + sigaction(sig, &action, NULL); \ } while (0) #else -#define SIGNAL(sig, func) \ - signal(sig, func) +#define SIGNAL(sig, func) signal(sig, func) #endif -void term_init(void) -{ +void term_init(void) { #if defined __linux__ - #if defined __aarch64__ || defined __amd64__ || defined __x86_64__ - struct sigaction action = {0}; - #else - struct sigaction action = {{0}}; - #endif +#if defined __aarch64__ || defined __amd64__ || defined __x86_64__ + struct sigaction action = {0}; +#else + struct sigaction action = {{0}}; +#endif action.sa_handler = sigterm_handler; @@ -557,20 +392,20 @@ void term_init(void) #if HAVE_TERMIOS_H if (stdin_interaction) { struct termios tty; - if (tcgetattr (0, &tty) == 0) { + if (tcgetattr(0, &tty) == 0) { oldtty = tty; restore_tty = 1; - tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP - |INLCR|IGNCR|ICRNL|IXON); + tty.c_iflag &= ~(IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | + ICRNL | IXON); tty.c_oflag |= OPOST; - tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN); - tty.c_cflag &= ~(CSIZE|PARENB); + tty.c_lflag &= ~(ECHO | ECHONL | ICANON | IEXTEN); + tty.c_cflag &= ~(CSIZE | PARENB); tty.c_cflag |= CS8; tty.c_cc[VMIN] = 1; tty.c_cc[VTIME] = 0; - tcsetattr (0, TCSANOW, &tty); + tcsetattr(0, TCSANOW, &tty); } if (handleSIGQUIT == 1) { SIGNAL(SIGQUIT, sigterm_handler); /* Quit (POSIX). */ @@ -579,7 +414,7 @@ void term_init(void) #endif if (handleSIGINT == 1) { - SIGNAL(SIGINT , sigterm_handler); /* Interrupt (ANSI). */ + SIGNAL(SIGINT, sigterm_handler); /* Interrupt (ANSI). */ } if (handleSIGTERM == 1) { SIGNAL(SIGTERM, sigterm_handler); /* Termination (ANSI). */ @@ -591,17 +426,16 @@ void term_init(void) #endif #ifdef SIGPIPE if (handleSIGPIPE == 1) { - SIGNAL(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */ + signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). 
*/ } #endif #if HAVE_SETCONSOLECTRLHANDLER - SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE); + SetConsoleCtrlHandler((PHANDLER_ROUTINE)CtrlHandler, TRUE); #endif } /* read a key without blocking */ -static int read_key(void) -{ +static int read_key(void) { unsigned char ch; #if HAVE_TERMIOS_H int n = 1; @@ -621,11 +455,11 @@ static int read_key(void) return n; } #elif HAVE_KBHIT -# if HAVE_PEEKNAMEDPIPE +#if HAVE_PEEKNAMEDPIPE && HAVE_GETSTDHANDLE static int is_pipe; static HANDLE input_handle; DWORD dw, nchars; - if(!input_handle){ + if (!input_handle) { input_handle = GetStdHandle(STD_INPUT_HANDLE); is_pipe = !GetConsoleMode(input_handle, &dw); } @@ -636,95 +470,60 @@ static int read_key(void) // input pipe may have been closed by the program that ran ffmpeg return -1; } - //Read it - if(nchars != 0) { - read(0, &ch, 1); - return ch; - }else{ + // Read it + if (nchars != 0) { + if (read(0, &ch, 1) == 1) + return ch; + return 0; + } else { return -1; } } -# endif - if(kbhit()) - return(getch()); +#endif + if (kbhit()) + return (getch()); #endif return -1; } int decode_interrupt_cb(void *ctx); -int decode_interrupt_cb(void *ctx) -{ +int decode_interrupt_cb(void *ctx) { return received_nb_signals > atomic_load(&transcode_init_done); } -__thread const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL }; +__thread const AVIOInterruptCB int_cb = {decode_interrupt_cb, NULL}; -static void ffmpeg_cleanup(int ret) -{ - int i, j; +static void ffmpeg_cleanup(int ret) { + int i; if (do_benchmark) { int maxrss = getmaxrss() / 1024; av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss); } - for (i = 0; i < nb_filtergraphs; i++) { - FilterGraph *fg = filtergraphs[i]; - avfilter_graph_free(&fg->graph); - for (j = 0; j < fg->nb_inputs; j++) { - InputFilter *ifilter = fg->inputs[j]; - struct InputStream *ist = ifilter->ist; - - if (ifilter->frame_queue) { - AVFrame *frame; - while (av_fifo_read(ifilter->frame_queue, &frame, 1) >= 0) - av_frame_free(&frame); - av_fifo_freep2(&ifilter->frame_queue); - } - av_freep(&ifilter->displaymatrix); - if (ist->sub2video.sub_queue) { - AVSubtitle sub; - while (av_fifo_read(ist->sub2video.sub_queue, &sub, 1) >= 0) - avsubtitle_free(&sub); - av_fifo_freep2(&ist->sub2video.sub_queue); - } - av_buffer_unref(&ifilter->hw_frames_ctx); - av_freep(&ifilter->name); - av_freep(&fg->inputs[j]); - } - av_freep(&fg->inputs); - for (j = 0; j < fg->nb_outputs; j++) { - OutputFilter *ofilter = fg->outputs[j]; - - avfilter_inout_free(&ofilter->out_tmp); - av_freep(&ofilter->name); - av_channel_layout_uninit(&ofilter->ch_layout); - av_freep(&fg->outputs[j]); - } - av_freep(&fg->outputs); - av_freep(&fg->graph_desc); - - av_freep(&filtergraphs[i]); - } + for (i = 0; i < nb_filtergraphs; i++) + fg_free(&filtergraphs[i]); av_freep(&filtergraphs); - /* close files */ for (i = 0; i < nb_output_files; i++) - of_close(&output_files[i]); + of_free(&output_files[i]); for (i = 0; i < nb_input_files; i++) ifile_close(&input_files[i]); if (vstats_file) { if (fclose(vstats_file)) - av_log(NULL, AV_LOG_ERROR, - "Error closing vstats file, loss of information possible: %s\n", - av_err2str(AVERROR(errno))); + av_log( + NULL, AV_LOG_ERROR, + "Error closing vstats file, loss of information possible: %s\n", + av_err2str(AVERROR(errno))); } av_freep(&vstats_filename); of_enc_stats_close(); + hw_device_free_all(); + av_freep(&filter_nbthreads); av_freep(&input_files); @@ -736,9 +535,10 @@ static void ffmpeg_cleanup(int ret) if (received_sigterm) { av_log(NULL, AV_LOG_INFO, "Exiting 
normally, received signal %d.\n", - (int) received_sigterm); + (int)received_sigterm); } else if (cancelRequested(globalSessionId)) { - av_log(NULL, AV_LOG_INFO, "Exiting normally, received cancel request.\n"); + av_log(NULL, AV_LOG_INFO, + "Exiting normally, received cancel request.\n"); } else if (ret && atomic_load(&transcode_init_done)) { av_log(NULL, AV_LOG_INFO, "Conversion failed!\n"); } @@ -746,12 +546,9 @@ static void ffmpeg_cleanup(int ret) ffmpeg_exited = 1; } -/* iterate over all output streams in all output files; - * pass NULL to start iteration */ -static OutputStream *ost_iter(OutputStream *prev) -{ - int of_idx = prev ? prev->file_index : 0; - int ost_idx = prev ? prev->index + 1 : 0; +OutputStream *ost_iter(OutputStream *prev) { + int of_idx = prev ? prev->file_index : 0; + int ost_idx = prev ? prev->index + 1 : 0; for (; of_idx < nb_output_files; of_idx++) { OutputFile *of = output_files[of_idx]; @@ -764,10 +561,9 @@ static OutputStream *ost_iter(OutputStream *prev) return NULL; } -InputStream *ist_iter(InputStream *prev) -{ - int if_idx = prev ? prev->file_index : 0; - int ist_idx = prev ? prev->st->index + 1 : 0; +InputStream *ist_iter(InputStream *prev) { + int if_idx = prev ? prev->file_index : 0; + int ist_idx = prev ? prev->index + 1 : 0; for (; if_idx < nb_input_files; if_idx++) { InputFile *f = input_files[if_idx]; @@ -780,8 +576,23 @@ InputStream *ist_iter(InputStream *prev) return NULL; } -void remove_avoptions(AVDictionary **a, AVDictionary *b) -{ +FrameData *frame_data(AVFrame *frame) { + if (!frame->opaque_ref) { + FrameData *fd; + + frame->opaque_ref = av_buffer_allocz(sizeof(*fd)); + if (!frame->opaque_ref) + return NULL; + fd = (FrameData *)frame->opaque_ref->data; + + fd->dec.frame_num = UINT64_MAX; + fd->dec.pts = AV_NOPTS_VALUE; + } + + return (FrameData *)frame->opaque_ref->data; +} + +void remove_avoptions(AVDictionary **a, AVDictionary *b) { const AVDictionaryEntry *t = NULL; while ((t = av_dict_iterate(b, t))) { @@ -789,22 +600,17 @@ void remove_avoptions(AVDictionary **a, AVDictionary *b) } } -void assert_avoptions(AVDictionary *m) -{ +int check_avoptions(AVDictionary *m) { const AVDictionaryEntry *t; if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) { av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key); - exit_program(1); + return AVERROR_OPTION_NOT_FOUND; } -} -static void abort_codec_experimental(const AVCodec *c, int encoder) -{ - exit_program(1); + return 0; } -static void update_benchmark(const char *fmt, ...) -{ +void update_benchmark(const char *fmt, ...) { if (do_benchmark_all) { BenchmarkTimeStamps t = get_benchmark_time_stamps(); va_list va; @@ -815,7 +621,8 @@ static void update_benchmark(const char *fmt, ...) vsnprintf(buf, sizeof(buf), fmt, va); va_end(va); av_log(NULL, AV_LOG_INFO, - "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n", + "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 + " real %s \n", t.user_usec - current_time.user_usec, t.sys_usec - current_time.sys_usec, t.real_usec - current_time.real_usec, buf); @@ -824,8 +631,7 @@ static void update_benchmark(const char *fmt, ...) 
} } -static void close_output_stream(OutputStream *ost) -{ +void close_output_stream(OutputStream *ost) { OutputFile *of = output_files[ost->file_index]; ost->finished |= ENCODER_FINISHED; @@ -833,1086 +639,160 @@ static void close_output_stream(OutputStream *ost) sq_send(of->sq_encode, ost->sq_idx_encode, SQFRAME(NULL)); } -static int check_recording_time(OutputStream *ost, int64_t ts, AVRational tb) -{ - OutputFile *of = output_files[ost->file_index]; - - if (of->recording_time != INT64_MAX && - av_compare_ts(ts, tb, of->recording_time, AV_TIME_BASE_Q) >= 0) { - close_output_stream(ost); - return 0; - } - return 1; -} - -static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost, - AVFrame *frame) -{ - double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision - const int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? - 0 : of->start_time; - - AVCodecContext *const enc = ost->enc_ctx; - - AVRational tb = enc->time_base; - AVRational filter_tb = frame->time_base; - const int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16); - - if (frame->pts == AV_NOPTS_VALUE) - goto early_exit; - - tb.den <<= extra_bits; - float_pts = av_rescale_q(frame->pts, filter_tb, tb) - - av_rescale_q(start_time, AV_TIME_BASE_Q, tb); - float_pts /= 1 << extra_bits; - // avoid exact midoints to reduce the chance of rounding differences, this - // can be removed in case the fps code is changed to work with integers - float_pts += FFSIGN(float_pts) * 1.0 / (1<<17); - - frame->pts = av_rescale_q(frame->pts, filter_tb, enc->time_base) - - av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base); - frame->time_base = enc->time_base; - -early_exit: - - if (debug_ts) { - av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n", - frame ? av_ts2str(frame->pts) : "NULL", - (enc && frame) ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL", - float_pts, - enc ? enc->time_base.num : -1, - enc ? 
enc->time_base.den : -1); +static void forward_report(uint64_t frame_number, float fps, float quality, + int64_t total_size, int64_t pts, double bitrate, + double speed) { + // FORWARD DATA + if (report_callback != NULL) { + double milliseconds = 0; + if (pts != AV_NOPTS_VALUE) { + milliseconds = ((double)FFABS64U(pts)) / 1000; + } + if (pts < 0) { + report_callback(frame_number, fps, quality, total_size, + 0 - milliseconds, bitrate, speed); + } else { + report_callback(frame_number, fps, quality, total_size, + milliseconds, bitrate, speed); + } } - - return float_pts; } -static int init_output_stream(OutputStream *ost, AVFrame *frame, - char *error, int error_len); - -static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame, - unsigned int fatal) -{ - int ret = AVERROR_BUG; - char error[1024] = {0}; +void print_report(int is_last_report, int64_t timer_start, int64_t cur_time) { + AVBPrint buf, buf_script; + int64_t total_size = of_filesize(output_files[0]); + int vid; + double bitrate; + double speed; + int64_t pts = AV_NOPTS_VALUE; + int mins, secs, us; + int64_t hours; + const char *hours_sign; + int ret; + float t; - if (ost->initialized) - return 0; + // FFmpegKit field declarations + int local_print_stats = 1; + uint64_t frame_number = 0; + float fps = 0; + float q = 0; - ret = init_output_stream(ost, frame, error, sizeof(error)); - if (ret < 0) { - av_log(ost, AV_LOG_ERROR, "Error initializing output stream: %s\n", - error); + if (!print_stats && !is_last_report && !progress_avio) + local_print_stats = 0; - if (fatal) - exit_program(1); + if (!is_last_report) { + if (last_time == -1) { + last_time = cur_time; + } + if (((cur_time - last_time) < stats_period && !first_report) || + (first_report && nb_output_dumped < nb_output_files)) + return; + last_time = cur_time; } - return ret; -} - -static double psnr(double d) -{ - return -10.0 * log10(d); -} - -static void update_video_stats(OutputStream *ost, const AVPacket *pkt, int write_vstats) -{ - const uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS, - NULL); - AVCodecContext *enc = ost->enc_ctx; - int64_t frame_number; - double ti1, bitrate, avg_bitrate; - - ost->quality = sd ? AV_RL32(sd) : -1; - ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE; + t = (cur_time - timer_start) / 1000000.0; - for (int i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) { - if (sd && i < sd[5]) - ost->error[i] = AV_RL64(sd + 8 + 8*i); - else - ost->error[i] = -1; + vid = 0; + if (local_print_stats) { + av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC); + av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC); } + for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) { + q = ost->enc ?
ost->quality / (float)FF_QP2LAMBDA : -1; - if (!write_vstats) - return; - - /* this is executed just the first time update_video_stats is called */ - if (!vstats_file) { - vstats_file = fopen(vstats_filename, "w"); - if (!vstats_file) { - perror("fopen"); - exit_program(1); + if (local_print_stats && vid && ost->type == AVMEDIA_TYPE_VIDEO) { + av_bprintf(&buf, "q=%2.1f ", q); + av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n", ost->file_index, + ost->index, q); } - } - - frame_number = ost->packets_encoded; - if (vstats_version <= 1) { - fprintf(vstats_file, "frame= %5"PRId64" q= %2.1f ", frame_number, - ost->quality / (float)FF_QP2LAMBDA); - } else { - fprintf(vstats_file, "out= %2d st= %2d frame= %5"PRId64" q= %2.1f ", ost->file_index, ost->index, frame_number, - ost->quality / (float)FF_QP2LAMBDA); - } - - if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR)) - fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0))); - - fprintf(vstats_file,"f_size= %6d ", pkt->size); - /* compute pts value */ - ti1 = pkt->dts * av_q2d(pkt->time_base); - if (ti1 < 0.01) - ti1 = 0.01; - - bitrate = (pkt->size * 8) / av_q2d(enc->time_base) / 1000.0; - avg_bitrate = (double)(ost->data_size_enc * 8) / ti1 / 1000.0; - fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ", - (double)ost->data_size_enc / 1024, ti1, bitrate, avg_bitrate); - fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type)); -} - -void enc_stats_write(OutputStream *ost, EncStats *es, - const AVFrame *frame, const AVPacket *pkt, - uint64_t frame_num) -{ - AVIOContext *io = es->io; - AVRational tb = frame ? frame->time_base : pkt->time_base; - int64_t pts = frame ? frame->pts : pkt->pts; - - AVRational tbi = (AVRational){ 0, 1}; - int64_t ptsi = INT64_MAX; + if (!vid && ost->type == AVMEDIA_TYPE_VIDEO && ost->filter) { + frame_number = atomic_load(&ost->packets_written); - const FrameData *fd; + fps = t > 1 ? frame_number / t : 0; + if (local_print_stats) { + av_bprintf(&buf, "frame=%5" PRId64 " fps=%3.*f q=%3.1f ", + frame_number, fps < 9.95, fps, q); + av_bprintf(&buf_script, "frame=%" PRId64 "\n", frame_number); + av_bprintf(&buf_script, "fps=%.2f\n", fps); + av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n", + ost->file_index, ost->index, q); + if (is_last_report) + av_bprintf(&buf, "L"); + } - if ((frame && frame->opaque_ref) || (pkt && pkt->opaque_ref)) { - fd = (const FrameData*)(frame ? frame->opaque_ref->data : pkt->opaque_ref->data); - tbi = fd->tb; - ptsi = fd->pts; - } + nb_frames_dup = ost->filter->nb_frames_dup; + nb_frames_drop = ost->filter->nb_frames_drop; - for (size_t i = 0; i < es->nb_components; i++) { - const EncStatsComponent *c = &es->components[i]; - - switch (c->type) { - case ENC_STATS_LITERAL: avio_write (io, c->str, c->str_len); continue; - case ENC_STATS_FILE_IDX: avio_printf(io, "%d", ost->file_index); continue; - case ENC_STATS_STREAM_IDX: avio_printf(io, "%d", ost->index); continue; - case ENC_STATS_TIMEBASE: avio_printf(io, "%d/%d", tb.num, tb.den); continue; - case ENC_STATS_TIMEBASE_IN: avio_printf(io, "%d/%d", tbi.num, tbi.den); continue; - case ENC_STATS_PTS: avio_printf(io, "%"PRId64, pts); continue; - case ENC_STATS_PTS_IN: avio_printf(io, "%"PRId64, ptsi); continue; - case ENC_STATS_PTS_TIME: avio_printf(io, "%g", pts * av_q2d(tb)); continue; - case ENC_STATS_PTS_TIME_IN: avio_printf(io, "%g", ptsi == INT64_MAX ? 
- INFINITY : ptsi * av_q2d(tbi)); continue; - case ENC_STATS_FRAME_NUM: avio_printf(io, "%"PRIu64, frame_num); continue; - case ENC_STATS_FRAME_NUM_IN: avio_printf(io, "%"PRIu64, fd ? fd->idx : -1); continue; + vid = 1; } - - if (frame) { - switch (c->type) { - case ENC_STATS_SAMPLE_NUM: avio_printf(io, "%"PRIu64, ost->samples_encoded); continue; - case ENC_STATS_NB_SAMPLES: avio_printf(io, "%d", frame->nb_samples); continue; - default: av_assert0(0); - } - } else { - switch (c->type) { - case ENC_STATS_DTS: avio_printf(io, "%"PRId64, pkt->dts); continue; - case ENC_STATS_DTS_TIME: avio_printf(io, "%g", pkt->dts * av_q2d(tb)); continue; - case ENC_STATS_PKT_SIZE: avio_printf(io, "%d", pkt->size); continue; - case ENC_STATS_BITRATE: { - double duration = FFMAX(pkt->duration, 1) * av_q2d(tb); - avio_printf(io, "%g", 8.0 * pkt->size / duration); - continue; - } - case ENC_STATS_AVG_BITRATE: { - double duration = pkt->dts * av_q2d(tb); - avio_printf(io, "%g", duration > 0 ? 8.0 * ost->data_size_enc / duration : -1.); - continue; - } - default: av_assert0(0); + /* compute min output value */ + if (ost->last_mux_dts != AV_NOPTS_VALUE) { + if (pts == AV_NOPTS_VALUE || ost->last_mux_dts > pts) + pts = ost->last_mux_dts; + if (copy_ts) { + if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1) + copy_ts_first_pts = pts; + if (copy_ts_first_pts != AV_NOPTS_VALUE) + pts -= copy_ts_first_pts; } } } - avio_w8(io, '\n'); - avio_flush(io); -} -static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame) -{ - AVCodecContext *enc = ost->enc_ctx; - AVPacket *pkt = ost->pkt; - const char *type_desc = av_get_media_type_string(enc->codec_type); - const char *action = frame ? "encode" : "flush"; - int ret; + us = FFABS64U(pts) % AV_TIME_BASE; + secs = FFABS64U(pts) / AV_TIME_BASE % 60; + mins = FFABS64U(pts) / AV_TIME_BASE / 60 % 60; + hours = FFABS64U(pts) / AV_TIME_BASE / 3600; + hours_sign = (pts < 0) ? "-" : ""; - if (frame) { - if (ost->enc_stats_pre.io) - enc_stats_write(ost, &ost->enc_stats_pre, frame, NULL, - ost->frames_encoded); + bitrate = pts != AV_NOPTS_VALUE && pts && total_size >= 0 + ? total_size * 8 / (pts / 1000.0) + : -1; + speed = + pts != AV_NOPTS_VALUE && t != 0.0 ? 
(double)pts / AV_TIME_BASE / t : -1; - ost->frames_encoded++; - ost->samples_encoded += frame->nb_samples; + // FFmpegKit forward report + forward_report(frame_number, fps, q, total_size, pts, bitrate, speed); - if (debug_ts) { - av_log(ost, AV_LOG_INFO, "encoder <- type:%s " - "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n", - type_desc, - av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base), - enc->time_base.num, enc->time_base.den); - } - } - - update_benchmark(NULL); - - ret = avcodec_send_frame(enc, frame); - if (ret < 0 && !(ret == AVERROR_EOF && !frame)) { - av_log(ost, AV_LOG_ERROR, "Error submitting %s frame to the encoder\n", - type_desc); - return ret; - } - - while (1) { - ret = avcodec_receive_packet(enc, pkt); - update_benchmark("%s_%s %d.%d", action, type_desc, - ost->file_index, ost->index); - - pkt->time_base = enc->time_base; - - /* if two pass, output log on success and EOF */ - if ((ret >= 0 || ret == AVERROR_EOF) && ost->logfile && enc->stats_out) - fprintf(ost->logfile, "%s", enc->stats_out); - - if (ret == AVERROR(EAGAIN)) { - av_assert0(frame); // should never happen during flushing - return 0; - } else if (ret == AVERROR_EOF) { - of_output_packet(of, pkt, ost, 1); - return ret; - } else if (ret < 0) { - av_log(ost, AV_LOG_ERROR, "%s encoding failed\n", type_desc); - return ret; - } - - if (enc->codec_type == AVMEDIA_TYPE_VIDEO) - update_video_stats(ost, pkt, !!vstats_filename); - if (ost->enc_stats_post.io) - enc_stats_write(ost, &ost->enc_stats_post, NULL, pkt, - ost->packets_encoded); - - if (debug_ts) { - av_log(ost, AV_LOG_INFO, "encoder -> type:%s " - "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s " - "duration:%s duration_time:%s\n", - type_desc, - av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &enc->time_base), - av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &enc->time_base), - av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &enc->time_base)); - } - - av_packet_rescale_ts(pkt, pkt->time_base, ost->mux_timebase); - pkt->time_base = ost->mux_timebase; - - if (debug_ts) { - av_log(ost, AV_LOG_INFO, "encoder -> type:%s " - "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s " - "duration:%s duration_time:%s\n", - type_desc, - av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &enc->time_base), - av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &enc->time_base), - av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &enc->time_base)); - } - - if ((ret = trigger_fix_sub_duration_heartbeat(ost, pkt)) < 0) { - av_log(NULL, AV_LOG_ERROR, - "Subtitle heartbeat logic failed in %s! (%s)\n", - __func__, av_err2str(ret)); - exit_program(1); - } - - ost->data_size_enc += pkt->size; - - ost->packets_encoded++; - - of_output_packet(of, pkt, ost, 0); - } - - av_assert0(0); -} - -static int submit_encode_frame(OutputFile *of, OutputStream *ost, - AVFrame *frame) -{ - int ret; - - if (ost->sq_idx_encode < 0) - return encode_frame(of, ost, frame); - - if (frame) { - ret = av_frame_ref(ost->sq_frame, frame); - if (ret < 0) - return ret; - frame = ost->sq_frame; - } - - ret = sq_send(of->sq_encode, ost->sq_idx_encode, - SQFRAME(frame)); - if (ret < 0) { - if (frame) - av_frame_unref(frame); - if (ret != AVERROR_EOF) - return ret; - } - - while (1) { - AVFrame *enc_frame = ost->sq_frame; - - ret = sq_receive(of->sq_encode, ost->sq_idx_encode, - SQFRAME(enc_frame)); - if (ret == AVERROR_EOF) { - enc_frame = NULL; - } else if (ret < 0) { - return (ret == AVERROR(EAGAIN)) ? 
0 : ret; - } - - ret = encode_frame(of, ost, enc_frame); - if (enc_frame) - av_frame_unref(enc_frame); - if (ret < 0) { - if (ret == AVERROR_EOF) - close_output_stream(ost); - return ret; - } - } -} - -static void do_audio_out(OutputFile *of, OutputStream *ost, - AVFrame *frame) -{ - AVCodecContext *enc = ost->enc_ctx; - int ret; - - if (frame->pts == AV_NOPTS_VALUE) - frame->pts = ost->next_pts; - else { - int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time; - frame->pts = - av_rescale_q(frame->pts, frame->time_base, enc->time_base) - - av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base); - } - frame->time_base = enc->time_base; - - if (!check_recording_time(ost, frame->pts, frame->time_base)) - return; - - ost->next_pts = frame->pts + frame->nb_samples; - - ret = submit_encode_frame(of, ost, frame); - if (ret < 0 && ret != AVERROR_EOF) - exit_program(1); -} - -static void do_subtitle_out(OutputFile *of, - OutputStream *ost, - AVSubtitle *sub) -{ - int subtitle_out_max_size = 1024 * 1024; - int subtitle_out_size, nb, i, ret; - AVCodecContext *enc; - AVPacket *pkt = ost->pkt; - int64_t pts; - - if (sub->pts == AV_NOPTS_VALUE) { - av_log(ost, AV_LOG_ERROR, "Subtitle packets must have a pts\n"); - if (exit_on_error) - exit_program(1); - return; - } - - enc = ost->enc_ctx; - - /* Note: DVB subtitle need one packet to draw them and one other - packet to clear them */ - /* XXX: signal it in the codec context ? */ - if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) - nb = 2; - else - nb = 1; - - /* shift timestamp to honor -ss and make check_recording_time() work with -t */ - pts = sub->pts; - if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE) - pts -= output_files[ost->file_index]->start_time; - for (i = 0; i < nb; i++) { - unsigned save_num_rects = sub->num_rects; - - if (!check_recording_time(ost, pts, AV_TIME_BASE_Q)) - return; - - ret = av_new_packet(pkt, subtitle_out_max_size); - if (ret < 0) - report_and_exit(AVERROR(ENOMEM)); - - sub->pts = pts; - // start_display_time is required to be 0 - sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q); - sub->end_display_time -= sub->start_display_time; - sub->start_display_time = 0; - if (i == 1) - sub->num_rects = 0; - - ost->frames_encoded++; - - subtitle_out_size = avcodec_encode_subtitle(enc, pkt->data, pkt->size, sub); - if (i == 1) - sub->num_rects = save_num_rects; - if (subtitle_out_size < 0) { - av_log(ost, AV_LOG_FATAL, "Subtitle encoding failed\n"); - exit_program(1); - } - - av_shrink_packet(pkt, subtitle_out_size); - pkt->time_base = ost->mux_timebase; - pkt->pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, pkt->time_base); - pkt->duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, pkt->time_base); - if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) { - /* XXX: the pts correction is handled here. Maybe handling - it in the codec would be better */ - if (i == 0) - pkt->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, pkt->time_base); - else - pkt->pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, pkt->time_base); - } - pkt->dts = pkt->pts; - - of_output_packet(of, pkt, ost, 0); - } -} - -/* Convert frame timestamps to the encoder timebase and decide how many times - * should this (and possibly previous) frame be repeated in order to conform to - * desired target framerate (if any). 
- */ -static void video_sync_process(OutputFile *of, OutputStream *ost, - AVFrame *next_picture, double duration, - int64_t *nb_frames, int64_t *nb_frames_prev) -{ - double delta0, delta; - - double sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture); - /* delta0 is the "drift" between the input frame (next_picture) and - * where it would fall in the output. */ - delta0 = sync_ipts - ost->next_pts; - delta = delta0 + duration; - - // tracks the number of times the PREVIOUS frame should be duplicated, - // mostly for variable framerate (VFR) - *nb_frames_prev = 0; - /* by default, we output a single frame */ - *nb_frames = 1; - - if (delta0 < 0 && - delta > 0 && - ost->vsync_method != VSYNC_PASSTHROUGH && - ost->vsync_method != VSYNC_DROP) { - if (delta0 < -0.6) { - av_log(ost, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0); - } else - av_log(ost, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0); - sync_ipts = ost->next_pts; - duration += delta0; - delta0 = 0; - } - - switch (ost->vsync_method) { - case VSYNC_VSCFR: - if (ost->vsync_frame_number == 0 && delta0 >= 0.5) { - av_log(ost, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0)); - delta = duration; - delta0 = 0; - ost->next_pts = llrint(sync_ipts); - } - case VSYNC_CFR: - // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c - if (frame_drop_threshold && delta < frame_drop_threshold && ost->vsync_frame_number) { - *nb_frames = 0; - } else if (delta < -1.1) - *nb_frames = 0; - else if (delta > 1.1) { - *nb_frames = llrintf(delta); - if (delta0 > 1.1) - *nb_frames_prev = llrintf(delta0 - 0.6); - } - next_picture->duration = 1; - break; - case VSYNC_VFR: - if (delta <= -0.6) - *nb_frames = 0; - else if (delta > 0.6) - ost->next_pts = llrint(sync_ipts); - next_picture->duration = duration; - break; - case VSYNC_DROP: - case VSYNC_PASSTHROUGH: - next_picture->duration = duration; - ost->next_pts = llrint(sync_ipts); - break; - default: - av_assert0(0); - } -} - -enum AVPictureType forced_kf_apply(void *logctx, KeyframeForceCtx *kf, - AVRational tb, const AVFrame *in_picture, - int dup_idx) -{ - double pts_time; - - if (kf->ref_pts == AV_NOPTS_VALUE) - kf->ref_pts = in_picture->pts; - - pts_time = (in_picture->pts - kf->ref_pts) * av_q2d(tb); - if (kf->index < kf->nb_pts && - av_compare_ts(in_picture->pts, tb, kf->pts[kf->index], AV_TIME_BASE_Q) >= 0) { - kf->index++; - goto force_keyframe; - } else if (kf->pexpr) { - double res; - kf->expr_const_values[FKF_T] = pts_time; - res = av_expr_eval(kf->pexpr, - kf->expr_const_values, NULL); - ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n", - kf->expr_const_values[FKF_N], - kf->expr_const_values[FKF_N_FORCED], - kf->expr_const_values[FKF_PREV_FORCED_N], - kf->expr_const_values[FKF_T], - kf->expr_const_values[FKF_PREV_FORCED_T], - res); - - kf->expr_const_values[FKF_N] += 1; - - if (res) { - kf->expr_const_values[FKF_PREV_FORCED_N] = kf->expr_const_values[FKF_N] - 1; - kf->expr_const_values[FKF_PREV_FORCED_T] = kf->expr_const_values[FKF_T]; - kf->expr_const_values[FKF_N_FORCED] += 1; - goto force_keyframe; - } - } else if (kf->type == KF_FORCE_SOURCE && - in_picture->key_frame == 1 && !dup_idx) { - goto force_keyframe; - } else if (kf->type == KF_FORCE_SOURCE_NO_DROP && !dup_idx) { - kf->dropped_keyframe = 0; - if ((in_picture->key_frame == 1) || kf->dropped_keyframe) - goto force_keyframe; - } - - return AV_PICTURE_TYPE_NONE; - -force_keyframe: - av_log(logctx, 
AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time); - return AV_PICTURE_TYPE_I; -} - -/* May modify/reset next_picture */ -static void do_video_out(OutputFile *of, - OutputStream *ost, - AVFrame *next_picture) -{ - int ret; - AVCodecContext *enc = ost->enc_ctx; - AVRational frame_rate; - int64_t nb_frames, nb_frames_prev, i; - double duration = 0; - InputStream *ist = ost->ist; - AVFilterContext *filter = ost->filter->filter; - - init_output_stream_wrapper(ost, next_picture, 1); - - frame_rate = av_buffersink_get_frame_rate(filter); - if (frame_rate.num > 0 && frame_rate.den > 0) - duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base)); - - if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num) - duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base))); - - if (!ost->filters_script && - !ost->filters && - (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) && - next_picture && - ist && - lrintf(next_picture->duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) { - duration = lrintf(next_picture->duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)); - } - - if (!next_picture) { - //end, flushing - nb_frames_prev = nb_frames = mid_pred(ost->last_nb0_frames[0], - ost->last_nb0_frames[1], - ost->last_nb0_frames[2]); - } else { - video_sync_process(of, ost, next_picture, duration, - &nb_frames, &nb_frames_prev); - } - - memmove(ost->last_nb0_frames + 1, - ost->last_nb0_frames, - sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1)); - ost->last_nb0_frames[0] = nb_frames_prev; - - if (nb_frames_prev == 0 && ost->last_dropped) { - nb_frames_drop++; - av_log(ost, AV_LOG_VERBOSE, - "*** dropping frame %"PRId64" at ts %"PRId64"\n", - ost->vsync_frame_number, ost->last_frame->pts); - } - if (nb_frames > (nb_frames_prev && ost->last_dropped) + (nb_frames > nb_frames_prev)) { - if (nb_frames > dts_error_threshold * 30) { - av_log(ost, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", nb_frames - 1); - nb_frames_drop++; - return; - } - nb_frames_dup += nb_frames - (nb_frames_prev && ost->last_dropped) - (nb_frames > nb_frames_prev); - av_log(ost, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", nb_frames - 1); - if (nb_frames_dup > dup_warning) { - av_log(ost, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", dup_warning); - dup_warning *= 10; - } - } - ost->last_dropped = nb_frames == nb_frames_prev && next_picture; - ost->kf.dropped_keyframe = ost->last_dropped && next_picture && next_picture->key_frame; - - /* duplicates frame if needed */ - for (i = 0; i < nb_frames; i++) { - AVFrame *in_picture; - - if (i < nb_frames_prev && ost->last_frame->buf[0]) { - in_picture = ost->last_frame; - } else - in_picture = next_picture; - - if (!in_picture) - return; - - in_picture->pts = ost->next_pts; - - if (!check_recording_time(ost, in_picture->pts, ost->enc_ctx->time_base)) - return; - - in_picture->quality = enc->global_quality; - in_picture->pict_type = forced_kf_apply(ost, &ost->kf, enc->time_base, in_picture, i); - - ret = submit_encode_frame(of, ost, in_picture); - if (ret == AVERROR_EOF) - break; - else if (ret < 0) - exit_program(1); - - ost->next_pts++; - ost->vsync_frame_number++; - } - - av_frame_unref(ost->last_frame); - if (next_picture) - av_frame_move_ref(ost->last_frame, next_picture); -} - -/** - * Get and encode new output from any of the filtergraphs, without causing - * activity. 
- * - * @return 0 for success, <0 for severe errors - */ -static int reap_filters(int flush) -{ - AVFrame *filtered_frame = NULL; - - /* Reap all buffers present in the buffer sinks */ - for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) { - OutputFile *of = output_files[ost->file_index]; - AVFilterContext *filter; - AVCodecContext *enc = ost->enc_ctx; - int ret = 0; - - if (!ost->filter || !ost->filter->graph->graph) - continue; - filter = ost->filter->filter; - - /* - * Unlike video, with audio the audio frame size matters. - * Currently we are fully reliant on the lavfi filter chain to - * do the buffering deed for us, and thus the frame size parameter - * needs to be set accordingly. Where does one get the required - * frame size? From the initialized AVCodecContext of an audio - * encoder. Thus, if we have gotten to an audio stream, initialize - * the encoder earlier than receiving the first AVFrame. - */ - if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_AUDIO) - init_output_stream_wrapper(ost, NULL, 1); - - filtered_frame = ost->filtered_frame; - - while (1) { - ret = av_buffersink_get_frame_flags(filter, filtered_frame, - AV_BUFFERSINK_FLAG_NO_REQUEST); - if (ret < 0) { - if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) { - av_log(NULL, AV_LOG_WARNING, - "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret)); - } else if (flush && ret == AVERROR_EOF) { - if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO) - do_video_out(of, ost, NULL); - } - break; - } - if (ost->finished) { - av_frame_unref(filtered_frame); - continue; - } - - if (filtered_frame->pts != AV_NOPTS_VALUE) { - AVRational tb = av_buffersink_get_time_base(filter); - ost->last_filter_pts = av_rescale_q(filtered_frame->pts, tb, - AV_TIME_BASE_Q); - filtered_frame->time_base = tb; - - if (debug_ts) - av_log(NULL, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n", - av_ts2str(filtered_frame->pts), - av_ts2timestr(filtered_frame->pts, &tb), - tb.num, tb.den); - } - - switch (av_buffersink_get_type(filter)) { - case AVMEDIA_TYPE_VIDEO: - if (!ost->frame_aspect_ratio.num) - enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio; - - do_video_out(of, ost, filtered_frame); - break; - case AVMEDIA_TYPE_AUDIO: - if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) && - enc->ch_layout.nb_channels != filtered_frame->ch_layout.nb_channels) { - av_log(NULL, AV_LOG_ERROR, - "Audio filter graph output is not normalized and encoder does not support parameter changes\n"); - break; - } - do_audio_out(of, ost, filtered_frame); - break; - default: - // TODO support subtitle filters - av_assert0(0); - } - - av_frame_unref(filtered_frame); - } - } - - return 0; -} - -static void print_final_stats(int64_t total_size) -{ - uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0; - uint64_t subtitle_size = 0; - uint64_t data_size = 0; - float percent = -1.0; - int i, j; - int pass1_used = 1; - - for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) { - AVCodecParameters *par = ost->st->codecpar; - const uint64_t s = ost->data_size_mux; - - switch (par->codec_type) { - case AVMEDIA_TYPE_VIDEO: video_size += s; break; - case AVMEDIA_TYPE_AUDIO: audio_size += s; break; - case AVMEDIA_TYPE_SUBTITLE: subtitle_size += s; break; - default: other_size += s; break; - } - extra_size += par->extradata_size; - data_size += s; - if (ost->enc_ctx && - (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) - != AV_CODEC_FLAG_PASS1) - pass1_used = 0; - } 
- - if (data_size && total_size>0 && total_size >= data_size) - percent = 100.0 * (total_size - data_size) / data_size; - - av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ", - video_size / 1024.0, - audio_size / 1024.0, - subtitle_size / 1024.0, - other_size / 1024.0, - extra_size / 1024.0); - if (percent >= 0.0) - av_log(NULL, AV_LOG_INFO, "%f%%", percent); - else - av_log(NULL, AV_LOG_INFO, "unknown"); - av_log(NULL, AV_LOG_INFO, "\n"); - - /* print verbose per-stream stats */ - for (i = 0; i < nb_input_files; i++) { - InputFile *f = input_files[i]; - uint64_t total_packets = 0, total_size = 0; - - av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n", - i, f->ctx->url); - - for (j = 0; j < f->nb_streams; j++) { - InputStream *ist = f->streams[j]; - enum AVMediaType type = ist->par->codec_type; - - total_size += ist->data_size; - total_packets += ist->nb_packets; - - av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ", - i, j, av_get_media_type_string(type)); - av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ", - ist->nb_packets, ist->data_size); - - if (ist->decoding_needed) { - av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded", - ist->frames_decoded); - if (type == AVMEDIA_TYPE_AUDIO) - av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded); - av_log(NULL, AV_LOG_VERBOSE, "; "); - } - - av_log(NULL, AV_LOG_VERBOSE, "\n"); - } - - av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n", - total_packets, total_size); - } - - for (i = 0; i < nb_output_files; i++) { - OutputFile *of = output_files[i]; - uint64_t total_packets = 0, total_size = 0; - - av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n", - i, of->url); - - for (j = 0; j < of->nb_streams; j++) { - OutputStream *ost = of->streams[j]; - enum AVMediaType type = ost->st->codecpar->codec_type; - - total_size += ost->data_size_mux; - total_packets += atomic_load(&ost->packets_written); - - av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ", - i, j, av_get_media_type_string(type)); - if (ost->enc_ctx) { - av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded", - ost->frames_encoded); - if (type == AVMEDIA_TYPE_AUDIO) - av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded); - av_log(NULL, AV_LOG_VERBOSE, "; "); - } - - av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ", - atomic_load(&ost->packets_written), ost->data_size_mux); - - av_log(NULL, AV_LOG_VERBOSE, "\n"); - } - - av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n", - total_packets, total_size); - } - if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){ - av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded "); - if (pass1_used) { - av_log(NULL, AV_LOG_WARNING, "\n"); - } else { - av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n"); - } - } -} - -static void forward_report(uint64_t frame_number, float fps, float quality, int64_t total_size, int64_t pts, double bitrate, double speed) -{ - // FORWARD DATA - if (report_callback != NULL) { - double milliseconds = 0; - if (pts != AV_NOPTS_VALUE) { - milliseconds = ((double)FFABS64U(pts)) / 1000; - } - if (pts < 0) { - report_callback(frame_number, fps, quality, total_size, 0 - milliseconds, bitrate, speed); - } else { - report_callback(frame_number, fps, quality, total_size, milliseconds, bitrate, 
speed); - } - } -} - -static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time) -{ - AVBPrint buf, buf_script; - int64_t total_size = of_filesize(output_files[0]); - int vid; - double bitrate; - double speed; - int64_t pts = AV_NOPTS_VALUE; - int mins, secs, us; - int64_t hours; - const char *hours_sign; - int ret; - float t; - - // FFmpegKit field declarations - int local_print_stats = 1; - uint64_t frame_number = 0; - float fps = 0; - float q = 0; - - if (!print_stats && !is_last_report && !progress_avio) - local_print_stats = 0; - - if (!is_last_report) { - if (last_time == -1) { - last_time = cur_time; - } - if (((cur_time - last_time) < stats_period && !first_report) || - (first_report && nb_output_dumped < nb_output_files)) - return; - last_time = cur_time; - } - - t = (cur_time-timer_start) / 1000000.0; - - vid = 0; - if (local_print_stats) { - av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC); - av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC); - } - for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) { - const AVCodecContext * const enc = ost->enc_ctx; - q = enc ? ost->quality / (float) FF_QP2LAMBDA : -1; - - if (local_print_stats && vid && ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { - av_bprintf(&buf, "q=%2.1f ", q); - av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n", - ost->file_index, ost->index, q); - } - if (!vid && ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { - frame_number = atomic_load(&ost->packets_written); - fps = t > 1 ? frame_number / t : 0; - if (local_print_stats) { - av_bprintf(&buf, "frame=%5"PRId64" fps=%3.*f q=%3.1f ", - frame_number, fps < 9.95, fps, q); - av_bprintf(&buf_script, "frame=%"PRId64"\n", frame_number); - av_bprintf(&buf_script, "fps=%.2f\n", fps); - av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n", - ost->file_index, ost->index, q); - } - if (local_print_stats && is_last_report) - av_bprintf(&buf, "L"); - if (qp_hist) { - int j; - int qp = lrintf(q); - if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram)) - qp_histogram[qp]++; - if (local_print_stats) { - for (j = 0; j < 32; j++) - av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1)); - } - } - - if (local_print_stats && enc && (enc->flags & AV_CODEC_FLAG_PSNR) && - (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) { - int j; - double error, error_sum = 0; - double scale, scale_sum = 0; - double p; - char type[3] = { 'Y','U','V' }; - av_bprintf(&buf, "PSNR="); - for (j = 0; j < 3; j++) { - if (is_last_report) { - error = enc->error[j]; - scale = enc->width * enc->height * 255.0 * 255.0 * frame_number; - } else { - error = ost->error[j]; - scale = enc->width * enc->height * 255.0 * 255.0; - } - if (j) - scale /= 4; - error_sum += error; - scale_sum += scale; - p = psnr(error / scale); - av_bprintf(&buf, "%c:%2.2f ", type[j], p); - av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n", - ost->file_index, ost->index, type[j] | 32, p); - } - p = psnr(error_sum / scale_sum); - av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum)); - av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n", - ost->file_index, ost->index, p); - } - vid = 1; - } - /* compute min output value */ - if (ost->last_mux_dts != AV_NOPTS_VALUE) { - if (pts == AV_NOPTS_VALUE || ost->last_mux_dts > pts) - pts = ost->last_mux_dts; - if (copy_ts) { - if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1) - copy_ts_first_pts = pts; - if (copy_ts_first_pts != AV_NOPTS_VALUE) - pts -= copy_ts_first_pts; - } - } - - if (is_last_report) - nb_frames_drop += 
ost->last_dropped; - } - - us = FFABS64U(pts) % AV_TIME_BASE; - secs = FFABS64U(pts) / AV_TIME_BASE % 60; - mins = FFABS64U(pts) / AV_TIME_BASE / 60 % 60; - hours = FFABS64U(pts) / AV_TIME_BASE / 3600; - hours_sign = (pts < 0) ? "-" : ""; - - bitrate = pts != AV_NOPTS_VALUE && pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1; - speed = pts != AV_NOPTS_VALUE && t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1; - - // FFmpegKit forward report - forward_report(frame_number, fps, q, total_size, pts, bitrate, speed); - - if (local_print_stats) { - if (total_size < 0) av_bprintf(&buf, "size=N/A time="); - else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0); - if (pts == AV_NOPTS_VALUE) { - av_bprintf(&buf, "N/A "); - } else { - av_bprintf(&buf, "%s%02"PRId64":%02d:%02d.%02d ", - hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE); + if (local_print_stats) { + if (total_size < 0) + av_bprintf(&buf, "size=N/A time="); + else + av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0); + if (pts == AV_NOPTS_VALUE) { + av_bprintf(&buf, "N/A "); + } else { + av_bprintf(&buf, "%s%02" PRId64 ":%02d:%02d.%02d ", hours_sign, + hours, mins, secs, (100 * us) / AV_TIME_BASE); } if (bitrate < 0) { av_bprintf(&buf, "bitrate=N/A"); av_bprintf(&buf_script, "bitrate=N/A\n"); - }else{ + } else { av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate); av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate); } - if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n"); - else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size); + if (total_size < 0) + av_bprintf(&buf_script, "total_size=N/A\n"); + else + av_bprintf(&buf_script, "total_size=%" PRId64 "\n", total_size); if (pts == AV_NOPTS_VALUE) { av_bprintf(&buf_script, "out_time_us=N/A\n"); av_bprintf(&buf_script, "out_time_ms=N/A\n"); av_bprintf(&buf_script, "out_time=N/A\n"); } else { - av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts); - av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts); - av_bprintf(&buf_script, "out_time=%s%02"PRId64":%02d:%02d.%06d\n", + av_bprintf(&buf_script, "out_time_us=%" PRId64 "\n", pts); + av_bprintf(&buf_script, "out_time_ms=%" PRId64 "\n", pts); + av_bprintf(&buf_script, "out_time=%s%02" PRId64 ":%02d:%02d.%06d\n", hours_sign, hours, mins, secs, us); } if (nb_frames_dup || nb_frames_drop) - av_bprintf(&buf, " dup=%"PRId64" drop=%"PRId64, nb_frames_dup, nb_frames_drop); - av_bprintf(&buf_script, "dup_frames=%"PRId64"\n", nb_frames_dup); - av_bprintf(&buf_script, "drop_frames=%"PRId64"\n", nb_frames_drop); + av_bprintf(&buf, " dup=%" PRId64 " drop=%" PRId64, nb_frames_dup, + nb_frames_drop); + av_bprintf(&buf_script, "dup_frames=%" PRId64 "\n", nb_frames_dup); + av_bprintf(&buf_script, "drop_frames=%" PRId64 "\n", nb_frames_drop); if (speed < 0) { av_bprintf(&buf, " speed=N/A"); @@ -1924,7 +804,7 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti if (print_stats || is_last_report) { const char end = is_last_report ? 
'\n' : '\r'; - if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) { + if (print_stats == 1 && AV_LOG_INFO > av_log_get_level()) { av_log(NULL, AV_LOG_STDERR, "%s %c", buf.str, end); } else av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end); @@ -1941,651 +821,24 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti if (is_last_report) { if ((ret = avio_closep(&progress_avio)) < 0) av_log(NULL, AV_LOG_ERROR, - "Error closing progress log, loss of information possible: %s\n", av_err2str(ret)); + "Error closing progress log, loss of information " + "possible: %s\n", + av_err2str(ret)); } } first_report = 0; - - if (is_last_report) - print_final_stats(total_size); - } -} - -static int ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par) -{ - int ret; - - // We never got any input. Set a fake format, which will - // come from libavformat. - ifilter->format = par->format; - ifilter->sample_rate = par->sample_rate; - ifilter->width = par->width; - ifilter->height = par->height; - ifilter->sample_aspect_ratio = par->sample_aspect_ratio; - ret = av_channel_layout_copy(&ifilter->ch_layout, &par->ch_layout); - if (ret < 0) - return ret; - - return 0; -} - -static void flush_encoders(void) -{ - int ret; - - for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) { - OutputFile *of = output_files[ost->file_index]; - if (ost->sq_idx_encode >= 0) - sq_send(of->sq_encode, ost->sq_idx_encode, SQFRAME(NULL)); - } - - for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) { - AVCodecContext *enc = ost->enc_ctx; - OutputFile *of = output_files[ost->file_index]; - - if (!enc) - continue; - - // Try to enable encoding with no input frames. - // Maybe we should just let encoding fail instead. - if (!ost->initialized) { - FilterGraph *fg = ost->filter->graph; - - av_log(ost, AV_LOG_WARNING, - "Finishing stream without any data written to it.\n"); - - if (ost->filter && !fg->graph) { - int x; - for (x = 0; x < fg->nb_inputs; x++) { - InputFilter *ifilter = fg->inputs[x]; - if (ifilter->format < 0 && - ifilter_parameters_from_codecpar(ifilter, ifilter->ist->par) < 0) { - av_log(ost, AV_LOG_ERROR, "Error copying paramerets from input stream\n"); - exit_program(1); - } - } - - if (!ifilter_has_all_input_formats(fg)) - continue; - - ret = configure_filtergraph(fg); - if (ret < 0) { - av_log(ost, AV_LOG_ERROR, "Error configuring filter graph\n"); - exit_program(1); - } - - of_output_packet(of, ost->pkt, ost, 1); - } - - init_output_stream_wrapper(ost, NULL, 1); - } - - if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO) - continue; - - ret = submit_encode_frame(of, ost, NULL); - if (ret != AVERROR_EOF) - exit_program(1); - } -} - -/* - * Check whether a packet from ist should be written into ost at this time - */ -static int check_output_constraints(InputStream *ist, OutputStream *ost) -{ - OutputFile *of = output_files[ost->file_index]; - - if (ost->ist != ist) - return 0; - - if (ost->finished & MUXER_FINISHED) - return 0; - - if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time) - return 0; - - return 1; -} - -static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt) -{ - OutputFile *of = output_files[ost->file_index]; - InputFile *f = input_files [ist->file_index]; - int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 
0 : of->start_time; - int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase); - AVPacket *opkt = ost->pkt; - - av_packet_unref(opkt); - // EOF: flush output bitstream filters. - if (!pkt) { - of_output_packet(of, opkt, ost, 1); - return; - } - - if (!ost->streamcopy_started && !(pkt->flags & AV_PKT_FLAG_KEY) && - !ost->copy_initial_nonkeyframes) - return; - - if (!ost->streamcopy_started && !ost->copy_prior_start) { - if (pkt->pts == AV_NOPTS_VALUE ? - ist->pts < ost->ts_copy_start : - pkt->pts < av_rescale_q(ost->ts_copy_start, AV_TIME_BASE_Q, ist->st->time_base)) - return; - } - - if (of->recording_time != INT64_MAX && - ist->pts >= of->recording_time + start_time) { - close_output_stream(ost); - return; - } - - if (f->recording_time != INT64_MAX) { - start_time = 0; - if (copy_ts) { - start_time += f->start_time != AV_NOPTS_VALUE ? f->start_time : 0; - start_time += start_at_zero ? 0 : f->start_time_effective; - } - if (ist->pts >= f->recording_time + start_time) { - close_output_stream(ost); - return; - } - } - - if (av_packet_ref(opkt, pkt) < 0) - exit_program(1); - - opkt->time_base = ost->mux_timebase; - - if (pkt->pts != AV_NOPTS_VALUE) - opkt->pts = av_rescale_q(pkt->pts, ist->st->time_base, opkt->time_base) - ost_tb_start_time; - - if (pkt->dts == AV_NOPTS_VALUE) { - opkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, opkt->time_base); - } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) { - int duration = av_get_audio_frame_duration2(ist->par, pkt->size); - if(!duration) - duration = ist->par->frame_size; - opkt->dts = av_rescale_delta(ist->st->time_base, pkt->dts, - (AVRational){1, ist->par->sample_rate}, duration, - &ist->filter_in_rescale_delta_last, opkt->time_base); - /* dts will be set immediately afterwards to what pts is now */ - opkt->pts = opkt->dts - ost_tb_start_time; - } else - opkt->dts = av_rescale_q(pkt->dts, ist->st->time_base, opkt->time_base); - opkt->dts -= ost_tb_start_time; - - opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, opkt->time_base); - - { - int ret = trigger_fix_sub_duration_heartbeat(ost, pkt); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, - "Subtitle heartbeat logic failed in %s! (%s)\n", - __func__, av_err2str(ret)); - exit_program(1); - } - } - - of_output_packet(of, opkt, ost, 0); - - ost->streamcopy_started = 1; -} - -static void check_decode_result(InputStream *ist, int *got_output, int ret) -{ - if (*got_output || ret<0) - decode_error_stat[ret<0] ++; - - if (ret < 0 && exit_on_error) - exit_program(1); - - if (*got_output && ist) { - if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) { - av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING, - "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index); - if (exit_on_error) - exit_program(1); - } - } -} - -// Filters can be configured only if the formats of all inputs are known. 
-static int ifilter_has_all_input_formats(FilterGraph *fg) -{ - int i; - for (i = 0; i < fg->nb_inputs; i++) { - if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO || - fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO)) - return 0; - } - return 1; -} - -static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference) -{ - FilterGraph *fg = ifilter->graph; - AVFrameSideData *sd; - int need_reinit, ret; - int buffersrc_flags = AV_BUFFERSRC_FLAG_PUSH; - - if (keep_reference) - buffersrc_flags |= AV_BUFFERSRC_FLAG_KEEP_REF; - - /* determine if the parameters for this input changed */ - need_reinit = ifilter->format != frame->format; - - switch (ifilter->ist->par->codec_type) { - case AVMEDIA_TYPE_AUDIO: - need_reinit |= ifilter->sample_rate != frame->sample_rate || - av_channel_layout_compare(&ifilter->ch_layout, &frame->ch_layout); - break; - case AVMEDIA_TYPE_VIDEO: - need_reinit |= ifilter->width != frame->width || - ifilter->height != frame->height; - break; - } - - if (!ifilter->ist->reinit_filters && fg->graph) - need_reinit = 0; - - if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx || - (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data)) - need_reinit = 1; - - if ((sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX))) { - if (!ifilter->displaymatrix || memcmp(sd->data, ifilter->displaymatrix, sizeof(int32_t) * 9)) - need_reinit = 1; - } else if (ifilter->displaymatrix) - need_reinit = 1; - - if (need_reinit) { - ret = ifilter_parameters_from_frame(ifilter, frame); - if (ret < 0) - return ret; - } - - /* (re)init the graph if possible, otherwise buffer the frame and return */ - if (need_reinit || !fg->graph) { - if (!ifilter_has_all_input_formats(fg)) { - AVFrame *tmp = av_frame_clone(frame); - if (!tmp) - return AVERROR(ENOMEM); - - ret = av_fifo_write(ifilter->frame_queue, &tmp, 1); - if (ret < 0) - av_frame_free(&tmp); - - return ret; - } - - ret = reap_filters(1); - if (ret < 0 && ret != AVERROR_EOF) { - av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret)); - return ret; - } - - ret = configure_filtergraph(fg); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n"); - return ret; - } - } - - ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, buffersrc_flags); - if (ret < 0) { - if (ret != AVERROR_EOF) - av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret)); - return ret; - } - - return 0; -} - -static int ifilter_send_eof(InputFilter *ifilter, int64_t pts) -{ - int ret = 0; - - ifilter->eof = 1; - - if (ifilter->filter) { - - /* THIS VALIDATION IS REQUIRED TO COMPLETE CANCELLATION */ - if (!received_sigterm && !cancelRequested(globalSessionId)) { - ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH); - } - if (ret < 0) - return ret; - } else { - // the filtergraph was never configured - if (ifilter->format < 0) { - ret = ifilter_parameters_from_codecpar(ifilter, ifilter->ist->par); - if (ret < 0) - return ret; - } - if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) { - av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index); - return AVERROR_INVALIDDATA; - } - } - - return 0; -} - -// This does not quite work like avcodec_decode_audio4/avcodec_decode_video2. -// There is the following difference: if you got a frame, you must call -// it again with pkt=NULL. 
pkt==NULL is treated differently from pkt->size==0 -// (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet) -static int decode(InputStream *ist, AVCodecContext *avctx, - AVFrame *frame, int *got_frame, AVPacket *pkt) -{ - int ret; - - *got_frame = 0; - - if (pkt) { - ret = avcodec_send_packet(avctx, pkt); - // In particular, we don't expect AVERROR(EAGAIN), because we read all - // decoded frames with avcodec_receive_frame() until done. - if (ret < 0 && ret != AVERROR_EOF) - return ret; - } - - ret = avcodec_receive_frame(avctx, frame); - if (ret < 0 && ret != AVERROR(EAGAIN)) - return ret; - if (ret >= 0) { - if (ist->want_frame_data) { - FrameData *fd; - - av_assert0(!frame->opaque_ref); - frame->opaque_ref = av_buffer_allocz(sizeof(*fd)); - if (!frame->opaque_ref) { - av_frame_unref(frame); - return AVERROR(ENOMEM); - } - fd = (FrameData*)frame->opaque_ref->data; - fd->pts = frame->pts; - fd->tb = avctx->pkt_timebase; - fd->idx = avctx->frame_num - 1; - } - - *got_frame = 1; - } - - return 0; -} - -static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame) -{ - int i, ret; - - av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */ - for (i = 0; i < ist->nb_filters; i++) { - ret = ifilter_send_frame(ist->filters[i], decoded_frame, i < ist->nb_filters - 1); - if (ret == AVERROR_EOF) - ret = 0; /* ignore */ - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, - "Failed to inject frame into filter network: %s\n", av_err2str(ret)); - break; - } } - return ret; } -static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output, - int *decode_failed) -{ - AVFrame *decoded_frame = ist->decoded_frame; - AVCodecContext *avctx = ist->dec_ctx; - int ret, err = 0; - AVRational decoded_frame_tb; - - update_benchmark(NULL); - ret = decode(ist, avctx, decoded_frame, got_output, pkt); - update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index); - if (ret < 0) - *decode_failed = 1; - - if (ret != AVERROR_EOF) - check_decode_result(ist, got_output, ret); - - if (!*got_output || ret < 0) - return ret; - - ist->samples_decoded += decoded_frame->nb_samples; - ist->frames_decoded++; - - /* increment next_dts to use for the case where the input stream does not - have timestamps or there are multiple frames in the packet */ - ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) / - decoded_frame->sample_rate; - ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) / - decoded_frame->sample_rate; - - if (decoded_frame->pts != AV_NOPTS_VALUE) { - decoded_frame_tb = ist->st->time_base; - } else if (pkt && pkt->pts != AV_NOPTS_VALUE) { - decoded_frame->pts = pkt->pts; - decoded_frame_tb = ist->st->time_base; - }else { - decoded_frame->pts = ist->dts; - decoded_frame_tb = AV_TIME_BASE_Q; - } - if (pkt && pkt->duration && ist->prev_pkt_pts != AV_NOPTS_VALUE && - pkt->pts != AV_NOPTS_VALUE && pkt->pts - ist->prev_pkt_pts > pkt->duration) - ist->filter_in_rescale_delta_last = AV_NOPTS_VALUE; - if (pkt) - ist->prev_pkt_pts = pkt->pts; - if (decoded_frame->pts != AV_NOPTS_VALUE) - decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts, - (AVRational){1, decoded_frame->sample_rate}, - decoded_frame->nb_samples, - &ist->filter_in_rescale_delta_last, - (AVRational){1, decoded_frame->sample_rate}); - ist->nb_samples = decoded_frame->nb_samples; - err = send_frame_to_filters(ist, decoded_frame); - - av_frame_unref(decoded_frame); - return err < 0 ? 
err : ret; -} - -static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof, - int *decode_failed) -{ - AVFrame *decoded_frame = ist->decoded_frame; - int i, ret = 0, err = 0; - int64_t best_effort_timestamp; - int64_t dts = AV_NOPTS_VALUE; - - // With fate-indeo3-2, we're getting 0-sized packets before EOF for some - // reason. This seems like a semi-critical bug. Don't trigger EOF, and - // skip the packet. - if (!eof && pkt && pkt->size == 0) - return 0; - - if (ist->dts != AV_NOPTS_VALUE) - dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base); - if (pkt) { - pkt->dts = dts; // ffmpeg.c probably shouldn't do this - } - - // The old code used to set dts on the drain packet, which does not work - // with the new API anymore. - if (eof) { - void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0])); - if (!new) - return AVERROR(ENOMEM); - ist->dts_buffer = new; - ist->dts_buffer[ist->nb_dts_buffer++] = dts; - } - - update_benchmark(NULL); - ret = decode(ist, ist->dec_ctx, decoded_frame, got_output, pkt); - update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index); - if (ret < 0) - *decode_failed = 1; - - // The following line may be required in some cases where there is no parser - // or the parser does not has_b_frames correctly - if (ist->par->video_delay < ist->dec_ctx->has_b_frames) { - if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) { - ist->par->video_delay = ist->dec_ctx->has_b_frames; - } else - av_log(ist->dec_ctx, AV_LOG_WARNING, - "video_delay is larger in decoder than demuxer %d > %d.\n" - "If you want to help, upload a sample " - "of this file to https://streams.videolan.org/upload/ " - "and contact the ffmpeg-devel mailing list. 
(ffmpeg-devel@ffmpeg.org)\n", - ist->dec_ctx->has_b_frames, - ist->par->video_delay); - } - - if (ret != AVERROR_EOF) - check_decode_result(ist, got_output, ret); - - if (*got_output && ret >= 0) { - if (ist->dec_ctx->width != decoded_frame->width || - ist->dec_ctx->height != decoded_frame->height || - ist->dec_ctx->pix_fmt != decoded_frame->format) { - av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n", - decoded_frame->width, - decoded_frame->height, - decoded_frame->format, - ist->dec_ctx->width, - ist->dec_ctx->height, - ist->dec_ctx->pix_fmt); - } - } - - if (!*got_output || ret < 0) - return ret; - - if(ist->top_field_first>=0) - decoded_frame->top_field_first = ist->top_field_first; - - ist->frames_decoded++; - - if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) { - err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame); - if (err < 0) - goto fail; - } - - best_effort_timestamp= decoded_frame->best_effort_timestamp; - *duration_pts = decoded_frame->duration; - - if (ist->framerate.num) - best_effort_timestamp = ist->cfr_next_pts++; - - if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) { - best_effort_timestamp = ist->dts_buffer[0]; - - for (i = 0; i < ist->nb_dts_buffer - 1; i++) - ist->dts_buffer[i] = ist->dts_buffer[i + 1]; - ist->nb_dts_buffer--; - } - - if(best_effort_timestamp != AV_NOPTS_VALUE) { - int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q); - - if (ts != AV_NOPTS_VALUE) - ist->next_pts = ist->pts = ts; - } - - if (debug_ts) { - av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video " - "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n", - ist->st->index, av_ts2str(decoded_frame->pts), - av_ts2timestr(decoded_frame->pts, &ist->st->time_base), - best_effort_timestamp, - av_ts2timestr(best_effort_timestamp, &ist->st->time_base), - decoded_frame->key_frame, decoded_frame->pict_type, - ist->st->time_base.num, ist->st->time_base.den); - } - - if (ist->st->sample_aspect_ratio.num) - decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio; - - err = send_frame_to_filters(ist, decoded_frame); - -fail: - av_frame_unref(decoded_frame); - return err < 0 ? err : ret; -} - -static int process_subtitle(InputStream *ist, AVSubtitle *subtitle, int *got_output) -{ - int ret = 0; - int free_sub = 1; - - if (ist->fix_sub_duration) { - int end = 1; - if (ist->prev_sub.got_output) { - end = av_rescale(subtitle->pts - ist->prev_sub.subtitle.pts, - 1000, AV_TIME_BASE); - if (end < ist->prev_sub.subtitle.end_display_time) { - av_log(NULL, AV_LOG_DEBUG, - "Subtitle duration reduced from %"PRId32" to %d%s\n", - ist->prev_sub.subtitle.end_display_time, end, - end <= 0 ? 
", dropping it" : ""); - ist->prev_sub.subtitle.end_display_time = end; - } - } - FFSWAP(int, *got_output, ist->prev_sub.got_output); - FFSWAP(int, ret, ist->prev_sub.ret); - FFSWAP(AVSubtitle, *subtitle, ist->prev_sub.subtitle); - if (end <= 0) - goto out; - } - - if (!*got_output) - return ret; - - if (ist->sub2video.frame) { - sub2video_update(ist, INT64_MIN, subtitle); - } else if (ist->nb_filters) { - if (!ist->sub2video.sub_queue) - ist->sub2video.sub_queue = av_fifo_alloc2(8, sizeof(AVSubtitle), AV_FIFO_FLAG_AUTO_GROW); - if (!ist->sub2video.sub_queue) - report_and_exit(AVERROR(ENOMEM)); - - ret = av_fifo_write(ist->sub2video.sub_queue, subtitle, 1); - if (ret < 0) - exit_program(1); - free_sub = 0; - } - - if (!subtitle->num_rects) - goto out; - - for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) { - if (!check_output_constraints(ist, ost) || !ost->enc_ctx - || ost->enc_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE) - continue; - - do_subtitle_out(output_files[ost->file_index], ost, subtitle); - } - -out: - if (free_sub) - avsubtitle_free(subtitle); - return ret; -} - -static int copy_av_subtitle(AVSubtitle *dst, AVSubtitle *src) -{ +int copy_av_subtitle(AVSubtitle *dst, const AVSubtitle *src) { int ret = AVERROR_BUG; - AVSubtitle tmp = { - .format = src->format, - .start_display_time = src->start_display_time, - .end_display_time = src->end_display_time, - .num_rects = 0, - .rects = NULL, - .pts = src->pts - }; + AVSubtitle tmp = {.format = src->format, + .start_display_time = src->start_display_time, + .end_display_time = src->end_display_time, + .num_rects = 0, + .rects = NULL, + .pts = src->pts}; if (!src->num_rects) goto success; @@ -2604,972 +857,178 @@ static int copy_av_subtitle(AVSubtitle *dst, AVSubtitle *src) tmp.num_rects++; - dst_rect->type = src_rect->type; - dst_rect->flags = src_rect->flags; + dst_rect->type = src_rect->type; + dst_rect->flags = src_rect->flags; - dst_rect->x = src_rect->x; - dst_rect->y = src_rect->y; - dst_rect->w = src_rect->w; - dst_rect->h = src_rect->h; + dst_rect->x = src_rect->x; + dst_rect->y = src_rect->y; + dst_rect->w = src_rect->w; + dst_rect->h = src_rect->h; dst_rect->nb_colors = src_rect->nb_colors; - if (src_rect->text) - if (!(dst_rect->text = av_strdup(src_rect->text))) { - ret = AVERROR(ENOMEM); - goto cleanup; - } - - if (src_rect->ass) - if (!(dst_rect->ass = av_strdup(src_rect->ass))) { - ret = AVERROR(ENOMEM); - goto cleanup; - } - - for (int j = 0; j < 4; j++) { - // SUBTITLE_BITMAP images are special in the sense that they - // are like PAL8 images. first pointer to data, second to - // palette. This makes the size calculation match this. - size_t buf_size = src_rect->type == SUBTITLE_BITMAP && j == 1 ? 
- AVPALETTE_SIZE : - src_rect->h * src_rect->linesize[j]; - - if (!src_rect->data[j]) - continue; - - if (!(dst_rect->data[j] = av_memdup(src_rect->data[j], buf_size))) { - ret = AVERROR(ENOMEM); - goto cleanup; - } - dst_rect->linesize[j] = src_rect->linesize[j]; - } - } - -success: - *dst = tmp; - - return 0; - -cleanup: - avsubtitle_free(&tmp); - - return ret; -} - -static int fix_sub_duration_heartbeat(InputStream *ist, int64_t signal_pts) -{ - int ret = AVERROR_BUG; - int got_output = 1; - AVSubtitle *prev_subtitle = &ist->prev_sub.subtitle; - AVSubtitle subtitle; - - if (!ist->fix_sub_duration || !prev_subtitle->num_rects || - signal_pts <= prev_subtitle->pts) - return 0; - - if ((ret = copy_av_subtitle(&subtitle, prev_subtitle)) < 0) - return ret; - - subtitle.pts = signal_pts; - - return process_subtitle(ist, &subtitle, &got_output); -} - -static int trigger_fix_sub_duration_heartbeat(OutputStream *ost, const AVPacket *pkt) -{ - OutputFile *of = output_files[ost->file_index]; - int64_t signal_pts = av_rescale_q(pkt->pts, pkt->time_base, - AV_TIME_BASE_Q); - - if (!ost->fix_sub_duration_heartbeat || !(pkt->flags & AV_PKT_FLAG_KEY)) - // we are only interested in heartbeats on streams configured, and - // only on random access points. - return 0; - - for (int i = 0; i < of->nb_streams; i++) { - OutputStream *iter_ost = of->streams[i]; - InputStream *ist = iter_ost->ist; - int ret = AVERROR_BUG; - - if (iter_ost == ost || !ist || !ist->decoding_needed || - ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE) - // We wish to skip the stream that causes the heartbeat, - // output streams without an input stream, streams not decoded - // (as fix_sub_duration is only done for decoded subtitles) as - // well as non-subtitle streams. - continue; - - if ((ret = fix_sub_duration_heartbeat(ist, signal_pts)) < 0) - return ret; - } - - return 0; -} - -static int transcode_subtitles(InputStream *ist, const AVPacket *pkt, - int *got_output, int *decode_failed) -{ - AVSubtitle subtitle; - int ret = avcodec_decode_subtitle2(ist->dec_ctx, - &subtitle, got_output, pkt); - - check_decode_result(NULL, got_output, ret); - - if (ret < 0 || !*got_output) { - *decode_failed = 1; - if (!pkt->size) - sub2video_flush(ist); - return ret; - } - - ist->frames_decoded++; - - return process_subtitle(ist, &subtitle, got_output); -} - -static int send_filter_eof(InputStream *ist) -{ - int i, ret; - /* TODO keep pts also in stream time base to avoid converting back */ - int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base, - AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX); - - for (i = 0; i < ist->nb_filters; i++) { - ret = ifilter_send_eof(ist->filters[i], pts); - if (ret < 0) - return ret; - } - return 0; -} - -/* pkt = NULL means EOF (needed to flush decoder buffers) */ -static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof) -{ - const AVCodecParameters *par = ist->par; - int ret = 0; - int repeating = 0; - int eof_reached = 0; - - AVPacket *avpkt = ist->pkt; - - if (!ist->saw_first_ts) { - ist->first_dts = - ist->dts = ist->st->avg_frame_rate.num ? 
- ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0; - ist->pts = 0; - if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) { - ist->first_dts = - ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q); - ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong - } - ist->saw_first_ts = 1; - } - - if (ist->next_dts == AV_NOPTS_VALUE) - ist->next_dts = ist->dts; - if (ist->next_pts == AV_NOPTS_VALUE) - ist->next_pts = ist->pts; - - if (pkt) { - av_packet_unref(avpkt); - ret = av_packet_ref(avpkt, pkt); - if (ret < 0) - return ret; - } - - if (pkt && pkt->dts != AV_NOPTS_VALUE) { - ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q); - if (par->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed) - ist->next_pts = ist->pts = ist->dts; - } - - // while we have more to decode or while the decoder did output something on EOF - while (ist->decoding_needed) { - int64_t duration_dts = 0; - int64_t duration_pts = 0; - int got_output = 0; - int decode_failed = 0; - - ist->pts = ist->next_pts; - ist->dts = ist->next_dts; - - switch (par->codec_type) { - case AVMEDIA_TYPE_AUDIO: - ret = decode_audio (ist, repeating ? NULL : avpkt, &got_output, - &decode_failed); - av_packet_unref(avpkt); - break; - case AVMEDIA_TYPE_VIDEO: - ret = decode_video (ist, repeating ? NULL : avpkt, &got_output, &duration_pts, !pkt, - &decode_failed); - if (!repeating || !pkt || got_output) { - if (pkt && pkt->duration) { - duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q); - } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) { - int ticks = ist->last_pkt_repeat_pict >= 0 ? - ist->last_pkt_repeat_pict + 1 : - ist->dec_ctx->ticks_per_frame; - duration_dts = ((int64_t)AV_TIME_BASE * - ist->dec_ctx->framerate.den * ticks) / - ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame; - } - - if(ist->dts != AV_NOPTS_VALUE && duration_dts) { - ist->next_dts += duration_dts; - }else - ist->next_dts = AV_NOPTS_VALUE; - } - - if (got_output) { - if (duration_pts > 0) { - ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q); - } else { - ist->next_pts += duration_dts; - } - } - av_packet_unref(avpkt); - break; - case AVMEDIA_TYPE_SUBTITLE: - if (repeating) - break; - ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed); - if (!pkt && ret >= 0) - ret = AVERROR_EOF; - av_packet_unref(avpkt); - break; - default: - return -1; - } - - if (ret == AVERROR_EOF) { - eof_reached = 1; - break; - } - - if (ret < 0) { - if (decode_failed) { - av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n", - ist->file_index, ist->st->index, av_err2str(ret)); - } else { - av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded " - "data for stream #%d:%d\n", ist->file_index, ist->st->index); - } - if (!decode_failed || exit_on_error) - exit_program(1); - break; - } - - if (got_output) - ist->got_output = 1; - - if (!got_output) - break; - - // During draining, we might get multiple output frames in this loop. - // ffmpeg.c does not drain the filter chain on configuration changes, - // which means if we send multiple frames at once to the filters, and - // one of those frames changes configuration, the buffered frames will - // be lost. This can upset certain FATE tests. - // Decode only 1 frame per call on EOF to appease these FATE tests. 
- // The ideal solution would be to rewrite decoding to use the new - // decoding API in a better way. - if (!pkt) - break; - - repeating = 1; - } - - /* after flushing, send an EOF on all the filter inputs attached to the stream */ - /* except when looping we need to flush but not to send an EOF */ - if (!pkt && ist->decoding_needed && eof_reached && !no_eof) { - int ret = send_filter_eof(ist); - if (ret < 0) { - av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n"); - exit_program(1); - } - } - - /* handle stream copy */ - if (!ist->decoding_needed && pkt) { - ist->dts = ist->next_dts; - switch (par->codec_type) { - case AVMEDIA_TYPE_AUDIO: - av_assert1(pkt->duration >= 0); - if (par->sample_rate) { - ist->next_dts += ((int64_t)AV_TIME_BASE * par->frame_size) / - par->sample_rate; - } else { - ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q); - } - break; - case AVMEDIA_TYPE_VIDEO: - if (ist->framerate.num) { - // TODO: Remove work-around for c99-to-c89 issue 7 - AVRational time_base_q = AV_TIME_BASE_Q; - int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate)); - ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q); - } else if (pkt->duration) { - ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q); - } else if(ist->dec_ctx->framerate.num != 0) { - int ticks = ist->last_pkt_repeat_pict >= 0 ? - ist->last_pkt_repeat_pict + 1 : - ist->dec_ctx->ticks_per_frame; - ist->next_dts += ((int64_t)AV_TIME_BASE * - ist->dec_ctx->framerate.den * ticks) / - ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame; - } - break; - } - ist->pts = ist->dts; - ist->next_pts = ist->next_dts; - } else if (!ist->decoding_needed) - eof_reached = 1; - - for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) { - if (!check_output_constraints(ist, ost) || ost->enc_ctx || - (!pkt && no_eof)) - continue; - - do_streamcopy(ist, ost, pkt); - } - - return !eof_reached; -} - -static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts) -{ - InputStream *ist = s->opaque; - const enum AVPixelFormat *p; - int ret; - - for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) { - const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p); - const AVCodecHWConfig *config = NULL; - int i; - - if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) - break; - - if (ist->hwaccel_id == HWACCEL_GENERIC || - ist->hwaccel_id == HWACCEL_AUTO) { - for (i = 0;; i++) { - config = avcodec_get_hw_config(s->codec, i); - if (!config) - break; - if (!(config->methods & - AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX)) - continue; - if (config->pix_fmt == *p) - break; - } - } - if (config && config->device_type == ist->hwaccel_device_type) { - ret = hwaccel_decode_init(s); - if (ret < 0) { - if (ist->hwaccel_id == HWACCEL_GENERIC) { - av_log(NULL, AV_LOG_FATAL, - "%s hwaccel requested for input stream #%d:%d, " - "but cannot be initialized.\n", - av_hwdevice_get_type_name(config->device_type), - ist->file_index, ist->st->index); - return AV_PIX_FMT_NONE; - } - continue; - } - - ist->hwaccel_pix_fmt = *p; - break; - } - } - - return *p; -} - -static int init_input_stream(InputStream *ist, char *error, int error_len) -{ - int ret; - - if (ist->decoding_needed) { - const AVCodec *codec = ist->dec; - if (!codec) { - snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d", - avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index); - return 
AVERROR(EINVAL); - } - - ist->dec_ctx->opaque = ist; - ist->dec_ctx->get_format = get_format; - - if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE && - (ist->decoding_needed & DECODING_FOR_OST)) { - av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE); - if (ist->decoding_needed & DECODING_FOR_FILTER) - av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n"); - } - - /* Useful for subtitles retiming by lavf (FIXME), skipping samples in - * audio, and video decoders such as cuvid or mediacodec */ - ist->dec_ctx->pkt_timebase = ist->st->time_base; - - if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0)) - av_dict_set(&ist->decoder_opts, "threads", "auto", 0); - /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */ - if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC) - av_dict_set(&ist->decoder_opts, "threads", "1", 0); - - ret = hw_device_setup_for_decode(ist); - if (ret < 0) { - snprintf(error, error_len, "Device setup failed for " - "decoder on input stream #%d:%d : %s", - ist->file_index, ist->st->index, av_err2str(ret)); - return ret; - } - - if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) { - if (ret == AVERROR_EXPERIMENTAL) - abort_codec_experimental(codec, 0); - - snprintf(error, error_len, - "Error while opening decoder for input stream " - "#%d:%d : %s", - ist->file_index, ist->st->index, av_err2str(ret)); - return ret; - } - assert_avoptions(ist->decoder_opts); - } - - ist->next_pts = AV_NOPTS_VALUE; - ist->next_dts = AV_NOPTS_VALUE; - - return 0; -} - -static int init_output_stream_streamcopy(OutputStream *ost) -{ - OutputFile *of = output_files[ost->file_index]; - InputStream *ist = ost->ist; - InputFile *ifile = input_files[ist->file_index]; - AVCodecParameters *par = ost->st->codecpar; - AVCodecContext *codec_ctx; - AVRational sar; - int i, ret; - uint32_t codec_tag = par->codec_tag; - - av_assert0(ist && !ost->filter); - - codec_ctx = avcodec_alloc_context3(NULL); - if (!codec_ctx) - return AVERROR(ENOMEM); - - ret = avcodec_parameters_to_context(codec_ctx, ist->par); - if (ret >= 0) - ret = av_opt_set_dict(codec_ctx, &ost->encoder_opts); - if (ret < 0) { - av_log(ost, AV_LOG_FATAL, - "Error setting up codec context options.\n"); - avcodec_free_context(&codec_ctx); - return ret; - } - - ret = avcodec_parameters_from_context(par, codec_ctx); - avcodec_free_context(&codec_ctx); - if (ret < 0) { - av_log(ost, AV_LOG_FATAL, - "Error getting reference codec parameters.\n"); - return ret; - } - - if (!codec_tag) { - unsigned int codec_tag_tmp; - if (!of->format->codec_tag || - av_codec_get_id (of->format->codec_tag, par->codec_tag) == par->codec_id || - !av_codec_get_tag2(of->format->codec_tag, par->codec_id, &codec_tag_tmp)) - codec_tag = par->codec_tag; - } - - par->codec_tag = codec_tag; - - if (!ost->frame_rate.num) - ost->frame_rate = ist->framerate; - - if (ost->frame_rate.num) - ost->st->avg_frame_rate = ost->frame_rate; - else - ost->st->avg_frame_rate = ist->st->avg_frame_rate; - - ret = avformat_transfer_internal_stream_timing_info(of->format, ost->st, ist->st, copy_tb); - if (ret < 0) - return ret; - - // copy timebase while removing common factors - if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) { - if (ost->frame_rate.num) - ost->st->time_base = av_inv_q(ost->frame_rate); - else - ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), 
(AVRational){0, 1}); - } - - // copy estimated duration as a hint to the muxer - if (ost->st->duration <= 0 && ist->st->duration > 0) - ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base); - - if (!ost->copy_prior_start) { - ost->ts_copy_start = (of->start_time == AV_NOPTS_VALUE) ? - 0 : of->start_time; - if (copy_ts && ifile->start_time != AV_NOPTS_VALUE) { - ost->ts_copy_start = FFMAX(ost->ts_copy_start, - ifile->start_time + ifile->ts_offset); - } - } - - if (ist->st->nb_side_data) { - for (i = 0; i < ist->st->nb_side_data; i++) { - const AVPacketSideData *sd_src = &ist->st->side_data[i]; - uint8_t *dst_data; - - dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size); - if (!dst_data) - return AVERROR(ENOMEM); - memcpy(dst_data, sd_src->data, sd_src->size); - } - } - -#if FFMPEG_ROTATION_METADATA - if (ost->rotate_overridden) { - uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX, - sizeof(int32_t) * 9); - if (sd) - av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value); - } -#endif - - switch (par->codec_type) { - case AVMEDIA_TYPE_AUDIO: - if ((par->block_align == 1 || par->block_align == 1152 || par->block_align == 576) && - par->codec_id == AV_CODEC_ID_MP3) - par->block_align = 0; - if (par->codec_id == AV_CODEC_ID_AC3) - par->block_align = 0; - break; - case AVMEDIA_TYPE_VIDEO: - if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option - sar = - av_mul_q(ost->frame_aspect_ratio, - (AVRational){ par->height, par->width }); - av_log(ost, AV_LOG_WARNING, "Overriding aspect ratio " - "with stream copy may produce invalid files\n"); - } - else if (ist->st->sample_aspect_ratio.num) - sar = ist->st->sample_aspect_ratio; - else - sar = par->sample_aspect_ratio; - ost->st->sample_aspect_ratio = par->sample_aspect_ratio = sar; - ost->st->avg_frame_rate = ist->st->avg_frame_rate; - ost->st->r_frame_rate = ist->st->r_frame_rate; - break; - } - - ost->mux_timebase = ist->st->time_base; - - return 0; -} - -static void set_encoder_id(OutputFile *of, OutputStream *ost) -{ - const char *cname = ost->enc_ctx->codec->name; - uint8_t *encoder_string; - int encoder_string_len; - - if (av_dict_get(ost->st->metadata, "encoder", NULL, 0)) - return; - - encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(cname) + 2; - encoder_string = av_mallocz(encoder_string_len); - if (!encoder_string) - report_and_exit(AVERROR(ENOMEM)); - - if (!of->bitexact && !ost->bitexact) - av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len); - else - av_strlcpy(encoder_string, "Lavc ", encoder_string_len); - av_strlcat(encoder_string, cname, encoder_string_len); - av_dict_set(&ost->st->metadata, "encoder", encoder_string, - AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE); -} - -static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base) -{ - InputStream *ist = ost->ist; - AVCodecContext *enc_ctx = ost->enc_ctx; - - if (ost->enc_timebase.num > 0) { - enc_ctx->time_base = ost->enc_timebase; - return; - } - - if (ost->enc_timebase.num < 0) { - if (ist) { - enc_ctx->time_base = ist->st->time_base; - return; - } - - av_log(ost, AV_LOG_WARNING, - "Input stream data not available, using default time base\n"); - } - - enc_ctx->time_base = default_time_base; -} - -static int init_output_stream_encode(OutputStream *ost, AVFrame *frame) -{ - InputStream *ist = ost->ist; - AVCodecContext *enc_ctx = ost->enc_ctx; - AVCodecContext *dec_ctx = NULL; - OutputFile *of = 
output_files[ost->file_index]; - int ret; - - set_encoder_id(output_files[ost->file_index], ost); - - if (ist) { - dec_ctx = ist->dec_ctx; - } - - if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) { - if (!ost->frame_rate.num) - ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter); - if (!ost->frame_rate.num && !ost->max_frame_rate.num) { - ost->frame_rate = (AVRational){25, 1}; - av_log(ost, AV_LOG_WARNING, - "No information " - "about the input framerate is available. Falling " - "back to a default value of 25fps. Use the -r option " - "if you want a different framerate.\n"); - } - - if (ost->max_frame_rate.num && - (av_q2d(ost->frame_rate) > av_q2d(ost->max_frame_rate) || - !ost->frame_rate.den)) - ost->frame_rate = ost->max_frame_rate; - - if (enc_ctx->codec->supported_framerates && !ost->force_fps) { - int idx = av_find_nearest_q_idx(ost->frame_rate, enc_ctx->codec->supported_framerates); - ost->frame_rate = enc_ctx->codec->supported_framerates[idx]; - } - // reduce frame rate for mpeg4 to be within the spec limits - if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) { - av_reduce(&ost->frame_rate.num, &ost->frame_rate.den, - ost->frame_rate.num, ost->frame_rate.den, 65535); - } - } - - switch (enc_ctx->codec_type) { - case AVMEDIA_TYPE_AUDIO: - enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter); - enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter); - ret = av_buffersink_get_ch_layout(ost->filter->filter, &enc_ctx->ch_layout); - if (ret < 0) - return ret; - - if (ost->bits_per_raw_sample) - enc_ctx->bits_per_raw_sample = ost->bits_per_raw_sample; - else if (dec_ctx && ost->filter->graph->is_meta) - enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample, - av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3); - - init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate)); - break; - - case AVMEDIA_TYPE_VIDEO: - init_encoder_time_base(ost, av_inv_q(ost->frame_rate)); - - if (!(enc_ctx->time_base.num && enc_ctx->time_base.den)) - enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter); - if ( av_q2d(enc_ctx->time_base) < 0.001 && ost->vsync_method != VSYNC_PASSTHROUGH - && (ost->vsync_method == VSYNC_CFR || ost->vsync_method == VSYNC_VSCFR || - (ost->vsync_method == VSYNC_AUTO && !(of->format->flags & AVFMT_VARIABLE_FPS)))){ - av_log(ost, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n" - "Please consider specifying a lower framerate, a different muxer or " - "setting vsync/fps_mode to vfr\n"); - } - - enc_ctx->width = av_buffersink_get_w(ost->filter->filter); - enc_ctx->height = av_buffersink_get_h(ost->filter->filter); - enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio = - ost->frame_aspect_ratio.num ? 
// overridden by the -aspect cli option - av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) : - av_buffersink_get_sample_aspect_ratio(ost->filter->filter); - - enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter); - - if (ost->bits_per_raw_sample) - enc_ctx->bits_per_raw_sample = ost->bits_per_raw_sample; - else if (dec_ctx && ost->filter->graph->is_meta) - enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample, - av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth); - - if (frame) { - enc_ctx->color_range = frame->color_range; - enc_ctx->color_primaries = frame->color_primaries; - enc_ctx->color_trc = frame->color_trc; - enc_ctx->colorspace = frame->colorspace; - enc_ctx->chroma_sample_location = frame->chroma_location; - } - - enc_ctx->framerate = ost->frame_rate; - - ost->st->avg_frame_rate = ost->frame_rate; + if (src_rect->text) + if (!(dst_rect->text = av_strdup(src_rect->text))) { + ret = AVERROR(ENOMEM); + goto cleanup; + } - // Field order: autodetection - if (frame) { - if (enc_ctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) && - ost->top_field_first >= 0) - frame->top_field_first = !!ost->top_field_first; + if (src_rect->ass) + if (!(dst_rect->ass = av_strdup(src_rect->ass))) { + ret = AVERROR(ENOMEM); + goto cleanup; + } - if (frame->interlaced_frame) { - if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG) - enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB; - else - enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT; - } else - enc_ctx->field_order = AV_FIELD_PROGRESSIVE; - } + for (int j = 0; j < 4; j++) { + // SUBTITLE_BITMAP images are special in the sense that they + // are like PAL8 images. first pointer to data, second to + // palette. This makes the size calculation match this. + size_t buf_size = src_rect->type == SUBTITLE_BITMAP && j == 1 + ? AVPALETTE_SIZE + : src_rect->h * src_rect->linesize[j]; - // Field order: override - if (ost->top_field_first == 0) { - enc_ctx->field_order = AV_FIELD_BB; - } else if (ost->top_field_first == 1) { - enc_ctx->field_order = AV_FIELD_TT; - } + if (!src_rect->data[j]) + continue; - break; - case AVMEDIA_TYPE_SUBTITLE: - enc_ctx->time_base = AV_TIME_BASE_Q; - if (!enc_ctx->width) { - enc_ctx->width = ost->ist->par->width; - enc_ctx->height = ost->ist->par->height; - } - if (dec_ctx && dec_ctx->subtitle_header) { - /* ASS code assumes this buffer is null terminated so add extra byte. 
*/ - ost->enc_ctx->subtitle_header = av_mallocz(dec_ctx->subtitle_header_size + 1); - if (!ost->enc_ctx->subtitle_header) - return AVERROR(ENOMEM); - memcpy(ost->enc_ctx->subtitle_header, dec_ctx->subtitle_header, - dec_ctx->subtitle_header_size); - ost->enc_ctx->subtitle_header_size = dec_ctx->subtitle_header_size; - } - if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && - enc_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE) { - int input_props = 0, output_props = 0; - AVCodecDescriptor const *input_descriptor = - avcodec_descriptor_get(ist->dec->id); - AVCodecDescriptor const *output_descriptor = - avcodec_descriptor_get(ost->enc_ctx->codec_id); - if (input_descriptor) - input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB); - if (output_descriptor) - output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB); - if (input_props && output_props && input_props != output_props) { - av_log(ost, AV_LOG_ERROR, - "Subtitle encoding currently only possible from text to text " - "or bitmap to bitmap"); - return AVERROR_INVALIDDATA; + if (!(dst_rect->data[j] = av_memdup(src_rect->data[j], buf_size))) { + ret = AVERROR(ENOMEM); + goto cleanup; } + dst_rect->linesize[j] = src_rect->linesize[j]; } - - break; - case AVMEDIA_TYPE_DATA: - break; - default: - abort(); - break; } - if (ost->bitexact) - enc_ctx->flags |= AV_CODEC_FLAG_BITEXACT; - - if (ost->sq_idx_encode >= 0) - sq_set_tb(of->sq_encode, ost->sq_idx_encode, enc_ctx->time_base); - - ost->mux_timebase = enc_ctx->time_base; +success: + *dst = tmp; return 0; -} - -static int init_output_stream(OutputStream *ost, AVFrame *frame, - char *error, int error_len) -{ - int ret = 0; - if (ost->enc_ctx) { - const AVCodec *codec = ost->enc_ctx->codec; - InputStream *ist = ost->ist; +cleanup: + avsubtitle_free(&tmp); - ret = init_output_stream_encode(ost, frame); - if (ret < 0) - return ret; + return ret; +} - if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0)) - av_dict_set(&ost->encoder_opts, "threads", "auto", 0); +static void subtitle_free(void *opaque, uint8_t *data) { + AVSubtitle *sub = (AVSubtitle *)data; + avsubtitle_free(sub); + av_free(sub); +} - if (codec->capabilities & AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE) { - ret = av_dict_set(&ost->encoder_opts, "flags", "+copy_opaque", AV_DICT_MULTIKEY); - if (ret < 0) - return ret; - } +int subtitle_wrap_frame(AVFrame *frame, AVSubtitle *subtitle, int copy) { + AVBufferRef *buf; + AVSubtitle *sub; + int ret; - ret = hw_device_setup_for_encode(ost); + if (copy) { + sub = av_mallocz(sizeof(*sub)); + ret = sub ? 
copy_av_subtitle(sub, subtitle) : AVERROR(ENOMEM); if (ret < 0) { - snprintf(error, error_len, "Device setup failed for " - "encoder on output stream #%d:%d : %s", - ost->file_index, ost->index, av_err2str(ret)); + av_freep(&sub); return ret; } + } else { + sub = av_memdup(subtitle, sizeof(*subtitle)); + if (!sub) + return AVERROR(ENOMEM); + memset(subtitle, 0, sizeof(*subtitle)); + } - if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) { - if (ret == AVERROR_EXPERIMENTAL) - abort_codec_experimental(codec, 1); - snprintf(error, error_len, - "Error while opening encoder for output stream #%d:%d - " - "maybe incorrect parameters such as bit_rate, rate, width or height", - ost->file_index, ost->index); - return ret; - } - if (codec->type == AVMEDIA_TYPE_AUDIO && - !(codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) - av_buffersink_set_frame_size(ost->filter->filter, - ost->enc_ctx->frame_size); - assert_avoptions(ost->encoder_opts); - if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 && - ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */) - av_log(ost, AV_LOG_WARNING, "The bitrate parameter is set too low." - " It takes bits/s as argument, not kbits/s\n"); - - ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx); - if (ret < 0) { - av_log(ost, AV_LOG_FATAL, - "Error initializing the output stream codec context.\n"); - exit_program(1); - } + buf = + av_buffer_create((uint8_t *)sub, sizeof(*sub), subtitle_free, NULL, 0); + if (!buf) { + avsubtitle_free(sub); + av_freep(&sub); + return AVERROR(ENOMEM); + } - if (ost->enc_ctx->nb_coded_side_data) { - int i; + frame->buf[0] = buf; - for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) { - const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i]; - uint8_t *dst_data; + return 0; +} - dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size); - if (!dst_data) - return AVERROR(ENOMEM); - memcpy(dst_data, sd_src->data, sd_src->size); - } - } +int trigger_fix_sub_duration_heartbeat(OutputStream *ost, const AVPacket *pkt) { + OutputFile *of = output_files[ost->file_index]; + int64_t signal_pts = av_rescale_q(pkt->pts, pkt->time_base, AV_TIME_BASE_Q); - /* - * Add global input side data. For now this is naive, and copies it - * from the input stream's global side data. All side data should - * really be funneled over AVFrame and libavfilter, then added back to - * packet side data, and then potentially using the first packet for - * global side data. - */ - if (ist) { - int i; - for (i = 0; i < ist->st->nb_side_data; i++) { - AVPacketSideData *sd = &ist->st->side_data[i]; - if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) { - uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size); - if (!dst) - return AVERROR(ENOMEM); - memcpy(dst, sd->data, sd->size); - if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX) - av_display_rotation_set((int32_t *)dst, 0); - } - } - } + if (!ost->fix_sub_duration_heartbeat || !(pkt->flags & AV_PKT_FLAG_KEY)) + // we are only interested in heartbeats on streams configured, and + // only on random access points. 
+ return 0; - // copy timebase while removing common factors - if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) - ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1}); + for (int i = 0; i < of->nb_streams; i++) { + OutputStream *iter_ost = of->streams[i]; + InputStream *ist = iter_ost->ist; + int ret = AVERROR_BUG; - // copy estimated duration as a hint to the muxer - if (ost->st->duration <= 0 && ist && ist->st->duration > 0) - ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base); - } else if (ost->ist) { - ret = init_output_stream_streamcopy(ost); - if (ret < 0) + if (iter_ost == ost || !ist || !ist->decoding_needed || + ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE) + // We wish to skip the stream that causes the heartbeat, + // output streams without an input stream, streams not decoded + // (as fix_sub_duration is only done for decoded subtitles) as + // well as non-subtitle streams. + continue; + + if ((ret = fix_sub_duration_heartbeat(ist, signal_pts)) < 0) return ret; } - ret = of_stream_init(output_files[ost->file_index], ost); - if (ret < 0) - return ret; - - return ret; + return 0; } -static int transcode_init(void) -{ +/* pkt = NULL means EOF (needed to flush decoder buffers) */ +int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof) { + InputFile *f = input_files[ist->file_index]; + int64_t dts_est = AV_NOPTS_VALUE; int ret = 0; - char error[1024] = {0}; - - /* init framerate emulation */ - for (int i = 0; i < nb_input_files; i++) { - InputFile *ifile = input_files[i]; - if (ifile->readrate || ifile->rate_emu) - for (int j = 0; j < ifile->nb_streams; j++) - ifile->streams[j]->start = av_gettime_relative(); + int eof_reached = 0; + + if (ist->decoding_needed) { + ret = dec_packet(ist, pkt, no_eof); + if (ret < 0 && ret != AVERROR_EOF) + return ret; } + if (ret == AVERROR_EOF || (!pkt && !ist->decoding_needed)) + eof_reached = 1; - /* init input streams */ - for (InputStream *ist = ist_iter(NULL); ist; ist = ist_iter(ist)) - if ((ret = init_input_stream(ist, error, sizeof(error))) < 0) - goto dump_format; - - /* - * initialize stream copy and subtitle/data streams. - * Encoded AVFrame based streams will get initialized as follows: - * - when the first AVFrame is received in do_video_out - * - just before the first AVFrame is received in either transcode_step - * or reap_filters due to us requiring the filter chain buffer sink - * to be configured with the correct audio frame size, which is only - * known after the encoder is initialized. - */ - for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) { - if (ost->enc_ctx && - (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || - ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)) + if (pkt && pkt->opaque_ref) { + DemuxPktData *pd = (DemuxPktData *)pkt->opaque_ref->data; + dts_est = pd->dts_est; + } + + if (f->recording_time != INT64_MAX) { + int64_t start_time = 0; + if (copy_ts) { + start_time += f->start_time != AV_NOPTS_VALUE ? f->start_time : 0; + start_time += start_at_zero ? 
0 : f->start_time_effective; + } + if (dts_est >= f->recording_time + start_time) + pkt = NULL; + } + + for (int oidx = 0; oidx < ist->nb_outputs; oidx++) { + OutputStream *ost = ist->outputs[oidx]; + if (ost->enc || (!pkt && no_eof)) continue; - ret = init_output_stream_wrapper(ost, NULL, 0); + ret = of_streamcopy(ost, pkt, dts_est); if (ret < 0) - goto dump_format; + return ret; } - /* discard unused programs */ - for (int i = 0; i < nb_input_files; i++) { - InputFile *ifile = input_files[i]; - for (int j = 0; j < ifile->ctx->nb_programs; j++) { - AVProgram *p = ifile->ctx->programs[j]; - int discard = AVDISCARD_ALL; - - for (int k = 0; k < p->nb_stream_indexes; k++) - if (!ifile->streams[p->stream_index[k]]->discard) { - discard = AVDISCARD_DEFAULT; - break; - } - p->discard = discard; - } - } + return !eof_reached; +} - dump_format: - /* dump the stream mapping */ +static void print_stream_maps(void) { av_log(NULL, AV_LOG_INFO, "Stream mapping:\n"); for (InputStream *ist = ist_iter(NULL); ist; ist = ist_iter(ist)) { for (int j = 0; j < ist->nb_filters; j++) { if (!filtergraph_is_simple(ist->filters[j]->graph)) { av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s", - ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?", - ist->filters[j]->name); + ist->file_index, ist->index, + ist->dec ? ist->dec->name : "?", ist->filters[j]->name); if (nb_filtergraphs > 1) - av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index); + av_log(NULL, AV_LOG_INFO, " (graph %d)", + ist->filters[j]->graph->index); av_log(NULL, AV_LOG_INFO, "\n"); } } @@ -3587,29 +1046,28 @@ static int transcode_init(void) /* output from a complex graph */ av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name); if (nb_filtergraphs > 1) - av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index); + av_log(NULL, AV_LOG_INFO, " (graph %d)", + ost->filter->graph->index); - av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index, - ost->index, ost->enc_ctx->codec->name); + av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", + ost->file_index, ost->index, ost->enc_ctx->codec->name); continue; } av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d", - ost->ist->file_index, - ost->ist->st->index, - ost->file_index, + ost->ist->file_index, ost->ist->index, ost->file_index, ost->index); if (ost->enc_ctx) { - const AVCodec *in_codec = ost->ist->dec; - const AVCodec *out_codec = ost->enc_ctx->codec; - const char *decoder_name = "?"; - const char *in_codec_name = "?"; - const char *encoder_name = "?"; + const AVCodec *in_codec = ost->ist->dec; + const AVCodec *out_codec = ost->enc_ctx->codec; + const char *decoder_name = "?"; + const char *in_codec_name = "?"; + const char *encoder_name = "?"; const char *out_codec_name = "?"; const AVCodecDescriptor *desc; if (in_codec) { - decoder_name = in_codec->name; + decoder_name = in_codec->name; desc = avcodec_descriptor_get(in_codec->id); if (desc) in_codec_name = desc->name; @@ -3618,7 +1076,7 @@ static int transcode_init(void) } if (out_codec) { - encoder_name = out_codec->name; + encoder_name = out_codec->name; desc = avcodec_descriptor_get(out_codec->id); if (desc) out_codec_name = desc->name; @@ -3626,202 +1084,127 @@ static int transcode_init(void) encoder_name = "native"; } - av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))", - in_codec_name, decoder_name, - out_codec_name, encoder_name); + av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))", in_codec_name, + decoder_name, out_codec_name, encoder_name); } else av_log(NULL, AV_LOG_INFO, " 
(copy)"); av_log(NULL, AV_LOG_INFO, "\n"); } - - if (ret) { - av_log(NULL, AV_LOG_ERROR, "%s\n", error); - return ret; - } - - atomic_store(&transcode_init_done, 1); - - return 0; -} - -/* Return 1 if there remain streams where more output is wanted, 0 otherwise. */ -static int need_output(void) -{ - for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) { - if (ost->finished) - continue; - - return 1; - } - - return 0; } /** * Select the output stream to process. * - * @return selected output stream, or NULL if none available + * @retval 0 an output stream was selected + * @retval AVERROR(EAGAIN) need to wait until more input is available + * @retval AVERROR_EOF no more streams need output */ -static OutputStream *choose_output(void) -{ +static int choose_output(OutputStream **post) { int64_t opts_min = INT64_MAX; OutputStream *ost_min = NULL; for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) { int64_t opts; - if (ost->filter && ost->last_filter_pts != AV_NOPTS_VALUE) { - opts = ost->last_filter_pts; + if (ost->filter && ost->filter->last_pts != AV_NOPTS_VALUE) { + opts = ost->filter->last_pts; } else { - opts = ost->last_mux_dts == AV_NOPTS_VALUE ? - INT64_MIN : ost->last_mux_dts; - if (ost->last_mux_dts == AV_NOPTS_VALUE) - av_log(ost, AV_LOG_DEBUG, - "cur_dts is invalid [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n", - ost->initialized, ost->inputs_done, ost->finished); + opts = ost->last_mux_dts == AV_NOPTS_VALUE ? INT64_MIN + : ost->last_mux_dts; } - if (!ost->initialized && !ost->inputs_done) - return ost->unavailable ? NULL : ost; - + if (!ost->initialized && !ost->finished) { + ost_min = ost; + break; + } if (!ost->finished && opts < opts_min) { opts_min = opts; - ost_min = ost->unavailable ? NULL : ost; + ost_min = ost; } } - return ost_min; + if (!ost_min) + return AVERROR_EOF; + *post = ost_min; + return ost_min->unavailable ? AVERROR(EAGAIN) : 0; } -static void set_tty_echo(int on) -{ +static void set_tty_echo(int on) { #if HAVE_TERMIOS_H struct termios tty; if (tcgetattr(0, &tty) == 0) { - if (on) tty.c_lflag |= ECHO; - else tty.c_lflag &= ~ECHO; + if (on) + tty.c_lflag |= ECHO; + else + tty.c_lflag &= ~ECHO; tcsetattr(0, TCSANOW, &tty); } #endif } -static int check_keyboard_interaction(int64_t cur_time) -{ - int i, ret, key; +static int check_keyboard_interaction(int64_t cur_time) { + int i, key; if (received_nb_signals) return AVERROR_EXIT; /* read_key() returns 0 on EOF */ - if(cur_time - keyboard_last_time >= 100000){ - key = read_key(); + if (cur_time - keyboard_last_time >= 100000) { + key = read_key(); keyboard_last_time = cur_time; - }else + } else key = -1; if (key == 'q') { av_log(NULL, AV_LOG_INFO, "\n\n[q] command received. Exiting.\n\n"); return AVERROR_EXIT; } - if (key == '+') av_log_set_level(av_log_get_level()+10); - if (key == '-') av_log_set_level(av_log_get_level()-10); - if (key == 's') qp_hist ^= 1; - if (key == 'c' || key == 'C'){ + if (key == '+') + av_log_set_level(av_log_get_level() + 10); + if (key == '-') + av_log_set_level(av_log_get_level() - 10); + if (key == 'c' || key == 'C') { char buf[4096], target[64], command[256], arg[256] = {0}; double time; int k, n = 0; - av_log(NULL, AV_LOG_STDERR, "\nEnter command: |all