update version to FFmpeg 4.2

pull/166/head
xufulong 5 years ago
parent 8a0ad6be65
commit 3dc207caa1
Changed files (lines changed):
  1. app/src/main/cpp/ffmpeg/cmdutils.c (202)
  2. app/src/main/cpp/ffmpeg/cmdutils.h (82)
  3. app/src/main/cpp/ffmpeg/config.h (1392)
  4. app/src/main/cpp/ffmpeg/ffmpeg.c (2537)
  5. app/src/main/cpp/ffmpeg/ffmpeg.h (147)
  6. app/src/main/cpp/ffmpeg/ffmpeg_filter.c (379)
  7. app/src/main/cpp/ffmpeg/ffmpeg_opt.c (829)
  8. app/src/main/cpp/ffmpeg/ffprobe.c (518)

@ -33,8 +33,10 @@
#include "compat/va_copy.h"
#include "libavformat/avformat.h"
#include "libavfilter/avfilter.h"
#include "libswresample/swresample.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
#include "libpostproc/postprocess.h"
#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
@ -48,6 +50,7 @@
#include "libavutil/dict.h"
#include "libavutil/opt.h"
#include "libavutil/cpu.h"
#include "libavutil/ffversion.h"
#include "libavutil/version.h"
#include "cmdutils.h"
#if CONFIG_NETWORK
@ -57,6 +60,9 @@
#include <sys/time.h>
#include <sys/resource.h>
#endif
#ifdef _WIN32
#include <windows.h>
#endif
#include <setjmp.h>
@ -72,6 +78,12 @@ static FILE *report_file;
static int report_file_level = AV_LOG_DEBUG;
int hide_banner = 0;
enum show_muxdemuxers {
SHOW_DEFAULT,
SHOW_DEMUXERS,
SHOW_MUXERS,
};
void init_opts(void)
{
av_dict_set(&sws_dict, "flags", "bicubic", 0);
@ -107,6 +119,15 @@ static void log_callback_report(void *ptr, int level, const char *fmt, va_list v
}
}
void init_dynload(void)
{
#ifdef _WIN32
/* Calling SetDllDirectory with the empty string (but not NULL) removes the
* current working directory from the DLL search path as a security pre-caution. */
SetDllDirectory("");
#endif
}
static void (*program_exit)(int ret);
void register_exit(void (*cb)(int ret))
@ -114,13 +135,12 @@ void register_exit(void (*cb)(int ret))
program_exit = cb;
}
int exit_program(int ret)
void exit_program(int ret)
{
if (program_exit)
program_exit(ret);
longjmp(jump_buf, 1);
return ret;
}
double parse_number_or_die(const char *context, const char *numstr, int type,
@ -214,7 +234,6 @@ static const OptionDef *find_option(const OptionDef *po, const char *name)
* by default. HAVE_COMMANDLINETOARGVW is true on cygwin, while
* it doesn't provide the actual command line via GetCommandLineW(). */
#if HAVE_COMMANDLINETOARGVW && defined(_WIN32)
#include <windows.h>
#include <shellapi.h>
/* Will be leaked on exit */
static char** win32_argv_utf8 = NULL;
@ -864,28 +883,54 @@ int opt_loglevel(void *optctx, const char *opt, const char *arg)
{ "debug" , AV_LOG_DEBUG },
{ "trace" , AV_LOG_TRACE },
};
const char *token;
char *tail;
int level;
int flags;
int i;
flags = av_log_get_flags();
tail = strstr(arg, "repeat");
if (tail)
flags &= ~AV_LOG_SKIP_REPEATED;
else
int flags = av_log_get_flags();
int level = av_log_get_level();
int cmd, i = 0;
av_assert0(arg);
while (*arg) {
token = arg;
if (*token == '+' || *token == '-') {
cmd = *token++;
} else {
cmd = 0;
}
if (!i && !cmd) {
flags = 0; /* missing relative prefix, build absolute value */
}
if (!strncmp(token, "repeat", 6)) {
if (cmd == '-') {
flags |= AV_LOG_SKIP_REPEATED;
av_log_set_flags(flags);
if (tail == arg)
arg += 6 + (arg[6]=='+');
if(tail && !*arg)
return 0;
} else {
flags &= ~AV_LOG_SKIP_REPEATED;
}
arg = token + 6;
} else if (!strncmp(token, "level", 5)) {
if (cmd == '-') {
flags &= ~AV_LOG_PRINT_LEVEL;
} else {
flags |= AV_LOG_PRINT_LEVEL;
}
arg = token + 5;
} else {
break;
}
i++;
}
if (!*arg) {
goto end;
} else if (*arg == '+') {
arg++;
} else if (!i) {
flags = av_log_get_flags(); /* level value without prefix, reset flags */
}
for (i = 0; i < FF_ARRAY_ELEMS(log_levels); i++) {
if (!strcmp(log_levels[i].name, arg)) {
av_log_set_level(log_levels[i].level);
return 0;
level = log_levels[i].level;
goto end;
}
}
@ -897,6 +942,9 @@ int opt_loglevel(void *optctx, const char *opt, const char *arg)
av_log(NULL, AV_LOG_FATAL, "\"%s\"\n", log_levels[i].name);
exit_program(1);
}
end:
av_log_set_flags(flags);
av_log_set_level(level);
return 0;
}
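
Note (not part of the diff): the extended parsing above accepts flag tokens before the level name, e.g. an argument such as "repeat+level+debug". A minimal sketch of the equivalent effect expressed directly with the libavutil log API:

    #include <libavutil/log.h>

    /* Roughly what opt_loglevel() ends up doing for "repeat+level+debug". */
    static void apply_repeat_level_debug(void)
    {
        int flags = av_log_get_flags();
        flags &= ~AV_LOG_SKIP_REPEATED;  /* "repeat": do not collapse repeated messages */
        flags |= AV_LOG_PRINT_LEVEL;     /* "level": prefix each message with its level */
        av_log_set_flags(flags);
        av_log_set_level(AV_LOG_DEBUG);  /* "debug" */
    }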
@ -972,7 +1020,7 @@ static int init_report(const char *env)
av_free(key);
}
av_bprint_init(&filename, 0, 1);
av_bprint_init(&filename, 0, AV_BPRINT_SIZE_AUTOMATIC);
expand_filename_template(&filename,
av_x_if_null(filename_template, "%p-%t.log"), tm);
av_free(filename_template);
@ -1086,15 +1134,21 @@ static void print_all_libs_info(int flags, int level)
PRINT_LIB_INFO(avfilter, AVFILTER, flags, level);
PRINT_LIB_INFO(swscale, SWSCALE, flags, level);
PRINT_LIB_INFO(swresample, SWRESAMPLE, flags, level);
PRINT_LIB_INFO(postproc, POSTPROC, flags, level);
}
static void print_program_info(int flags, int level)
{
const char *indent = flags & INDENT? " " : "";
av_log(NULL, level, "%s version " FFMPEG_VERSION, program_name);
if (flags & SHOW_COPYRIGHT)
av_log(NULL, level, " Copyright (c) %d-%d the FFmpeg developers",
program_birth_year, CONFIG_THIS_YEAR);
av_log(NULL, level, "\n");
av_log(NULL, level, FFMPEG_CONFIGURATION, indent);
av_log(NULL, level, "%sbuilt with %s\n", indent, CC_IDENT);
av_log(NULL, level, "%sconfiguration: " FFMPEG_CONFIGURATION "\n", indent);
}
static void print_buildconf(int flags, int level)
@ -1232,10 +1286,12 @@ static int is_device(const AVClass *avclass)
return AV_IS_INPUT_DEVICE(avclass->category) || AV_IS_OUTPUT_DEVICE(avclass->category);
}
static int show_formats_devices(void *optctx, const char *opt, const char *arg, int device_only)
static int show_formats_devices(void *optctx, const char *opt, const char *arg, int device_only, int muxdemuxers)
{
AVInputFormat *ifmt = NULL;
AVOutputFormat *ofmt = NULL;
void *ifmt_opaque = NULL;
const AVInputFormat *ifmt = NULL;
void *ofmt_opaque = NULL;
const AVOutputFormat *ofmt = NULL;
const char *last_name;
int is_dev;
@ -1250,7 +1306,9 @@ static int show_formats_devices(void *optctx, const char *opt, const char *arg,
const char *name = NULL;
const char *long_name = NULL;
while ((ofmt = av_oformat_next(ofmt))) {
if (muxdemuxers !=SHOW_DEMUXERS) {
ofmt_opaque = NULL;
while ((ofmt = av_muxer_iterate(&ofmt_opaque))) {
is_dev = is_device(ofmt->priv_class);
if (!is_dev && device_only)
continue;
@ -1261,7 +1319,10 @@ static int show_formats_devices(void *optctx, const char *opt, const char *arg,
encode = 1;
}
}
while ((ifmt = av_iformat_next(ifmt))) {
}
if (muxdemuxers != SHOW_MUXERS) {
ifmt_opaque = NULL;
while ((ifmt = av_demuxer_iterate(&ifmt_opaque))) {
is_dev = is_device(ifmt->priv_class);
if (!is_dev && device_only)
continue;
@ -1274,6 +1335,7 @@ static int show_formats_devices(void *optctx, const char *opt, const char *arg,
if (name && strcmp(ifmt->name, name) == 0)
decode = 1;
}
}
if (!name)
break;
last_name = name;
@ -1289,12 +1351,22 @@ static int show_formats_devices(void *optctx, const char *opt, const char *arg,
int show_formats(void *optctx, const char *opt, const char *arg)
{
return show_formats_devices(optctx, opt, arg, 0);
return show_formats_devices(optctx, opt, arg, 0, SHOW_DEFAULT);
}
int show_muxers(void *optctx, const char *opt, const char *arg)
{
return show_formats_devices(optctx, opt, arg, 0, SHOW_MUXERS);
}
int show_demuxers(void *optctx, const char *opt, const char *arg)
{
return show_formats_devices(optctx, opt, arg, 0, SHOW_DEMUXERS);
}
int show_devices(void *optctx, const char *opt, const char *arg)
{
return show_formats_devices(optctx, opt, arg, 1);
return show_formats_devices(optctx, opt, arg, 1, SHOW_DEFAULT);
}
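
For context, the hunks above replace the removed av_oformat_next()/av_iformat_next() calls with the FFmpeg 4.x iteration API. A self-contained sketch (illustrative, not from this commit) of that API:

    #include <stdio.h>
    #include <libavformat/avformat.h>

    /* List all muxers and demuxers with the opaque-iterator API. */
    static void list_formats(void)
    {
        void *opaque = NULL;
        const AVOutputFormat *ofmt;
        const AVInputFormat  *ifmt;

        while ((ofmt = av_muxer_iterate(&opaque)))
            printf("muxer:   %s\n", ofmt->name);

        opaque = NULL;  /* reset the iteration state before the second pass */
        while ((ifmt = av_demuxer_iterate(&opaque)))
            printf("demuxer: %s\n", ifmt->name);
    }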
#define PRINT_CODEC_SUPPORTED(codec, field, type, list_name, term, get_name) \
@ -1342,6 +1414,16 @@ static void print_codec(const AVCodec *c)
AV_CODEC_CAP_SLICE_THREADS |
AV_CODEC_CAP_AUTO_THREADS))
printf("threads ");
if (c->capabilities & AV_CODEC_CAP_AVOID_PROBING)
printf("avoidprobe ");
if (c->capabilities & AV_CODEC_CAP_INTRA_ONLY)
printf("intraonly ");
if (c->capabilities & AV_CODEC_CAP_LOSSLESS)
printf("lossless ");
if (c->capabilities & AV_CODEC_CAP_HARDWARE)
printf("hardware ");
if (c->capabilities & AV_CODEC_CAP_HYBRID)
printf("hybrid ");
if (!c->capabilities)
printf("none");
printf("\n");
@ -1362,6 +1444,17 @@ static void print_codec(const AVCodec *c)
printf("\n");
}
if (avcodec_get_hw_config(c, 0)) {
printf(" Supported hardware devices: ");
for (int i = 0;; i++) {
const AVCodecHWConfig *config = avcodec_get_hw_config(c, i);
if (!config)
break;
printf("%s ", av_hwdevice_get_type_name(config->device_type));
}
printf("\n");
}
if (c->supported_framerates) {
const AVRational *fps = c->supported_framerates;
@ -1560,10 +1653,11 @@ int show_encoders(void *optctx, const char *opt, const char *arg)
int show_bsfs(void *optctx, const char *opt, const char *arg)
{
AVBitStreamFilter *bsf = NULL;
const AVBitStreamFilter *bsf = NULL;
void *opaque = NULL;
printf("Bitstream filters:\n");
while ((bsf = av_bitstream_filter_next(bsf)))
while ((bsf = av_bsf_iterate(&opaque)))
printf("%s\n", bsf->name);
printf("\n");
return 0;
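
Likewise, av_bitstream_filter_next() is gone in 4.x; a minimal sketch of the replacement iterator used above (illustrative only):

    #include <stdio.h>
    #include <libavcodec/avcodec.h>

    /* Enumerate the registered bitstream filters. */
    static void list_bsfs(void)
    {
        void *opaque = NULL;
        const AVBitStreamFilter *bsf;

        while ((bsf = av_bsf_iterate(&opaque)))
            printf("%s\n", bsf->name);
    }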
@ -1589,6 +1683,7 @@ int show_filters(void *optctx, const char *opt, const char *arg)
#if CONFIG_AVFILTER
const AVFilter *filter = NULL;
char descr[64], *descr_cur;
void *opaque = NULL;
int i, j;
const AVFilterPad *pad;
@ -1600,7 +1695,7 @@ int show_filters(void *optctx, const char *opt, const char *arg)
" V = Video input/output\n"
" N = Dynamic number and/or type of input/output\n"
" | = Source or sink filter\n");
while ((filter = avfilter_next(filter))) {
while ((filter = av_filter_iterate(&opaque))) {
descr_cur = descr;
for (i = 0; i < 2; i++) {
if (i) {
@ -1663,7 +1758,7 @@ int show_pix_fmts(void *optctx, const char *opt, const char *arg)
#endif
while ((pix_desc = av_pix_fmt_desc_next(pix_desc))) {
enum AVPixelFormat pix_fmt = av_pix_fmt_desc_get_id(pix_desc);
enum AVPixelFormat av_unused pix_fmt = av_pix_fmt_desc_get_id(pix_desc);
printf("%c%c%c%c%c %-16s %d %2d\n",
sws_isSupportedInput (pix_fmt) ? 'I' : '.',
sws_isSupportedOutput(pix_fmt) ? 'O' : '.',
@ -1857,6 +1952,25 @@ static void show_help_filter(const char *name)
}
#endif
static void show_help_bsf(const char *name)
{
const AVBitStreamFilter *bsf = av_bsf_get_by_name(name);
if (!name) {
av_log(NULL, AV_LOG_ERROR, "No bitstream filter name specified.\n");
return;
} else if (!bsf) {
av_log(NULL, AV_LOG_ERROR, "Unknown bit stream filter '%s'.\n", name);
return;
}
printf("Bit stream filter %s\n", bsf->name);
PRINT_CODEC_SUPPORTED(bsf, codec_ids, enum AVCodecID, "codecs",
AV_CODEC_ID_NONE, GET_CODEC_NAME);
if (bsf->priv_class)
show_help_children(bsf->priv_class, AV_OPT_FLAG_BSF_PARAM);
}
int show_help(void *optctx, const char *opt, const char *arg)
{
char *topic, *par;
@ -1883,6 +1997,8 @@ int show_help(void *optctx, const char *opt, const char *arg)
} else if (!strcmp(topic, "filter")) {
show_help_filter(par);
#endif
} else if (!strcmp(topic, "bsf")) {
show_help_bsf(par);
} else {
show_help_default(topic, par);
}
@ -1974,7 +2090,7 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
codec = s->oformat ? avcodec_find_encoder(codec_id)
: avcodec_find_decoder(codec_id);
switch (st->codec->codec_type) {
switch (st->codecpar->codec_type) {
case AVMEDIA_TYPE_VIDEO:
prefix = 'v';
flags |= AV_OPT_FLAG_VIDEO_PARAM;
@ -2032,7 +2148,7 @@ AVDictionary **setup_find_stream_info_opts(AVFormatContext *s,
return NULL;
}
for (i = 0; i < s->nb_streams; i++)
opts[i] = filter_codec_opts(codec_opts, s->streams[i]->codec->codec_id,
opts[i] = filter_codec_opts(codec_opts, s->streams[i]->codecpar->codec_id,
s, s->streams[i], NULL);
return opts;
}
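
The two hunks above are part of the AVStream->codec to AVStream->codecpar migration (the embedded AVCodecContext is deprecated since FFmpeg 3.1). A trivial sketch of the pattern, with a hypothetical helper name:

    #include <libavformat/avformat.h>

    /* Read the codec type/id from codecpar instead of the deprecated st->codec. */
    static void inspect_stream(const AVStream *st)
    {
        enum AVMediaType type = st->codecpar->codec_type;  /* was st->codec->codec_type */
        enum AVCodecID   id   = st->codecpar->codec_id;    /* was st->codec->codec_id   */
        (void)type;
        (void)id;
    }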
@ -2058,18 +2174,10 @@ void *grow_array(void *array, int elem_size, int *size, int new_size)
double get_rotation(AVStream *st)
{
AVDictionaryEntry *rotate_tag = av_dict_get(st->metadata, "rotate", NULL, 0);
uint8_t* displaymatrix = av_stream_get_side_data(st,
AV_PKT_DATA_DISPLAYMATRIX, NULL);
double theta = 0;
if (rotate_tag && *rotate_tag->value && strcmp(rotate_tag->value, "0")) {
char *tail;
theta = av_strtod(rotate_tag->value, &tail);
if (*tail)
theta = 0;
}
if (displaymatrix && !theta)
if (displaymatrix)
theta = -av_display_rotation_get((int32_t*) displaymatrix);
theta -= 360*floor(theta/360 + 0.9/360);
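
get_rotation() now relies only on the DISPLAYMATRIX side data and drops the legacy "rotate" metadata tag. A standalone sketch of the same computation (hypothetical helper, assuming FFmpeg 4.x):

    #include <math.h>
    #include <libavformat/avformat.h>
    #include <libavutil/display.h>

    static double stream_rotation(AVStream *st)
    {
        double theta = 0;
        uint8_t *dm = av_stream_get_side_data(st, AV_PKT_DATA_DISPLAYMATRIX, NULL);

        if (dm)
            theta = -av_display_rotation_get((int32_t *)dm);

        /* Normalize into [0, 360) with the same rounding guard as above. */
        theta -= 360 * floor(theta / 360 + 0.9 / 360);
        return theta;
    }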
@ -2092,7 +2200,7 @@ static int print_device_sources(AVInputFormat *fmt, AVDictionary *opts)
if (!fmt || !fmt->priv_class || !AV_IS_INPUT_DEVICE(fmt->priv_class->category))
return AVERROR(EINVAL);
printf("Audo-detected sources for %s:\n", fmt->name);
printf("Auto-detected sources for %s:\n", fmt->name);
if (!fmt->get_device_list) {
ret = AVERROR(ENOSYS);
printf("Cannot list sources. Not implemented.\n");
@ -2122,7 +2230,7 @@ static int print_device_sinks(AVOutputFormat *fmt, AVDictionary *opts)
if (!fmt || !fmt->priv_class || !AV_IS_OUTPUT_DEVICE(fmt->priv_class->category))
return AVERROR(EINVAL);
printf("Audo-detected sinks for %s:\n", fmt->name);
printf("Auto-detected sinks for %s:\n", fmt->name);
if (!fmt->get_device_list) {
ret = AVERROR(ENOSYS);
printf("Cannot list sinks. Not implemented.\n");

@ -19,8 +19,8 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef CMDUTILS_H
#define CMDUTILS_H
#ifndef FFTOOLS_CMDUTILS_H
#define FFTOOLS_CMDUTILS_H
#include <stdint.h>
@ -59,7 +59,12 @@ void register_exit(void (*cb)(int ret));
/**
* Wraps exit with a program-specific cleanup routine.
*/
int exit_program(int ret);
void exit_program(int ret) av_noreturn;
/**
* Initialize dynamic library loading
*/
void init_dynload(void);
/**
* Initialize the cmdutils option system, in particular
@ -100,12 +105,6 @@ int opt_max_alloc(void *optctx, const char *opt, const char *arg);
int opt_codec_debug(void *optctx, const char *opt, const char *arg);
#if CONFIG_OPENCL
int opt_opencl(void *optctx, const char *opt, const char *arg);
int opt_opencl_bench(void *optctx, const char *opt, const char *arg);
#endif
/**
* Limit the execution time.
*/
@ -150,6 +149,7 @@ typedef struct SpecifierOpt {
uint8_t *str;
int i;
int64_t i64;
uint64_t ui64;
float f;
double dbl;
} u;
@ -201,6 +201,47 @@ typedef struct OptionDef {
void show_help_options(const OptionDef *options, const char *msg, int req_flags,
int rej_flags, int alt_flags);
#if CONFIG_AVDEVICE
#define CMDUTILS_COMMON_OPTIONS_AVDEVICE \
{ "sources" , OPT_EXIT | HAS_ARG, { .func_arg = show_sources }, \
"list sources of the input device", "device" }, \
{ "sinks" , OPT_EXIT | HAS_ARG, { .func_arg = show_sinks }, \
"list sinks of the output device", "device" }, \
#else
#define CMDUTILS_COMMON_OPTIONS_AVDEVICE
#endif
#define CMDUTILS_COMMON_OPTIONS \
{ "L", OPT_EXIT, { .func_arg = show_license }, "show license" }, \
{ "h", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" }, \
{ "?", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" }, \
{ "help", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" }, \
{ "-help", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" }, \
{ "version", OPT_EXIT, { .func_arg = show_version }, "show version" }, \
{ "buildconf", OPT_EXIT, { .func_arg = show_buildconf }, "show build configuration" }, \
{ "formats", OPT_EXIT, { .func_arg = show_formats }, "show available formats" }, \
{ "muxers", OPT_EXIT, { .func_arg = show_muxers }, "show available muxers" }, \
{ "demuxers", OPT_EXIT, { .func_arg = show_demuxers }, "show available demuxers" }, \
{ "devices", OPT_EXIT, { .func_arg = show_devices }, "show available devices" }, \
{ "codecs", OPT_EXIT, { .func_arg = show_codecs }, "show available codecs" }, \
{ "decoders", OPT_EXIT, { .func_arg = show_decoders }, "show available decoders" }, \
{ "encoders", OPT_EXIT, { .func_arg = show_encoders }, "show available encoders" }, \
{ "bsfs", OPT_EXIT, { .func_arg = show_bsfs }, "show available bit stream filters" }, \
{ "protocols", OPT_EXIT, { .func_arg = show_protocols }, "show available protocols" }, \
{ "filters", OPT_EXIT, { .func_arg = show_filters }, "show available filters" }, \
{ "pix_fmts", OPT_EXIT, { .func_arg = show_pix_fmts }, "show available pixel formats" }, \
{ "layouts", OPT_EXIT, { .func_arg = show_layouts }, "show standard channel layouts" }, \
{ "sample_fmts", OPT_EXIT, { .func_arg = show_sample_fmts }, "show available audio sample formats" }, \
{ "colors", OPT_EXIT, { .func_arg = show_colors }, "show available color names" }, \
{ "loglevel", HAS_ARG, { .func_arg = opt_loglevel }, "set logging level", "loglevel" }, \
{ "v", HAS_ARG, { .func_arg = opt_loglevel }, "set logging level", "loglevel" }, \
{ "report", 0, { (void*)opt_report }, "generate a report" }, \
{ "max_alloc", HAS_ARG, { .func_arg = opt_max_alloc }, "set maximum size of a single allocated block", "bytes" }, \
{ "cpuflags", HAS_ARG | OPT_EXPERT, { .func_arg = opt_cpuflags }, "force specific cpu flags", "flags" }, \
{ "hide_banner", OPT_BOOL | OPT_EXPERT, {&hide_banner}, "do not show program banner", "hide_banner" }, \
CMDUTILS_COMMON_OPTIONS_AVDEVICE \
/**
* Show help for all options with given flags in class and all its
* children.
@ -436,6 +477,20 @@ int show_license(void *optctx, const char *opt, const char *arg);
*/
int show_formats(void *optctx, const char *opt, const char *arg);
/**
* Print a listing containing all the muxers supported by the
* program (including devices).
* This option processing function does not utilize the arguments.
*/
int show_muxers(void *optctx, const char *opt, const char *arg);
/**
* Print a listing containing all the demuxer supported by the
* program (including devices).
* This option processing function does not utilize the arguments.
*/
int show_demuxers(void *optctx, const char *opt, const char *arg);
/**
* Print a listing containing all the devices supported by the
* program.
@ -445,13 +500,13 @@ int show_devices(void *optctx, const char *opt, const char *arg);
#if CONFIG_AVDEVICE
/**
* Print a listing containing audodetected sinks of the output device.
* Print a listing containing autodetected sinks of the output device.
* Device name with options may be passed as an argument to limit results.
*/
int show_sinks(void *optctx, const char *opt, const char *arg);
/**
* Print a listing containing audodetected sources of the input device.
* Print a listing containing autodetected sources of the input device.
* Device name with options may be passed as an argument to limit results.
*/
int show_sources(void *optctx, const char *opt, const char *arg);
@ -570,6 +625,9 @@ void *grow_array(void *array, int elem_size, int *size, int new_size);
#define GET_PIX_FMT_NAME(pix_fmt)\
const char *name = av_get_pix_fmt_name(pix_fmt);
#define GET_CODEC_NAME(id)\
const char *name = avcodec_descriptor_get(id)->name;
#define GET_SAMPLE_FMT_NAME(sample_fmt)\
const char *name = av_get_sample_fmt_name(sample_fmt)
@ -587,4 +645,4 @@ void *grow_array(void *array, int elem_size, int *size, int new_size);
double get_rotation(AVStream *st);
#endif /* CMDUTILS_H */
#endif /* FFTOOLS_CMDUTILS_H */

File diff suppressed because it is too large.

File diff suppressed because it is too large.

@ -16,8 +16,8 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef FFMPEG_H
#define FFMPEG_H
#ifndef FFTOOLS_FFMPEG_H
#define FFTOOLS_FFMPEG_H
#include "config.h"
@ -25,10 +25,6 @@
#include <stdio.h>
#include <signal.h>
#if HAVE_PTHREADS
#include <pthread.h>
#endif
#include "cmdutils.h"
#include "libavformat/avformat.h"
@ -42,8 +38,10 @@
#include "libavutil/dict.h"
#include "libavutil/eval.h"
#include "libavutil/fifo.h"
#include "libavutil/hwcontext.h"
#include "libavutil/pixfmt.h"
#include "libavutil/rational.h"
#include "libavutil/thread.h"
#include "libavutil/threadmessage.h"
#include "libswresample/swresample.h"
@ -60,11 +58,10 @@
enum HWAccelID {
HWACCEL_NONE = 0,
HWACCEL_AUTO,
HWACCEL_VDPAU,
HWACCEL_DXVA2,
HWACCEL_VDA,
HWACCEL_GENERIC,
HWACCEL_VIDEOTOOLBOX,
HWACCEL_QSV,
HWACCEL_CUVID,
};
typedef struct HWAccel {
@ -74,6 +71,12 @@ typedef struct HWAccel {
enum AVPixelFormat pix_fmt;
} HWAccel;
typedef struct HWDevice {
const char *name;
enum AVHWDeviceType type;
AVBufferRef *device_ref;
} HWDevice;
/* select an input stream for an output stream */
typedef struct StreamMap {
int disabled; /* 1 is this mapping is disabled by a negative map */
@ -126,6 +129,8 @@ typedef struct OptionsContext {
int nb_hwaccels;
SpecifierOpt *hwaccel_devices;
int nb_hwaccel_devices;
SpecifierOpt *hwaccel_output_formats;
int nb_hwaccel_output_formats;
SpecifierOpt *autorotate;
int nb_autorotate;
@ -148,6 +153,7 @@ typedef struct OptionsContext {
float mux_preload;
float mux_max_delay;
int shortest;
int bitexact;
int video_disable;
int audio_disable;
@ -208,6 +214,8 @@ typedef struct OptionsContext {
int nb_pass;
SpecifierOpt *passlogfiles;
int nb_passlogfiles;
SpecifierOpt *max_muxing_queue_size;
int nb_max_muxing_queue_size;
SpecifierOpt *guess_layout_max;
int nb_guess_layout_max;
SpecifierOpt *apad;
@ -218,6 +226,10 @@ typedef struct OptionsContext {
int nb_disposition;
SpecifierOpt *program;
int nb_program;
SpecifierOpt *time_bases;
int nb_time_bases;
SpecifierOpt *enc_time_bases;
int nb_enc_time_bases;
} OptionsContext;
typedef struct InputFilter {
@ -225,6 +237,23 @@ typedef struct InputFilter {
struct InputStream *ist;
struct FilterGraph *graph;
uint8_t *name;
enum AVMediaType type; // AVMEDIA_TYPE_SUBTITLE for sub2video
AVFifoBuffer *frame_queue;
// parameters configured for this input
int format;
int width, height;
AVRational sample_aspect_ratio;
int sample_rate;
int channels;
uint64_t channel_layout;
AVBufferRef *hw_frames_ctx;
int eof;
} InputFilter;
typedef struct OutputFilter {
@ -236,6 +265,18 @@ typedef struct OutputFilter {
/* temporary storage until stream maps are processed */
AVFilterInOut *out_tmp;
enum AVMediaType type;
/* desired output stream properties */
int width, height;
AVRational frame_rate;
int format;
int sample_rate;
uint64_t channel_layout;
// those are only set if no format is specified and the encoder gives us multiple options
int *formats;
uint64_t *channel_layouts;
int *sample_rates;
} OutputFilter;
typedef struct FilterGraph {
@ -279,25 +320,21 @@ typedef struct InputStream {
int64_t min_pts; /* pts with the smallest value in a current stream */
int64_t max_pts; /* pts with the higher value in a current stream */
// when forcing constant input framerate through -r,
// this contains the pts that will be given to the next decoded frame
int64_t cfr_next_pts;
int64_t nb_samples; /* number of samples in the last decoded audio frame before looping */
double ts_scale;
int saw_first_ts;
int showed_multi_packet_warning;
AVDictionary *decoder_opts;
AVRational framerate; /* framerate forced with -r */
int top_field_first;
int guess_layout_max;
int autorotate;
int resample_height;
int resample_width;
int resample_pix_fmt;
int resample_sample_fmt;
int resample_sample_rate;
int resample_channels;
uint64_t resample_channel_layout;
int fix_sub_duration;
struct { /* previous decoded subtitle and related variables */
@ -309,6 +346,7 @@ typedef struct InputStream {
struct sub2video {
int64_t last_pts;
int64_t end_pts;
AVFifoBuffer *sub_queue; ///< queue of AVSubtitle* before filter init
AVFrame *frame;
int w, h;
} sub2video;
@ -324,16 +362,18 @@ typedef struct InputStream {
/* hwaccel options */
enum HWAccelID hwaccel_id;
enum AVHWDeviceType hwaccel_device_type;
char *hwaccel_device;
enum AVPixelFormat hwaccel_output_format;
/* hwaccel context */
enum HWAccelID active_hwaccel_id;
void *hwaccel_ctx;
void (*hwaccel_uninit)(AVCodecContext *s);
int (*hwaccel_get_buffer)(AVCodecContext *s, AVFrame *frame, int flags);
int (*hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame);
enum AVPixelFormat hwaccel_pix_fmt;
enum AVPixelFormat hwaccel_retrieved_pix_fmt;
AVBufferRef *hw_frames_ctx;
/* stats */
// combined size of all the packets read
@ -343,6 +383,11 @@ typedef struct InputStream {
// number of frames/samples retrieved from the decoder
uint64_t frames_decoded;
uint64_t samples_decoded;
int64_t *dts_buffer;
int nb_dts_buffer;
int got_output;
} InputStream;
typedef struct InputFile {
@ -367,7 +412,7 @@ typedef struct InputFile {
int rate_emu;
int accurate_seek;
#if HAVE_PTHREADS
#if HAVE_THREADS
AVThreadMessageQueue *in_thread_queue;
pthread_t thread; /* thread reading from this file */
int non_blocking; /* reading packets from the thread should not block */
@ -410,8 +455,15 @@ typedef struct OutputStream {
int64_t first_pts;
/* dts of the last packet sent to the muxer */
int64_t last_mux_dts;
AVBitStreamFilterContext *bitstream_filters;
// the timebase of the packets sent to the muxer
AVRational mux_timebase;
AVRational enc_timebase;
int nb_bitstream_filters;
AVBSFContext **bsf_ctx;
AVCodecContext *enc_ctx;
AVCodecParameters *ref_par; /* associated input codec parameters with encoders options applied */
AVCodec *enc;
int64_t max_frames;
AVFrame *filtered_frame;
@ -427,10 +479,12 @@ typedef struct OutputStream {
int force_fps;
int top_field_first;
int rotate_overridden;
double rotate_override_value;
AVRational frame_aspect_ratio;
/* forced key frames */
int64_t forced_kf_ref_pts;
int64_t *forced_kf_pts;
int forced_kf_count;
int forced_kf_index;
@ -458,6 +512,14 @@ typedef struct OutputStream {
OSTFinished finished; /* no more packets should be written for this stream */
int unavailable; /* true if the steram is unavailable (possibly temporarily) */
int stream_copy;
// init_output_stream() has been called for this stream
// The encoder and the bitstream filters have been initialized and the stream
// parameters are set in the AVStream.
int initialized;
int inputs_done;
const char *attachment_filename;
int copy_initial_nonkeyframes;
int copy_prior_start;
@ -465,8 +527,6 @@ typedef struct OutputStream {
int keep_pix_fmt;
AVCodecParserContext *parser;
/* stats */
// combined size of all the packets written
uint64_t data_size;
@ -479,6 +539,11 @@ typedef struct OutputStream {
/* packet quality factor */
int quality;
int max_muxing_queue_size;
/* the packets are buffered here until the muxer is ready to be initialized */
AVFifoBuffer *muxing_queue;
/* packet picture type */
int pict_type;
@ -495,6 +560,8 @@ typedef struct OutputFile {
uint64_t limit_filesize; /* filesize limit expressed in bytes */
int shortest;
int header_written;
} OutputFile;
extern InputStream **input_streams;
@ -538,13 +605,21 @@ extern int stdin_interaction;
extern int frame_bits_per_raw_sample;
extern AVIOContext *progress_avio;
extern float max_error_rate;
extern int vdpau_api_ver;
extern char *videotoolbox_pixfmt;
extern int filter_nbthreads;
extern int filter_complex_nbthreads;
extern int vstats_version;
extern const AVIOInterruptCB int_cb;
extern const OptionDef options[];
extern const HWAccel hwaccels[];
extern AVBufferRef *hw_device_ctx;
#if CONFIG_QSV
extern char *qsv_device;
#endif
extern HWDevice *filter_hw_device;
void term_init(void);
@ -565,19 +640,31 @@ void choose_sample_fmt(AVStream *st, AVCodec *codec);
int configure_filtergraph(FilterGraph *fg);
int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out);
void check_filter_outputs(void);
int ist_in_filtergraph(FilterGraph *fg, InputStream *ist);
FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost);
int filtergraph_is_simple(FilterGraph *fg);
int init_simple_filtergraph(InputStream *ist, OutputStream *ost);
int init_complex_filtergraph(FilterGraph *fg);
void sub2video_update(InputStream *ist, AVSubtitle *sub);
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame);
int ffmpeg_parse_options(int argc, char **argv);
int vdpau_init(AVCodecContext *s);
int dxva2_init(AVCodecContext *s);
int vda_init(AVCodecContext *s);
int videotoolbox_init(AVCodecContext *s);
int qsv_init(AVCodecContext *s);
int qsv_transcode_init(OutputStream *ost);
int cuvid_init(AVCodecContext *s);
HWDevice *hw_device_get_by_name(const char *name);
int hw_device_init_from_string(const char *arg, HWDevice **dev);
void hw_device_free_all(void);
int hw_device_setup_for_decode(InputStream *ist);
int hw_device_setup_for_encode(OutputStream *ost);
int hwaccel_decode_init(AVCodecContext *avctx);
int run(int argc, char **argv);
#endif /* FFMPEG_H */
#endif /* FFTOOLS_FFMPEG_H */
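
The new HWDevice plumbing declared above (hw_device_init_from_string(), hw_device_setup_for_decode(), filter_hw_device, hw_device_ctx) is internal to the ffmpeg tool; the public API it builds on is av_hwdevice_ctx_create(). A hedged sketch of that underlying call (assumed inputs: a device type name such as "vaapi" or "cuda" and an optional device path):

    #include <libavutil/hwcontext.h>

    /* e.g. create_hw_device("vaapi", "/dev/dri/renderD128") */
    static AVBufferRef *create_hw_device(const char *type_name, const char *device)
    {
        enum AVHWDeviceType type = av_hwdevice_find_type_by_name(type_name);
        AVBufferRef *ref = NULL;

        if (type == AV_HWDEVICE_TYPE_NONE)
            return NULL;
        if (av_hwdevice_ctx_create(&ref, type, device, NULL, 0) < 0)
            return NULL;
        return ref;
    }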

@ -20,10 +20,11 @@
#include <stdint.h>
#include "ffmpeg/ffmpeg.h"
#include "ffmpeg.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
@ -36,7 +37,6 @@
#include "libavutil/imgutils.h"
#include "libavutil/samplefmt.h"
static const enum AVPixelFormat *get_compliance_unofficial_pix_fmts(enum AVCodecID codec_id, const enum AVPixelFormat default_formats[])
{
static const enum AVPixelFormat mjpeg_formats[] =
@ -63,6 +63,7 @@ enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx, AVCod
if (codec && codec->pix_fmts) {
const enum AVPixelFormat *p = codec->pix_fmts;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(target);
//FIXME: This should check for AV_PIX_FMT_FLAG_ALPHA after PAL8 pixel format without alpha is implemented
int has_alpha = desc ? desc->nb_components % 2 == 0 : 0;
enum AVPixelFormat best= AV_PIX_FMT_NONE;
@ -92,33 +93,33 @@ void choose_sample_fmt(AVStream *st, AVCodec *codec)
if (codec && codec->sample_fmts) {
const enum AVSampleFormat *p = codec->sample_fmts;
for (; *p != -1; p++) {
if (*p == st->codec->sample_fmt)
if (*p == st->codecpar->format)
break;
}
if (*p == -1) {
if((codec->capabilities & AV_CODEC_CAP_LOSSLESS) && av_get_sample_fmt_name(st->codec->sample_fmt) > av_get_sample_fmt_name(codec->sample_fmts[0]))
if((codec->capabilities & AV_CODEC_CAP_LOSSLESS) && av_get_sample_fmt_name(st->codecpar->format) > av_get_sample_fmt_name(codec->sample_fmts[0]))
av_log(NULL, AV_LOG_ERROR, "Conversion will not be lossless.\n");
if(av_get_sample_fmt_name(st->codec->sample_fmt))
if(av_get_sample_fmt_name(st->codecpar->format))
av_log(NULL, AV_LOG_WARNING,
"Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n",
av_get_sample_fmt_name(st->codec->sample_fmt),
av_get_sample_fmt_name(st->codecpar->format),
codec->name,
av_get_sample_fmt_name(codec->sample_fmts[0]));
st->codec->sample_fmt = codec->sample_fmts[0];
st->codecpar->format = codec->sample_fmts[0];
}
}
}
static char *choose_pix_fmts(OutputStream *ost)
static char *choose_pix_fmts(OutputFilter *ofilter)
{
OutputStream *ost = ofilter->ost;
AVDictionaryEntry *strict_dict = av_dict_get(ost->encoder_opts, "strict", NULL, 0);
if (strict_dict)
// used by choose_pixel_fmt() and below
av_opt_set(ost->enc_ctx, "strict", strict_dict->value, 0);
if (ost->keep_pix_fmt) {
if (ost->filter)
avfilter_graph_set_auto_convert(ost->filter->graph->graph,
avfilter_graph_set_auto_convert(ofilter->graph->graph,
AVFILTER_AUTO_CONVERT_NONE);
if (ost->enc_ctx->pix_fmt == AV_PIX_FMT_NONE)
return NULL;
@ -153,13 +154,13 @@ static char *choose_pix_fmts(OutputStream *ost)
/* Define a function for building a string containing a list of
* allowed formats. */
#define DEF_CHOOSE_FORMAT(type, var, supported_list, none, get_name) \
static char *choose_ ## var ## s(OutputStream *ost) \
#define DEF_CHOOSE_FORMAT(suffix, type, var, supported_list, none, get_name) \
static char *choose_ ## suffix (OutputFilter *ofilter) \
{ \
if (ost->enc_ctx->var != none) { \
get_name(ost->enc_ctx->var); \
if (ofilter->var != none) { \
get_name(ofilter->var); \
return av_strdup(name); \
} else if (ost->enc && ost->enc->supported_list) { \
} else if (ofilter->supported_list) { \
const type *p; \
AVIOContext *s = NULL; \
uint8_t *ret; \
@ -168,7 +169,7 @@ static char *choose_ ## var ## s(OutputStream *ost) \
if (avio_open_dyn_buf(&s) < 0) \
exit_program(1); \
\
for (p = ost->enc->supported_list; *p != none; p++) { \
for (p = ofilter->supported_list; *p != none; p++) { \
get_name(*p); \
avio_printf(s, "%s|", name); \
} \
@ -179,19 +180,19 @@ static char *choose_ ## var ## s(OutputStream *ost) \
return NULL; \
}
// DEF_CHOOSE_FORMAT(enum AVPixelFormat, pix_fmt, pix_fmts, AV_PIX_FMT_NONE,
//DEF_CHOOSE_FORMAT(pix_fmts, enum AVPixelFormat, format, formats, AV_PIX_FMT_NONE,
// GET_PIX_FMT_NAME)
DEF_CHOOSE_FORMAT(enum AVSampleFormat, sample_fmt, sample_fmts,
DEF_CHOOSE_FORMAT(sample_fmts, enum AVSampleFormat, format, formats,
AV_SAMPLE_FMT_NONE, GET_SAMPLE_FMT_NAME)
DEF_CHOOSE_FORMAT(int, sample_rate, supported_samplerates, 0,
DEF_CHOOSE_FORMAT(sample_rates, int, sample_rate, sample_rates, 0,
GET_SAMPLE_RATE_NAME)
DEF_CHOOSE_FORMAT(uint64_t, channel_layout, channel_layouts, 0,
DEF_CHOOSE_FORMAT(channel_layouts, uint64_t, channel_layout, channel_layouts, 0,
GET_CH_LAYOUT_NAME)
FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost)
int init_simple_filtergraph(InputStream *ist, OutputStream *ost)
{
FilterGraph *fg = av_mallocz(sizeof(*fg));
@ -204,6 +205,7 @@ FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost)
exit_program(1);
fg->outputs[0]->ost = ost;
fg->outputs[0]->graph = fg;
fg->outputs[0]->format = -1;
ost->filter = fg->outputs[0];
@ -212,6 +214,11 @@ FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost)
exit_program(1);
fg->inputs[0]->ist = ist;
fg->inputs[0]->graph = fg;
fg->inputs[0]->format = -1;
fg->inputs[0]->frame_queue = av_fifo_alloc(8 * sizeof(AVFrame*));
if (!fg->inputs[0]->frame_queue)
exit_program(1);
GROW_ARRAY(ist->filters, ist->nb_filters);
ist->filters[ist->nb_filters - 1] = fg->inputs[0];
@ -219,7 +226,26 @@ FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost)
GROW_ARRAY(filtergraphs, nb_filtergraphs);
filtergraphs[nb_filtergraphs - 1] = fg;
return fg;
return 0;
}
static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
{
AVFilterContext *ctx = inout->filter_ctx;
AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;
AVIOContext *pb;
uint8_t *res = NULL;
if (avio_open_dyn_buf(&pb) < 0)
exit_program(1);
avio_printf(pb, "%s", ctx->filter->name);
if (nb_pads > 1)
avio_printf(pb, ":%s", avfilter_pad_get_name(pads, inout->pad_idx));
avio_w8(pb, 0);
avio_close_dyn_buf(pb, &res);
return res;
}
static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
@ -249,7 +275,7 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
s = input_files[file_idx]->ctx;
for (i = 0; i < s->nb_streams; i++) {
enum AVMediaType stream_type = s->streams[i]->codec->codec_type;
enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
if (stream_type != type &&
!(stream_type == AVMEDIA_TYPE_SUBTITLE &&
type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
@ -265,10 +291,17 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
exit_program(1);
}
ist = input_streams[input_files[file_idx]->ist_index + st->index];
if (ist->user_set_discard == AVDISCARD_ALL) {
av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
"matches a disabled input stream.\n", p, fg->graph_desc);
exit_program(1);
}
} else {
/* find the first unused stream of corresponding type */
for (i = 0; i < nb_input_streams; i++) {
ist = input_streams[i];
if (ist->user_set_discard == AVDISCARD_ALL)
continue;
if (ist->dec_ctx->codec_type == type && ist->discard)
break;
}
@ -290,6 +323,13 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
exit_program(1);
fg->inputs[fg->nb_inputs - 1]->ist = ist;
fg->inputs[fg->nb_inputs - 1]->graph = fg;
fg->inputs[fg->nb_inputs - 1]->format = -1;
fg->inputs[fg->nb_inputs - 1]->type = ist->st->codecpar->codec_type;
fg->inputs[fg->nb_inputs - 1]->name = describe_filter_link(fg, in, 1);
fg->inputs[fg->nb_inputs - 1]->frame_queue = av_fifo_alloc(8 * sizeof(AVFrame*));
if (!fg->inputs[fg->nb_inputs - 1]->frame_queue)
exit_program(1);
GROW_ARRAY(ist->filters, ist->nb_filters);
ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
@ -306,6 +346,7 @@ int init_complex_filtergraph(FilterGraph *fg)
graph = avfilter_graph_alloc();
if (!graph)
return AVERROR(ENOMEM);
graph->nb_threads = 1;
ret = avfilter_graph_parse2(graph, fg->graph_desc, &inputs, &outputs);
if (ret < 0)
@ -324,6 +365,7 @@ int init_complex_filtergraph(FilterGraph *fg)
fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
fg->outputs[fg->nb_outputs - 1]->type = avfilter_pad_get_type(cur->filter_ctx->output_pads,
cur->pad_idx);
fg->outputs[fg->nb_outputs - 1]->name = describe_filter_link(fg, cur, 0);
cur = cur->next;
fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
}
@ -412,13 +454,12 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
char *pix_fmts;
OutputStream *ost = ofilter->ost;
OutputFile *of = output_files[ost->file_index];
AVCodecContext *codec = ost->enc_ctx;
AVFilterContext *last_filter = out->filter_ctx;
int pad_idx = out->pad_idx;
int ret;
char name[255];
snprintf(name, sizeof(name), "output stream %d:%d", ost->file_index, ost->index);
snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
ret = avfilter_graph_create_filter(&ofilter->filter,
avfilter_get_by_name("buffersink"),
name, NULL, NULL, fg->graph);
@ -426,21 +467,20 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
if (ret < 0)
return ret;
if (codec->width || codec->height) {
if (ofilter->width || ofilter->height) {
char args[255];
AVFilterContext *filter;
AVDictionaryEntry *e = NULL;
snprintf(args, sizeof(args), "%d:%d",
codec->width,
codec->height);
ofilter->width, ofilter->height);
while ((e = av_dict_get(ost->sws_dict, "", e,
AV_DICT_IGNORE_SUFFIX))) {
av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
}
snprintf(name, sizeof(name), "scaler for output stream %d:%d",
snprintf(name, sizeof(name), "scaler_out_%d_%d",
ost->file_index, ost->index);
if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
name, args, NULL, fg->graph)) < 0)
@ -452,9 +492,9 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
pad_idx = 0;
}
if ((pix_fmts = choose_pix_fmts(ost))) {
if ((pix_fmts = choose_pix_fmts(ofilter))) {
AVFilterContext *filter;
snprintf(name, sizeof(name), "pixel format for output stream %d:%d",
snprintf(name, sizeof(name), "format_out_%d_%d",
ost->file_index, ost->index);
ret = avfilter_graph_create_filter(&filter,
avfilter_get_by_name("format"),
@ -475,7 +515,7 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
snprintf(args, sizeof(args), "fps=%d/%d", ost->frame_rate.num,
ost->frame_rate.den);
snprintf(name, sizeof(name), "fps for output stream %d:%d",
snprintf(name, sizeof(name), "fps_out_%d_%d",
ost->file_index, ost->index);
ret = avfilter_graph_create_filter(&fps, avfilter_get_by_name("fps"),
name, args, NULL, fg->graph);
@ -489,7 +529,7 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
pad_idx = 0;
}
snprintf(name, sizeof(name), "trim for output stream %d:%d",
snprintf(name, sizeof(name), "trim_out_%d_%d",
ost->file_index, ost->index);
ret = insert_trim(of->start_time, of->recording_time,
&last_filter, &pad_idx, name);
@ -514,7 +554,7 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
char name[255];
int ret;
snprintf(name, sizeof(name), "output stream %d:%d", ost->file_index, ost->index);
snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
ret = avfilter_graph_create_filter(&ofilter->filter,
avfilter_get_by_name("abuffersink"),
name, NULL, NULL, fg->graph);
@ -559,9 +599,9 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
if (codec->channels && !codec->channel_layout)
codec->channel_layout = av_get_default_channel_layout(codec->channels);
sample_fmts = choose_sample_fmts(ost);
sample_rates = choose_sample_rates(ost);
channel_layouts = choose_channel_layouts(ost);
sample_fmts = choose_sample_fmts(ofilter);
sample_rates = choose_sample_rates(ofilter);
channel_layouts = choose_channel_layouts(ofilter);
if (sample_fmts || sample_rates || channel_layouts) {
AVFilterContext *format;
char args[256];
@ -581,7 +621,7 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
av_freep(&sample_rates);
av_freep(&channel_layouts);
snprintf(name, sizeof(name), "audio format for output stream %d:%d",
snprintf(name, sizeof(name), "format_out_%d_%d",
ost->file_index, ost->index);
ret = avfilter_graph_create_filter(&format,
avfilter_get_by_name("aformat"),
@ -609,7 +649,7 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
int i;
for (i=0; i<of->ctx->nb_streams; i++)
if (of->ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
if (of->ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
break;
if (i<of->ctx->nb_streams) {
@ -631,30 +671,10 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
return 0;
}
#define DESCRIBE_FILTER_LINK(f, inout, in) \
{ \
AVFilterContext *ctx = inout->filter_ctx; \
AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads; \
int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs; \
AVIOContext *pb; \
\
if (avio_open_dyn_buf(&pb) < 0) \
exit_program(1); \
\
avio_printf(pb, "%s", ctx->filter->name); \
if (nb_pads > 1) \
avio_printf(pb, ":%s", avfilter_pad_get_name(pads, inout->pad_idx));\
avio_w8(pb, 0); \
avio_close_dyn_buf(pb, &f->name); \
}
int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
{
av_freep(&ofilter->name);
DESCRIBE_FILTER_LINK(ofilter, out, 0);
if (!ofilter->ost) {
av_log(NULL, AV_LOG_FATAL, "Filter %s has a unconnected output\n", ofilter->name);
av_log(NULL, AV_LOG_FATAL, "Filter %s has an unconnected output\n", ofilter->name);
exit_program(1);
}
@ -665,21 +685,36 @@ int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOu
}
}
static int sub2video_prepare(InputStream *ist)
void check_filter_outputs(void)
{
int i;
for (i = 0; i < nb_filtergraphs; i++) {
int n;
for (n = 0; n < filtergraphs[i]->nb_outputs; n++) {
OutputFilter *output = filtergraphs[i]->outputs[n];
if (!output->ost) {
av_log(NULL, AV_LOG_FATAL, "Filter %s has an unconnected output\n", output->name);
exit_program(1);
}
}
}
}
static int sub2video_prepare(InputStream *ist, InputFilter *ifilter)
{
AVFormatContext *avf = input_files[ist->file_index]->ctx;
int i, w, h;
/* Compute the size of the canvas for the subtitles stream.
If the subtitles codec has set a size, use it. Otherwise use the
If the subtitles codecpar has set a size, use it. Otherwise use the
maximum dimensions of the video streams in the same file. */
w = ist->dec_ctx->width;
h = ist->dec_ctx->height;
w = ifilter->width;
h = ifilter->height;
if (!(w && h)) {
for (i = 0; i < avf->nb_streams; i++) {
if (avf->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
w = FFMAX(w, avf->streams[i]->codec->width);
h = FFMAX(h, avf->streams[i]->codec->height);
if (avf->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
w = FFMAX(w, avf->streams[i]->codecpar->width);
h = FFMAX(h, avf->streams[i]->codecpar->height);
}
}
if (!(w && h)) {
@ -688,17 +723,21 @@ static int sub2video_prepare(InputStream *ist)
}
av_log(avf, AV_LOG_INFO, "sub2video: using %dx%d canvas\n", w, h);
}
ist->sub2video.w = ist->resample_width = w;
ist->sub2video.h = ist->resample_height = h;
ist->sub2video.w = ifilter->width = w;
ist->sub2video.h = ifilter->height = h;
ifilter->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
ifilter->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
/* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
palettes for all rectangles are identical or compatible */
ist->resample_pix_fmt = ist->dec_ctx->pix_fmt = AV_PIX_FMT_RGB32;
ifilter->format = AV_PIX_FMT_RGB32;
ist->sub2video.frame = av_frame_alloc();
if (!ist->sub2video.frame)
return AVERROR(ENOMEM);
ist->sub2video.last_pts = INT64_MIN;
ist->sub2video.end_pts = INT64_MIN;
return 0;
}
@ -717,32 +756,36 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
char name[255];
int ret, pad_idx = 0;
int64_t tsoffset = 0;
AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
if (!par)
return AVERROR(ENOMEM);
memset(par, 0, sizeof(*par));
par->format = AV_PIX_FMT_NONE;
if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
av_log(NULL, AV_LOG_ERROR, "Cannot connect video filter to audio input\n");
return AVERROR(EINVAL);
ret = AVERROR(EINVAL);
goto fail;
}
if (!fr.num)
fr = av_guess_frame_rate(input_files[ist->file_index]->ctx, ist->st, NULL);
if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
ret = sub2video_prepare(ist);
ret = sub2video_prepare(ist, ifilter);
if (ret < 0)
return ret;
goto fail;
}
sar = ist->st->sample_aspect_ratio.num ?
ist->st->sample_aspect_ratio :
ist->dec_ctx->sample_aspect_ratio;
sar = ifilter->sample_aspect_ratio;
if(!sar.den)
sar = (AVRational){0,1};
av_bprint_init(&args, 0, 1);
av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
av_bprintf(&args,
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
"pixel_aspect=%d/%d:sws_param=flags=%d", ist->resample_width,
ist->resample_height,
ist->hwaccel_retrieve_data ? ist->hwaccel_retrieved_pix_fmt : ist->resample_pix_fmt,
"pixel_aspect=%d/%d:sws_param=flags=%d",
ifilter->width, ifilter->height, ifilter->format,
tb.num, tb.den, sar.num, sar.den,
SWS_BILINEAR + ((ist->dec_ctx->flags&AV_CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
if (fr.num && fr.den)
@ -750,9 +793,15 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
ist->file_index, ist->st->index);
if ((ret = avfilter_graph_create_filter(&ifilter->filter, buffer_filt, name,
args.str, NULL, fg->graph)) < 0)
return ret;
goto fail;
par->hw_frames_ctx = ifilter->hw_frames_ctx;
ret = av_buffersrc_parameters_set(ifilter->filter, par);
if (ret < 0)
goto fail;
av_freep(&par);
last_filter = ifilter->filter;
if (ist->autorotate) {
@ -776,27 +825,10 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
return ret;
}
if (ist->framerate.num) {
AVFilterContext *setpts;
snprintf(name, sizeof(name), "force CFR for input from stream %d:%d",
ist->file_index, ist->st->index);
if ((ret = avfilter_graph_create_filter(&setpts,
avfilter_get_by_name("setpts"),
name, "N", NULL,
fg->graph)) < 0)
return ret;
if ((ret = avfilter_link(last_filter, 0, setpts, 0)) < 0)
return ret;
last_filter = setpts;
}
if (do_deinterlace) {
AVFilterContext *yadif;
snprintf(name, sizeof(name), "deinterlace input from stream %d:%d",
snprintf(name, sizeof(name), "deinterlace_in_%d_%d",
ist->file_index, ist->st->index);
if ((ret = avfilter_graph_create_filter(&yadif,
avfilter_get_by_name("yadif"),
@ -810,7 +842,7 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
last_filter = yadif;
}
snprintf(name, sizeof(name), "trim for input stream %d:%d",
snprintf(name, sizeof(name), "trim_in_%d_%d",
ist->file_index, ist->st->index);
if (copy_ts) {
tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
@ -826,6 +858,10 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
return ret;
return 0;
fail:
av_freep(&par);
return ret;
}
static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
@ -847,15 +883,15 @@ static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
1, ist->dec_ctx->sample_rate,
ist->dec_ctx->sample_rate,
av_get_sample_fmt_name(ist->dec_ctx->sample_fmt));
if (ist->dec_ctx->channel_layout)
1, ifilter->sample_rate,
ifilter->sample_rate,
av_get_sample_fmt_name(ifilter->format));
if (ifilter->channel_layout)
av_bprintf(&args, ":channel_layout=0x%"PRIx64,
ist->dec_ctx->channel_layout);
ifilter->channel_layout);
else
av_bprintf(&args, ":channels=%d", ist->dec_ctx->channels);
snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
av_bprintf(&args, ":channels=%d", ifilter->channels);
snprintf(name, sizeof(name), "graph_%d_in_%d_%d", fg->index,
ist->file_index, ist->st->index);
if ((ret = avfilter_graph_create_filter(&ifilter->filter, abuffer_filt,
@ -870,7 +906,7 @@ static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
av_log(NULL, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
"similarly to -af " filter_name "=%s.\n", arg); \
\
snprintf(name, sizeof(name), "graph %d %s for input stream %d:%d", \
snprintf(name, sizeof(name), "graph_%d_%s_in_%d_%d", \
fg->index, filter_name, ist->file_index, ist->st->index); \
ret = avfilter_graph_create_filter(&filt_ctx, \
avfilter_get_by_name(filter_name), \
@ -941,9 +977,6 @@ static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
AVFilterInOut *in)
{
av_freep(&ifilter->name);
DESCRIBE_FILTER_LINK(ifilter, in, 1);
if (!ifilter->ist->dec) {
av_log(NULL, AV_LOG_ERROR,
"No decoder for stream #%d:%d, filtering impossible\n",
@ -957,14 +990,24 @@ static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
}
}
static void cleanup_filtergraph(FilterGraph *fg)
{
int i;
for (i = 0; i < fg->nb_outputs; i++)
fg->outputs[i]->filter = (AVFilterContext *)NULL;
for (i = 0; i < fg->nb_inputs; i++)
fg->inputs[i]->filter = (AVFilterContext *)NULL;
avfilter_graph_free(&fg->graph);
}
int configure_filtergraph(FilterGraph *fg)
{
AVFilterInOut *inputs, *outputs, *cur;
int ret, i, simple = !fg->graph_desc;
int ret, i, simple = filtergraph_is_simple(fg);
const char *graph_desc = simple ? fg->outputs[0]->ost->avfilter :
fg->graph_desc;
avfilter_graph_free(&fg->graph);
cleanup_filtergraph(fg);
if (!(fg->graph = avfilter_graph_alloc()))
return AVERROR(ENOMEM);
@ -973,6 +1016,8 @@ int configure_filtergraph(FilterGraph *fg)
char args[512];
AVDictionaryEntry *e = NULL;
fg->graph->nb_threads = filter_nbthreads;
args[0] = 0;
while ((e = av_dict_get(ost->sws_dict, "", e,
AV_DICT_IGNORE_SUFFIX))) {
@ -998,15 +1043,28 @@ int configure_filtergraph(FilterGraph *fg)
}
if (strlen(args))
args[strlen(args) - 1] = '\0';
fg->graph->resample_lavr_opts = av_strdup(args);
e = av_dict_get(ost->encoder_opts, "threads", NULL, 0);
if (e)
av_opt_set(fg->graph, "threads", e->value, 0);
} else {
fg->graph->nb_threads = filter_complex_nbthreads;
}
if ((ret = avfilter_graph_parse2(fg->graph, graph_desc, &inputs, &outputs)) < 0)
return ret;
goto fail;
if (filter_hw_device || hw_device_ctx) {
AVBufferRef *device = filter_hw_device ? filter_hw_device->device_ref
: hw_device_ctx;
for (i = 0; i < fg->graph->nb_filters; i++) {
fg->graph->filters[i]->hw_device_ctx = av_buffer_ref(device);
if (!fg->graph->filters[i]->hw_device_ctx) {
ret = AVERROR(ENOMEM);
goto fail;
}
}
}
if (simple && (!inputs || inputs->next || !outputs || outputs->next)) {
const char *num_inputs;
@ -1030,14 +1088,15 @@ int configure_filtergraph(FilterGraph *fg)
" However, it had %s input(s) and %s output(s)."
" Please adjust, or use a complex filtergraph (-filter_complex) instead.\n",
graph_desc, num_inputs, num_outputs);
return AVERROR(EINVAL);
ret = AVERROR(EINVAL);
goto fail;
}
for (cur = inputs, i = 0; cur; cur = cur->next, i++)
if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0) {
avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);
return ret;
goto fail;
}
avfilter_inout_free(&inputs);
@ -1046,7 +1105,22 @@ int configure_filtergraph(FilterGraph *fg)
avfilter_inout_free(&outputs);
if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
return ret;
goto fail;
/* limit the lists of allowed formats to the ones selected, to
* make sure they stay the same if the filtergraph is reconfigured later */
for (i = 0; i < fg->nb_outputs; i++) {
OutputFilter *ofilter = fg->outputs[i];
AVFilterContext *sink = ofilter->filter;
ofilter->format = av_buffersink_get_format(sink);
ofilter->width = av_buffersink_get_w(sink);
ofilter->height = av_buffersink_get_h(sink);
ofilter->sample_rate = av_buffersink_get_sample_rate(sink);
ofilter->channel_layout = av_buffersink_get_channel_layout(sink);
}
fg->reconfiguration = 1;
@ -1056,8 +1130,9 @@ int configure_filtergraph(FilterGraph *fg)
/* identical to the same check in ffmpeg.c, needed because
complex filter graphs are initialized earlier */
av_log(NULL, AV_LOG_ERROR, "Encoder (codec %s) not found for output stream #%d:%d\n",
avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
return AVERROR(EINVAL);
avcodec_get_name(ost->st->codecpar->codec_id), ost->file_index, ost->index);
ret = AVERROR(EINVAL);
goto fail;
}
if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
!(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
@ -1065,6 +1140,66 @@ int configure_filtergraph(FilterGraph *fg)
ost->enc_ctx->frame_size);
}
for (i = 0; i < fg->nb_inputs; i++) {
while (av_fifo_size(fg->inputs[i]->frame_queue)) {
AVFrame *tmp;
av_fifo_generic_read(fg->inputs[i]->frame_queue, &tmp, sizeof(tmp), NULL);
ret = av_buffersrc_add_frame(fg->inputs[i]->filter, tmp);
av_frame_free(&tmp);
if (ret < 0)
goto fail;
}
}
/* send the EOFs for the finished inputs */
for (i = 0; i < fg->nb_inputs; i++) {
if (fg->inputs[i]->eof) {
ret = av_buffersrc_add_frame(fg->inputs[i]->filter, NULL);
if (ret < 0)
goto fail;
}
}
/* process queued up subtitle packets */
for (i = 0; i < fg->nb_inputs; i++) {
InputStream *ist = fg->inputs[i]->ist;
if (ist->sub2video.sub_queue && ist->sub2video.frame) {
while (av_fifo_size(ist->sub2video.sub_queue)) {
AVSubtitle tmp;
av_fifo_generic_read(ist->sub2video.sub_queue, &tmp, sizeof(tmp), NULL);
sub2video_update(ist, &tmp);
avsubtitle_free(&tmp);
}
}
}
return 0;
fail:
cleanup_filtergraph(fg);
return ret;
}
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
{
av_buffer_unref(&ifilter->hw_frames_ctx);
ifilter->format = frame->format;
ifilter->width = frame->width;
ifilter->height = frame->height;
ifilter->sample_aspect_ratio = frame->sample_aspect_ratio;
ifilter->sample_rate = frame->sample_rate;
ifilter->channels = frame->channels;
ifilter->channel_layout = frame->channel_layout;
if (frame->hw_frames_ctx) {
ifilter->hw_frames_ctx = av_buffer_ref(frame->hw_frames_ctx);
if (!ifilter->hw_frames_ctx)
return AVERROR(ENOMEM);
}
return 0;
}
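
The frame_queue drained in configure_filtergraph() above is an AVFifoBuffer holding AVFrame pointers. A small sketch of the buffering side (hypothetical helper, mirroring what the decoder path does before the graph is configured):

    #include <libavutil/error.h>
    #include <libavutil/fifo.h>
    #include <libavutil/frame.h>

    /* Queue a reference to frame; the consumer reads pointers back with
     * av_fifo_generic_read() and hands them to av_buffersrc_add_frame(). */
    static int queue_frame(AVFifoBuffer *q, AVFrame *frame)
    {
        AVFrame *copy = av_frame_clone(frame);
        if (!copy)
            return AVERROR(ENOMEM);

        if (!av_fifo_space(q)) {  /* grow the FIFO by one pointer slot if full */
            int ret = av_fifo_realloc2(q, av_fifo_size(q) + sizeof(copy));
            if (ret < 0) {
                av_frame_free(&copy);
                return ret;
            }
        }
        av_fifo_generic_write(q, &copy, sizeof(copy), NULL);
        return 0;
    }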
@ -1077,3 +1212,7 @@ int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
return 0;
}
int filtergraph_is_simple(FilterGraph *fg)
{
return !fg->graph_desc;
}

File diff suppressed because it is too large.

@ -35,8 +35,10 @@
#include "libavutil/bprint.h"
#include "libavutil/display.h"
#include "libavutil/hash.h"
#include "libavutil/mastering_display_metadata.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/spherical.h"
#include "libavutil/stereo3d.h"
#include "libavutil/dict.h"
#include "libavutil/intreadwrite.h"
@ -48,6 +50,19 @@
#include "libswresample/swresample.h"
#include "libpostproc/postprocess.h"
#include "cmdutils.h"
#include "libavutil/thread.h"
#if !HAVE_THREADS
# ifdef pthread_mutex_lock
# undef pthread_mutex_lock
# endif
# define pthread_mutex_lock(a) do{}while(0)
# ifdef pthread_mutex_unlock
# undef pthread_mutex_unlock
# endif
# define pthread_mutex_unlock(a) do{}while(0)
#endif
#include "ffprobe.h"
typedef struct InputStream {
@ -85,6 +100,7 @@ static int do_show_library_versions = 0;
static int do_show_pixel_formats = 0;
static int do_show_pixel_format_flags = 0;
static int do_show_pixel_format_components = 0;
static int do_show_log = 0;
static int do_show_chapter_tags = 0;
static int do_show_format_tags = 0;
@ -114,6 +130,8 @@ typedef struct ReadInterval {
static ReadInterval *read_intervals;
static int read_intervals_nb = 0;
static int find_stream_info = 1;
/* section structure definition */
#define SECTION_MAX_NB_CHILDREN 10
@ -147,6 +165,10 @@ typedef enum {
SECTION_ID_FRAME_TAGS,
SECTION_ID_FRAME_SIDE_DATA_LIST,
SECTION_ID_FRAME_SIDE_DATA,
SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST,
SECTION_ID_FRAME_SIDE_DATA_TIMECODE,
SECTION_ID_FRAME_LOG,
SECTION_ID_FRAME_LOGS,
SECTION_ID_LIBRARY_VERSION,
SECTION_ID_LIBRARY_VERSIONS,
SECTION_ID_PACKET,
@ -186,17 +208,21 @@ static struct section sections[] = {
[SECTION_ID_FORMAT] = { SECTION_ID_FORMAT, "format", 0, { SECTION_ID_FORMAT_TAGS, -1 } },
[SECTION_ID_FORMAT_TAGS] = { SECTION_ID_FORMAT_TAGS, "tags", SECTION_FLAG_HAS_VARIABLE_FIELDS, { -1 }, .element_name = "tag", .unique_name = "format_tags" },
[SECTION_ID_FRAMES] = { SECTION_ID_FRAMES, "frames", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME, SECTION_ID_SUBTITLE, -1 } },
[SECTION_ID_FRAME] = { SECTION_ID_FRAME, "frame", 0, { SECTION_ID_FRAME_TAGS, SECTION_ID_FRAME_SIDE_DATA_LIST, -1 } },
[SECTION_ID_FRAME] = { SECTION_ID_FRAME, "frame", 0, { SECTION_ID_FRAME_TAGS, SECTION_ID_FRAME_SIDE_DATA_LIST, SECTION_ID_FRAME_LOGS, -1 } },
[SECTION_ID_FRAME_TAGS] = { SECTION_ID_FRAME_TAGS, "tags", SECTION_FLAG_HAS_VARIABLE_FIELDS, { -1 }, .element_name = "tag", .unique_name = "frame_tags" },
[SECTION_ID_FRAME_SIDE_DATA_LIST] ={ SECTION_ID_FRAME_SIDE_DATA_LIST, "side_data_list", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME_SIDE_DATA, -1 } },
[SECTION_ID_FRAME_SIDE_DATA] = { SECTION_ID_FRAME_SIDE_DATA, "side_data", 0, { -1 } },
[SECTION_ID_FRAME_SIDE_DATA_LIST] ={ SECTION_ID_FRAME_SIDE_DATA_LIST, "side_data_list", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME_SIDE_DATA, -1 }, .element_name = "side_data", .unique_name = "frame_side_data_list" },
[SECTION_ID_FRAME_SIDE_DATA] = { SECTION_ID_FRAME_SIDE_DATA, "side_data", 0, { SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST, -1 } },
[SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST] = { SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST, "timecodes", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME_SIDE_DATA_TIMECODE, -1 } },
[SECTION_ID_FRAME_SIDE_DATA_TIMECODE] = { SECTION_ID_FRAME_SIDE_DATA_TIMECODE, "timecode", 0, { -1 } },
[SECTION_ID_FRAME_LOGS] = { SECTION_ID_FRAME_LOGS, "logs", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME_LOG, -1 } },
[SECTION_ID_FRAME_LOG] = { SECTION_ID_FRAME_LOG, "log", 0, { -1 }, },
[SECTION_ID_LIBRARY_VERSIONS] = { SECTION_ID_LIBRARY_VERSIONS, "library_versions", SECTION_FLAG_IS_ARRAY, { SECTION_ID_LIBRARY_VERSION, -1 } },
[SECTION_ID_LIBRARY_VERSION] = { SECTION_ID_LIBRARY_VERSION, "library_version", 0, { -1 } },
[SECTION_ID_PACKETS] = { SECTION_ID_PACKETS, "packets", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PACKET, -1} },
[SECTION_ID_PACKETS_AND_FRAMES] = { SECTION_ID_PACKETS_AND_FRAMES, "packets_and_frames", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PACKET, -1} },
[SECTION_ID_PACKET] = { SECTION_ID_PACKET, "packet", 0, { SECTION_ID_PACKET_TAGS, SECTION_ID_PACKET_SIDE_DATA_LIST, -1 } },
[SECTION_ID_PACKET_TAGS] = { SECTION_ID_PACKET_TAGS, "tags", SECTION_FLAG_HAS_VARIABLE_FIELDS, { -1 }, .element_name = "tag", .unique_name = "packet_tags" },
[SECTION_ID_PACKET_SIDE_DATA_LIST] ={ SECTION_ID_PACKET_SIDE_DATA_LIST, "side_data_list", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PACKET_SIDE_DATA, -1 } },
[SECTION_ID_PACKET_SIDE_DATA_LIST] ={ SECTION_ID_PACKET_SIDE_DATA_LIST, "side_data_list", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PACKET_SIDE_DATA, -1 }, .element_name = "side_data", .unique_name = "packet_side_data_list" },
[SECTION_ID_PACKET_SIDE_DATA] = { SECTION_ID_PACKET_SIDE_DATA, "side_data", 0, { -1 } },
[SECTION_ID_PIXEL_FORMATS] = { SECTION_ID_PIXEL_FORMATS, "pixel_formats", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PIXEL_FORMAT, -1 } },
[SECTION_ID_PIXEL_FORMAT] = { SECTION_ID_PIXEL_FORMAT, "pixel_format", 0, { SECTION_ID_PIXEL_FORMAT_FLAGS, SECTION_ID_PIXEL_FORMAT_COMPONENTS, -1 } },
@ -219,7 +245,7 @@ static struct section sections[] = {
[SECTION_ID_STREAM] = { SECTION_ID_STREAM, "stream", 0, { SECTION_ID_STREAM_DISPOSITION, SECTION_ID_STREAM_TAGS, SECTION_ID_STREAM_SIDE_DATA_LIST, -1 } },
[SECTION_ID_STREAM_DISPOSITION] = { SECTION_ID_STREAM_DISPOSITION, "disposition", 0, { -1 }, .unique_name = "stream_disposition" },
[SECTION_ID_STREAM_TAGS] = { SECTION_ID_STREAM_TAGS, "tags", SECTION_FLAG_HAS_VARIABLE_FIELDS, { -1 }, .element_name = "tag", .unique_name = "stream_tags" },
[SECTION_ID_STREAM_SIDE_DATA_LIST] ={ SECTION_ID_STREAM_SIDE_DATA_LIST, "side_data_list", SECTION_FLAG_IS_ARRAY, { SECTION_ID_STREAM_SIDE_DATA, -1 } },
[SECTION_ID_STREAM_SIDE_DATA_LIST] ={ SECTION_ID_STREAM_SIDE_DATA_LIST, "side_data_list", SECTION_FLAG_IS_ARRAY, { SECTION_ID_STREAM_SIDE_DATA, -1 }, .element_name = "side_data", .unique_name = "stream_side_data_list" },
[SECTION_ID_STREAM_SIDE_DATA] = { SECTION_ID_STREAM_SIDE_DATA, "side_data", 0, { -1 } },
[SECTION_ID_SUBTITLE] = { SECTION_ID_SUBTITLE, "subtitle", 0, { -1 } },
};
@ -256,6 +282,21 @@ static uint64_t *nb_streams_packets;
static uint64_t *nb_streams_frames;
static int *selected_streams;
#if HAVE_THREADS
pthread_mutex_t log_mutex;
#endif
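/* One captured log line, together with the AVClass context (and its parent)
 * that emitted it, so show_log() can later attribute messages to the frame
 * being decoded. */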
typedef struct LogBuffer {
char *context_name;
int log_level;
char *log_message;
AVClassCategory category;
char *parent_name;
AVClassCategory parent_category;
} LogBuffer;
static LogBuffer *log_buffer;
static int log_buffer_size;
static int buffer_length = 0;
static int buffer_size = 256 * 1024;
char *print_buffer = NULL;
@ -329,6 +370,54 @@ void frank_printf_json(char *fmt, ...) {
va_end(args);
}
static void log_callback(void *ptr, int level, const char *fmt, va_list vl)
{
AVClass* avc = ptr ? *(AVClass **) ptr : NULL;
va_list vl2;
char line[1024];
static int print_prefix = 1;
void *new_log_buffer;
va_copy(vl2, vl);
av_log_default_callback(ptr, level, fmt, vl);
av_log_format_line(ptr, level, fmt, vl2, line, sizeof(line), &print_prefix);
va_end(vl2);
#if HAVE_THREADS
pthread_mutex_lock(&log_mutex);
new_log_buffer = av_realloc_array(log_buffer, log_buffer_size + 1, sizeof(*log_buffer));
if (new_log_buffer) {
char *msg;
int i;
log_buffer = new_log_buffer;
memset(&log_buffer[log_buffer_size], 0, sizeof(log_buffer[log_buffer_size]));
log_buffer[log_buffer_size].context_name= avc ? av_strdup(avc->item_name(ptr)) : NULL;
if (avc) {
if (avc->get_category) log_buffer[log_buffer_size].category = avc->get_category(ptr);
else log_buffer[log_buffer_size].category = avc->category;
}
log_buffer[log_buffer_size].log_level = level;
msg = log_buffer[log_buffer_size].log_message = av_strdup(line);
for (i=strlen(msg) - 1; i>=0 && msg[i] == '\n'; i--) {
msg[i] = 0;
}
if (avc && avc->parent_log_context_offset) {
AVClass** parent = *(AVClass ***) (((uint8_t *) ptr) +
avc->parent_log_context_offset);
if (parent && *parent) {
log_buffer[log_buffer_size].parent_name = av_strdup((*parent)->item_name(parent));
log_buffer[log_buffer_size].parent_category =
(*parent)->get_category ? (*parent)->get_category(parent) :(*parent)->category;
}
}
log_buffer_size ++;
}
pthread_mutex_unlock(&log_mutex);
#endif
}
static void ffprobe_cleanup(int ret)
{
@ -337,6 +426,9 @@ static void ffprobe_cleanup(int ret)
av_dict_free(&(sections[i].entries_to_show));
buffer_length = 0;
#if HAVE_THREADS
pthread_mutex_destroy(&log_mutex);
#endif
}
struct unit_value {
@ -1837,6 +1929,7 @@ static inline int show_tags(WriterContext *w, AVDictionary *tags, int section_id
}
static void print_pkt_side_data(WriterContext *w,
AVCodecParameters *par,
const AVPacketSideData *side_data,
int nb_side_data,
SectionID id_data_list,
@ -1844,32 +1937,77 @@ static void print_pkt_side_data(WriterContext *w,
{
int i;
writer_print_section_header(w, SECTION_ID_STREAM_SIDE_DATA_LIST);
writer_print_section_header(w, id_data_list);
for (i = 0; i < nb_side_data; i++) {
const AVPacketSideData *sd = &side_data[i];
const char *name = av_packet_side_data_name(sd->type);
writer_print_section_header(w, SECTION_ID_STREAM_SIDE_DATA);
writer_print_section_header(w, id_data);
print_str("side_data_type", name ? name : "unknown");
print_int("side_data_size", sd->size);
if (sd->type == AV_PKT_DATA_DISPLAYMATRIX && sd->size >= 9*4) {
writer_print_integers(w, "displaymatrix", sd->data, 9, " %11d", 3, 4, 1);
print_int("rotation", av_display_rotation_get((int32_t *)sd->data));
} else if (sd->type == AV_PKT_DATA_STEREO3D) {
const AVStereo3D *stereo = (AVStereo3D *)sd->data;
// print_str("type", av_stereo3d_type_name(stereo->type));//TODO
print_str("type", av_stereo3d_type_name(stereo->type));
print_int("inverted", !!(stereo->flags & AV_STEREO3D_FLAG_INVERT));
} else if (sd->type == AV_PKT_DATA_SPHERICAL) {
const AVSphericalMapping *spherical = (AVSphericalMapping *)sd->data;
print_str("projection", av_spherical_projection_name(spherical->projection));
if (spherical->projection == AV_SPHERICAL_CUBEMAP) {
print_int("padding", spherical->padding);
} else if (spherical->projection == AV_SPHERICAL_EQUIRECTANGULAR_TILE) {
size_t l, t, r, b;
av_spherical_tile_bounds(spherical, par->width, par->height,
&l, &t, &r, &b);
print_int("bound_left", l);
print_int("bound_top", t);
print_int("bound_right", r);
print_int("bound_bottom", b);
}
print_int("yaw", (double) spherical->yaw / (1 << 16));
print_int("pitch", (double) spherical->pitch / (1 << 16));
print_int("roll", (double) spherical->roll / (1 << 16));
} else if (sd->type == AV_PKT_DATA_SKIP_SAMPLES && sd->size == 10) {
print_int("skip_samples", AV_RL32(sd->data));
print_int("discard_padding", AV_RL32(sd->data + 4));
print_int("skip_reason", AV_RL8(sd->data + 8));
print_int("discard_reason", AV_RL8(sd->data + 9));
} else if (sd->type == AV_PKT_DATA_MASTERING_DISPLAY_METADATA) {
AVMasteringDisplayMetadata *metadata = (AVMasteringDisplayMetadata *)sd->data;
if (metadata->has_primaries) {
print_q("red_x", metadata->display_primaries[0][0], '/');
print_q("red_y", metadata->display_primaries[0][1], '/');
print_q("green_x", metadata->display_primaries[1][0], '/');
print_q("green_y", metadata->display_primaries[1][1], '/');
print_q("blue_x", metadata->display_primaries[2][0], '/');
print_q("blue_y", metadata->display_primaries[2][1], '/');
print_q("white_point_x", metadata->white_point[0], '/');
print_q("white_point_y", metadata->white_point[1], '/');
}
if (metadata->has_luminance) {
print_q("min_luminance", metadata->min_luminance, '/');
print_q("max_luminance", metadata->max_luminance, '/');
}
} else if (sd->type == AV_PKT_DATA_CONTENT_LIGHT_LEVEL) {
AVContentLightMetadata *metadata = (AVContentLightMetadata *)sd->data;
print_int("max_content", metadata->MaxCLL);
print_int("max_average", metadata->MaxFALL);
}
writer_print_section_footer(w);
}
writer_print_section_footer(w);
}
static void print_color_range(WriterContext *w, enum AVColorRange color_range, const char *fallback)
static void print_color_range(WriterContext *w, enum AVColorRange color_range)
{
const char *val = av_color_range_name(color_range);
if (!val || color_range == AVCOL_RANGE_UNSPECIFIED) {
print_str_opt("color_range", fallback);
print_str_opt("color_range", "unknown");
} else {
print_str("color_range", val);
}
@ -1915,6 +2053,58 @@ static void print_chroma_location(WriterContext *w, enum AVChromaLocation chroma
}
}
static void clear_log(int need_lock)
{
int i;
if (need_lock)
pthread_mutex_lock(&log_mutex);
for (i=0; i<log_buffer_size; i++) {
av_freep(&log_buffer[i].context_name);
av_freep(&log_buffer[i].parent_name);
av_freep(&log_buffer[i].log_message);
}
log_buffer_size = 0;
if(need_lock)
pthread_mutex_unlock(&log_mutex);
}
static int show_log(WriterContext *w, int section_ids, int section_id, int log_level)
{
int i;
pthread_mutex_lock(&log_mutex);
if (!log_buffer_size) {
pthread_mutex_unlock(&log_mutex);
return 0;
}
writer_print_section_header(w, section_ids);
for (i=0; i<log_buffer_size; i++) {
if (log_buffer[i].log_level <= log_level) {
writer_print_section_header(w, section_id);
print_str("context", log_buffer[i].context_name);
print_int("level", log_buffer[i].log_level);
print_int("category", log_buffer[i].category);
if (log_buffer[i].parent_name) {
print_str("parent_context", log_buffer[i].parent_name);
print_int("parent_category", log_buffer[i].parent_category);
} else {
print_str_opt("parent_context", "N/A");
print_str_opt("parent_category", "N/A");
}
print_str("message", log_buffer[i].log_message);
writer_print_section_footer(w);
}
}
clear_log(0);
pthread_mutex_unlock(&log_mutex);
writer_print_section_footer(w);
return 0;
}
static void show_packet(WriterContext *w, InputFile *ifile, AVPacket *pkt, int packet_idx)
{
char val_str[128];
@ -1926,7 +2116,7 @@ static void show_packet(WriterContext *w, InputFile *ifile, AVPacket *pkt, int p
writer_print_section_header(w, SECTION_ID_PACKET);
s = av_get_media_type_string(st->codec->codec_type);
s = av_get_media_type_string(st->codecpar->codec_type);
if (s) print_str ("codec_type", s);
else print_str_opt("codec_type", "unknown");
print_int("stream_index", pkt->stream_index);
@ -1941,9 +2131,8 @@ static void show_packet(WriterContext *w, InputFile *ifile, AVPacket *pkt, int p
print_val("size", pkt->size, unit_byte_str);
if (pkt->pos != -1) print_fmt ("pos", "%"PRId64, pkt->pos);
else print_str_opt("pos", "N/A");
// print_fmt("flags", "%c%c", pkt->flags & AV_PKT_FLAG_KEY ? 'K' : '_',
// pkt->flags & AV_PKT_FLAG_DISCARD ? 'D' : '_');
print_fmt("flags", "%c", pkt->flags & AV_PKT_FLAG_KEY ? 'K' : '_');
print_fmt("flags", "%c%c", pkt->flags & AV_PKT_FLAG_KEY ? 'K' : '_',
pkt->flags & AV_PKT_FLAG_DISCARD ? 'D' : '_');
if (pkt->side_data_elems) {
int size;
@ -1957,7 +2146,7 @@ static void show_packet(WriterContext *w, InputFile *ifile, AVPacket *pkt, int p
av_dict_free(&dict);
}
print_pkt_side_data(w, pkt->side_data, pkt->side_data_elems,
print_pkt_side_data(w, st->codecpar, pkt->side_data, pkt->side_data_elems,
SECTION_ID_PACKET_SIDE_DATA_LIST,
SECTION_ID_PACKET_SIDE_DATA);
}
@ -2006,7 +2195,7 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
writer_print_section_header(w, SECTION_ID_FRAME);
s = av_get_media_type_string(stream->codec->codec_type);
s = av_get_media_type_string(stream->codecpar->codec_type);
if (s) print_str ("media_type", s);
else print_str_opt("media_type", "unknown");
print_int("stream_index", stream->index);
@ -2015,16 +2204,16 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
print_time("pkt_pts_time", frame->pts, &stream->time_base);
print_ts ("pkt_dts", frame->pkt_dts);
print_time("pkt_dts_time", frame->pkt_dts, &stream->time_base);
print_ts ("best_effort_timestamp", av_frame_get_best_effort_timestamp(frame));
print_time("best_effort_timestamp_time", av_frame_get_best_effort_timestamp(frame), &stream->time_base);
print_duration_ts ("pkt_duration", av_frame_get_pkt_duration(frame));
print_duration_time("pkt_duration_time", av_frame_get_pkt_duration(frame), &stream->time_base);
if (av_frame_get_pkt_pos (frame) != -1) print_fmt ("pkt_pos", "%"PRId64, av_frame_get_pkt_pos(frame));
print_ts ("best_effort_timestamp", frame->best_effort_timestamp);
print_time("best_effort_timestamp_time", frame->best_effort_timestamp, &stream->time_base);
print_duration_ts ("pkt_duration", frame->pkt_duration);
print_duration_time("pkt_duration_time", frame->pkt_duration, &stream->time_base);
if (frame->pkt_pos != -1) print_fmt ("pkt_pos", "%"PRId64, frame->pkt_pos);
else print_str_opt("pkt_pos", "N/A");
if (av_frame_get_pkt_size(frame) != -1) print_val ("pkt_size", av_frame_get_pkt_size(frame), unit_byte_str);
if (frame->pkt_size != -1) print_val ("pkt_size", frame->pkt_size, unit_byte_str);
else print_str_opt("pkt_size", "N/A");
switch (stream->codec->codec_type) {
switch (stream->codecpar->codec_type) {
AVRational sar;
case AVMEDIA_TYPE_VIDEO:
@ -2045,6 +2234,12 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
print_int("interlaced_frame", frame->interlaced_frame);
print_int("top_field_first", frame->top_field_first);
print_int("repeat_pict", frame->repeat_pict);
print_color_range(w, frame->color_range);
print_color_space(w, frame->colorspace);
print_primaries(w, frame->color_primaries);
print_color_trc(w, frame->color_trc);
print_chroma_location(w, frame->chroma_location);
break;
case AVMEDIA_TYPE_AUDIO:
@ -2052,18 +2247,20 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
if (s) print_str ("sample_fmt", s);
else print_str_opt("sample_fmt", "unknown");
print_int("nb_samples", frame->nb_samples);
print_int("channels", av_frame_get_channels(frame));
if (av_frame_get_channel_layout(frame)) {
print_int("channels", frame->channels);
if (frame->channel_layout) {
av_bprint_clear(&pbuf);
av_bprint_channel_layout(&pbuf, av_frame_get_channels(frame),
av_frame_get_channel_layout(frame));
av_bprint_channel_layout(&pbuf, frame->channels,
frame->channel_layout);
print_str ("channel_layout", pbuf.str);
} else
print_str_opt("channel_layout", "unknown");
break;
}
if (do_show_frame_tags)
show_tags(w, av_frame_get_metadata(frame), SECTION_ID_FRAME_TAGS);
show_tags(w, frame->metadata, SECTION_ID_FRAME_TAGS);
if (do_show_log)
show_log(w, SECTION_ID_FRAME_LOGS, SECTION_ID_FRAME_LOG, do_show_log);
if (frame->nb_side_data) {
writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA_LIST);
for (i = 0; i < frame->nb_side_data; i++) {
@ -2073,7 +2270,6 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA);
name = av_frame_side_data_name(sd->type);
print_str("side_data_type", name ? name : "unknown");
print_int("side_data_size", sd->size);
if (sd->type == AV_FRAME_DATA_DISPLAYMATRIX && sd->size >= 9*4) {
writer_print_integers(w, "displaymatrix", sd->data, 9, " %11d", 3, 4, 1);
print_int("rotation", av_display_rotation_get((int32_t *)sd->data));
@ -2081,6 +2277,46 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
char tcbuf[AV_TIMECODE_STR_SIZE];
av_timecode_make_mpeg_tc_string(tcbuf, *(int64_t *)(sd->data));
print_str("timecode", tcbuf);
} else if (sd->type == AV_FRAME_DATA_S12M_TIMECODE && sd->size == 16) {
uint32_t *tc = (uint32_t*)sd->data;
int m = FFMIN(tc[0],3);
writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA_TIMECODE_LIST);
for (int j = 1; j <= m ; j++) {
char tcbuf[AV_TIMECODE_STR_SIZE];
av_timecode_make_smpte_tc_string(tcbuf, tc[j], 0);
writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA_TIMECODE);
print_str("value", tcbuf);
writer_print_section_footer(w);
}
writer_print_section_footer(w);
} else if (sd->type == AV_FRAME_DATA_MASTERING_DISPLAY_METADATA) {
AVMasteringDisplayMetadata *metadata = (AVMasteringDisplayMetadata *)sd->data;
if (metadata->has_primaries) {
print_q("red_x", metadata->display_primaries[0][0], '/');
print_q("red_y", metadata->display_primaries[0][1], '/');
print_q("green_x", metadata->display_primaries[1][0], '/');
print_q("green_y", metadata->display_primaries[1][1], '/');
print_q("blue_x", metadata->display_primaries[2][0], '/');
print_q("blue_y", metadata->display_primaries[2][1], '/');
print_q("white_point_x", metadata->white_point[0], '/');
print_q("white_point_y", metadata->white_point[1], '/');
}
if (metadata->has_luminance) {
print_q("min_luminance", metadata->min_luminance, '/');
print_q("max_luminance", metadata->max_luminance, '/');
}
} else if (sd->type == AV_FRAME_DATA_CONTENT_LIGHT_LEVEL) {
AVContentLightMetadata *metadata = (AVContentLightMetadata *)sd->data;
print_int("max_content", metadata->MaxCLL);
print_int("max_average", metadata->MaxFALL);
} else if (sd->type == AV_FRAME_DATA_ICC_PROFILE) {
AVDictionaryEntry *tag = av_dict_get(sd->metadata, "name", NULL, AV_DICT_MATCH_CASE);
if (tag)
print_str(tag->key, tag->value);
print_int("size", sd->size);
}
writer_print_section_footer(w);
}
@ -2095,36 +2331,55 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
static av_always_inline int process_frame(WriterContext *w,
InputFile *ifile,
AVFrame *frame, AVPacket *pkt)
AVFrame *frame, AVPacket *pkt,
int *packet_new)
{
AVFormatContext *fmt_ctx = ifile->fmt_ctx;
AVCodecContext *dec_ctx = ifile->streams[pkt->stream_index].dec_ctx;
AVCodecParameters *par = ifile->streams[pkt->stream_index].st->codecpar;
AVSubtitle sub;
int ret = 0, got_frame = 0;
clear_log(1);
if (dec_ctx && dec_ctx->codec) {
switch (dec_ctx->codec_type) {
switch (par->codec_type) {
case AVMEDIA_TYPE_VIDEO:
ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, pkt);
break;
case AVMEDIA_TYPE_AUDIO:
ret = avcodec_decode_audio4(dec_ctx, frame, &got_frame, pkt);
if (*packet_new) {
ret = avcodec_send_packet(dec_ctx, pkt);
if (ret == AVERROR(EAGAIN)) {
ret = 0;
} else if (ret >= 0 || ret == AVERROR_EOF) {
ret = 0;
*packet_new = 0;
}
}
if (ret >= 0) {
ret = avcodec_receive_frame(dec_ctx, frame);
if (ret >= 0) {
got_frame = 1;
} else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
ret = 0;
}
}
break;
case AVMEDIA_TYPE_SUBTITLE:
if (*packet_new)
ret = avcodec_decode_subtitle2(dec_ctx, &sub, &got_frame, pkt);
*packet_new = 0;
break;
default:
*packet_new = 0;
}
} else {
*packet_new = 0;
}
if (ret < 0)
return ret;
ret = FFMIN(ret, pkt->size); /* guard against bogus return values */
pkt->data += ret;
pkt->size -= ret;
if (got_frame) {
int is_sub = (dec_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE);
int is_sub = (par->codec_type == AVMEDIA_TYPE_SUBTITLE);
nb_streams_frames[pkt->stream_index]++;
if (do_show_frames)
if (is_sub)
@ -2134,7 +2389,7 @@ static av_always_inline int process_frame(WriterContext *w,
if (is_sub)
avsubtitle_free(&sub);
}
return got_frame;
return got_frame || *packet_new;
}
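/*
 * Minimal sketch of the send/receive decoding pattern used above, assuming a
 * video or audio decoder; one packet may yield zero or more frames, and a
 * NULL (or empty) packet starts draining. The function name is hypothetical.
 */
static int example_decode_packet(AVCodecContext *dec_ctx, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
        return ret;
    for (;;) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;               /* need more input, or decoder fully drained */
        if (ret < 0)
            return ret;
        /* a real caller would hand the frame to show_frame() here */
        av_frame_unref(frame);
    }
}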
static void log_read_interval(const ReadInterval *interval, void *log_ctx, int log_level)
@ -2165,7 +2420,7 @@ static int read_interval_packets(WriterContext *w, InputFile *ifile,
const ReadInterval *interval, int64_t *cur_ts)
{
AVFormatContext *fmt_ctx = ifile->fmt_ctx;
AVPacket pkt, pkt1;
AVPacket pkt;
AVFrame *frame = NULL;
int ret = 0, i = 0, frame_count = 0;
int64_t start = -INT64_MAX, end = interval->end;
@ -2206,11 +2461,11 @@ static int read_interval_packets(WriterContext *w, InputFile *ifile,
goto end;
}
while (!av_read_frame(fmt_ctx, &pkt)) {
if (ifile->nb_streams > nb_streams) {
if (fmt_ctx->nb_streams > nb_streams) {
REALLOCZ_ARRAY_STREAM(nb_streams_frames, nb_streams, fmt_ctx->nb_streams);
REALLOCZ_ARRAY_STREAM(nb_streams_packets, nb_streams, fmt_ctx->nb_streams);
REALLOCZ_ARRAY_STREAM(selected_streams, nb_streams, fmt_ctx->nb_streams);
nb_streams = ifile->nb_streams;
nb_streams = fmt_ctx->nb_streams;
}
if (selected_streams[pkt.stream_index]) {
AVRational tb = ifile->streams[pkt.stream_index].st->time_base;
@ -2242,20 +2497,18 @@ static int read_interval_packets(WriterContext *w, InputFile *ifile,
nb_streams_packets[pkt.stream_index]++;
}
if (do_read_frames) {
pkt1 = pkt;
while (pkt1.size && process_frame(w, ifile, frame, &pkt1) > 0);
int packet_new = 1;
while (process_frame(w, ifile, frame, &pkt, &packet_new) > 0);
}
}
av_packet_unref(&pkt);
}
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
av_packet_unref(&pkt);
// Flush remaining frames that are cached in the decoder
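// An av_packet_unref()'d packet has data == NULL and size == 0; sending it is
// equivalent to passing NULL to avcodec_send_packet(), which puts the decoder
// into draining mode so buffered frames are still returned.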
for (i = 0; i < fmt_ctx->nb_streams; i++) {
pkt.stream_index = i;
if (do_read_frames)
while (process_frame(w, ifile, frame, &pkt) > 0);
while (process_frame(w, ifile, frame, &pkt, &(int){1}) > 0);
}
end:
@ -2290,6 +2543,7 @@ static int read_packets(WriterContext *w, InputFile *ifile)
static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_idx, InputStream *ist, int in_program)
{
AVStream *stream = ist->st;
AVCodecParameters *par;
AVCodecContext *dec_ctx;
char val_str[128];
const char *s;
@ -2305,8 +2559,9 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
print_int("index", stream->index);
par = stream->codecpar;
dec_ctx = ist->dec_ctx;
if (cd == avcodec_descriptor_get(dec_ctx->codec_id)) {
if (cd = avcodec_descriptor_get(par->codec_id)) {
print_str("codec_name", cd->name);
if (!do_bitexact) {
print_str("codec_long_name",
@ -2319,18 +2574,18 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
}
}
if (!do_bitexact && (profile = avcodec_profile_name(dec_ctx->codec_id, dec_ctx->profile)))
if (!do_bitexact && (profile = avcodec_profile_name(par->codec_id, par->profile)))
print_str("profile", profile);
else {
if (dec_ctx->profile != FF_PROFILE_UNKNOWN) {
if (par->profile != FF_PROFILE_UNKNOWN) {
char profile_num[12];
snprintf(profile_num, sizeof(profile_num), "%d", dec_ctx->profile);
snprintf(profile_num, sizeof(profile_num), "%d", par->profile);
print_str("profile", profile_num);
} else
print_str_opt("profile", "unknown");
}
s = av_get_media_type_string(dec_ctx->codec_type);
s = av_get_media_type_string(par->codec_type);
if (s) print_str ("codec_type", s);
else print_str_opt("codec_type", "unknown");
#if FF_API_LAVF_AVCTX
@ -2339,51 +2594,52 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
#endif
/* print AVI/FourCC tag */
av_get_codec_tag_string(val_str, sizeof(val_str), dec_ctx->codec_tag);
print_str("codec_tag_string", val_str);
print_fmt("codec_tag", "0x%04x", dec_ctx->codec_tag);
print_str("codec_tag_string", av_fourcc2str(par->codec_tag));
print_fmt("codec_tag", "0x%04"PRIx32, par->codec_tag);
switch (dec_ctx->codec_type) {
switch (par->codec_type) {
case AVMEDIA_TYPE_VIDEO:
print_int("width", dec_ctx->width);
print_int("height", dec_ctx->height);
print_int("width", par->width);
print_int("height", par->height);
#if FF_API_LAVF_AVCTX
if (dec_ctx) {
print_int("coded_width", dec_ctx->coded_width);
print_int("coded_height", dec_ctx->coded_height);
}
print_int("has_b_frames", dec_ctx->has_b_frames);
#endif
print_int("has_b_frames", par->video_delay);
sar = av_guess_sample_aspect_ratio(fmt_ctx, stream, NULL);
if (sar.den) {
if (sar.num) {
print_q("sample_aspect_ratio", sar, ':');
av_reduce(&dar.num, &dar.den,
dec_ctx->width * sar.num,
dec_ctx->height * sar.den,
par->width * sar.num,
par->height * sar.den,
1024*1024);
print_q("display_aspect_ratio", dar, ':');
} else {
print_str_opt("sample_aspect_ratio", "N/A");
print_str_opt("display_aspect_ratio", "N/A");
}
s = av_get_pix_fmt_name(dec_ctx->pix_fmt);
s = av_get_pix_fmt_name(par->format);
if (s) print_str ("pix_fmt", s);
else print_str_opt("pix_fmt", "unknown");
print_int("level", dec_ctx->level);
print_int("level", par->level);
print_color_range(w, dec_ctx->color_range, "N/A");
print_color_space(w, dec_ctx->colorspace);
print_color_trc(w, dec_ctx->color_trc);
print_primaries(w, dec_ctx->color_primaries);
print_chroma_location(w, dec_ctx->chroma_sample_location);
print_color_range(w, par->color_range);
print_color_space(w, par->color_space);
print_color_trc(w, par->color_trc);
print_primaries(w, par->color_primaries);
print_chroma_location(w, par->chroma_location);
if (dec_ctx->field_order == AV_FIELD_PROGRESSIVE)
if (par->field_order == AV_FIELD_PROGRESSIVE)
print_str("field_order", "progressive");
else if (dec_ctx->field_order == AV_FIELD_TT)
else if (par->field_order == AV_FIELD_TT)
print_str("field_order", "tt");
else if (dec_ctx->field_order == AV_FIELD_BB)
else if (par->field_order == AV_FIELD_BB)
print_str("field_order", "bb");
else if (dec_ctx->field_order == AV_FIELD_TB)
else if (par->field_order == AV_FIELD_TB)
print_str("field_order", "tb");
else if (dec_ctx->field_order == AV_FIELD_BT)
else if (par->field_order == AV_FIELD_BT)
print_str("field_order", "bt");
else
print_str_opt("field_order", "unknown");
@ -2402,30 +2658,30 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
break;
case AVMEDIA_TYPE_AUDIO:
s = av_get_sample_fmt_name(dec_ctx->pix_fmt);
s = av_get_sample_fmt_name(par->format);
if (s) print_str ("sample_fmt", s);
else print_str_opt("sample_fmt", "unknown");
print_val("sample_rate", dec_ctx->sample_rate, unit_hertz_str);
print_int("channels", dec_ctx->channels);
print_val("sample_rate", par->sample_rate, unit_hertz_str);
print_int("channels", par->channels);
if (dec_ctx->channel_layout) {
if (par->channel_layout) {
av_bprint_clear(&pbuf);
av_bprint_channel_layout(&pbuf, dec_ctx->channels, dec_ctx->channel_layout);
av_bprint_channel_layout(&pbuf, par->channels, par->channel_layout);
print_str ("channel_layout", pbuf.str);
} else {
print_str_opt("channel_layout", "unknown");
}
print_int("bits_per_sample", av_get_bits_per_sample(dec_ctx->codec_id));
print_int("bits_per_sample", av_get_bits_per_sample(par->codec_id));
break;
case AVMEDIA_TYPE_SUBTITLE:
if (dec_ctx->width)
print_int("width", dec_ctx->width);
if (par->width)
print_int("width", par->width);
else
print_str_opt("width", "N/A");
if (dec_ctx->height)
print_int("height", dec_ctx->height);
if (par->height)
print_int("height", par->height);
else
print_str_opt("height", "N/A");
break;
@ -2452,7 +2708,7 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
print_time("start_time", stream->start_time, &stream->time_base);
print_ts ("duration_ts", stream->duration);
print_time("duration", stream->duration, &stream->time_base);
if (dec_ctx->bit_rate > 0) print_val ("bit_rate", dec_ctx->bit_rate, unit_bit_per_second_str);
if (par->bit_rate > 0) print_val ("bit_rate", par->bit_rate, unit_bit_per_second_str);
else print_str_opt("bit_rate", "N/A");
#if FF_API_LAVF_AVCTX
if (stream->codec->rc_max_rate > 0) print_val ("max_bit_rate", stream->codec->rc_max_rate, unit_bit_per_second_str);
@ -2467,10 +2723,10 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
if (nb_streams_packets[stream_idx]) print_fmt ("nb_read_packets", "%"PRIu64, nb_streams_packets[stream_idx]);
else print_str_opt("nb_read_packets", "N/A");
if (do_show_data)
writer_print_data(w, "extradata", dec_ctx->extradata,
dec_ctx->extradata_size);
writer_print_data_hash(w, "extradata_hash", dec_ctx->extradata,
dec_ctx->extradata_size);
writer_print_data(w, "extradata", par->extradata,
par->extradata_size);
writer_print_data_hash(w, "extradata_hash", par->extradata,
par->extradata_size);
/* Print disposition information */
#define PRINT_DISPOSITION(flagname, name) do { \
@ -2490,6 +2746,7 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
PRINT_DISPOSITION(VISUAL_IMPAIRED, "visual_impaired");
PRINT_DISPOSITION(CLEAN_EFFECTS, "clean_effects");
PRINT_DISPOSITION(ATTACHED_PIC, "attached_pic");
PRINT_DISPOSITION(TIMED_THUMBNAILS, "timed_thumbnails");
writer_print_section_footer(w);
}
@ -2497,7 +2754,7 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
ret = show_tags(w, stream->metadata, in_program ? SECTION_ID_PROGRAM_STREAM_TAGS : SECTION_ID_STREAM_TAGS);
if (stream->nb_side_data) {
print_pkt_side_data(w, stream->side_data, stream->nb_side_data,
print_pkt_side_data(w, stream->codecpar, stream->side_data, stream->nb_side_data,
SECTION_ID_STREAM_SIDE_DATA_LIST,
SECTION_ID_STREAM_SIDE_DATA);
}
@ -2612,7 +2869,7 @@ static int show_format(WriterContext *w, InputFile *ifile)
int ret = 0;
writer_print_section_header(w, SECTION_ID_FORMAT);
print_str_validate("filename", fmt_ctx->filename);
print_str_validate("filename", fmt_ctx->url);
print_int("nb_streams", fmt_ctx->nb_streams);
print_int("nb_programs", fmt_ctx->nb_programs);
print_str("format_name", fmt_ctx->iformat->name);
@ -2626,7 +2883,7 @@ static int show_format(WriterContext *w, InputFile *ifile)
else print_str_opt("size", "N/A");
if (fmt_ctx->bit_rate > 0) print_val ("bit_rate", fmt_ctx->bit_rate, unit_bit_per_second_str);
else print_str_opt("bit_rate", "N/A");
print_int("probe_score", av_format_get_probe_score(fmt_ctx));
print_int("probe_score", fmt_ctx->probe_score);
if (do_show_format_tags)
ret = show_tags(w, fmt_ctx->metadata, SECTION_ID_FORMAT_TAGS);
@ -2651,12 +2908,17 @@ static void show_error(WriterContext *w, int err)
static int open_input_file(InputFile *ifile, const char *filename)
{
int err, i, orig_nb_streams;
int err, i;
AVFormatContext *fmt_ctx = NULL;
AVDictionaryEntry *t;
AVDictionary **opts;
int scan_all_pmts_set = 0;
fmt_ctx = avformat_alloc_context();
if (!fmt_ctx) {
print_error(filename, AVERROR(ENOMEM));
exit_program(1);
}
if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
scan_all_pmts_set = 1;
@ -2674,9 +2936,9 @@ static int open_input_file(InputFile *ifile, const char *filename)
return AVERROR_OPTION_NOT_FOUND;
}
/* fill the streams in the format context */
opts = setup_find_stream_info_opts(fmt_ctx, codec_opts);
orig_nb_streams = fmt_ctx->nb_streams;
if (find_stream_info) {
AVDictionary **opts = setup_find_stream_info_opts(fmt_ctx, codec_opts);
int orig_nb_streams = fmt_ctx->nb_streams;
err = avformat_find_stream_info(fmt_ctx, opts);
@ -2688,6 +2950,7 @@ static int open_input_file(InputFile *ifile, const char *filename)
print_error(filename, err);
return err;
}
}
av_dump_format(fmt_ctx, 0, filename, 0);
@ -2705,35 +2968,45 @@ static int open_input_file(InputFile *ifile, const char *filename)
ist->st = stream;
if (stream->codec->codec_id == AV_CODEC_ID_PROBE) {
if (stream->codecpar->codec_id == AV_CODEC_ID_PROBE) {
av_log(NULL, AV_LOG_WARNING,
"Failed to probe codec for input stream %d\n",
stream->index);
continue;
}
codec = avcodec_find_decoder(stream->codec->codec_id);
codec = avcodec_find_decoder(stream->codecpar->codec_id);
if (!codec) {
av_log(NULL, AV_LOG_WARNING,
"Unsupported codec with id %d for input stream %d\n",
stream->codec->codec_id, stream->index);
stream->codecpar->codec_id, stream->index);
continue;
}
{
AVDictionary *opts = filter_codec_opts(codec_opts, stream->codec->codec_id,
AVDictionary *opts = filter_codec_opts(codec_opts, stream->codecpar->codec_id,
fmt_ctx, stream, codec);
ist->dec_ctx = avcodec_alloc_context3(codec);
if (!ist->dec_ctx)
exit(1);
//TODO:copy context
err = avcodec_copy_context(ist->dec_ctx, stream->codec);
err = avcodec_parameters_to_context(ist->dec_ctx, stream->codecpar);
if (err < 0)
exit(1);
av_codec_set_pkt_timebase(ist->dec_ctx, stream->time_base);
if (do_show_log) {
// For logging it is necessary to disable at least frame threads, as otherwise
// the log information would need to be reordered and matched up to contexts and frames.
// That is in fact possible, but not trivial.
av_dict_set(&codec_opts, "threads", "1", 0);
}
ist->dec_ctx->pkt_timebase = stream->time_base;
ist->dec_ctx->framerate = stream->avg_frame_rate;
#if FF_API_LAVF_AVCTX
ist->dec_ctx->coded_width = stream->codec->coded_width;
ist->dec_ctx->coded_height = stream->codec->coded_height;
#endif
if (avcodec_open2(ist->dec_ctx, codec, &opts) < 0) {
av_log(NULL, AV_LOG_WARNING, "Could not open codec for input stream %d\n",
@ -2759,7 +3032,7 @@ static void close_input_file(InputFile *ifile)
/* close decoder for each stream */
for (i = 0; i < ifile->nb_streams; i++)
if (ifile->streams[i].st->codec->codec_id != AV_CODEC_ID_NONE)
if (ifile->streams[i].st->codecpar->codec_id != AV_CODEC_ID_NONE)
avcodec_free_context(&ifile->streams[i].dec_ctx);
av_freep(&ifile->streams);
@ -2800,6 +3073,8 @@ static int probe_file(WriterContext *wctx, const char *filename)
} else {
selected_streams[i] = 1;
}
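/* Streams that were not selected are discarded at the demuxer level:
 * AVDISCARD_ALL lets the demuxer drop their packets, so they are neither
 * counted nor decoded below. */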
if (!selected_streams[i])
ifile.fmt_ctx->streams[i]->discard = AVDISCARD_ALL;
}
if (do_read_frames || do_read_packets) {
@ -2849,7 +3124,7 @@ end:
static void show_usage(void)
{
av_log(NULL, AV_LOG_INFO, "Simple multimedia streams analyzer\n");
av_log(NULL, AV_LOG_INFO, "usage: %s [OPTIONS] [INPUT_FILE]\n", ffprobe_program_name);
av_log(NULL, AV_LOG_INFO, "usage: %s [OPTIONS] [INPUT_FILE]\n", program_name);
av_log(NULL, AV_LOG_INFO, "\n");
}
@ -2930,7 +3205,9 @@ static void ffprobe_show_pixel_formats(WriterContext *w)
PRINT_PIX_FMT_FLAG(HWACCEL, "hwaccel");
PRINT_PIX_FMT_FLAG(PLANAR, "planar");
PRINT_PIX_FMT_FLAG(RGB, "rgb");
#if FF_API_PSEUDOPAL
PRINT_PIX_FMT_FLAG(PSEUDOPAL, "pseudopal");
#endif
PRINT_PIX_FMT_FLAG(ALPHA, "alpha");
writer_print_section_footer(w);
}
@ -3086,6 +3363,7 @@ void show_ffprobe_help_default(const char *opt, const char *arg)
printf("\n");
show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
}
/**
@ -3158,6 +3436,7 @@ static int parse_read_interval(const char *interval_spec,
}
interval->end = lli;
} else {
interval->duration_frames = 0;
ret = av_parse_time(&us, p, 1);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Invalid interval end/duration specification '%s'\n", p);
@ -3291,7 +3570,7 @@ DEFINE_OPT_SHOW_SECTION(streams, STREAMS)
DEFINE_OPT_SHOW_SECTION(programs, PROGRAMS)
static const OptionDef real_options[] = {
#include "cmdutils_common_opts.h"
CMDUTILS_COMMON_OPTIONS
{ "f", HAS_ARG, {.func_arg = opt_format}, "force format", "format" },
{ "unit", OPT_BOOL, {&show_value_unit}, "show unit of the displayed values" },
{ "prefix", OPT_BOOL, {&use_value_prefix}, "use SI prefixes for the displayed values" },
@ -3315,6 +3594,9 @@ static const OptionDef real_options[] = {
"show a particular entry from the format/container info", "entry" },
{ "show_entries", HAS_ARG, {.func_arg = opt_show_entries},
"show a set of specified entries", "entry_list" },
#if HAVE_THREADS
{ "show_log", OPT_INT|HAS_ARG, {(void*)&do_show_log}, "show log" },
#endif
{ "show_packets", 0, {(void*)&opt_show_packets}, "show packets info" },
{ "show_programs", 0, {(void*)&opt_show_programs}, "show programs info" },
{ "show_streams", 0, {(void*)&opt_show_streams}, "show streams info" },
@ -3331,6 +3613,8 @@ static const OptionDef real_options[] = {
{ "read_intervals", HAS_ARG, {.func_arg = opt_read_intervals}, "set read intervals", "read_intervals" },
{ "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {.func_arg = opt_default}, "generic catch all option", "" },
{ "i", HAS_ARG, {.func_arg = opt_input_file_i}, "read specified file", "input_file"},
{ "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
"read and decode the streams to fill missing information with heuristics" },
{ NULL, },
};
@ -3365,12 +3649,19 @@ char* ffprobe_run(int argc, char **argv)
}
memset(print_buffer, '\0', (size_t) buffer_size);
init_dynload();
#if HAVE_THREADS
ret = pthread_mutex_init(&log_mutex, NULL);
if (ret != 0) {
goto end;
}
#endif
av_log_set_flags(AV_LOG_SKIP_REPEATED);
register_exit(ffprobe_cleanup);
options = real_options;
parse_loglevel(argc, argv, options);
av_register_all();
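/* av_register_all() has been a deprecated no-op since FFmpeg 4.0; it is
 * harmless here and only matters when building against pre-4.0 libavformat. */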
avformat_network_init();
init_opts();
#if CONFIG_AVDEVICE
@ -3380,6 +3671,9 @@ char* ffprobe_run(int argc, char **argv)
show_banner(argc, argv, options);
parse_options(NULL, argc, argv, options, opt_input_file);
if (do_show_log)
av_log_set_callback(log_callback);
/* mark things to show, based on -show_entries */
SET_DO_SHOW(CHAPTERS, chapters);
SET_DO_SHOW(ERROR, error);
@ -3470,7 +3764,7 @@ char* ffprobe_run(int argc, char **argv)
(!do_show_program_version && !do_show_library_versions && !do_show_pixel_formats))) {
show_usage();
av_log(NULL, AV_LOG_ERROR, "You have to specify one input file.\n");
av_log(NULL, AV_LOG_ERROR, "Use -h to get full help or, even better, run 'man %s'.\n", ffprobe_program_name);
av_log(NULL, AV_LOG_ERROR, "Use -h to get full help or, even better, run 'man %s'.\n", program_name);
ret = AVERROR(EINVAL);
} else if (input_filename) {
ret = probe_file(wctx, input_filename);
