@@ -23,7 +23,6 @@
  * multimedia converter based on the FFmpeg libraries
  */
 
-#include "config.h"
 #include <ctype.h>
 #include <string.h>
 #include <math.h>
@@ -40,8 +39,13 @@
 #include <unistd.h>
 #endif
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 #include "libavformat/avformat.h"
 #include "libswresample/swresample.h"
+#include "libavutil/avassert.h"
 #include "libavutil/opt.h"
 #include "libavutil/channel_layout.h"
 #include "libavutil/parseutils.h"
@@ -69,6 +73,14 @@
 # include "libavfilter/buffersrc.h"
 # include "libavfilter/buffersink.h"
 
+#include "config.h"
+#include "ffmpeg.h"
+#include "cmdutils.h"
+
+#ifdef __cplusplus
+}
+#endif
+
 
 
@@ -99,11 +111,6 @@
 #endif
 
 #include <time.h>
-
-#include "ffmpeg.h"
-#include "cmdutils.h"
-
-#include "libavutil/avassert.h"
 #include <setjmp.h>
 
 jmp_buf jump_buf;
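
Note (illustrative sketch, not part of the patch): the include reshuffling in the four hunks above exists because this file is now compiled by a C++ compiler while the FFmpeg headers declare plain C functions. The wrapping pattern they apply looks like the following, shown here with avformat.h only; the guard keeps the C++ compiler from applying C++ name mangling to the FFmpeg symbols, which would otherwise break linking.

    // Sketch of the extern "C" wrapping used above (assumption: the file is
    // built as C++ against unmodified FFmpeg headers).
    #ifdef __cplusplus
    extern "C" {
    #endif
    #include "libavformat/avformat.h"
    #ifdef __cplusplus
    }
    #endif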
@@ -243,7 +250,7 @@ static void sub2video_push_ref(InputStream *ist, int64_t pts)
 void sub2video_update(InputStream *ist, AVSubtitle *sub)
 {
     AVFrame *frame = ist->sub2video.frame;
-    int8_t *dst;
+    uint8_t *dst;
     int dst_linesize;
     int num_rects, i;
     int64_t pts, end_pts;
@@ -696,7 +703,9 @@ static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream,
     int i;
     for (i = 0; i < nb_output_streams; i++) {
         OutputStream *ost2 = output_streams[i];
-        ost2->finished |= ost == ost2 ? this_stream : others;
+//        ost2->finished |= ost == ost2 ? this_stream : others;
+        OSTFinished cur = ost == ost2 ? this_stream : others;
+        ost2->finished = (OSTFinished) (ost2->finished | cur);
     }
 }
 
@@ -830,7 +839,7 @@ static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int u
     if (ret < 0) {
         print_error("av_interleaved_write_frame()", ret);
         main_return_code = 1;
-        close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
+        close_all_output_streams(ost, (OSTFinished) (MUXER_FINISHED | ENCODER_FINISHED), ENCODER_FINISHED);
     }
     av_packet_unref(pkt);
 }
@@ -839,7 +848,8 @@ static void close_output_stream(OutputStream *ost)
 {
     OutputFile *of = output_files[ost->file_index];
 
-    ost->finished |= ENCODER_FINISHED;
+//    ost->finished |= ENCODER_FINISHED;
+    ost->finished = (OSTFinished) (ost->finished | ENCODER_FINISHED);
     if (of->shortest) {
         int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
         of->recording_time = FFMIN(of->recording_time, end);
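
Note (illustrative sketch, not part of the patch): the OSTFinished rewrites in the hunks above are needed because C++ promotes the enum operands of | to int and refuses to assign the int result back to the enum, so finished |= flag no longer compiles. A minimal stand-alone illustration, using a simplified stand-in for the real OSTFinished typedef from ffmpeg.h and a made-up helper name:

    // Simplified stand-in for the OSTFinished flags; the cast-back pattern is
    // what the patch applies, whether spelled as (OSTFinished) or static_cast.
    enum OSTFinished { ENCODER_FINISHED = 1, MUXER_FINISHED = 2 };

    void mark_encoder_finished(OSTFinished &finished) {
        finished = static_cast<OSTFinished>(finished | ENCODER_FINISHED);
    }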
@@ -1001,7 +1011,7 @@ static void do_subtitle_out(OutputFile *of,
     enc = ost->enc_ctx;
 
     if (!subtitle_out) {
-        subtitle_out = av_malloc(subtitle_out_max_size);
+        subtitle_out = static_cast<uint8_t *>(av_malloc(subtitle_out_max_size));
         if (!subtitle_out) {
             av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
             exit_program(1);
@@ -1243,7 +1253,7 @@ static void do_video_out(OutputFile *of,
             mux_par->field_order = AV_FIELD_PROGRESSIVE;
 
         in_picture->quality = enc->global_quality;
-        in_picture->pict_type = 0;
+        in_picture->pict_type = AV_PICTURE_TYPE_NONE;//0;
 
         if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
             in_picture->pts != AV_NOPTS_VALUE)
@@ -1409,7 +1419,8 @@ static void do_video_stats(OutputStream *ost, int frame_size)
         avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
         fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
                 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
-        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
+        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(
+                static_cast<AVPictureType>(ost->pict_type)));
     }
 }
@@ -1420,11 +1431,11 @@ static void finish_output_stream(OutputStream *ost)
     OutputFile *of = output_files[ost->file_index];
     int i;
 
-    ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
+    ost->finished = (OSTFinished) (ENCODER_FINISHED | MUXER_FINISHED);
 
     if (of->shortest) {
         for (i = 0; i < of->ctx->nb_streams; i++)
-            output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
+            output_streams[of->ost_index + i]->finished = (OSTFinished) (ENCODER_FINISHED | MUXER_FINISHED);
     }
 }
@@ -1839,7 +1850,7 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
     if (progress_avio) {
         av_bprintf(&buf_script, "progress=%s\n",
                    is_last_report ? "end" : "continue");
-        avio_write(progress_avio, buf_script.str,
+        avio_write(progress_avio, reinterpret_cast<const unsigned char *>(buf_script.str),
                    FFMIN(buf_script.len, buf_script.size - 1));
         avio_flush(progress_avio);
         av_bprint_finalize(&buf_script, NULL);
@@ -2392,10 +2403,10 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_
     // The old code used to set dts on the drain packet, which does not work
     // with the new API anymore.
     if (eof) {
-        void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
+        void *new007 = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
-        if (!new)
+        if (!new007)
             return AVERROR(ENOMEM);
-        ist->dts_buffer = new;
+        ist->dts_buffer = static_cast<int64_t *>(new007);
         ist->dts_buffer[ist->nb_dts_buffer++] = dts;
     }
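
Note (illustrative sketch, not part of the patch): besides the static_cast, the hunk above renames the local variable because new is a reserved word in C++; the same issue forces the class identifier further down to become clazz. A hypothetical stand-alone example of the pattern, using plain std::realloc instead of av_realloc_array and a made-up helper name, purely for illustration:

    #include <cstdlib>

    // In C the temporary below could legally be named 'new'; under C++ it must
    // be renamed, and the void * result of realloc also needs an explicit cast.
    int *grow_by_one(int *buf, std::size_t count) {
        void *grown = std::realloc(buf, (count + 1) * sizeof(*buf));
        return grown ? static_cast<int *>(grown) : buf;
    }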
@@ -2450,7 +2461,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_
         if (err < 0)
             goto fail;
     }
-    ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
+    ist->hwaccel_retrieved_pix_fmt = static_cast<AVPixelFormat>(decoded_frame->format);
 
     best_effort_timestamp= decoded_frame->best_effort_timestamp;
     *duration_pts = decoded_frame->pkt_duration;
@@ -2577,7 +2588,7 @@ static int send_filter_eof(InputStream *ist)
     int i, ret;
     /* TODO keep pts also in stream time base to avoid converting back */
     int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
-                                   AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
+                                   (AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
 
     for (i = 0; i < ist->nb_filters; i++) {
         ret = ifilter_send_eof(ist->filters[i], pts);
@@ -2783,7 +2794,7 @@ static void print_sdp(void)
         return;
     }
 
-    avc = av_malloc_array(nb_output_files, sizeof(*avc));
+    avc = static_cast<AVFormatContext **>(av_malloc_array(nb_output_files, sizeof(*avc)));
     if (!avc)
         exit_program(1);
     for (i = 0, j = 0; i < nb_output_files; i++) {
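
Note (illustrative sketch, not part of the patch): the most common change in this diff is the one above; C converts the void * returned by av_malloc(), av_mallocz() and av_malloc_array() to any object pointer implicitly, C++ does not, hence the static_cast wrappers throughout. A minimal illustration with a made-up helper name:

    #include <cstddef>
    #include <cstdint>

    extern "C" {
    #include "libavutil/mem.h"
    }

    // In C, 'buf = av_malloc(n);' compiles as is; compiled as C++, the void *
    // result must be converted explicitly. Returns NULL on failure, like av_malloc.
    uint8_t *alloc_buffer(std::size_t n) {
        return static_cast<uint8_t *>(av_malloc(n));
    }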
@@ -2817,7 +2828,7 @@ fail:
 
 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
 {
-    InputStream *ist = s->opaque;
+    InputStream *ist = static_cast<InputStream *>(s->opaque);
     const enum AVPixelFormat *p;
     int ret;
 
@@ -2903,7 +2914,7 @@ static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat
 
 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
 {
-    InputStream *ist = s->opaque;
+    InputStream *ist = static_cast<InputStream *>(s->opaque);
 
     if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
         return ist->hwaccel_get_buffer(s, frame, flags);
@@ -3117,7 +3128,7 @@ static int init_output_stream_streamcopy(OutputStream *ost)
         ost->frame_rate = ist->framerate;
     ost->st->avg_frame_rate = ost->frame_rate;
 
-    ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
+    ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, (AVTimebaseSource) copy_tb);
     if (ret < 0)
         return ret;
 
@@ -3213,16 +3224,16 @@ static void set_encoder_id(OutputFile *of, OutputStream *ost)
     }
 
     encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
-    encoder_string = av_mallocz(encoder_string_len);
+    encoder_string = static_cast<uint8_t *>(av_mallocz(encoder_string_len));
     if (!encoder_string)
         exit_program(1);
 
     if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
-        av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
+        av_strlcpy(reinterpret_cast<char *>(encoder_string), LIBAVCODEC_IDENT " ", encoder_string_len);
     else
-        av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
+        av_strlcpy(reinterpret_cast<char *>(encoder_string), "Lavc ", encoder_string_len);
-    av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
+    av_strlcat(reinterpret_cast<char *>(encoder_string), ost->enc->name, encoder_string_len);
-    av_dict_set(&ost->st->metadata, "encoder", encoder_string,
+    av_dict_set(&ost->st->metadata, "encoder", reinterpret_cast<const char *>(encoder_string),
                 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
 }
@@ -3237,7 +3248,7 @@ static void parse_forced_key_frames(char *kf, OutputStream *ost,
         if (*p == ',')
             n++;
     size = n;
-    pts = av_malloc_array(size, sizeof(*pts));
+    pts = static_cast<int64_t *>(av_malloc_array(size, sizeof(*pts)));
     if (!pts) {
         av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
         exit_program(1);
@@ -3256,8 +3267,8 @@ static void parse_forced_key_frames(char *kf, OutputStream *ost,
             int j;
 
             if (avf->nb_chapters > INT_MAX - size ||
-                !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
-                                     sizeof(*pts)))) {
+                !(pts = static_cast<int64_t *>(av_realloc_f(pts, size += avf->nb_chapters - 1,
+                                     sizeof(*pts))))) {
                 av_log(NULL, AV_LOG_FATAL,
                        "Could not allocate forced key frames array.\n");
                 exit_program(1);
@@ -3376,7 +3387,8 @@ static int init_output_stream_encode(OutputStream *ost)
 
     switch (enc_ctx->codec_type) {
     case AVMEDIA_TYPE_AUDIO:
-        enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
+        enc_ctx->sample_fmt = static_cast<AVSampleFormat>(av_buffersink_get_format(
+                ost->filter->filter));
         if (dec_ctx)
             enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
@@ -3409,7 +3421,8 @@ static int init_output_stream_encode(OutputStream *ost)
             av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
             av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
 
-        enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
+        enc_ctx->pix_fmt = static_cast<AVPixelFormat>(av_buffersink_get_format(
+                ost->filter->filter));
         if (dec_ctx)
             enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
@@ -3488,7 +3501,8 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len)
         dec = ist->dec_ctx;
         if (dec && dec->subtitle_header) {
             /* ASS code assumes this buffer is null terminated so add extra byte. */
-            ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
+            ost->enc_ctx->subtitle_header = static_cast<uint8_t *>(av_mallocz(
+                    dec->subtitle_header_size + 1));
             if (!ost->enc_ctx->subtitle_header)
                 return AVERROR(ENOMEM);
             memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
@@ -3597,7 +3611,7 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len)
                     return AVERROR(ENOMEM);
                 memcpy(dst, sd->data, sd->size);
                 if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
-                    av_display_rotation_set((uint32_t *)dst, 0);
+                    av_display_rotation_set((int32_t *)dst, 0);
             }
         }
     }
@@ -3637,13 +3651,13 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len)
             { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
             { NULL },
         };
-        static const AVClass class = {
+        static const AVClass clazz = {
             .class_name = "",
             .item_name  = av_default_item_name,
             .option     = opts,
             .version    = LIBAVUTIL_VERSION_INT,
         };
-        const AVClass *pclass = &class;
+        const AVClass *pclass = &clazz;
 
         ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
         if (ret < 0)
@@ -3745,7 +3759,7 @@ static int transcode_init(void)
                     discard = AVDISCARD_DEFAULT;
                     break;
                 }
-                p->discard = discard;
+                p->discard = static_cast<AVDiscard>(discard);
             }
         }
     }
@@ -4043,7 +4057,7 @@ static int check_keyboard_interaction(int64_t cur_time)
 #if HAVE_THREADS
 static void *input_thread(void *arg)
 {
-    InputFile *f = arg;
+    InputFile *f = static_cast<InputFile *>(arg);
     unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
     int ret = 0;
 
@@ -4379,15 +4393,16 @@ static int process_input(int file_index)
     }
 
     if (debug_ts) {
+        AVRational time_base_q = (AVRational){1, AV_TIME_BASE};
         av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
                "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
-               av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
+               av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, /*&AV_TIME_BASE_Q*/&time_base_q),
-               av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
+               av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, /*&AV_TIME_BASE_Q*/&time_base_q),
                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
                av_ts2str(input_files[ist->file_index]->ts_offset),
-               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
+               av_ts2timestr(input_files[ist->file_index]->ts_offset, /*&AV_TIME_BASE_Q*/&time_base_q));
     }
 
     if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
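
Note (illustrative sketch, not part of the patch): AV_TIME_BASE_Q is defined as the C compound literal (AVRational){1, AV_TIME_BASE}. Taking its address with &AV_TIME_BASE_Q is fine in C, but compound literals are not standard C++, so the debug_ts hunks above and below introduce a named time_base_q local and pass its address instead. A minimal illustration with a made-up helper name:

    #include <cstdint>

    extern "C" {
    #include "libavutil/avutil.h"   // AVRational, AV_TIME_BASE, av_q2d()
    }

    // Build the 1/AV_TIME_BASE rational as a named lvalue so that C++ code can
    // take its address wherever an AVRational pointer is expected.
    double ts_to_seconds(int64_t ts) {
        AVRational time_base_q = {1, AV_TIME_BASE};  // replaces &AV_TIME_BASE_Q
        const AVRational *tb = &time_base_q;
        return ts * av_q2d(*tb);
    }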
@@ -4455,7 +4470,7 @@ static int process_input(int file_index)
     if (pkt.dts != AV_NOPTS_VALUE)
         pkt.dts *= ist->ts_scale;
 
-    pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
+    pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
     if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
          ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
         pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
@@ -4483,7 +4498,7 @@ static int process_input(int file_index)
         if (pkt.dts != AV_NOPTS_VALUE)
             pkt.dts += duration;
 
-        pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
+        pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
         if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
              ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
            pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
@@ -4526,12 +4541,13 @@ static int process_input(int file_index)
             ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
 
     if (debug_ts) {
+        AVRational time_base_q = (AVRational){1, AV_TIME_BASE};
         av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
                ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
                av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
                av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
                av_ts2str(input_files[ist->file_index]->ts_offset),
-               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
+               av_ts2timestr(input_files[ist->file_index]->ts_offset, /*&AV_TIME_BASE_Q*/&time_base_q));
     }
 
     sub2video_heartbeat(ist, pkt.pts);