Feature: use struct to wrap params

pull/221/head
xufuji456 2 years ago
parent 71c785731b
commit 8538307789
  1. app/src/main/cpp/common_media_jni.cpp (1 line changed)
  2. app/src/main/cpp/ff_audio_resample.cpp (249 lines changed)
  3. app/src/main/cpp/ff_audio_resample.h (64 lines changed)

app/src/main/cpp/common_media_jni.cpp
@@ -16,6 +16,5 @@ COMMON_MEDIA_FUNC(int, audioResample, jstring srcFile, jstring dstFile, int samp
     delete audioResample;
     env->ReleaseStringUTFChars(dstFile, dst_file);
     env->ReleaseStringUTFChars(srcFile, src_file);
-    LOGE("AudioResample", "done......");
     return ret;
 }
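
For orientation, this is roughly what the whole JNI wrapper looks like after the change, written out as a plain function. It is a sketch, not the repository's code: the COMMON_MEDIA_FUNC macro expansion, the Java class path, and everything above the visible tail are assumptions.

    // Hypothetical expansion of COMMON_MEDIA_FUNC(int, audioResample, ...); only the tail
    // (delete / ReleaseStringUTFChars / return) is taken verbatim from the hunk above.
    #include <jni.h>
    #include "ff_audio_resample.h"

    extern "C" JNIEXPORT jint JNICALL
    Java_com_frank_media_CommonMediaHelper_audioResample(JNIEnv *env, jclass /*clazz*/,
                                                         jstring srcFile, jstring dstFile,
                                                         jint sampleRate) {
        const char *src_file = env->GetStringUTFChars(srcFile, nullptr);
        const char *dst_file = env->GetStringUTFChars(dstFile, nullptr);
        auto *audioResample = new FFAudioResample();
        int ret = audioResample->resampling(src_file, dst_file, sampleRate);
        delete audioResample;
        env->ReleaseStringUTFChars(dstFile, dst_file);
        env->ReleaseStringUTFChars(srcFile, src_file);
        return ret;
    }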

app/src/main/cpp/ff_audio_resample.cpp
@@ -7,22 +7,28 @@
 #define ALOGE(Format, ...) LOGE("audio_resample", Format, ##__VA_ARGS__)
 
-int FFAudioResample::openInputFile(const char *filename,
-                                   AVFormatContext **input_format_context,
-                                   AVCodecContext **input_codec_context) {
+FFAudioResample::FFAudioResample() {
+    resample = new AudioResample();
+}
+
+FFAudioResample::~FFAudioResample() {
+    delete resample;
+}
+
+int FFAudioResample::openInputFile(const char *filename) {
     int ret;
     const AVCodec *input_codec;
     AVStream *audio_stream = nullptr;
-    if ((ret = avformat_open_input(input_format_context, filename, nullptr, nullptr)) < 0) {
+    if ((ret = avformat_open_input(&resample->inFormatCtx, filename, nullptr, nullptr)) < 0) {
         ALOGE("Could not open input file:%s\n", av_err2str(ret));
         return ret;
     }
-    avformat_find_stream_info(*input_format_context, nullptr);
-    for (int i = 0; i < (*input_format_context)->nb_streams; ++i) {
-        if ((*input_format_context)->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
-            audio_stream = (*input_format_context)->streams[i];
+    avformat_find_stream_info(resample->inFormatCtx, nullptr);
+    for (int i = 0; i < resample->inFormatCtx->nb_streams; ++i) {
+        if (resample->inFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
+            audio_stream = resample->inFormatCtx->streams[i];
         }
     }
     if (!(input_codec = avcodec_find_decoder(audio_stream->codecpar->codec_id))) {
@@ -30,22 +36,18 @@ int FFAudioResample::openInputFile(const char *filename,
         return -1;
     }
-    *input_codec_context = avcodec_alloc_context3(input_codec);
-    avcodec_parameters_to_context(*input_codec_context, audio_stream->codecpar);
-    if ((ret = avcodec_open2(*input_codec_context, input_codec, nullptr)) < 0) {
+    resample->inCodecCtx = avcodec_alloc_context3(input_codec);
+    avcodec_parameters_to_context(resample->inCodecCtx, audio_stream->codecpar);
+    if ((ret = avcodec_open2(resample->inCodecCtx, input_codec, nullptr)) < 0) {
         ALOGE("Could not open input codec (error:%s)\n", av_err2str(ret));
     }
+    resample->inFrame = av_frame_alloc();
     return 0;
 }
 
-int FFAudioResample::openOutputFile(const char *filename,
-                                    int sample_rate,
-                                    AVCodecContext *input_codec_context,
-                                    AVFormatContext **output_format_context,
-                                    AVCodecContext **output_codec_context) {
-    AVCodecContext *avctx;
+int FFAudioResample::openOutputFile(const char *filename, int sample_rate) {
     AVIOContext *output_io_context = nullptr;
     const AVCodec *output_codec;
     int ret;
@@ -55,34 +57,34 @@ int FFAudioResample::openOutputFile(const char *filename,
         return ret;
     }
-    *output_format_context = avformat_alloc_context();
-    (*output_format_context)->pb = output_io_context;
-    (*output_format_context)->url = av_strdup(filename);
-    (*output_format_context)->oformat = av_guess_format(nullptr, filename, nullptr);
-    if (!(*output_format_context)->oformat) {
+    resample->outFormatCtx = avformat_alloc_context();
+    resample->outFormatCtx->pb = output_io_context;
+    resample->outFormatCtx->url = av_strdup(filename);
+    resample->outFormatCtx->oformat = av_guess_format(nullptr, filename, nullptr);
+    if (!(resample->outFormatCtx->oformat)) {
         ALOGE("Could not find output file format\n");
         return -1;
     }
     /* Find the encoder to be used by its name. */
-    if (!(output_codec = avcodec_find_encoder(input_codec_context->codec_id))) {
-        ALOGE("Could not find encoder=%s\n", input_codec_context->codec->name);
+    if (!(output_codec = avcodec_find_encoder(resample->inCodecCtx->codec_id))) {
+        ALOGE("Could not find encoder=%s\n", resample->inCodecCtx->codec->name);
         return -1;
     }
     /* Create a new audio stream in the output file container. */
-    AVStream *stream = avformat_new_stream(*output_format_context, nullptr);
-    avctx = avcodec_alloc_context3(output_codec);
+    AVStream *stream = avformat_new_stream(resample->outFormatCtx, nullptr);
+    resample->outCodecCtx = avcodec_alloc_context3(output_codec);
     /* Set the basic encoder parameters. */
-    avctx->channels = input_codec_context->channels;
-    avctx->channel_layout = av_get_default_channel_layout(input_codec_context->channels);
-    avctx->sample_rate = sample_rate;
-    avctx->sample_fmt = output_codec->sample_fmts[0];
+    resample->outCodecCtx->channels = resample->inCodecCtx->channels;
+    resample->outCodecCtx->channel_layout = av_get_default_channel_layout(resample->inCodecCtx->channels);
+    resample->outCodecCtx->sample_rate = sample_rate;
+    resample->outCodecCtx->sample_fmt = output_codec->sample_fmts[0];
     /* Allow the use of the experimental AAC encoder. */
-    avctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
+    resample->outCodecCtx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
     /* Set the sample rate for the container. */
     stream->time_base.den = sample_rate;
@@ -90,41 +92,35 @@ int FFAudioResample::openOutputFile(const char *filename,
     /* Some container formats (like MP4) require global headers to be present.
      * Mark the encoder so that it behaves accordingly. */
-    if ((*output_format_context)->oformat->flags & AVFMT_GLOBALHEADER)
-        avctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
+    if (resample->outFormatCtx->oformat->flags & AVFMT_GLOBALHEADER)
+        resample->outCodecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
     /* Open the encoder for the audio stream to use it later. */
-    if ((ret = avcodec_open2(avctx, output_codec, nullptr)) < 0) {
+    if ((ret = avcodec_open2(resample->outCodecCtx, output_codec, nullptr)) < 0) {
         ALOGE("Could not open output codec (error:%s)\n", av_err2str(ret));
         return ret;
     }
-    avcodec_parameters_from_context(stream->codecpar, avctx);
-    *output_codec_context = avctx;
+    avcodec_parameters_from_context(stream->codecpar, resample->outCodecCtx);
     return 0;
 }
 
-int FFAudioResample::initResample(AVCodecContext *input_codec_context,
-                                  AVCodecContext *output_codec_context,
-                                  SwrContext **resample_context) {
-    *resample_context = swr_alloc_set_opts(nullptr,
-                                           av_get_default_channel_layout(output_codec_context->channels),
-                                           output_codec_context->sample_fmt,
-                                           output_codec_context->sample_rate,
-                                           av_get_default_channel_layout(input_codec_context->channels),
-                                           input_codec_context->sample_fmt,
-                                           input_codec_context->sample_rate,
-                                           0, nullptr);
-    return swr_init(*resample_context);
+int FFAudioResample::initResample() {
+    resample->resampleCtx = swr_alloc_set_opts(nullptr,
+                                               av_get_default_channel_layout(resample->outCodecCtx->channels),
+                                               resample->outCodecCtx->sample_fmt,
+                                               resample->outCodecCtx->sample_rate,
+                                               av_get_default_channel_layout(resample->inCodecCtx->channels),
+                                               resample->inCodecCtx->sample_fmt,
+                                               resample->inCodecCtx->sample_rate,
+                                               0, nullptr);
+    return swr_init(resample->resampleCtx);
 }
 
-int FFAudioResample::decodeAudioFrame(AVFrame *frame,
-                                      AVFormatContext *input_format_context,
-                                      AVCodecContext *input_codec_context,
-                                      int *data_present, int *finished) {
+int FFAudioResample::decodeAudioFrame(AVFrame *frame, int *data_present, int *finished) {
     int ret;
-    if ((ret = av_read_frame(input_format_context, &input_packet)) < 0) {
+    if ((ret = av_read_frame(resample->inFormatCtx, &resample->inPacket)) < 0) {
         if (ret == AVERROR_EOF)
             *finished = 1;
         else {
@@ -132,18 +128,19 @@ int FFAudioResample::decodeAudioFrame(AVFrame *frame,
             return ret;
         }
     }
-    if (input_format_context->streams[input_packet.stream_index]->codecpar->codec_type != AVMEDIA_TYPE_AUDIO) {
+    if (resample->inFormatCtx->streams[resample->inPacket.stream_index]->codecpar->codec_type
+        != AVMEDIA_TYPE_AUDIO) {
         ret = 0;
         ALOGE("isn't audio packet, skip it...");
         goto cleanup;
     }
     /* Send the audio frame stored in the temporary packet to the decoder. */
-    if ((ret = avcodec_send_packet(input_codec_context, &input_packet)) < 0) {
+    if ((ret = avcodec_send_packet(resample->inCodecCtx, &resample->inPacket)) < 0) {
         ALOGE("Could not send packet for decoding (error:%s)\n", av_err2str(ret));
         return ret;
     }
     /* Receive one frame from the decoder. */
-    ret = avcodec_receive_frame(input_codec_context, frame);
+    ret = avcodec_receive_frame(resample->inCodecCtx, frame);
     if (ret == AVERROR(EAGAIN)) {
         ret = 0;
         goto cleanup;
@@ -160,23 +157,22 @@ int FFAudioResample::decodeAudioFrame(AVFrame *frame,
     }
 cleanup:
-    av_packet_unref(&input_packet);
+    av_packet_unref(&resample->inPacket);
     return ret;
 }
 
-int FFAudioResample::initConvertedSamples(uint8_t ***converted_input_samples,
-                                          AVCodecContext *output_codec_context, int frame_size) {
+int FFAudioResample::initConvertedSamples(uint8_t ***converted_input_samples, int frame_size) {
     int ret;
-    if (!(*converted_input_samples = (uint8_t **) calloc(output_codec_context->channels,
+    if (!(*converted_input_samples = (uint8_t **) calloc(resample->outCodecCtx->channels,
                                                           sizeof(**converted_input_samples)))) {
         ALOGE("Could not allocate converted input sample pointers\n");
         return AVERROR(ENOMEM);
     }
     if ((ret = av_samples_alloc(*converted_input_samples, nullptr,
-                                output_codec_context->channels,
+                                resample->outCodecCtx->channels,
                                 frame_size,
-                                output_codec_context->sample_fmt, 0)) < 0) {
+                                resample->outCodecCtx->sample_fmt, 0)) < 0) {
         ALOGE("Could not allocate converted input samples (error:%s)\n", av_err2str(ret));
         av_freep(&(*converted_input_samples)[0]);
         free(*converted_input_samples);
@@ -190,19 +186,13 @@ int FFAudioResample::initConvertedSamples(uint8_t ***converted_input_samples,
  * it in the FIFO buffer.
  *
  */
-int FFAudioResample::decodeAndConvert(AVAudioFifo *fifo,
-                                      AVFormatContext *input_format_context,
-                                      AVCodecContext *input_codec_context,
-                                      AVCodecContext *output_codec_context,
-                                      SwrContext *resample_context,
-                                      int *finished) {
+int FFAudioResample::decodeAndConvert(int *finished) {
     uint8_t **converted_dst_samples = nullptr;
     int data_present = 0;
     int ret = AVERROR_EXIT;
     /* Decode one frame worth of audio samples. */
-    if (decodeAudioFrame(input_frame, input_format_context,
-                         input_codec_context, &data_present, finished))
+    if (decodeAudioFrame(resample->inFrame, &data_present, finished))
         goto cleanup;
     if (*finished) {
         ret = 0;
@@ -210,20 +200,20 @@ int FFAudioResample::decodeAndConvert(AVAudioFifo *fifo,
     }
     /* If there is decoded data, convert and store it. */
     if (data_present) {
-        int dst_nb_samples = (int) av_rescale_rnd(input_frame->nb_samples, output_codec_context->sample_rate,
-                                                  input_codec_context->sample_rate, AV_ROUND_UP);
-        if (initConvertedSamples(&converted_dst_samples, output_codec_context, dst_nb_samples))
+        int dst_nb_samples = (int) av_rescale_rnd(resample->inFrame->nb_samples, resample->outCodecCtx->sample_rate,
+                                                  resample->inCodecCtx->sample_rate, AV_ROUND_UP);
+        if (initConvertedSamples(&converted_dst_samples, dst_nb_samples))
             goto cleanup;
-        ret = swr_convert(resample_context, converted_dst_samples, dst_nb_samples,
-                          (const uint8_t **) input_frame->extended_data, input_frame->nb_samples);
+        ret = swr_convert(resample->resampleCtx, converted_dst_samples, dst_nb_samples,
+                          (const uint8_t **) resample->inFrame->extended_data, resample->inFrame->nb_samples);
         if (ret < 0) {
             ALOGE("Could not convert input samples (error:%s)\n", av_err2str(ret));
             goto cleanup;
         }
-        if (av_audio_fifo_write(fifo, (void **) converted_dst_samples, dst_nb_samples) < dst_nb_samples)
+        if (av_audio_fifo_write(resample->fifo, (void **) converted_dst_samples, dst_nb_samples) < dst_nb_samples)
             goto cleanup;
     }
     ret = 0;
@@ -237,7 +227,7 @@ cleanup:
     return ret;
 }
 
-static int init_output_frame(AVFrame **frame, AVCodecContext *output_codec_context) {
+static int initOutputFrame(AVFrame **frame, AVCodecContext *output_codec_context) {
     *frame = av_frame_alloc();
     (*frame)->nb_samples = output_codec_context->frame_size;
     (*frame)->channel_layout = output_codec_context->channel_layout;
@@ -251,19 +241,16 @@ static int init_output_frame(AVFrame **frame, AVCodecContext *output_codec_conte
     return ret;
 }
 
-int FFAudioResample::encodeAudioFrame(AVFrame *frame,
-                                      AVFormatContext *output_format_context,
-                                      AVCodecContext *output_codec_context,
-                                      int *data_present) {
+int FFAudioResample::encodeAudioFrame(AVFrame *frame, int *data_present) {
     int ret;
     /* Set a timestamp based on the sample rate for the container. */
     if (frame) {
-        frame->pts = pts;
-        pts += frame->nb_samples;
+        frame->pts = resample->pts;
+        resample->pts += frame->nb_samples;
     }
-    ret = avcodec_send_frame(output_codec_context, frame);
+    ret = avcodec_send_frame(resample->outCodecCtx, frame);
     if (ret == AVERROR_EOF) {
         ret = 0;
         goto cleanup;
@@ -272,7 +259,7 @@ int FFAudioResample::encodeAudioFrame(AVFrame *frame,
         return ret;
     }
-    ret = avcodec_receive_packet(output_codec_context, &output_packet);
+    ret = avcodec_receive_packet(resample->outCodecCtx, &resample->outPacket);
     if (ret == AVERROR(EAGAIN)) {
         ret = 0;
         goto cleanup;
@@ -288,12 +275,12 @@ int FFAudioResample::encodeAudioFrame(AVFrame *frame,
     /* Write one audio frame from the temporary packet to the output file. */
     if (*data_present &&
-        (ret = av_write_frame(output_format_context, &output_packet)) < 0) {
+        (ret = av_write_frame(resample->outFormatCtx, &resample->outPacket)) < 0) {
         ALOGE("Could not write frame (error:%s)\n", av_err2str(ret));
     }
 cleanup:
-    av_packet_unref(&output_packet);
+    av_packet_unref(&resample->outPacket);
     return ret;
 }
@@ -302,21 +289,18 @@ cleanup:
  * output file.
  *
  */
-int FFAudioResample::encodeAndWrite(AVAudioFifo *fifo,
-                                    AVFormatContext *output_format_context,
-                                    AVCodecContext *output_codec_context) {
+int FFAudioResample::encodeAndWrite() {
     int data_written;
-    const int frame_size = FFMIN(av_audio_fifo_size(fifo),
-                                 output_codec_context->frame_size);
-    output_frame->nb_samples = frame_size;
-    if (av_audio_fifo_read(fifo, (void **) output_frame->data, frame_size) < frame_size) {
+    const int frame_size = FFMIN(av_audio_fifo_size(resample->fifo),
+                                 resample->outCodecCtx->frame_size);
+    resample->outFrame->nb_samples = frame_size;
+    if (av_audio_fifo_read(resample->fifo, (void **) resample->outFrame->data, frame_size) < frame_size) {
         ALOGE("Could not read data from FIFO\n");
         return AVERROR_EXIT;
     }
-    if (encodeAudioFrame(output_frame, output_format_context,
-                         output_codec_context, &data_written)) {
+    if (encodeAudioFrame(resample->outFrame, &data_written)) {
         return AVERROR_EXIT;
     }
     return 0;
@@ -324,43 +308,33 @@ int FFAudioResample::encodeAndWrite(AVAudioFifo *fifo,
 int FFAudioResample::resampling(const char *src_file, const char *dst_file, int sampleRate) {
     int ret = AVERROR_EXIT;
-    AVAudioFifo *fifo = nullptr;
-    SwrContext *resample_context = nullptr;
-    AVCodecContext *input_codec_context = nullptr;
-    AVCodecContext *output_codec_context = nullptr;
-    AVFormatContext *input_format_context = nullptr;
-    AVFormatContext *output_format_context = nullptr;
     /* Open the input file for reading. */
-    if (openInputFile(src_file, &input_format_context, &input_codec_context))
+    if (openInputFile(src_file))
         goto cleanup;
     /* Open the output file for writing. */
-    if (openOutputFile(dst_file, sampleRate, input_codec_context, &output_format_context, &output_codec_context))
+    if (openOutputFile(dst_file, sampleRate))
         goto cleanup;
     /* Initialize the re-sampler to be able to convert audio sample formats. */
-    if (initResample(input_codec_context, output_codec_context,
-                     &resample_context))
+    if (initResample())
         goto cleanup;
     /* Initialize the FIFO buffer to store audio samples to be encoded. */
-    fifo = av_audio_fifo_alloc(output_codec_context->sample_fmt, output_codec_context->channels, 1024 * 10);
-    input_frame = av_frame_alloc();
-    if (init_output_frame(&output_frame, output_codec_context))
+    resample->fifo = av_audio_fifo_alloc(resample->outCodecCtx->sample_fmt,
+                                         resample->outCodecCtx->channels, 1024 * 10);
+    if (initOutputFrame(&resample->outFrame, resample->outCodecCtx))
         goto cleanup;
     /* Write the header of the output file container. */
-    if ((ret = avformat_write_header(output_format_context, nullptr)) < 0) {
+    if ((ret = avformat_write_header(resample->outFormatCtx, nullptr)) < 0) {
         ALOGE("write header error=%s", av_err2str(ret));
     }
-    while (1) {
-        const int output_frame_size = output_codec_context->frame_size;
+    while (true) {
         int finished = 0;
-        while (av_audio_fifo_size(fifo) < output_frame_size) {
+        const int output_frame_size = resample->outCodecCtx->frame_size;
+        while (av_audio_fifo_size(resample->fifo) < output_frame_size) {
             /* Decode one frame, convert sample format and put it into the FIFO buffer. */
-            if (decodeAndConvert(fifo, input_format_context,
-                                 input_codec_context,
-                                 output_codec_context,
-                                 resample_context, &finished))
+            if (decodeAndConvert(&finished))
                 goto cleanup;
             if (finished)
@@ -368,9 +342,9 @@ int FFAudioResample::resampling(const char *src_file, const char *dst_file, int
         }
         /* If we have enough samples for the encoder, we encode them. */
-        while (av_audio_fifo_size(fifo) >= output_frame_size ||
-               (finished && av_audio_fifo_size(fifo) > 0))
-            if (encodeAndWrite(fifo, output_format_context, output_codec_context))
+        while (av_audio_fifo_size(resample->fifo) >= output_frame_size ||
+               (finished && av_audio_fifo_size(resample->fifo) > 0))
+            if (encodeAndWrite())
                 goto cleanup;
 
         /* encode all the remaining samples. */
@@ -378,8 +352,7 @@ int FFAudioResample::resampling(const char *src_file, const char *dst_file, int
             int data_written;
             do {
                 data_written = 0;
-                if (encodeAudioFrame(nullptr, output_format_context,
-                                     output_codec_context, &data_written))
+                if (encodeAudioFrame(nullptr, &data_written))
                     goto cleanup;
             } while (data_written);
             break;
@@ -387,29 +360,29 @@ int FFAudioResample::resampling(const char *src_file, const char *dst_file, int
     }
     /* Write the trailer of the output file container. */
-    if (av_write_trailer(output_format_context)) {
+    if (av_write_trailer(resample->outFormatCtx)) {
         ALOGE("write trailer error...");
     }
     ret = 0;
 cleanup:
-    if (fifo)
-        av_audio_fifo_free(fifo);
-    swr_free(&resample_context);
-    if (output_codec_context)
-        avcodec_free_context(&output_codec_context);
-    if (output_format_context) {
-        avio_closep(&output_format_context->pb);
-        avformat_free_context(output_format_context);
+    if (resample->fifo)
+        av_audio_fifo_free(resample->fifo);
+    swr_free(&(resample->resampleCtx));
+    if (resample->outCodecCtx)
+        avcodec_free_context(&(resample->outCodecCtx));
+    if (resample->outFormatCtx) {
+        avio_closep(&(resample->outFormatCtx->pb));
+        avformat_free_context(resample->outFormatCtx);
     }
-    if (input_codec_context)
-        avcodec_free_context(&input_codec_context);
-    if (input_format_context)
-        avformat_close_input(&input_format_context);
-    if (output_frame)
-        av_frame_free(&output_frame);
-    if (input_frame)
-        av_frame_free(&input_frame);
+    if (resample->inCodecCtx)
+        avcodec_free_context(&(resample->inCodecCtx));
+    if (resample->inFormatCtx)
+        avformat_close_input(&(resample->inFormatCtx));
+    if (resample->inFrame)
+        av_frame_free(&(resample->inFrame));
+    if (resample->outFrame)
+        av_frame_free(&(resample->outFrame));
     return ret;
 }

app/src/main/cpp/ff_audio_resample.h
@@ -26,57 +26,49 @@ extern "C" {
 }
 #endif
 
-class FFAudioResample {
-private:
+struct AudioResample {
     int64_t pts = 0;
-    AVPacket input_packet;
-    AVPacket output_packet;
-    AVFrame *input_frame;
-    AVFrame *output_frame;
+    AVPacket inPacket;
+    AVPacket outPacket;
+    AVFrame *inFrame;
+    AVFrame *outFrame;
+    SwrContext *resampleCtx;
+    AVAudioFifo *fifo = nullptr;
+    AVFormatContext *inFormatCtx;
+    AVCodecContext *inCodecCtx;
+    AVFormatContext *outFormatCtx;
+    AVCodecContext *outCodecCtx;
+};
+
+class FFAudioResample {
+private:
+    AudioResample *resample;
 
-    int openInputFile(const char *filename,
-                      AVFormatContext **input_format_context,
-                      AVCodecContext **input_codec_context);
+    int openInputFile(const char *filename);
 
-    int openOutputFile(const char *filename,
-                       int sample_rate,
-                       AVCodecContext *input_codec_context,
-                       AVFormatContext **output_format_context,
-                       AVCodecContext **output_codec_context);
+    int openOutputFile(const char *filename, int sample_rate);
 
-    int initResample(AVCodecContext *input_codec_context,
-                     AVCodecContext *output_codec_context,
-                     SwrContext **resample_context);
+    int initResample();
 
-    int decodeAudioFrame(AVFrame *frame,
-                         AVFormatContext *input_format_context,
-                         AVCodecContext *input_codec_context,
-                         int *data_present, int *finished);
+    int decodeAudioFrame(AVFrame *frame, int *data_present, int *finished);
 
-    int initConvertedSamples(uint8_t ***converted_input_samples,
-                             AVCodecContext *output_codec_context, int frame_size);
+    int initConvertedSamples(uint8_t ***converted_input_samples, int frame_size);
 
-    int decodeAndConvert(AVAudioFifo *fifo,
-                         AVFormatContext *input_format_context,
-                         AVCodecContext *input_codec_context,
-                         AVCodecContext *output_codec_context,
-                         SwrContext *resample_context,
-                         int *finished);
+    int decodeAndConvert(int *finished);
 
-    int encodeAudioFrame(AVFrame *frame,
-                         AVFormatContext *output_format_context,
-                         AVCodecContext *output_codec_context,
-                         int *data_present);
+    int encodeAudioFrame(AVFrame *frame, int *data_present);
 
-    int encodeAndWrite(AVAudioFifo *fifo,
-                       AVFormatContext *output_format_context,
-                       AVCodecContext *output_codec_context);
+    int encodeAndWrite();
 
 public:
+    FFAudioResample();
+    ~FFAudioResample();
     int resampling(const char *src_file, const char *dst_file, int sampleRate);
 };
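
With the per-run FFmpeg state folded into the AudioResample struct, callers only touch the public surface of FFAudioResample. A minimal usage sketch (the file paths and target sample rate below are placeholders, not values from the repository):

    #include "ff_audio_resample.h"

    int convertToAac() {
        FFAudioResample resampler;  // constructor allocates the internal AudioResample struct
        // resampling() drives the whole pipeline: open input/output, convert via the FIFO, encode, mux.
        // Returns 0 on success, as in the cleanup path of resampling() above.
        return resampler.resampling("/sdcard/input.mp3", "/sdcard/output.aac", 48000);
    }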
