Feature: adjust samplerate

pull/221/head
xufuji456 3 years ago
parent c1ae76fa6d
commit e2faaabc4a
1. app/src/main/cpp/audio_resample.cpp (56)
2. app/src/main/java/com/frank/ffmpeg/activity/MainActivity.kt (22)

@@ -177,7 +177,7 @@ static int open_output_file(const char *filename,
      * The input file's sample rate is used to avoid a sample rate conversion. */
     avctx->channels = OUTPUT_CHANNELS;
     avctx->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS);
-    avctx->sample_rate = sample_rate; //input_codec_context->sample_rate;
+    avctx->sample_rate = sample_rate;
     avctx->sample_fmt = output_codec->sample_fmts[0];
     avctx->bit_rate = OUTPUT_BIT_RATE;
@@ -185,7 +185,7 @@ static int open_output_file(const char *filename,
     avctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
     /* Set the sample rate for the container. */
-    stream->time_base.den = input_codec_context->sample_rate;
+    stream->time_base.den = sample_rate;
     stream->time_base.num = 1;
     /* Some container formats (like MP4) require global headers to be present.
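The hunks above suggest that open_output_file() now receives the target rate as a parameter instead of copying it from the decoder context. A minimal sketch of the presumed signature (the name sample_rate comes from the diff; its exact position in the parameter list is an assumption):

static int open_output_file(const char *filename,
                            int sample_rate,                 /* assumed: new parameter carrying the requested rate */
                            AVCodecContext *input_codec_context,
                            AVFormatContext **output_format_context,
                            AVCodecContext **output_codec_context);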
@@ -269,12 +269,6 @@ static int init_resampler(AVCodecContext *input_codec_context,
         ALOGE("Could not allocate resample context\n");
         return AVERROR(ENOMEM);
     }
-    /*
-     * Perform a sanity check so that the number of converted samples is
-     * not greater than the number of samples to be converted.
-     * If the sample rates differ, this case has to be handled differently
-     */
-    av_assert0(output_codec_context->sample_rate == input_codec_context->sample_rate);
     /* Open the re-sampler with the specified parameters. */
     if ((error = swr_init(*resample_context)) < 0) {
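With the av_assert0 removed, the SwrContext is now expected to perform a real rate conversion, which only works if it was created with both rates. A sketch of the usual allocation inside init_resampler (following the FFmpeg transcode_aac example this file appears to be based on; this call is not part of the diff shown here):

    *resample_context = swr_alloc_set_opts(nullptr,
            av_get_default_channel_layout(output_codec_context->channels),
            output_codec_context->sample_fmt,
            output_codec_context->sample_rate,   /* output rate = requested sample_rate */
            av_get_default_channel_layout(input_codec_context->channels),
            input_codec_context->sample_fmt,
            input_codec_context->sample_rate,    /* input rate from the decoder */
            0, nullptr);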
@@ -291,7 +285,6 @@ static int init_resampler(AVCodecContext *input_codec_context,
  */
 static int init_fifo(AVAudioFifo **fifo, AVCodecContext *output_codec_context)
 {
-    /* Create the FIFO buffer based on the specified output sample format. */
     if (!(*fifo = av_audio_fifo_alloc(output_codec_context->sample_fmt,
                                       output_codec_context->channels, 1))) {
         ALOGE("Could not allocate FIFO\n");
@@ -308,8 +301,7 @@ static int write_output_file_header(AVFormatContext *output_format_context)
 {
     int error;
     if ((error = avformat_write_header(output_format_context, nullptr)) < 0) {
-        ALOGE("Could not write output file header (error '%s')\n",
-              av_err2str(error));
+        ALOGE("Could not write output file header (error:%s)\n", av_err2str(error));
         return error;
     }
     return 0;
@@ -324,9 +316,8 @@ static int decode_audio_frame(AVFrame *frame,
                               AVCodecContext *input_codec_context,
                               int *data_present, int *finished)
 {
-    /* Packet used for temporary storage. */
-    AVPacket input_packet;
     int error;
+    AVPacket input_packet;
     init_packet(&input_packet);
     /* Read one audio frame from the input file into a temporary packet. */
@@ -335,8 +326,7 @@ static int decode_audio_frame(AVFrame *frame,
         if (error == AVERROR_EOF)
             *finished = 1;
         else {
-            ALOGE("Could not read frame (error:%s)\n",
-                  av_err2str(error));
+            ALOGE("Could not read frame (error:%s)\n", av_err2str(error));
             return error;
         }
     }
@@ -419,11 +409,8 @@ static int convert_samples(const uint8_t **input_data,
                            SwrContext *resample_context)
 {
     int error;
-    /* Convert the samples using the resampler. */
-    if ((error = swr_convert(resample_context,
-                             converted_data, frame_size,
-                             input_data , frame_size)) < 0) {
+    if ((error = swr_convert(resample_context, converted_data, frame_size,
+                             input_data, frame_size)) < 0) {
         ALOGE("Could not convert input samples (error:%s)\n", av_err2str(error));
         return error;
     }
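Note that convert_samples() still passes frame_size as both the input and output sample count. That is fine while the two rates match, but once they differ the resampler can produce more (or fewer) samples than it consumes, so the output capacity is normally derived from the rate ratio. A sketch using standard libswresample calls (not code from this commit; in_rate and out_rate stand for the decoder and encoder sample rates):

    // Upper bound on output samples for frame_size input samples,
    // including samples still buffered inside the resampler.
    int64_t max_out_samples = av_rescale_rnd(
            swr_get_delay(resample_context, in_rate) + frame_size,
            out_rate, in_rate, AV_ROUND_UP);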
@@ -563,14 +550,14 @@ static int init_output_frame(AVFrame **frame,
  * Encode one frame worth of audio to the output file.
  *
  */
-static int encode_audio_frame(AVFrame *frame,
+static int encode_audio_frame(AVFrame *frame, AVFormatContext *input_format_context,
                               AVFormatContext *output_format_context,
                               AVCodecContext *output_codec_context,
                               int *data_present)
 {
-    /* Packet used for temporary storage. */
-    AVPacket output_packet;
     int error;
+    int stream_index = 0;
+    AVPacket output_packet;
     init_packet(&output_packet);
     /* Set a timestamp based on the sample rate for the container. */
@@ -605,11 +592,23 @@ static int encode_audio_frame(AVFrame *frame,
     } else if (error < 0) {
         ALOGE("Could not encode frame (error:%s)\n", av_err2str(error));
         goto cleanup;
-    /* Default case: Return encoded data. */
     } else {
         *data_present = 1;
     }
+    // rescale pts from input to output
+    // av_packet_rescale_ts(&output_packet,
+    //                      input_format_context->streams[stream_index]->time_base,
+    //                      output_format_context->streams[stream_index]->time_base);
+    output_packet.pts = av_rescale_q_rnd(output_packet.pts,
+                                         input_format_context->streams[stream_index]->time_base,
+                                         output_format_context->streams[stream_index]->time_base,
+                                         static_cast<AVRounding>(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
+    output_packet.dts = output_packet.pts;
+    output_packet.duration = av_rescale_q(output_packet.duration,
+                                          input_format_context->streams[stream_index]->time_base,
+                                          output_format_context->streams[stream_index]->time_base);
     /* Write one audio frame from the temporary packet to the output file. */
     if (*data_present &&
         (error = av_write_frame(output_format_context, &output_packet)) < 0) {
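The rescaling added here converts timestamps from the input stream's time base to the output stream's. As a quick worked example (numbers are illustrative, not from this commit): a packet with pts = 44100 in a 1/44100 time base marks t = 1 s; rescaled to a 1/48000 time base it becomes 44100 * (1/44100) / (1/48000) = 48000. The commented-out av_packet_rescale_ts() above does the same conversion for pts, dts and duration in one call:

    // Equivalent one-call form (standard FFmpeg API, shown only for comparison):
    av_packet_rescale_ts(&output_packet,
                         input_format_context->streams[stream_index]->time_base,
                         output_format_context->streams[stream_index]->time_base);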
@@ -627,7 +626,7 @@ cleanup:
  * output file.
  *
  */
-static int load_encode_and_write(AVAudioFifo *fifo,
+static int load_encode_and_write(AVAudioFifo *fifo, AVFormatContext *input_format_context,
                                  AVFormatContext *output_format_context,
                                  AVCodecContext *output_codec_context)
 {
@@ -653,7 +652,7 @@ static int load_encode_and_write(AVAudioFifo *fifo,
     }
     /* Encode one frame worth of audio samples. */
-    if (encode_audio_frame(output_frame, output_format_context,
-                           output_codec_context, &data_written)) {
+    if (encode_audio_frame(output_frame, input_format_context, output_format_context,
                            output_codec_context, &data_written)) {
         av_frame_free(&output_frame);
         return AVERROR_EXIT;
@@ -737,8 +736,7 @@ int resampling(const char *src_file, const char *dst_file, int sampleRate)
             (finished && av_audio_fifo_size(fifo) > 0))
             /* Take one frame worth of audio samples from the FIFO buffer,
              * encode it and write it to the output file. */
-            if (load_encode_and_write(fifo, output_format_context,
-                                      output_codec_context))
+            if (load_encode_and_write(fifo, input_format_context, output_format_context, output_codec_context))
                 goto cleanup;
         /* If we are at the end of the input file and have encoded
@@ -748,7 +746,7 @@ int resampling(const char *src_file, const char *dst_file, int sampleRate)
         /* Flush the encoder as it may have delayed frames. */
         do {
             data_written = 0;
-            if (encode_audio_frame(nullptr, output_format_context,
+            if (encode_audio_frame(nullptr, input_format_context, output_format_context,
                                    output_codec_context, &data_written))
                 goto cleanup;
         } while (data_written);
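The Kotlin change below drives this path through videoPlayer.audioResample(input, output, 48000), which presumably reaches resampling() via a JNI bridge. A hypothetical sketch of that bridge (the JNI function, class and method names are assumptions inferred from the Kotlin call, not code from this commit):

extern "C"
JNIEXPORT jint JNICALL
Java_com_frank_ffmpeg_VideoPlayer_audioResample(JNIEnv *env, jobject /* thiz */,
                                                jstring srcFile, jstring dstFile, jint sampleRate) {
    // Convert the Java strings to C strings, run the resampler, then release them.
    const char *src = env->GetStringUTFChars(srcFile, nullptr);
    const char *dst = env->GetStringUTFChars(dstFile, nullptr);
    int ret = resampling(src, dst, sampleRate);
    env->ReleaseStringUTFChars(srcFile, src);
    env->ReleaseStringUTFChars(dstFile, dst);
    return ret;
}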

@@ -2,11 +2,13 @@ package com.frank.ffmpeg.activity
 import android.content.Intent
 import android.os.Bundle
+import android.util.Log
 import android.view.View
 import androidx.recyclerview.widget.RecyclerView
 import androidx.recyclerview.widget.StaggeredGridLayoutManager
 import com.frank.ffmpeg.R
+import com.frank.ffmpeg.VideoPlayer
 import com.frank.ffmpeg.adapter.WaterfallAdapter
 import com.frank.ffmpeg.listener.OnItemClickListener
@@ -70,7 +72,25 @@ class MainActivity : BaseActivity() {
            7 //probe media format
            -> intent.setClass(this@MainActivity, ProbeFormatActivity::class.java)
            8 //audio effect
-           -> intent.setClass(this@MainActivity, AudioEffectActivity::class.java)
+           // -> intent.setClass(this@MainActivity, AudioEffectActivity::class.java)
+           -> {
+               val videoPlayer = VideoPlayer()
+//               val input = "sdcard/what.mp4"
+//               val output = "sdcard/haha.avi"
+//               Thread {
+//                   val ret = videoPlayer.executeTranscode(input, output)
+//                   Log.e("Main", "transcode result=$ret")
+//               }.start()
+               val input = "sdcard/okok.aac"
+               val output = "sdcard/haha48000.aac"
+               Thread {
+                   val ret = videoPlayer.audioResample(input, output, 48000)
+                   Log.e("Main", "transcode result=$ret")
+               }.start()
+               return
+           }
            else -> {
            }
        }
