Add audio to the video filter module

Add audio to the video filter module
pull/107/head
frank 6 years ago
parent 52d8db48aa
commit 28689818cd
  1. Live/src/main/java/com/frank/live/util/YUVUtil.java (47)
  2. app/src/main/AndroidManifest.xml (7)
  3. app/src/main/cpp/audio_player.c (32)
  4. app/src/main/cpp/video_filter.c (180)
  5. app/src/main/java/com/frank/ffmpeg/FFmpegCmd.java (2)
  6. app/src/main/java/com/frank/ffmpeg/VideoPlayer.java (24)
  7. app/src/main/java/com/frank/ffmpeg/activity/FilterActivity.java (6)
  8. app/src/main/java/com/frank/ffmpeg/activity/LiveActivity.java (2)
  9. app/src/main/java/com/frank/ffmpeg/util/FFmpegUtil.java (1)

@ -0,0 +1,47 @@
package com.frank.live.util;

/**
 * Utility class for YUV/RGB conversion
 * Created by frank on 2018/7/1.
 */
public class YUVUtil {

    public static byte[] ARGBtoYUV420SemiPlanar(int[] input, int width, int height) {
        final int frameSize = width * height;
        byte[] yuv420sp = new byte[width * height * 3 / 2];
        int yIndex = 0;
        int uvIndex = frameSize;
        int a, R, G, B, Y, U, V;
        int index = 0;
        for (int j = 0; j < height; j++) {
            for (int i = 0; i < width; i++) {
                a = (input[index] & 0xff000000) >>> 24; // alpha is not used
                R = (input[index] & 0xff0000) >> 16;
                G = (input[index] & 0xff00) >> 8;
                B = (input[index] & 0xff);
                // well-known RGB to YUV conversion
                Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
                U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
                V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
                // NV21 has a plane of Y and an interleaved VU plane subsampled by a
                // factor of 2: for every 4 Y pixels there are 1 V and 1 U. Note the
                // sampling is every other pixel AND every other scanline.
                yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
                if (j % 2 == 0 && index % 2 == 0) {
                    yuv420sp[uvIndex++] = (byte) ((V < 0) ? 0 : ((V > 255) ? 255 : V));
                    yuv420sp[uvIndex++] = (byte) ((U < 0) ? 0 : ((U > 255) ? 255 : U));
                }
                index++;
            }
        }
        return yuv420sp;
    }
}
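A minimal usage sketch (hypothetical caller, not part of this commit), converting the ARGB pixels of an android.graphics.Bitmap into NV21 bytes before handing them to an encoder:

    import android.graphics.Bitmap;
    import com.frank.live.util.YUVUtil;

    public class YUVUtilDemo {
        // Hypothetical helper: width and height are assumed even, as NV21 requires.
        public static byte[] bitmapToNV21(Bitmap bitmap) {
            int width = bitmap.getWidth();
            int height = bitmap.getHeight();
            int[] argb = new int[width * height];
            // read the frame's ARGB pixels, one full row per stride
            bitmap.getPixels(argb, 0, width, 0, 0, width, height);
            return YUVUtil.ARGBtoYUV420SemiPlanar(argb, width, height);
        }
    }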

@ -36,20 +36,25 @@
<!-- video decoding and playback -->
<activity
android:name=".activity.VideoPlayerActivity"
android:theme="@style/Theme.AppCompat.DayNight.NoActionBar"
android:screenOrientation="landscape" />
<!-- audio & video decoding and playback -->
<activity
android:name=".activity.MediaPlayerActivity"
android:theme="@style/Theme.AppCompat.DayNight.NoActionBar"
android:screenOrientation="landscape" />
<!-- live streaming from a local push -->
<activity android:name=".activity.PushActivity" />
<activity android:name=".activity.PushActivity"
android:theme="@style/Theme.AppCompat.DayNight.NoActionBar"/>
<!-- real-time live streaming -->
<activity
android:name=".activity.LiveActivity"
android:theme="@style/Theme.AppCompat.DayNight.NoActionBar"
android:screenOrientation="landscape" />
<!-- filter effects -->
<activity
android:name=".activity.FilterActivity"
android:theme="@style/Theme.AppCompat.DayNight.NoActionBar"
android:screenOrientation="landscape" />
</application>

@ -122,23 +122,23 @@ JNIEXPORT void JNICALL Java_com_frank_ffmpeg_AudioPlayer_play
        // one frame decoded successfully
        if (got_frame > 0) {
            LOGI("decode frame count=%d", index++);
            // convert the audio sample format
            swr_convert(swrCtx, &out_buffer, MAX_AUDIO_FRAME_SIZE, (const uint8_t **) frame->data, frame->nb_samples);
            int out_buffer_size = av_samples_get_buffer_size(NULL, out_channel_nb,
                    frame->nb_samples, out_sample_fmt, 1);
            jbyteArray audio_sample_array = (*env)->NewByteArray(env, out_buffer_size);
            jbyte *sample_byte_array = (*env)->GetByteArrayElements(env, audio_sample_array, NULL);
            // copy the converted PCM data into the Java array
            memcpy(sample_byte_array, out_buffer, (size_t) out_buffer_size);
            // release the array back to the JVM
            (*env)->ReleaseByteArrayElements(env, audio_sample_array, sample_byte_array, 0);
            // play by calling AudioTrack.write()
            (*env)->CallIntMethod(env, audio_track, audio_track_write_mid,
                    audio_sample_array, 0, out_buffer_size);
            // delete the local reference
            (*env)->DeleteLocalRef(env, audio_sample_array);
            usleep(1000 * 16);
        }
    }
    av_free_packet(packet);

@ -14,6 +14,7 @@
#include <libavfilter/buffersrc.h>
#include <libavfilter/avfiltergraph.h>
#include <libavutil/opt.h>
#include "libswresample/swresample.h"
#define TAG "VideoFilter"
#define LOGI(FORMAT,...) __android_log_print(ANDROID_LOG_INFO, TAG, FORMAT, ##__VA_ARGS__);
@ -51,27 +52,19 @@ int again;
int release;
/**
 * colorbalance:
 * rs/gs/bs: adjust red, green and blue shadows (darkest pixels)
 * rm/gm/bm: adjust red, green and blue midtones (medium pixels)
 * rh/gh/bh: adjust red, green and blue highlights (brightest pixels)
 */
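/* Example (value is illustrative): "colorbalance=bs=0.3" shifts the shadows
   toward blue, the setting FilterActivity in this commit uses; positive values
   shift toward the named color, negative toward its complement. */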
/**
 * drawtext:
 * Draw a text string, or text from a specified file, on top of a video, using the libfreetype library.
 * To enable compilation of this filter, configure FFmpeg with --enable-libfreetype.
 * To enable default font fallback and the font option, configure FFmpeg with --enable-libfontconfig.
 * To enable the text_shaping option, configure FFmpeg with --enable-libfribidi.
 * Centered text: drawtext="fontsize=30:fontfile=FreeSerif.ttf:text='hello world':x=(w-text_w)/2:y=(h-text_h)/2"
 */
#define MAX_AUDIO_FRAME_SIZE 48000 * 4
jmethodID audio_track_write_mid;
uint8_t *out_buffer;
jobject audio_track;
SwrContext *audio_swr_ctx;
int out_channel_nb;
enum AVSampleFormat out_sample_fmt;
int audio_stream_index = -1;
int got_frame;
AVCodecContext *audioCodecCtx;
//const char *filter_descr = "lutyuv='u=128:v=128'";//黑白
//const char *filter_descr = "hue='h=60:s=-3'";//hue滤镜-->Set the hue to 60 degrees and the saturation to -3
//const char *filter_descr = "hue='h=60:s=-3'";//hue滤镜
//const char *filter_descr = "vflip";//上下反序
//const char *filter_descr = "hflip";//左右反序
//const char *filter_descr = "rotate=90";//旋转90°
@ -81,10 +74,9 @@ int release;
//const char *filter_descr = "edgedetect=low=0.1:high=0.4";//边缘检测
//const char *filter_descr = "lutrgb='r=0:g=0'";//去掉红色、绿色分量,只保留蓝色
//const char *filter_descr = "noise=alls=20:allf=t+u";//添加噪声
//const char *filter_descr = "vignette='PI/4+random(1)*PI/50':eval=frame";//闪烁装饰-->Make a flickering vignetting
//const char *filter_descr = "vignette='PI/4+random(1)*PI/50':eval=frame";//闪烁装饰
//const char *filter_descr = "gblur=sigma=0.5:steps=1:planes=1:sigmaV=1";//高斯模糊
//const char *filter_descr = "drawtext=fontfile='app/src/main/cpp/arial.ttf':fontcolor=green:fontsize=30:text='Hello world'";//绘制文字
//const char *filter_descr = "drawtext=fontfile='arial.ttf':fontcolor=green:fontsize=30:text='Hello world'";//绘制文字
//const char *filter_descr = "movie=my_logo.png[wm];[in][wm]overlay=5:5[out]";//添加图片水印
// initialize the filter graph
@ -133,23 +125,11 @@ int init_filters(const char *filters_descr) {
goto end;
}
/*
* The buffer source output must be connected to the input pad of
* the first filter described by filters_descr; since the first
* filter input label is not specified, it is set to "in" by
* default.
*/
outputs->name = av_strdup("in");
outputs->filter_ctx = buffersrc_ctx;
outputs->pad_idx = 0;
outputs->next = NULL;
/*
* The buffer sink input must be connected to the output pad of
* the last filter described by filters_descr; since the last
* filter output label is not specified, it is set to "out" by
* default.
*/
inputs->name = av_strdup("out");
inputs->filter_ctx = buffersink_ctx;
inputs->pad_idx = 0;
@ -169,6 +149,7 @@ int init_filters(const char *filters_descr) {
return ret;
}
// initialize the video decoder and renderer
int open_input(JNIEnv * env, const char* file_name, jobject surface){
LOGI("open file:%s\n", file_name);
// register all components
@ -243,8 +224,111 @@ int open_input(JNIEnv * env, const char* file_name, jobject surface){
return 0;
}
// initialize the audio decoder and player
int init_audio(JNIEnv *env, jclass jthiz) {
    // find the index of the audio stream
    int i;
    for (i = 0; i < pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            audio_stream_index = i;
            break;
        }
    }
    // get the audio decoder
    audioCodecCtx = pFormatCtx->streams[audio_stream_index]->codec;
    AVCodec *codec = avcodec_find_decoder(audioCodecCtx->codec_id);
    if (codec == NULL) {
        LOGI("%s", "couldn't find the audio decoder");
        return -1;
    }
    // open the audio decoder
    if (avcodec_open2(audioCodecCtx, codec, NULL) < 0) {
        LOGI("%s", "couldn't open the audio decoder");
        return -1;
    }
    // resample frames to 16-bit PCM, unifying the sample format and rate
    audio_swr_ctx = swr_alloc();
    // input sample format
    enum AVSampleFormat in_sample_fmt = audioCodecCtx->sample_fmt;
    // output sample format: 16-bit PCM
    out_sample_fmt = AV_SAMPLE_FMT_S16;
    // input sample rate
    int in_sample_rate = audioCodecCtx->sample_rate;
    // output sample rate (kept equal to the input rate)
    int out_sample_rate = in_sample_rate;
    // input channel layout (2 channels, stereo by default)
    uint64_t in_ch_layout = audioCodecCtx->channel_layout;
    // output channel layout (stereo)
    uint64_t out_ch_layout = AV_CH_LAYOUT_STEREO;
    swr_alloc_set_opts(audio_swr_ctx,
            out_ch_layout, out_sample_fmt, out_sample_rate,
            in_ch_layout, in_sample_fmt, in_sample_rate,
            0, NULL);
    swr_init(audio_swr_ctx);
    // number of output channels
    out_channel_nb = av_get_channel_layout_nb_channels(out_ch_layout);
    jclass player_class = (*env)->GetObjectClass(env, jthiz);
    if (!player_class) {
        LOGE("player_class not found...");
        return -1;
    }
    // obtain the AudioTrack object from the Java layer
    jmethodID audio_track_method = (*env)->GetMethodID(env, player_class, "createAudioTrack", "(II)Landroid/media/AudioTrack;");
    if (!audio_track_method) {
        LOGE("audio_track_method not found...");
        return -1;
    }
    audio_track = (*env)->CallObjectMethod(env, jthiz, audio_track_method, out_sample_rate, out_channel_nb);
    // call AudioTrack.play()
    jclass audio_track_class = (*env)->GetObjectClass(env, audio_track);
    jmethodID audio_track_play_mid = (*env)->GetMethodID(env, audio_track_class, "play", "()V");
    (*env)->CallVoidMethod(env, audio_track, audio_track_play_mid);
    // get the write() method
    audio_track_write_mid = (*env)->GetMethodID(env, audio_track_class, "write", "([BII)I");
    // output buffer for the 16-bit PCM data
    out_buffer = (uint8_t *) av_malloc(MAX_AUDIO_FRAME_SIZE);
    return 0;
}
int play_audio(JNIEnv *env, AVPacket *packet, AVFrame *frame) {
    // decode the audio packet
    int ret = avcodec_decode_audio4(audioCodecCtx, frame, &got_frame, packet);
    if (ret < 0) {
        return ret;
    }
    // one frame decoded successfully
    if (got_frame > 0) {
        // convert the audio sample format
        swr_convert(audio_swr_ctx, &out_buffer, MAX_AUDIO_FRAME_SIZE, (const uint8_t **) frame->data, frame->nb_samples);
        int out_buffer_size = av_samples_get_buffer_size(NULL, out_channel_nb,
                frame->nb_samples, out_sample_fmt, 1);
        jbyteArray audio_sample_array = (*env)->NewByteArray(env, out_buffer_size);
        jbyte *sample_byte_array = (*env)->GetByteArrayElements(env, audio_sample_array, NULL);
        // copy the converted PCM data into the Java array
        memcpy(sample_byte_array, out_buffer, (size_t) out_buffer_size);
        // release the array back to the JVM
        (*env)->ReleaseByteArrayElements(env, audio_sample_array, sample_byte_array, 0);
        // play by calling AudioTrack.write()
        (*env)->CallIntMethod(env, audio_track, audio_track_write_mid,
                audio_sample_array, 0, out_buffer_size);
        // delete the local reference
        (*env)->DeleteLocalRef(env, audio_sample_array);
        usleep(1000);
    }
    return 0;
}
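/* Note: the third argument of swr_convert() is the output capacity counted in
   samples per channel, not in bytes; passing MAX_AUDIO_FRAME_SIZE works here
   only because each decoded frame yields far fewer samples than that. */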
JNIEXPORT jint JNICALL Java_com_frank_ffmpeg_VideoPlayer_filter
(JNIEnv *env, jclass clazz, jstring filePath, jobject surface, jstring filterDescr, jboolean playAudio){
int ret;
const char * file_name = (*env)->GetStringUTFChars(env, filePath, JNI_FALSE);
@ -252,7 +336,7 @@ JNIEXPORT jint JNICALL Java_com_frank_ffmpeg_VideoPlayer_filter
// open the input file
if(!is_playing){
LOGI("open_input...");
if((ret = open_input(env, file_name, surface)) < 0){
LOGE("Couldn't allocate video frame.");
goto end;
}
@ -264,6 +348,12 @@ JNIEXPORT jint JNICALL Java_com_frank_ffmpeg_VideoPlayer_filter
ret = -1;
goto end;
}
// initialize the audio decoder
if ((ret = init_audio(env, clazz)) < 0){
LOGE("Couldn't init_audio.");
goto end;
}
}
// initialize the filter graph
@ -315,9 +405,14 @@ JNIEXPORT jint JNICALL Java_com_frank_ffmpeg_VideoPlayer_filter
}
av_frame_unref(filter_frame);
}
av_frame_unref(pFrame);
// when not playing audio, pace the video manually (40 ms per frame, about 25 fps)
if (!playAudio){
usleep((unsigned long) (1000 * 40));
}
} else if(packet.stream_index == audio_stream_index){// audio frame
if (playAudio){
play_audio(env, &packet, pFrame);
}
}
av_packet_unref(&packet);
}
@ -333,6 +428,15 @@ JNIEXPORT jint JNICALL Java_com_frank_ffmpeg_VideoPlayer_filter
avfilter_free(buffersrc_ctx);
avfilter_free(buffersink_ctx);
avfilter_graph_free(&filter_graph);
avcodec_close(audioCodecCtx);
av_free(buffer);
// sws and swr contexts have dedicated release functions; plain free() is wrong here
sws_freeContext(sws_ctx);
swr_free(&audio_swr_ctx);
av_free(out_buffer);
// audio_track is a JNI reference and audio_track_write_mid a jmethodID;
// neither is heap memory, so neither may be passed to free()
(*env)->DeleteLocalRef(env, audio_track);
ANativeWindow_release(nativeWindow);
(*env)->ReleaseStringUTFChars(env, filePath, file_name);
(*env)->ReleaseStringUTFChars(env, filterDescr, filter_descr);
LOGE("do release...");

@ -11,7 +11,7 @@ public class FFmpegCmd {
System.loadLibrary("media-handle");
}
// start a worker thread to call the native method for audio processing
public static void execute(final String[] commands, final OnHandleListener onHandleListener){
new Thread(new Runnable() {
@Override

@ -1,5 +1,9 @@
package com.frank.ffmpeg;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;
/**
 * Video player
 * Created by frank on 2018/2/1
@ -13,7 +17,25 @@ public class VideoPlayer {
    public native int play(String filePath, Object surface);
    public native int setPlayRate(float playRate);
    public native int filter(String filePath, Object surface, String filterType, boolean playAudio);
    public native void again();
    public native void release();
    public AudioTrack createAudioTrack(int sampleRate, int channels) {
        int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
        int channelConfig;
        if (channels == 1) {
            channelConfig = AudioFormat.CHANNEL_OUT_MONO;
        } else {
            // two or more channels: fall back to stereo
            channelConfig = AudioFormat.CHANNEL_OUT_STEREO;
        }
        int bufferSizeInBytes = AudioTrack.getMinBufferSize(sampleRate, channelConfig, audioFormat);
        return new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate, channelConfig, audioFormat,
                bufferSizeInBytes, AudioTrack.MODE_STREAM);
    }
}
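For reference, a standalone sketch (hypothetical, not part of this commit) of the calling pattern the native layer follows through JNI: obtain a track from createAudioTrack, start it, then stream PCM chunks through write():

    import android.media.AudioTrack;

    public class AudioTrackDemo {
        // Hypothetical driver mirroring init_audio/play_audio on the native side.
        public static void playPcm(VideoPlayer player, byte[][] pcmChunks, int sampleRate, int channels) {
            AudioTrack track = player.createAudioTrack(sampleRate, channels);
            track.play();
            for (byte[] chunk : pcmChunks) {
                // write() blocks in MODE_STREAM, which also paces playback
                track.write(chunk, 0, chunk.length);
            }
            track.stop();
            track.release();
        }
    }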

@ -39,7 +39,6 @@ public class FilterActivity extends AppCompatActivity implements SurfaceHolder.C
"drawgrid=w=iw/3:h=ih/3:t=2:c=white@0.5",
"colorbalance=bs=0.3",
"drawbox=x=100:y=100:w=100:h=100:color=red@0.5'",
"vignette='PI/4+random(1)*PI/50':eval=frame",
"vflip",
"unsharp"
};
@ -52,11 +51,12 @@ public class FilterActivity extends AppCompatActivity implements SurfaceHolder.C
"九宫格",
"均衡",
"矩形",
"闪烁",//左上角闪烁
"翻转",//vflip上下翻转,hflip是左右翻转
"锐化"
};
private HorizontalAdapter horizontalAdapter;
// whether to play audio
private boolean playAudio = true;
@Override
protected void onCreate(Bundle savedInstanceState) {
@ -100,7 +100,7 @@ public class FilterActivity extends AppCompatActivity implements SurfaceHolder.C
videoPlayer.again();
}
isPlaying = true;
videoPlayer.filter(VIDEO_PATH, surfaceHolder.getSurface(), filters[mPosition], playAudio);
}
}).start();
}

@ -35,7 +35,7 @@ public class LiveActivity extends AppCompatActivity implements View.OnClickListe
private final static String TAG = LiveActivity.class.getSimpleName();
private final static int CODE_CAMERA_RECORD = 0x0001;
private final static String[] permissions = new String[]{Manifest.permission.CAMERA, Manifest.permission.RECORD_AUDIO};
private final static String LIVE_URL = "rtmp://192.168.8.115/live/stream";
private final static String LIVE_URL = "rtmp://192.168.1.102/live/stream";
private final static int MSG_ERROR = 100;
private SurfaceHolder surfaceHolder;
private LivePusher livePusher;

@ -233,6 +233,7 @@ public class FFmpegUtil {
* Stitch multiple video pictures into one frame
* @param input1 input file 1
* @param input2 input file 2
* @param videoLayout the picture layout
* @param targetFile the stitched output file
*
* @return the command line for picture stitching
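For context, a hedged sketch of one command such a method can produce (hstack is one possible horizontal layout; the actual FFmpegUtil implementation is not shown in this diff):

    public class MultiVideoDemo {
        // Hypothetical sketch: stitch two inputs side by side with FFmpeg's hstack filter.
        // The real FFmpegUtil method may build its command differently.
        public static String[] buildHstackCommand(String input1, String input2, String targetFile) {
            String cmd = "ffmpeg -i " + input1 + " -i " + input2
                    + " -filter_complex hstack " + targetFile;
            return cmd.split(" ");
        }
    }

The resulting array can be handed to FFmpegCmd.execute() from this commit.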
