Fix transformation issues in preview and final video result

pull/360/head
Mattia Iavarone 6 years ago
parent af11e24653
commit 926b03cf35
  1. cameraview/src/main/gles/com/otaliastudios/cameraview/VideoCoreEncoder.java (9 lines changed)
  2. cameraview/src/main/gles/com/otaliastudios/cameraview/VideoTextureEncoder.java (35 lines changed)
  3. cameraview/src/main/java/com/otaliastudios/cameraview/Camera1.java (17 lines changed)
  4. cameraview/src/main/java/com/otaliastudios/cameraview/MediaCodecVideoRecorder.java (14 lines changed)
  5. cameraview/src/main/views/com/otaliastudios/cameraview/GLCameraPreview.java (12 lines changed)
  6. demo/src/main/java/com/otaliastudios/cameraview/demo/Control.java (1 line changed)

@@ -45,10 +45,7 @@ import java.nio.ByteBuffer;
@RequiresApi(api = Build.VERSION_CODES.JELLY_BEAN_MR2)
class VideoCoreEncoder {
// TODO: these ought to be configurable as well
private static final String MIME_TYPE = "video/avc"; // H.264 Advanced Video Coding
private static final int FRAME_RATE = 30; // 30fps
private static final int IFRAME_INTERVAL = 5; // 5 seconds between I-frames
private Surface mInputSurface;
private MediaMuxer mMuxer;
@@ -61,7 +58,7 @@ class VideoCoreEncoder {
/**
* Configures encoder and muxer state, and prepares the input Surface.
*/
public VideoCoreEncoder(int width, int height, int bitRate, File outputFile)
public VideoCoreEncoder(int width, int height, int bitRate, int frameRate, File outputFile)
throws IOException {
mBufferInfo = new MediaCodec.BufferInfo();
@@ -72,8 +69,8 @@ class VideoCoreEncoder {
format.setInteger(MediaFormat.KEY_COLOR_FORMAT,
MediaCodecInfo.CodecCapabilities.COLOR_FormatSurface);
format.setInteger(MediaFormat.KEY_BIT_RATE, bitRate);
format.setInteger(MediaFormat.KEY_FRAME_RATE, FRAME_RATE);
format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, IFRAME_INTERVAL);
format.setInteger(MediaFormat.KEY_FRAME_RATE, frameRate);
format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, 5);
// Create a MediaCodec encoder, and configure it with our format. Get a Surface
// we can use for input and wrap it with a class that handles the EGL work.
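For reference, here is a minimal sketch of the MediaFormat setup once the frame rate arrives as a constructor argument instead of the removed FRAME_RATE constant. The helper class and its values are illustrative, not part of the commit; only the MediaFormat keys mirror what the diff above configures.

```java
import android.media.MediaCodecInfo;
import android.media.MediaFormat;

// Illustrative sketch only: the format now uses the caller-supplied frame rate,
// while the I-frame interval stays hardcoded at 5 seconds.
class VideoFormatSketch {
    static MediaFormat buildFormat(int width, int height, int bitRate, int frameRate) {
        MediaFormat format = MediaFormat.createVideoFormat("video/avc", width, height);
        format.setInteger(MediaFormat.KEY_COLOR_FORMAT,
                MediaCodecInfo.CodecCapabilities.COLOR_FormatSurface); // Surface input
        format.setInteger(MediaFormat.KEY_BIT_RATE, bitRate);
        format.setInteger(MediaFormat.KEY_FRAME_RATE, frameRate);      // was the FRAME_RATE constant
        format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, 5);        // one I-frame every 5 seconds
        return format;
    }
}
```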

@@ -17,6 +17,7 @@ package com.otaliastudios.cameraview;
import android.graphics.SurfaceTexture;
import android.opengl.EGLContext;
import android.opengl.Matrix;
import android.os.Build;
import android.os.Handler;
import android.os.Looper;
@@ -73,6 +74,8 @@ class VideoTextureEncoder implements Runnable {
private int mTextureId;
private int mFrameNum;
private VideoCoreEncoder mVideoEncoder;
private float mTransformationScaleX = 1F;
private float mTransformationScaleY = 1F;
// ----- accessed by multiple threads -----
private volatile EncoderHandler mHandler;
@@ -88,22 +91,29 @@ class VideoTextureEncoder implements Runnable {
* explicit synchronization (and don't need to worry about it getting tweaked out from
* under us).
* <p>
* TODO: make frame rate and iframe interval configurable?
*/
public static class Config {
static class Config {
final File mOutputFile;
final int mWidth;
final int mHeight;
final int mBitRate;
final int mFrameRate;
final float mScaleX;
final float mScaleY;
final EGLContext mEglContext;
public Config(File outputFile, int width, int height, int bitRate,
Config(File outputFile, int width, int height,
int bitRate, int frameRate,
float scaleX, float scaleY,
EGLContext sharedEglContext) {
mOutputFile = outputFile;
mWidth = width;
mHeight = height;
mBitRate = bitRate;
mFrameRate = frameRate;
mEglContext = sharedEglContext;
mScaleX = scaleX;
mScaleY = scaleY;
}
@Override
@@ -113,18 +123,18 @@ class VideoTextureEncoder implements Runnable {
}
}
private void prepareEncoder(EGLContext sharedContext, int width, int height, int bitRate,
File outputFile) {
private void prepareEncoder(Config config) {
try {
mVideoEncoder = new VideoCoreEncoder(width, height, bitRate, outputFile);
mVideoEncoder = new VideoCoreEncoder(config.mWidth, config.mHeight, config.mBitRate, config.mFrameRate, config.mOutputFile);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
mEglCore = new EglCore(sharedContext, EglCore.FLAG_RECORDABLE);
mEglCore = new EglCore(config.mEglContext, EglCore.FLAG_RECORDABLE);
mInputWindowSurface = new EglWindowSurface(mEglCore, mVideoEncoder.getInputSurface(), true);
mInputWindowSurface.makeCurrent();
mFullScreen = new EglViewport();
mTransformationScaleX = config.mScaleX;
mTransformationScaleY = config.mScaleY;
}
private void releaseEncoder() {
@@ -216,6 +226,10 @@ class VideoTextureEncoder implements Runnable {
float[] transform = new float[16]; // TODO - avoid alloc every frame. Not easy, need a pool
st.getTransformMatrix(transform);
float translX = (1F - mTransformationScaleX) / 2F;
float translY = (1F - mTransformationScaleY) / 2F;
Matrix.translateM(transform, 0, translX, translY, 0);
Matrix.scaleM(transform, 0, mTransformationScaleX, mTransformationScaleY, 1);
long timestamp = st.getTimestamp();
if (timestamp == 0) {
// Seeing this after device is toggled off/on with power button. The
@@ -291,10 +305,7 @@ class VideoTextureEncoder implements Runnable {
case MSG_START_RECORDING:
encoder.mFrameNum = 0;
Config config = (Config) obj;
encoder.prepareEncoder(config.mEglContext,
config.mWidth, config.mHeight,
config.mBitRate,
config.mOutputFile);
encoder.prepareEncoder(config);
break;
case MSG_STOP_RECORDING:
encoder.mVideoEncoder.drainEncoder(true);
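The per-frame transform added above is the core of the recording-side fix: scaling the SurfaceTexture matrix alone crops from a corner, so the matrix is first translated by half of the cropped fraction to keep the visible area centered, matching what GLCameraPreview does for the on-screen preview. A standalone sketch of that math (the helper class is hypothetical, not part of the commit):

```java
import android.opengl.Matrix;

// Hypothetical helper illustrating the centering math applied per frame.
final class CenterCropTransform {
    static void apply(float[] textureMatrix, float scaleX, float scaleY) {
        // Half of the fraction removed on each axis, so the crop stays centered.
        float translX = (1F - scaleX) / 2F;
        float translY = (1F - scaleY) / 2F;
        Matrix.translateM(textureMatrix, 0, translX, translY, 0);
        Matrix.scaleM(textureMatrix, 0, scaleX, scaleY, 1);
    }
}
```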

@@ -720,12 +720,25 @@ class Camera1 extends CameraController implements Camera.PreviewCallback, Camera
videoResult.codec = mVideoCodec;
videoResult.location = mLocation;
videoResult.rotation = offset(REF_SENSOR, REF_OUTPUT);
// What matters as size here is the preview size, which is passed to the GLCameraPreview
// surface texture, which is then passed to the encoder. The view size has no influence.
// CROPPING: We must make sure that the encoder applies our special transformation
// to the data so that it is cropped. In this case, we must CROP the preview size based
// on the view bounds, like below:
Size preview = getPreviewSize(REF_VIEW); // The preview stream size in REF_VIEW
Size view = mPreview.getOutputSurfaceSize(); // The view size in REF_VIEW
Rect crop = CropHelper.computeCrop(preview, AspectRatio.of(view.getWidth(), view.getHeight()));
Size cropSize = new Size(crop.width(), crop.height()); // The visible size in REF_VIEW
// Move the REF_VIEW size to REF_OUTPUT
videoResult.size = flip(REF_VIEW, REF_OUTPUT) ? cropSize.flip() : cropSize;
Size finalSize = flip(REF_VIEW, REF_OUTPUT) ? cropSize.flip() : cropSize; // Move the REF_VIEW size to REF_OUTPUT
// Without cropping (missing ATM), the actual size is the preview size with no crops.
// Passing a cropped size while the cropping is not implemented at the encoder surface level
// would cause distortions and crashes in the video encoder.
// Size finalSize2 = getPreviewSize(REF_OUTPUT);
videoResult.size = finalSize;
videoResult.audio = mAudio;
videoResult.maxSize = mVideoMaxSize;
videoResult.maxDuration = mVideoMaxDuration;
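The crop above reduces the preview size to the aspect ratio that is actually visible in the view. As a hedged sketch, and not the actual CropHelper.computeCrop implementation, the computation boils down to shrinking whichever preview dimension overflows the view's aspect ratio (names below are hypothetical):

```java
// Illustrative only: shrink the overflowing preview dimension, keep the other.
final class CropMathSketch {
    static int[] visibleSize(int previewW, int previewH, int viewW, int viewH) {
        float previewRatio = (float) previewW / previewH;
        float viewRatio = (float) viewW / viewH;
        if (previewRatio > viewRatio) {
            // Preview is wider than the view: width is cropped.
            return new int[] { Math.round(previewH * viewRatio), previewH };
        } else {
            // Preview is taller than the view: height is cropped.
            return new int[] { previewW, Math.round(previewW / viewRatio) };
        }
    }
}
```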

@@ -59,13 +59,21 @@ class MediaCodecVideoRecorder extends VideoRecorder implements GLCameraPreview.R
}
@Override
public void onRendererFrame(SurfaceTexture surfaceTexture) {
public void onRendererFrame(SurfaceTexture surfaceTexture, float scaleX, float scaleY) {
if (mCurrentState == STATE_NOT_RECORDING && mDesiredState == STATE_RECORDING) {
// Ensure width and height are divisible by 2, as most video encoders require even dimensions.
int width = mResult.size.getWidth();
int height = mResult.size.getHeight();
width = width % 2 == 0 ? width : width + 1;
height = height % 2 == 0 ? height : height + 1;
VideoTextureEncoder.Config configuration = new VideoTextureEncoder.Config(
mResult.file,
mResult.size.getWidth(),
mResult.size.getHeight(),
width,
height,
1000000,
30,
scaleX,
scaleY,
EGL14.eglGetCurrentContext()
);
mEncoder.startRecording(configuration);
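The width/height adjustment above exists because the cropped size can come out odd, and H.264 encoders generally expect even dimensions. A tiny sketch of the same rounding as a reusable helper (hypothetical, not in the commit); with it, the Config would be built from roundUpToEven(mResult.size.getWidth()) and roundUpToEven(mResult.size.getHeight()):

```java
// Hypothetical helper equivalent to the inline rounding above.
final class EvenSize {
    static int roundUpToEven(int value) {
        return (value % 2 == 0) ? value : value + 1;
    }
}
```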

@@ -172,12 +172,20 @@ class GLCameraPreview extends CameraPreview<GLSurfaceView, SurfaceTexture> imple
}
if (mRendererFrameCallback != null) {
mRendererFrameCallback.onRendererFrame(mInputSurfaceTexture);
mRendererFrameCallback.onRendererFrame(mInputSurfaceTexture, mScaleX, mScaleY);
}
// Draw the video frame.
mInputSurfaceTexture.getTransformMatrix(mTransformMatrix);
if (isCropping()) {
// If the view is 10x1000 (very tall), it will show only the left strip of the preview (not the center one).
// If the view is 1000x10 (very wide), it will show only the bottom strip of the preview (not the center one).
// We must use Matrix.translateM, and it must happen before the crop.
float translX = (1F - mScaleX) / 2F;
float translY = (1F - mScaleY) / 2F;
Matrix.translateM(mTransformMatrix, 0, translX, translY, 0);
// Crop. Works, but without translation, it is not centered.
Matrix.scaleM(mTransformMatrix, 0, mScaleX, mScaleY, 1);
}
mOutputViewport.drawFrame(mOutputTextureId, mTransformMatrix);
@@ -336,7 +344,7 @@ class GLCameraPreview extends CameraPreview<GLSurfaceView, SurfaceTexture> imple
void onRendererTextureCreated(int textureId);
// Renderer thread.
void onRendererFrame(SurfaceTexture surfaceTexture);
void onRendererFrame(SurfaceTexture surfaceTexture, float scaleX, float scaleY);
}
void setRendererFrameCallback(@Nullable RendererFrameCallback callback) {
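Since onRendererFrame now also reports the crop scales, existing RendererFrameCallback implementations have to adopt the new signature. A minimal sketch of a conforming implementation follows; the class is hypothetical and assumes same-package access to the package-private GLCameraPreview interface:

```java
import android.graphics.SurfaceTexture;

// Hypothetical callback implementation, for illustration only.
class FrameScaleLogger implements GLCameraPreview.RendererFrameCallback {
    @Override
    public void onRendererTextureCreated(int textureId) {
        // GL texture is ready; an encoder-based consumer would store the id here.
    }

    @Override
    public void onRendererFrame(SurfaceTexture surfaceTexture, float scaleX, float scaleY) {
        // scaleX/scaleY are the fractions of the frame that survive the view crop (1F = no crop).
    }
}
```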

@@ -63,6 +63,7 @@ public enum Control {
int boundary = this == WIDTH ? root.getWidth() : root.getHeight();
if (boundary == 0) boundary = 1000;
int step = boundary / 10;
// list.add(this == WIDTH ? 12 : 16);
list.add(ViewGroup.LayoutParams.WRAP_CONTENT);
list.add(ViewGroup.LayoutParams.MATCH_PARENT);
for (int i = step; i < boundary; i += step) {
