Fix bugs, better logs and comments

pull/506/head
Mattia Iavarone 6 years ago
parent da6a0a6299
commit 10b7c3f2a3
  1. cameraview/src/main/java/com/otaliastudios/cameraview/preview/GlCameraPreview.java (19 lines changed)
  2. cameraview/src/main/java/com/otaliastudios/cameraview/video/FullVideoRecorder.java (3 lines changed)
  3. cameraview/src/main/java/com/otaliastudios/cameraview/video/encoding/AudioMediaEncoder.java (42 lines changed)
  4. cameraview/src/main/java/com/otaliastudios/cameraview/video/encoding/MediaEncoder.java (1 line changed)
  5. cameraview/src/main/java/com/otaliastudios/cameraview/video/encoding/TextureMediaEncoder.java (33 lines changed)
  6. cameraview/src/main/java/com/otaliastudios/cameraview/video/encoding/VideoMediaEncoder.java (8 lines changed)

cameraview/src/main/java/com/otaliastudios/cameraview/preview/GlCameraPreview.java

@@ -63,7 +63,7 @@ public class GlCameraPreview extends CameraPreview<GLSurfaceView, SurfaceTexture>
     private int mOutputTextureId = 0;
     private SurfaceTexture mInputSurfaceTexture;
     private EglViewport mOutputViewport;
-    private Set<RendererFrameCallback> mRendererFrameCallbacks = Collections.synchronizedSet(new HashSet<RendererFrameCallback>());
+    private final Set<RendererFrameCallback> mRendererFrameCallbacks = Collections.synchronizedSet(new HashSet<RendererFrameCallback>());
     @VisibleForTesting float mCropScaleX = 1F;
     @VisibleForTesting float mCropScaleY = 1F;
     private View mRootView;
@@ -144,8 +144,11 @@ public class GlCameraPreview extends CameraPreview<GLSurfaceView, SurfaceTexture>
             getView().queueEvent(new Runnable() {
                 @Override
                 public void run() {
-                    for (RendererFrameCallback callback : mRendererFrameCallbacks) {
-                        callback.onRendererTextureCreated(mOutputTextureId);
+                    // Need to synchronize when iterating the Collections.synchronizedSet
+                    synchronized (mRendererFrameCallbacks) {
+                        for (RendererFrameCallback callback : mRendererFrameCallbacks) {
+                            callback.onRendererTextureCreated(mOutputTextureId);
+                        }
                     }
                 }
             });
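Note: Collections.synchronizedSet() only makes individual calls such as add() and remove() thread-safe; iteration is a compound operation and must hold the wrapper's own monitor, which is exactly what the new synchronized blocks do. A standalone sketch of the pattern (class and callback types here are illustrative, not taken from the library):

    import java.util.Collections;
    import java.util.HashSet;
    import java.util.Set;

    class SynchronizedSetIteration {
        // The wrapper guards single operations, but NOT compound ones like iteration.
        private final Set<Runnable> callbacks = Collections.synchronizedSet(new HashSet<Runnable>());

        void register(Runnable callback) {
            callbacks.add(callback); // safe without extra locking
        }

        void dispatch() {
            // Without this lock, a concurrent add/remove can throw
            // ConcurrentModificationException in the middle of the loop.
            synchronized (callbacks) {
                for (Runnable callback : callbacks) {
                    callback.run();
                }
            }
        }
    }

This is the documented contract of the java.util.Collections synchronized wrappers, so the lock must be the wrapper object itself, not the backing HashSet.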
@@ -202,11 +205,12 @@ public class GlCameraPreview extends CameraPreview<GLSurfaceView, SurfaceTexture>
                 Matrix.translateM(mTransformMatrix, 0, translX, translY, 0);
                 Matrix.scaleM(mTransformMatrix, 0, mCropScaleX, mCropScaleY, 1);
             }
+            // Future note: passing scale to the viewport?
+            // They are scaleX and scaleY, but flipped based on mInputFlipped.
             mOutputViewport.drawFrame(mOutputTextureId, mTransformMatrix);
-            for (RendererFrameCallback callback : mRendererFrameCallbacks) {
-                callback.onRendererFrame(mInputSurfaceTexture, mCropScaleX, mCropScaleY);
+            synchronized (mRendererFrameCallbacks) {
+                // Need to synchronize when iterating the Collections.synchronizedSet
+                for (RendererFrameCallback callback : mRendererFrameCallbacks) {
+                    callback.onRendererFrame(mInputSurfaceTexture, mCropScaleX, mCropScaleY);
+                }
             }
         }
     }
@@ -299,6 +303,7 @@ public class GlCameraPreview extends CameraPreview<GLSurfaceView, SurfaceTexture>
      * Creates the renderer for this GL surface.
      * @return the renderer for this GL surface
      */
+    @SuppressWarnings("WeakerAccess")
     @NonNull
     protected Renderer instantiateRenderer() {
         return new Renderer();

cameraview/src/main/java/com/otaliastudios/cameraview/video/FullVideoRecorder.java

@@ -37,13 +37,12 @@ public abstract class FullVideoRecorder extends VideoRecorder {
         super(listener);
     }

-    @SuppressWarnings({"WeakerAccess", "UnusedReturnValue", "BooleanMethodIsAlwaysInverted"})
+    @SuppressWarnings({"WeakerAccess", "UnusedReturnValue"})
     protected boolean prepareMediaRecorder(@NonNull VideoResult.Stub stub) {
         if (mMediaRecorderPrepared) return true;
         return onPrepareMediaRecorder(stub, new MediaRecorder());
     }

-    @SuppressWarnings("WeakerAccess")
     protected boolean onPrepareMediaRecorder(@NonNull VideoResult.Stub stub, @NonNull MediaRecorder mediaRecorder) {
         mMediaRecorder = mediaRecorder;
         Size size = stub.rotation % 180 != 0 ? stub.size.flip() : stub.size;

cameraview/src/main/java/com/otaliastudios/cameraview/video/encoding/AudioMediaEncoder.java

@@ -20,6 +20,8 @@ import androidx.annotation.RequiresApi;
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.Map;
 import java.util.concurrent.LinkedBlockingQueue;

 /**
@@ -79,6 +81,10 @@ public class AudioMediaEncoder extends MediaEncoder {
         super("AudioEncoder");
         mConfig = config.copy();
         mTimestamp = new AudioTimestamp();
+        // These two were in onPrepare() but it's better to do warm-up here
+        // since thread and looper creation is expensive.
+        mEncoder = new AudioEncodingHandler();
+        mRecorder = new AudioRecordingThread();
     }

     @EncoderThread
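Note: the warm-up works because constructing a Handler on a background Looper forces that thread and its message queue to exist before the first audio buffer arrives, instead of on the hot path. A rough sketch of the idea with plain android.os classes; the library's WorkerHandler cache is not reproduced here and the names below are illustrative:

    import android.os.Handler;
    import android.os.HandlerThread;

    class EncoderWarmUp {
        private final Handler handler;

        EncoderWarmUp() {
            // Thread and looper creation is comparatively expensive, so do it
            // eagerly at construction time rather than when the first frame arrives.
            HandlerThread thread = new HandlerThread("AudioEncodingHandler");
            thread.start();
            handler = new Handler(thread.getLooper());
        }

        void post(Runnable task) {
            handler.post(task); // by now the looper is already spinning
        }
    }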
@@ -97,8 +103,6 @@ public class AudioMediaEncoder extends MediaEncoder {
         mMediaCodec.configure(audioFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
         mMediaCodec.start();
         mByteBufferPool = new ByteBufferPool(FRAME_SIZE, BUFFER_POOL_MAX_SIZE);
-        mEncoder = new AudioEncodingHandler();
-        mRecorder = new AudioRecordingThread();
     }

     @EncoderThread
@@ -200,12 +204,12 @@ public class AudioMediaEncoder extends MediaEncoder {
         }

         /**
-         * Sleeps for a frame duration, to skip it. This can be used to slow down
+         * Sleeps for some frames duration, to skip them. This can be used to slow down
          * the recording operation to balance it with encoding.
          */
         private void sleep() {
             try {
-                Thread.sleep(AudioTimestamp.bytesToUs(FRAME_SIZE, BYTE_RATE) / 1000);
+                Thread.sleep(AudioTimestamp.bytesToUs(FRAME_SIZE * 6, BYTE_RATE) / 1000);
             } catch (InterruptedException ignore) {}
         }
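Note: the sleep duration follows directly from the PCM byte rate: a buffer of N bytes covers N / byteRate seconds of audio, so sleeping for six frames backs the recorder off roughly six times longer than before. A small sketch of the conversion with illustrative constants; the library's actual SAMPLING_FREQUENCY, BYTE_RATE and FRAME_SIZE values may differ:

    class AudioSleepMath {
        // Illustrative values: 44.1 kHz, 16-bit mono PCM => 2 bytes per sample.
        static final int SAMPLING_FREQUENCY = 44100;
        static final int BYTE_RATE = SAMPLING_FREQUENCY * 2; // bytes per second
        static final int FRAME_SIZE = 1024;                  // bytes per read (assumed)

        // Same shape as AudioTimestamp.bytesToUs presumably has:
        // bytes divided by bytes-per-second, expressed in microseconds.
        static long bytesToUs(long bytes, int byteRate) {
            return (bytes * 1_000_000L) / byteRate;
        }

        public static void main(String[] args) {
            long oneFrameUs = bytesToUs(FRAME_SIZE, BYTE_RATE);      // ~11_609 us
            long sixFramesUs = bytesToUs(FRAME_SIZE * 6, BYTE_RATE); // ~69_659 us
            // With these constants the recorder now sleeps roughly 70 ms per call
            // instead of roughly 12 ms.
            System.out.println(oneFrameUs / 1000 + " ms vs " + sixFramesUs / 1000 + " ms");
        }
    }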
@@ -247,7 +251,15 @@ public class AudioMediaEncoder extends MediaEncoder {
             super(WorkerHandler.get("AudioEncodingHandler").getLooper());
         }

+        // Just to debug performance.
+        private int mSendCount = 0;
+        private int mExecuteCount = 0;
+        private long mAvgSendDelay = 0;
+        private long mAvgExecuteDelay = 0;
+        private Map<Long, Long> mSendStartMap = new HashMap<>();
+
         private void sendInputBuffer(ByteBuffer buffer, long presentationTimeUs, boolean endOfStream) {
+            mSendStartMap.put(presentationTimeUs, System.nanoTime() / 1000000);
             sendMessage(obtainMessage(
                     endOfStream ? 1 : 0,
                     (int) (presentationTimeUs >> 32),
@@ -258,9 +270,19 @@ public class AudioMediaEncoder extends MediaEncoder {
         @Override
         public void handleMessage(Message msg) {
             super.handleMessage(msg);
-            boolean endOfStream = msg.what == 1;
             long timestamp = (((long) msg.arg1) << 32) | (((long) msg.arg2) & 0xffffffffL);
+            boolean endOfStream = msg.what == 1;
             LOG.i("encoding thread - got buffer. timestamp:", timestamp, "eos:", endOfStream);
+
+            // Performance logging
+            long sendEnd = System.nanoTime() / 1000000;
+            //noinspection ConstantConditions
+            long sendStart = mSendStartMap.remove(timestamp);
+            mAvgSendDelay = ((mAvgSendDelay * mSendCount) + (sendEnd - sendStart)) / (++mSendCount);
+            LOG.v("send delay millis:", sendEnd - sendStart, "average:", mAvgSendDelay);
+            long executeStart = System.nanoTime() / 1000000;
+
+            // Actual work
             ByteBuffer buffer = (ByteBuffer) msg.obj;
             int readBytes = buffer.remaining();
             InputBuffer inputBuffer = mInputBufferPool.get();
@@ -282,6 +304,10 @@ public class AudioMediaEncoder extends MediaEncoder {
                     break; // Will try later.
                 }
             }
+            long executeEnd = System.nanoTime() / 1000000;
+            mAvgExecuteDelay = ((mAvgExecuteDelay * mExecuteCount) + (executeEnd - executeStart)) / (++mExecuteCount);
+            LOG.v("execute delay millis:", executeEnd - executeStart, "average:", mAvgExecuteDelay);
         }

         private void performPendingOp(InputBuffer buffer) {
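Note: two details in the handler code above are easy to misread. The 64-bit presentation time travels through Message.arg1/arg2 as two 32-bit halves and is rebuilt with a mask that prevents sign extension, and the delay statistics are incremental running averages updated in place. A standalone sketch of both in plain Java; the helper names are illustrative:

    class HandlerMathSketch {

        // Pack a 64-bit timestamp into two ints, as done before obtainMessage()...
        static int high(long timestampUs) {
            return (int) (timestampUs >> 32);
        }
        static int low(long timestampUs) {
            return (int) timestampUs;
        }
        // ...and rebuild it on the receiving side. Masking the low half with
        // 0xffffffffL avoids sign extension when its top bit is set.
        static long unpack(int high, int low) {
            return (((long) high) << 32) | (((long) low) & 0xffffffffL);
        }

        // Incremental running average, same shape as mAvgSendDelay / mAvgExecuteDelay:
        // newAvg = (oldAvg * n + sample) / (n + 1), incrementing n in place.
        private long avg = 0;
        private int count = 0;
        void addSample(long sample) {
            avg = ((avg * count) + sample) / (++count);
        }

        public static void main(String[] args) {
            long ts = 1_234_567_890_123L;
            System.out.println(unpack(high(ts), low(ts)) == ts); // true
        }
    }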
@@ -298,7 +324,11 @@ public class AudioMediaEncoder extends MediaEncoder {
             // use an even smaller BUFFER_POOL_MAX_SIZE without losing audio frames. But this way
             // we can accumulate delay on this new thread without noticing (no pool getting empty).
             drainOutput(eos);
-            if (eos) WorkerHandler.get("AudioEncodingHandler").getThread().interrupt();
+            if (eos) {
+                // Not sure we want this: WorkerHandler.get("AudioEncodingHandler").getThread().interrupt();
+                LOG.e("EXECUTE DELAY MILLIS:", mAvgExecuteDelay);
+                LOG.e("SEND DELAY MILLIS:", mAvgSendDelay);
+            }
         }
     }
 }

cameraview/src/main/java/com/otaliastudios/cameraview/video/encoding/MediaEncoder.java

@@ -399,6 +399,7 @@ abstract class MediaEncoder {
             // detect the mMaxLengthReached and stop recording.
             if (mStartPresentationTimeUs == Long.MIN_VALUE) {
                 mStartPresentationTimeUs = mBufferInfo.presentationTimeUs;
+                LOG.w(mName, "DRAINING - Got the first presentation time:", mStartPresentationTimeUs);
             }
             mLastPresentationTimeUs = mBufferInfo.presentationTimeUs;
             // Pass presentation times as offsets with respect to the mStartPresentationTimeUs.
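Note: the comment above refers to rebasing every presentation time against the first one seen while draining, so that downstream timestamps start near zero. A minimal sketch of that idea; the field and method names are illustrative and not the library's:

    class PresentationTimeRebase {
        private long startUs = Long.MIN_VALUE;

        // Rebase an encoder timestamp so the first drained buffer maps to 0.
        long toOffsetUs(long presentationTimeUs) {
            if (startUs == Long.MIN_VALUE) startUs = presentationTimeUs;
            return presentationTimeUs - startUs;
        }
    }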

cameraview/src/main/java/com/otaliastudios/cameraview/video/encoding/TextureMediaEncoder.java

@@ -103,18 +103,27 @@ public class TextureMediaEncoder extends VideoMediaEncoder<TextureMediaEncoder.Config>
     void onEvent(@NonNull String event, @Nullable Object data) {
         if (!event.equals(FRAME_EVENT)) return;
         TextureFrame frame = (TextureFrame) data;
-        if (frame == null) return; // Should not happen
-        if (frame.timestamp == 0 || mFrameNum < 0) {
-            // The first condition comes from grafika.
-            // The second condition means we were asked to stop.
+        if (frame == null) {
+            throw new IllegalArgumentException("Got null frame for FRAME_EVENT.");
+        }
+        if (frame.timestamp == 0) { // grafika
+            mFramePool.recycle(frame);
+            return;
+        }
+        if (mFrameNumber < 0) { // We were asked to stop.
             mFramePool.recycle(frame);
             return;
         }
-        mFrameNum++;
-        int thisFrameNum = mFrameNum;
-        LOG.v("onEvent", "frameNum:", thisFrameNum, "realFrameNum:", mFrameNum, "timestamp:", frame.timestamp);
-        // We must scale this matrix like GlCameraPreview does, because it might have some cropping.
+        mFrameNumber++;
+        // First, drain any previous data.
+        LOG.i("onEvent", "frameNumber:", mFrameNumber, "timestamp:", frame.timestamp, "- draining.");
+        drainOutput(false);
+        // Then draw on the surface.
+        LOG.i("onEvent", "frameNumber:", mFrameNumber, "timestamp:", frame.timestamp, "- drawing.");
+        // 1. We must scale this matrix like GlCameraPreview does, because it might have some cropping.
         // Scaling takes place with respect to the (0, 0, 0) point, so we must apply a Translation to compensate.
         float[] transform = frame.transform;
         float[] overlayTransform = frame.overlayTransform;
@@ -125,7 +134,7 @@ public class TextureMediaEncoder extends VideoMediaEncoder<TextureMediaEncoder.Config>
         Matrix.translateM(transform, 0, scaleTranslX, scaleTranslY, 0);
         Matrix.scaleM(transform, 0, scaleX, scaleY, 1);

-        // We also must rotate this matrix. In GlCameraPreview it is not needed because it is a live
+        // 2. We also must rotate this matrix. In GlCameraPreview it is not needed because it is a live
         // stream, but the output video, must be correctly rotated based on the device rotation at the moment.
         // Rotation also takes place with respect to the origin (the Z axis), so we must
         // translate to origin, rotate, then back to where we were.
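Note: Matrix.scaleM() scales about the origin of texture coordinates, so cropping without the preceding translation would anchor the visible region to a corner instead of keeping it centered. A sketch of the centered-crop idea; the (1 - scale) / 2 translation is an assumption about how scaleTranslX and scaleTranslY are derived, since that computation sits above this hunk and is not shown:

    import android.opengl.Matrix;

    class CenteredCropTransform {
        // Applies a centered crop to a 4x4 texture transform, in place.
        // Illustrative helper, not copied from the library.
        static void applyCenteredCrop(float[] transform, float scaleX, float scaleY) {
            float scaleTranslX = (1F - scaleX) / 2F; // assumed derivation
            float scaleTranslY = (1F - scaleY) / 2F;
            Matrix.translateM(transform, 0, scaleTranslX, scaleTranslY, 0);
            Matrix.scaleM(transform, 0, scaleX, scaleY, 1);
        }
    }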
@@ -133,21 +142,17 @@ public class TextureMediaEncoder extends VideoMediaEncoder<TextureMediaEncoder.Config>
         Matrix.rotateM(transform, 0, mTransformRotation, 0, 0, 1);
         Matrix.translateM(transform, 0, -0.5F, -0.5F, 0);

+        // 3. Do the same for overlays with their own rotation.
         boolean hasOverlay = mConfig.overlayTextureId != NO_TEXTURE;
         if (hasOverlay) {
             Matrix.translateM(overlayTransform, 0, 0.5F, 0.5F, 0);
             Matrix.rotateM(overlayTransform, 0, mConfig.overlayRotation, 0, 0, 1);
             Matrix.translateM(overlayTransform, 0, -0.5F, -0.5F, 0);
         }

-        LOG.v("onEvent", "frameNum:", thisFrameNum, "realFrameNum:", mFrameNum, "calling drainOutput.");
-        drainOutput(false);
-        LOG.v("onEvent", "frameNum:", thisFrameNum, "realFrameNum:", mFrameNum, "calling drawFrame.");
         mViewport.drawFrame(mConfig.textureId, transform);
         if (hasOverlay) {
             mViewport.drawFrame(mConfig.overlayTextureId, overlayTransform);
         }
         mWindow.setPresentationTime(frame.timestamp);
         mWindow.swapBuffers();
         mFramePool.recycle(frame);
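Note: the translate/rotate/translate sequence exists because Matrix.rotateM() rotates around the Z axis through the origin, while texture coordinates span [0, 1] and have their natural pivot at (0.5, 0.5). The same pattern as a standalone helper; the helper name is illustrative, the library inlines the three calls:

    import android.opengl.Matrix;

    class RotateAboutCenter {
        // Rotates a 4x4 texture transform by "degrees" around the Z axis,
        // pivoting on the texture center (0.5, 0.5) instead of the origin:
        // translate to origin, rotate, then back to where we were.
        static void rotateAroundCenter(float[] transform, float degrees) {
            Matrix.translateM(transform, 0, 0.5F, 0.5F, 0);
            Matrix.rotateM(transform, 0, degrees, 0, 0, 1);
            Matrix.translateM(transform, 0, -0.5F, -0.5F, 0);
        }
    }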

cameraview/src/main/java/com/otaliastudios/cameraview/video/encoding/VideoMediaEncoder.java

@@ -41,7 +41,7 @@ abstract class VideoMediaEncoder<C extends VideoMediaEncoder.Config> extends MediaEncoder {
     protected Surface mSurface;

     @SuppressWarnings("WeakerAccess")
-    protected int mFrameNum = -1;
+    protected int mFrameNumber = -1;

     protected static class Config {
         public int width;
@@ -95,14 +95,14 @@ abstract class VideoMediaEncoder<C extends VideoMediaEncoder.Config> extends MediaEncoder {
     @Override
     void onStart() {
         // Nothing to do here. Waiting for the first frame.
-        mFrameNum = 0;
+        mFrameNumber = 0;
     }

     @EncoderThread
     @Override
     void onStop() {
-        LOG.i("onStop", "setting mFrameNum to 1 and signaling the end of input stream.");
-        mFrameNum = -1;
+        LOG.i("onStop", "setting mFrameNumber to 1 and signaling the end of input stream.");
+        mFrameNumber = -1;
         // Signals the end of input stream. This is a Video only API, as in the normal case,
         // we use input buffers to signal the end. In the video case, we don't have input buffers
         // because we use an input surface instead.
