Improve snapshot recording (#374)

* Timestamp changes

* Revisit Audio encoding, create object pools

* Use a Pool for float[] arrays

* Remove unused audioBitRate from audio encoder

* Fix demo app video duration

* Correctly release pools

* Restore output bitrate

* Release textureFrame pool
Mattia Iavarone committed by GitHub
parent aec17d3e49
commit 7411614433
Changed files (changed line count in parentheses):
  1. cameraview/src/main/gles/com/otaliastudios/cameraview/AudioMediaEncoder.java (283)
  2. cameraview/src/main/gles/com/otaliastudios/cameraview/ByteBufferPool.java (15)
  3. cameraview/src/main/gles/com/otaliastudios/cameraview/EglBaseSurface.java (1)
  4. cameraview/src/main/gles/com/otaliastudios/cameraview/EglCore.java (1)
  5. cameraview/src/main/gles/com/otaliastudios/cameraview/InputBuffer.java (12)
  6. cameraview/src/main/gles/com/otaliastudios/cameraview/InputBufferPool.java (15)
  7. cameraview/src/main/gles/com/otaliastudios/cameraview/MediaCodecBuffers.java (50)
  8. cameraview/src/main/gles/com/otaliastudios/cameraview/MediaEncoder.java (311)
  9. cameraview/src/main/gles/com/otaliastudios/cameraview/MediaEncoderEngine.java (233)
  10. cameraview/src/main/gles/com/otaliastudios/cameraview/OutputBuffer.java (11)
  11. cameraview/src/main/gles/com/otaliastudios/cameraview/OutputBufferPool.java (18)
  12. cameraview/src/main/gles/com/otaliastudios/cameraview/Pool.java (89)
  13. cameraview/src/main/gles/com/otaliastudios/cameraview/TextureMediaEncoder.java (142)
  14. cameraview/src/main/gles/com/otaliastudios/cameraview/VideoMediaEncoder.java (19)
  15. cameraview/src/main/java/com/otaliastudios/cameraview/SnapshotVideoRecorder.java (25)
  16. cameraview/src/main/java/com/otaliastudios/cameraview/VideoRecorder.java (1)
  17. cameraview/src/main/utils/com/otaliastudios/cameraview/WorkerHandler.java (10)
  18. demo/src/main/java/com/otaliastudios/cameraview/demo/CameraActivity.java (1)
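
The thread running through this diff is pooling: ByteBufferPool recycles the audio read buffers, InputBufferPool and OutputBufferPool recycle the holder objects passed around MediaCodec, and TextureMediaEncoder pools its frames so each float[16] transform array is reused. A minimal sketch of the pattern, written against the Pool/Factory API introduced below; the FloatArrayPool class is hypothetical, for illustration only:

    // Hypothetical subclass, mirroring ByteBufferPool: pools float[16] transform arrays.
    class FloatArrayPool extends Pool<float[]> {
        FloatArrayPool(int maxPoolSize) {
            super(maxPoolSize, new Factory<float[]>() {
                @Override
                public float[] create() {
                    return new float[16];
                }
            });
        }

        // Acquire / use / recycle. get() returns null once maxPoolSize items are active,
        // so callers must handle exhaustion (the audio recorder below skips the frame).
        static void exampleUsage() {
            FloatArrayPool pool = new FloatArrayPool(100);
            float[] transform = pool.get();
            if (transform != null) {
                // ... fill the matrix and hand it to the consumer ...
                pool.recycle(transform); // exactly once, or recycle() throws IllegalStateException
            }
        }
    }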

@@ -1,34 +1,64 @@
package com.otaliastudios.cameraview;
import android.annotation.SuppressLint;
import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.AudioTimestamp;
import android.media.MediaCodec;
import android.media.MediaCodecInfo;
import android.media.MediaFormat;
import android.media.MediaRecorder;
import android.os.Build;
import android.os.Handler;
import android.os.Message;
import android.util.Log;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.annotation.RequiresApi;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.concurrent.LinkedBlockingQueue;
// TODO create onVideoRecordingStart/onVideoRecordingEnd callbacks
@RequiresApi(api = Build.VERSION_CODES.JELLY_BEAN_MR2)
class AudioMediaEncoder extends MediaEncoder {
private static final String TAG = AudioMediaEncoder.class.getSimpleName();
private static final CameraLogger LOG = CameraLogger.create(TAG);
private static final String MIME_TYPE = "audio/mp4a-latm";
private static final int SAMPLE_RATE = 44100; // 44.1[KHz] is only setting guaranteed to be available on all devices.
public static final int SAMPLES_PER_FRAME = 1024; // AAC, bytes/frame/channel
public static final int FRAMES_PER_BUFFER = 25; // AAC, frame/buffer/sec
private static final int ENCODING = AudioFormat.ENCODING_PCM_16BIT; // Determines the SAMPLE_SIZE
private static final int CHANNELS = AudioFormat.CHANNEL_IN_MONO; // AudioFormat.CHANNEL_IN_STEREO;
// The 44.1KHz frequency is the only setting guaranteed to be available on all devices.
private static final int SAMPLING_FREQUENCY = 44100; // samples/sec
private static final int CHANNELS_COUNT = 1; // 2;
private static final int SAMPLE_SIZE = 2; // byte/sample/channel
private static final int BYTE_RATE_PER_CHANNEL = SAMPLING_FREQUENCY * SAMPLE_SIZE; // byte/sec/channel
private static final int BYTE_RATE = BYTE_RATE_PER_CHANNEL * CHANNELS_COUNT; // byte/sec
static final int BIT_RATE = BYTE_RATE * 8; // bit/sec
// We call FRAME here the chunk of data that we want to read at each loop cycle
private static final int FRAME_SIZE_PER_CHANNEL = 1024; // bytes/frame/channel [AAC constant]
private static final int FRAME_SIZE = FRAME_SIZE_PER_CHANNEL * CHANNELS_COUNT; // bytes/frame
// We allocate buffers of 1KB each, which is not so much. I would say that allocating
// at most 200 of them is a reasonable value. With the current setup, in device tests,
// we manage to use 50 at most.
private static final int BUFFER_POOL_MAX_SIZE = 200;
private final Object mLock = new Object();
private boolean mRequestStop = false;
private AudioEncodingHandler mEncoder;
private AudioRecordingThread mRecorder;
private ByteBufferPool mByteBufferPool;
private Config mConfig;
static class Config {
int bitRate;
Config(int bitRate) {
this.bitRate = bitRate;
}
@@ -38,15 +68,20 @@ class AudioMediaEncoder extends MediaEncoder {
mConfig = config;
}
@NonNull
@Override
String getName() {
return "AudioEncoder";
}
@EncoderThread
@Override
void prepare(@NonNull MediaEncoderEngine.Controller controller, long maxLengthMillis) {
super.prepare(controller, maxLengthMillis);
final MediaFormat audioFormat = MediaFormat.createAudioFormat(MIME_TYPE, SAMPLE_RATE, 1);
void onPrepare(@NonNull MediaEncoderEngine.Controller controller, long maxLengthMillis) {
final MediaFormat audioFormat = MediaFormat.createAudioFormat(MIME_TYPE, SAMPLING_FREQUENCY, CHANNELS_COUNT);
audioFormat.setInteger(MediaFormat.KEY_AAC_PROFILE, MediaCodecInfo.CodecProfileLevel.AACObjectLC);
audioFormat.setInteger(MediaFormat.KEY_CHANNEL_MASK, AudioFormat.CHANNEL_IN_MONO);
audioFormat.setInteger(MediaFormat.KEY_CHANNEL_MASK, CHANNELS);
audioFormat.setInteger(MediaFormat.KEY_BIT_RATE, mConfig.bitRate);
audioFormat.setInteger(MediaFormat.KEY_CHANNEL_COUNT, 1);
audioFormat.setInteger(MediaFormat.KEY_CHANNEL_COUNT, CHANNELS_COUNT);
try {
mMediaCodec = MediaCodec.createEncoderByType(MIME_TYPE);
} catch (IOException e) {
@@ -54,86 +89,228 @@ class AudioMediaEncoder extends MediaEncoder {
}
mMediaCodec.configure(audioFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
mMediaCodec.start();
mByteBufferPool = new ByteBufferPool(FRAME_SIZE, BUFFER_POOL_MAX_SIZE);
mEncoder = new AudioEncodingHandler();
mRecorder = new AudioRecordingThread();
}
@EncoderThread
@Override
void start() {
void onStart() {
mRequestStop = false;
new AudioThread().start();
mRecorder.start();
}
@EncoderThread
@Override
void notify(@NonNull String event, @Nullable Object data) { }
void onEvent(@NonNull String event, @Nullable Object data) { }
@EncoderThread
@Override
void stop() {
void onStop() {
mRequestStop = true;
synchronized (mLock) {
try {
mLock.wait();
} catch (InterruptedException e) {
// do nothing
}
}
}
@Override
void release() {
super.release();
void onRelease() {
mRequestStop = false;
mEncoder = null;
mRecorder = null;
if (mByteBufferPool != null) {
mByteBufferPool.clear();
mByteBufferPool = null;
}
}
class AudioThread extends Thread {
@Override
int getEncodedBitRate() {
return mConfig.bitRate;
}
class AudioRecordingThread extends Thread {
private AudioRecord mAudioRecord;
private ByteBuffer mCurrentBuffer;
private int mReadBytes;
private long mLastTimeUs;
AudioThread() {
final int minBufferSize = AudioRecord.getMinBufferSize(
SAMPLE_RATE, AudioFormat.CHANNEL_IN_MONO,
AudioFormat.ENCODING_PCM_16BIT);
int bufferSize = SAMPLES_PER_FRAME * FRAMES_PER_BUFFER;
if (bufferSize < minBufferSize) {
bufferSize = ((minBufferSize / SAMPLES_PER_FRAME) + 1) * SAMPLES_PER_FRAME * 2;
AudioRecordingThread() {
final int minBufferSize = AudioRecord.getMinBufferSize(SAMPLING_FREQUENCY, CHANNELS, ENCODING);
int bufferSize = FRAME_SIZE * 25; // Make this bigger so we don't skip frames.
while (bufferSize < minBufferSize) {
bufferSize += FRAME_SIZE; // Unlikely I think.
}
mAudioRecord = new AudioRecord(MediaRecorder.AudioSource.CAMCORDER, SAMPLE_RATE,
AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, bufferSize);
mAudioRecord = new AudioRecord(MediaRecorder.AudioSource.CAMCORDER,
SAMPLING_FREQUENCY, CHANNELS, ENCODING, bufferSize);
setPriority(Thread.MAX_PRIORITY);
}
@Override
public void run() {
super.run();
mLastTimeUs = System.nanoTime() / 1000L;
mAudioRecord.startRecording();
final ByteBuffer buffer = ByteBuffer.allocateDirect(SAMPLES_PER_FRAME);
int readBytes;
while (!mRequestStop) {
buffer.clear();
readBytes = mAudioRecord.read(buffer, SAMPLES_PER_FRAME);
if (readBytes > 0) {
// set audio data to encoder
buffer.position(readBytes);
buffer.flip();
encode(buffer, readBytes, getPresentationTime());
drain(false);
}
read(false);
}
// This will signal the endOfStream.
LOG.w("RECORDER: Stop was requested. We're out of the loop. Will post an endOfStream.");
// Last input with 0 length. This will signal the endOfStream.
// Can't use drain(true); it is only available when writing to the codec InputSurface.
encode(null, 0, getPresentationTime());
drain(false);
read(true);
mAudioRecord.stop();
mAudioRecord.release();
mAudioRecord = null;
synchronized (mLock) {
mLock.notify();
}
private void read(boolean endOfStream) {
mCurrentBuffer = mByteBufferPool.get();
if (mCurrentBuffer == null) {
LOG.e("Skipping audio frame, encoding is too slow.");
// TODO should fix the next presentation time here. However this is
// extremely unlikely based on my tests. The mByteBufferPool should be big enough.
} else {
mCurrentBuffer.clear();
mReadBytes = mAudioRecord.read(mCurrentBuffer, FRAME_SIZE);
if (mReadBytes > 0) { // Good read: increase PTS.
increaseTime(mReadBytes);
mCurrentBuffer.limit(mReadBytes);
onBuffer(endOfStream);
} else if (mReadBytes == AudioRecord.ERROR_INVALID_OPERATION) {
LOG.e("Got AudioRecord.ERROR_INVALID_OPERATION");
} else if (mReadBytes == AudioRecord.ERROR_BAD_VALUE) {
LOG.e("Got AudioRecord.ERROR_BAD_VALUE");
}
}
}
/**
* New data at position buffer.position() of size buffer.remaining()
* has been written into this buffer. This method should pass the data
* to the consumer.
*/
private void onBuffer(boolean endOfStream) {
mEncoder.sendInputBuffer(mCurrentBuffer, mLastTimeUs, endOfStream);
}
private void increaseTime(int readBytes) {
increaseTime3(readBytes);
LOG.v("Read", readBytes, "bytes, increasing PTS to", mLastTimeUs);
}
/**
* This method simply assumes that we read everything without losing a single us.
* It will use System.nanoTime() just once, as the starting point.
* Of course we don't, as there are other things going on in this thread.
*/
private void increaseTime1(int readBytes) {
mLastTimeUs += (1000000L * readBytes) / BYTE_RATE;
}
/**
* Just for testing, this method will use Api 24 method to retrieve the timestamp.
* This way we let the platform choose instead of making assumptions.
*/
@RequiresApi(24)
private void increaseTime2(int readBytes) {
if (mApi24Timestamp == null) {
mApi24Timestamp = new AudioTimestamp();
}
mAudioRecord.getTimestamp(mApi24Timestamp, AudioTimestamp.TIMEBASE_MONOTONIC);
mLastTimeUs = mApi24Timestamp.nanoTime / 1000;
}
private AudioTimestamp mApi24Timestamp;
/**
* This method looks like an improvement over {@link #increaseTime1(int)} as it
* accounts for the current time as well. Adapted & improved from Kickflip.
*/
private void increaseTime3(int readBytes) {
long currentTime = System.nanoTime() / 1000;
long correctedTime;
long bufferDuration = (1000000 * readBytes) / BYTE_RATE;
long bufferTime = currentTime - bufferDuration; // delay of acquiring the audio buffer
if (mTotalReadBytes == 0) {
mStartTimeUs = bufferTime;
}
// Recompute time assuming that we are respecting the sampling frequency.
// However, if the correction is too big (> 2*bufferDuration), reset to this point.
correctedTime = mStartTimeUs + (1000000 * mTotalReadBytes) / BYTE_RATE;
if(bufferTime - correctedTime >= 2 * bufferDuration) {
mStartTimeUs = bufferTime;
mTotalReadBytes = 0;
correctedTime = mStartTimeUs;
}
mTotalReadBytes += readBytes;
mLastTimeUs = correctedTime;
}
private long mStartTimeUs;
private long mTotalReadBytes;
}
@Override
int getBitRate() {
return mConfig.bitRate;
/**
* This will be a super busy thread. It's important for it to be:
* - different from the recording thread: otherwise we would miss a lot of audio;
* - different from the 'encoder' thread: we want that one to stay reactive.
* For example, a stop() must become onStop() soon; it can't wait for all this draining.
*/
@SuppressLint("HandlerLeak")
class AudioEncodingHandler extends Handler {
InputBufferPool mInputBufferPool = new InputBufferPool();
LinkedBlockingQueue<InputBuffer> mPendingOps = new LinkedBlockingQueue<>();
AudioEncodingHandler() {
super(WorkerHandler.get("AudioEncodingHandler").getLooper());
}
void sendInputBuffer(ByteBuffer buffer, long presentationTimeUs, boolean endOfStream) {
int presentation1 = (int) (presentationTimeUs >> 32);
int presentation2 = (int) (presentationTimeUs);
sendMessage(obtainMessage(endOfStream ? 1 : 0, presentation1, presentation2, buffer));
}
@Override
public void handleMessage(Message msg) {
super.handleMessage(msg);
boolean endOfStream = msg.what == 1;
long timestamp = (((long) msg.arg1) << 32) | (((long) msg.arg2) & 0xffffffffL);
ByteBuffer buffer = (ByteBuffer) msg.obj;
int readBytes = buffer.remaining();
InputBuffer inputBuffer = mInputBufferPool.get();
inputBuffer.source = buffer;
inputBuffer.timestamp = timestamp;
inputBuffer.length = readBytes;
inputBuffer.isEndOfStream = endOfStream;
mPendingOps.add(inputBuffer);
performPendingOps(endOfStream);
}
private void performPendingOps(boolean force) {
LOG.v("Performing", mPendingOps.size(), "Pending operations.");
InputBuffer buffer;
while ((buffer = mPendingOps.peek()) != null) {
if (force) {
acquireInputBuffer(buffer);
performPendingOp(buffer);
} else if (tryAcquireInputBuffer(buffer)) {
performPendingOp(buffer);
} else {
break; // Will try later.
}
}
}
private void performPendingOp(InputBuffer buffer) {
buffer.data.put(buffer.source);
mByteBufferPool.recycle(buffer.source);
mPendingOps.remove(buffer);
encodeInputBuffer(buffer);
boolean eos = buffer.isEndOfStream;
mInputBufferPool.recycle(buffer);
drainOutput(eos);
if (eos) {
mInputBufferPool.clear();
WorkerHandler.get("AudioEncodingHandler").getThread().interrupt();
}
}
}
}
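
A detail worth calling out in AudioEncodingHandler above: sendInputBuffer() splits the 64-bit presentation time across Message.arg1 and Message.arg2 to avoid allocating per message, and handleMessage() reassembles it. A self-contained sketch of that round trip (plain Java, hypothetical class name):

    class TimestampPackingSketch {
        public static void main(String[] args) {
            long timestampUs = System.nanoTime() / 1000L;
            int high = (int) (timestampUs >> 32);  // would travel as Message.arg1
            int low = (int) timestampUs;           // would travel as Message.arg2
            long restored = (((long) high) << 32) | (((long) low) & 0xffffffffL);
            // The 0xffffffffL mask is essential: a negative 'low' would otherwise
            // sign-extend and corrupt the high 32 bits of the result.
            if (restored != timestampUs) throw new AssertionError();
        }
    }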

@@ -0,0 +1,15 @@
package com.otaliastudios.cameraview;
import java.nio.ByteBuffer;
class ByteBufferPool extends Pool<ByteBuffer> {
ByteBufferPool(final int bufferSize, int maxPoolSize) {
super(maxPoolSize, new Factory<ByteBuffer>() {
@Override
public ByteBuffer create() {
return ByteBuffer.allocateDirect(bufferSize);
}
});
}
}

@@ -151,6 +151,7 @@ class EglBaseSurface extends EglElement {
/**
* Sends the presentation time stamp to EGL.
* https://www.khronos.org/registry/EGL/extensions/ANDROID/EGL_ANDROID_presentation_time.txt
*
* @param nsecs Timestamp, in nanoseconds.
*/

@@ -314,6 +314,7 @@ final class EglCore {
/**
* Sends the presentation time stamp to EGL. Time is expressed in nanoseconds.
* https://www.khronos.org/registry/EGL/extensions/ANDROID/EGL_ANDROID_presentation_time.txt
*/
public void setPresentationTime(EGLSurface eglSurface, long nsecs) {
EGLExt.eglPresentationTimeANDROID(mEGLDisplay, eglSurface, nsecs);

@@ -0,0 +1,12 @@
package com.otaliastudios.cameraview;
import java.nio.ByteBuffer;
class InputBuffer {
ByteBuffer data;
ByteBuffer source;
int index;
int length;
long timestamp;
boolean isEndOfStream;
}

@@ -0,0 +1,15 @@
package com.otaliastudios.cameraview;
import java.nio.ByteBuffer;
class InputBufferPool extends Pool<InputBuffer> {
InputBufferPool() {
super(Integer.MAX_VALUE, new Factory<InputBuffer>() {
@Override
public InputBuffer create() {
return new InputBuffer();
}
});
}
}

@@ -0,0 +1,50 @@
package com.otaliastudios.cameraview;
import android.media.MediaCodec;
import android.os.Build;
import java.nio.ByteBuffer;
/**
* A wrapper around MediaCodec that facilitates the use of the API-dependent get{Input/Output}Buffer methods,
* in order to prevent this issue: http://stackoverflow.com/q/30646885
*/
class MediaCodecBuffers {
private final MediaCodec mMediaCodec;
private final ByteBuffer[] mInputBuffers;
private ByteBuffer[] mOutputBuffers;
MediaCodecBuffers(MediaCodec mediaCodec) {
mMediaCodec = mediaCodec;
if (Build.VERSION.SDK_INT < 21) {
mInputBuffers = mediaCodec.getInputBuffers();
mOutputBuffers = mediaCodec.getOutputBuffers();
} else {
mInputBuffers = mOutputBuffers = null;
}
}
public ByteBuffer getInputBuffer(final int index) {
if (Build.VERSION.SDK_INT >= 21) {
return mMediaCodec.getInputBuffer(index);
}
ByteBuffer buffer = mInputBuffers[index];
buffer.clear();
return buffer;
}
public ByteBuffer getOutputBuffer(final int index) {
if (Build.VERSION.SDK_INT >= 21) {
return mMediaCodec.getOutputBuffer(index);
}
return mOutputBuffers[index];
}
public void onOutputBuffersChanged() {
if (Build.VERSION.SDK_INT < 21) {
mOutputBuffers = mMediaCodec.getOutputBuffers();
}
}
}
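
For context, a hedged sketch of how this wrapper is meant to be driven. In this diff it is used from tryAcquireInputBuffer() and drainOutput(); the standalone method below (hypothetical name, codec assumed configured and started) shows the same call pattern:

    import android.media.MediaCodec;
    import java.nio.ByteBuffer;

    class MediaCodecBuffersSketch {
        // 'codec' is assumed to be a configured, started encoder.
        static void pumpOnce(MediaCodec codec, ByteBuffer raw, long presentationUs) {
            MediaCodecBuffers buffers = new MediaCodecBuffers(codec);
            int inIndex = codec.dequeueInputBuffer(0);
            if (inIndex >= 0) {
                ByteBuffer in = buffers.getInputBuffer(inIndex); // already clear()ed on API < 21
                in.put(raw);
                codec.queueInputBuffer(inIndex, 0, in.position(), presentationUs, 0);
            }
            MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
            int outIndex = codec.dequeueOutputBuffer(info, 0);
            if (outIndex == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
                buffers.onOutputBuffersChanged(); // refresh the cached array on API < 21
            } else if (outIndex >= 0) {
                ByteBuffer out = buffers.getOutputBuffer(outIndex);
                // ... forward 'out' to the muxer ...
                codec.releaseOutputBuffer(outIndex, false);
            }
        }
    }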

@@ -1,8 +1,10 @@
package com.otaliastudios.cameraview;
import android.annotation.SuppressLint;
import android.media.MediaCodec;
import android.media.MediaFormat;
import android.os.Build;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.annotation.RequiresApi;
@@ -14,17 +16,107 @@ import java.nio.ByteBuffer;
@RequiresApi(api = Build.VERSION_CODES.JELLY_BEAN_MR2)
abstract class MediaEncoder {
private final static int TIMEOUT_USEC = 10000; // 10 msec
private final static String TAG = MediaEncoder.class.getSimpleName();
private final static CameraLogger LOG = CameraLogger.create(TAG);
// Did some tests to see which value would maximize our performance in the current setup (infinite audio pool).
// Measured the time it would take to write a 30 second video. Based on this, we'll go with TIMEOUT=0 for now.
// INPUT_TIMEOUT_US 10000: 46 seconds
// INPUT_TIMEOUT_US 1000: 37 seconds
// INPUT_TIMEOUT_US 100: 33 seconds
// INPUT_TIMEOUT_US 0: 32 seconds
private final static int INPUT_TIMEOUT_US = 0;
// 0 also seems to be the best, although it does not change so much.
// Can't go too high or this is a bottleneck for the audio encoder.
private final static int OUTPUT_TIMEOUT_US = 0;
@SuppressWarnings("WeakerAccess")
protected MediaCodec mMediaCodec;
private MediaCodec.BufferInfo mBufferInfo;
@SuppressWarnings("WeakerAccess")
protected WorkerHandler mWorker;
private MediaEncoderEngine.Controller mController;
private int mTrackIndex;
private OutputBufferPool mOutputBufferPool;
private MediaCodec.BufferInfo mBufferInfo;
private MediaCodecBuffers mBuffers;
private long mMaxLengthMillis;
private boolean mMaxLengthReached;
/**
* A readable name for the thread.
*/
@NonNull
abstract String getName();
/**
* This encoder was attached to the engine. Keep the controller
* and run the internal thread.
*/
final void prepare(@NonNull final MediaEncoderEngine.Controller controller, final long maxLengthMillis) {
mController = controller;
mBufferInfo = new MediaCodec.BufferInfo();
mMaxLengthMillis = maxLengthMillis;
mWorker = WorkerHandler.get(getName());
LOG.i(getName(), "Prepare was called. Posting.");
mWorker.post(new Runnable() {
@Override
public void run() {
LOG.i(getName(), "Prepare was called. Executing.");
onPrepare(controller, maxLengthMillis);
}
});
}
/**
* Start recording. This might be a lightweight operation
* in case the encoder needs to wait for a certain event
* like a "frame available".
*/
final void start() {
LOG.i(getName(), "Start was called. Posting.");
mWorker.post(new Runnable() {
@Override
public void run() {
LOG.i(getName(), "Start was called. Executing.");
onStart();
}
});
}
/**
* The caller notifying of a certain event occurring.
* Should analyze the string and see if the event is important.
* @param event what happened
* @param data object
*/
final void notify(final @NonNull String event, final @Nullable Object data) {
LOG.i(getName(), "Notify was called. Posting.");
mWorker.post(new Runnable() {
@Override
public void run() {
LOG.i(getName(), "Notify was called. Executing.");
onEvent(event, data);
}
});
}
/**
* Stop recording.
*/
final void stop() {
LOG.i(getName(), "Stop was called. Posting.");
mWorker.post(new Runnable() {
@Override
public void run() {
LOG.i(getName(), "Stop was called. Executing.");
onStop();
}
});
}
/**
* Called to prepare this encoder before starting.
* Any initialization should be done here as it does not interfere with the original
@@ -33,13 +125,10 @@ abstract class MediaEncoder {
* At this point subclasses MUST create the {@link #mMediaCodec} object.
*
* @param controller the muxer controller
* @param maxLengthMillis the maxLength in millis
*/
@EncoderThread
void prepare(@NonNull MediaEncoderEngine.Controller controller, long maxLengthMillis) {
mController = controller;
mBufferInfo = new MediaCodec.BufferInfo();
mMaxLengthMillis = maxLengthMillis;
}
abstract void onPrepare(@NonNull final MediaEncoderEngine.Controller controller, final long maxLengthMillis);
/**
* Start recording. This might be a lightweight operation
@@ -47,7 +136,7 @@ abstract class MediaEncoder {
* like a "frame available".
*/
@EncoderThread
abstract void start();
abstract void onStart();
/**
* The caller notifying of a certain event occurring.
@@ -56,97 +145,130 @@ abstract class MediaEncoder {
* @param data object
*/
@EncoderThread
abstract void notify(@NonNull String event, @Nullable Object data);
abstract void onEvent(@NonNull String event, @Nullable Object data);
/**
* Stop recording.
* This MUST happen SYNCHRONOUSLY!
*/
@EncoderThread
abstract void stop();
abstract void onStop();
/**
* Release resources here.
* Called by {@link #drainOutput(boolean)} when we get an EOS signal (not necessarily in the
* parameters, might also be through an input buffer flag).
*/
@EncoderThread
void release() {
if (mMediaCodec != null) {
mMediaCodec.stop();
mMediaCodec.release();
mMediaCodec = null;
private void release() {
LOG.w("Subclass", getName(), "Notified that it is released.");
mController.requestRelease(mTrackIndex);
mMediaCodec.stop();
mMediaCodec.release();
mMediaCodec = null;
mOutputBufferPool.clear();
mOutputBufferPool = null;
mBuffers = null;
onRelease();
}
/**
* This is called when we are stopped.
* It is a good moment to release all resources, although the muxer
* might still be alive (we wait for the other Encoder, see Controller).
*/
abstract void onRelease();
/**
* Returns a new input buffer and index, waiting at most {@link #INPUT_TIMEOUT_US} if none is available.
* Callers should check the boolean result - true if the buffer was filled.
*/
@SuppressWarnings("WeakerAccess")
protected boolean tryAcquireInputBuffer(@NonNull InputBuffer holder) {
if (mBuffers == null) {
mBuffers = new MediaCodecBuffers(mMediaCodec);
}
int inputBufferIndex = mMediaCodec.dequeueInputBuffer(INPUT_TIMEOUT_US);
if (inputBufferIndex < 0) {
return false;
} else {
holder.index = inputBufferIndex;
holder.data = mBuffers.getInputBuffer(inputBufferIndex);
return true;
}
}
/**
* Returns a new input buffer and index, waiting indefinitely if none is available.
* The buffer should be written into, then the index should be passed to {@link #encodeInputBuffer(InputBuffer)}.
*/
@SuppressWarnings({"StatementWithEmptyBody", "WeakerAccess"})
protected void acquireInputBuffer(@NonNull InputBuffer holder) {
while (!tryAcquireInputBuffer(holder)) {}
}
/**
* Encode data into the {@link #mMediaCodec}.
*/
@SuppressWarnings("WeakerAccess")
protected void encode(@Nullable final ByteBuffer buffer, final int length, final long presentationTimeUs) {
final ByteBuffer[] inputBuffers = mMediaCodec.getInputBuffers();
while (true) {
final int inputBufferIndex = mMediaCodec.dequeueInputBuffer(TIMEOUT_USEC);
if (inputBufferIndex >= 0) {
final ByteBuffer inputBuffer = inputBuffers[inputBufferIndex];
inputBuffer.clear();
if (buffer != null) {
inputBuffer.put(buffer);
}
if (length <= 0) { // send EOS
mMediaCodec.queueInputBuffer(inputBufferIndex, 0, 0,
presentationTimeUs, MediaCodec.BUFFER_FLAG_END_OF_STREAM);
} else {
mMediaCodec.queueInputBuffer(inputBufferIndex, 0, length,
presentationTimeUs, 0);
}
break;
} else if (inputBufferIndex == MediaCodec.INFO_TRY_AGAIN_LATER) {
// wait for MediaCodec encoder is ready to encode
// nothing to do here because MediaCodec#dequeueInputBuffer(TIMEOUT_USEC)
// will wait for maximum TIMEOUT_USEC(10msec) on each call
}
protected void encodeInputBuffer(InputBuffer buffer) {
LOG.w("ENCODING:", getName(), "Buffer:", buffer.index, "Bytes:", buffer.length, "Presentation:", buffer.timestamp);
if (buffer.isEndOfStream) { // send EOS
mMediaCodec.queueInputBuffer(buffer.index, 0, 0,
buffer.timestamp, MediaCodec.BUFFER_FLAG_END_OF_STREAM);
} else {
mMediaCodec.queueInputBuffer(buffer.index, 0, buffer.length,
buffer.timestamp, 0);
}
}
/**
* Signals the end of input stream. This is a Video only API, as in the normal case,
* we use input buffers to signal the end. In the video case, we don't have input buffers
* because we use an input surface instead.
*/
@SuppressWarnings("WeakerAccess")
protected void signalEndOfInputStream() {
mMediaCodec.signalEndOfInputStream();
}
/**
* Extracts all pending data that was written and encoded into {@link #mMediaCodec},
* and forwards it to the muxer.
* <p>
* If endOfStream is not set, this returns when there is no more data to drain. If it
* is set, we send EOS to the encoder, and then iterate until we see EOS on the output.
* Calling this with endOfStream set should be done once, right before stopping the muxer.
*
* If drainAll is not set, this returns after OUTPUT_TIMEOUT_US if there is no more data to drain.
* If drainAll is set, we wait until we see EOS on the output.
* Calling this with drainAll set should be done once, right before stopping the muxer.
*/
@SuppressLint("LogNotTimber")
@SuppressWarnings("WeakerAccess")
protected void drain(boolean endOfStream) {
if (endOfStream) {
mMediaCodec.signalEndOfInputStream();
protected void drainOutput(boolean drainAll) {
LOG.w("DRAINING:", getName(), "EOS:", drainAll);
if (mMediaCodec == null) {
LOG.e("drain() was called before prepare() or after releasing.");
return;
}
if (mBuffers == null) {
mBuffers = new MediaCodecBuffers(mMediaCodec);
}
ByteBuffer[] encoderOutputBuffers = mMediaCodec.getOutputBuffers();
while (true) {
int encoderStatus = mMediaCodec.dequeueOutputBuffer(mBufferInfo, TIMEOUT_USEC);
int encoderStatus = mMediaCodec.dequeueOutputBuffer(mBufferInfo, OUTPUT_TIMEOUT_US);
if (encoderStatus == MediaCodec.INFO_TRY_AGAIN_LATER) {
// no output available yet
if (!endOfStream) break; // out of while
if (!drainAll) break; // out of while
} else if (encoderStatus == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
// not expected for an encoder
encoderOutputBuffers = mMediaCodec.getOutputBuffers();
mBuffers.onOutputBuffersChanged();
} else if (encoderStatus == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
// should happen before receiving buffers, and should only happen once
if (mController.isStarted()) throw new RuntimeException("format changed twice");
if (mController.isStarted()) throw new RuntimeException("MediaFormat changed twice.");
MediaFormat newFormat = mMediaCodec.getOutputFormat();
// now that we have the Magic Goodies, start the muxer
mTrackIndex = mController.start(newFormat);
mTrackIndex = mController.requestStart(newFormat);
mOutputBufferPool = new OutputBufferPool(mTrackIndex);
} else if (encoderStatus < 0) {
Log.w("VideoMediaEncoder", "unexpected result from encoder.dequeueOutputBuffer: " + encoderStatus);
LOG.e("Unexpected result from dequeueOutputBuffer: " + encoderStatus);
// let's ignore it
} else {
ByteBuffer encodedData = encoderOutputBuffers[encoderStatus];
if (encodedData == null) {
throw new RuntimeException("encoderOutputBuffer " + encoderStatus + " was null");
}
ByteBuffer encodedData = mBuffers.getOutputBuffer(encoderStatus);
// Codec config means that config data was pulled out and fed to the muxer when we got
// the INFO_OUTPUT_FORMAT_CHANGED status. Ignore it.
@@ -155,41 +277,56 @@ abstract class MediaEncoder {
// adjust the ByteBuffer values to match BufferInfo (not needed?)
encodedData.position(mBufferInfo.offset);
encodedData.limit(mBufferInfo.offset + mBufferInfo.size);
mController.write(mTrackIndex, encodedData, mBufferInfo);
mLastPresentationTime = mBufferInfo.presentationTimeUs;
if (mStartPresentationTime == 0) {
mStartPresentationTime = mLastPresentationTime;
// Store startPresentationTime and lastPresentationTime, useful for example to
// detect the mMaxLengthReached and stop recording.
if (mStartPresentationTimeUs == Long.MIN_VALUE) {
mStartPresentationTimeUs = mBufferInfo.presentationTimeUs;
}
mLastPresentationTimeUs = mBufferInfo.presentationTimeUs;
// Pass presentation times as offsets with respect to the mStartPresentationTimeUs.
// This ensures consistency between audio pts (coming from System.nanoTime()) and
// video pts (coming from SurfaceTexture) both of which have no meaningful time-base
// and should be used for offsets only.
// TODO find a better way, this causes sync issues. (+ note: this sends pts=0 at first)
// mBufferInfo.presentationTimeUs = mLastPresentationTimeUs - mStartPresentationTimeUs;
LOG.i("DRAINING:", getName(), "Dispatching write(). Presentation:", mBufferInfo.presentationTimeUs);
// TODO fix the mBufferInfo being the same, then implement delayed writing in Controller
// and remove the isStarted() check here.
OutputBuffer buffer = mOutputBufferPool.get();
buffer.info = mBufferInfo;
buffer.trackIndex = mTrackIndex;
buffer.data = encodedData;
mController.write(mOutputBufferPool, buffer);
}
mMediaCodec.releaseOutputBuffer(encoderStatus, false);
if (!mMaxLengthReached) {
if (mLastPresentationTime / 1000 - mStartPresentationTime / 1000 > mMaxLengthMillis) {
mMaxLengthReached = true;
// Log.e("MediaEncoder", this.getClass().getSimpleName() + " requested stop at " + (mLastPresentationTime * 1000 * 1000));
mController.requestStop();
break;
}
// Check for the maxLength constraint (with appropriate conditions)
// Not needed if drainAll because we already were asked to stop
if (!drainAll
&& !mMaxLengthReached
&& mStartPresentationTimeUs != Long.MIN_VALUE
&& mLastPresentationTimeUs - mStartPresentationTimeUs > mMaxLengthMillis * 1000) {
LOG.w("DRAINING: Reached maxLength! mLastPresentationTimeUs:", mLastPresentationTimeUs,
"mStartPresentationTimeUs:", mStartPresentationTimeUs,
"mMaxLengthUs:", mMaxLengthMillis * 1000);
mMaxLengthReached = true;
mController.requestStop(mTrackIndex);
break;
}
// Check for the EOS flag so we can release the encoder.
if ((mBufferInfo.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
break; // out of while
LOG.w("DRAINING:", getName(), "Dispatching release().");
release();
break;
}
}
}
}
private long mStartPresentationTime = 0;
private long mLastPresentationTime = 0;
long getPresentationTime() {
long result = System.nanoTime() / 1000L;
// presentationTimeUs should be monotonic
// otherwise muxer fail to write
if (result < mLastPresentationTime) {
result = (mLastPresentationTime - result) + result;
}
return result;
}
private long mStartPresentationTimeUs = Long.MIN_VALUE;
private long mLastPresentationTimeUs = 0;
abstract int getBitRate();
abstract int getEncodedBitRate();
}
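
Note the two end-of-stream paths that coexist in this class: buffer-fed encoders (audio) end the stream with an empty input buffer carrying the EOS flag, while surface-fed encoders (video) have no input buffers and must call signalEndOfInputStream(). A minimal side-by-side sketch (hypothetical class, 'codec' stands for a started MediaCodec):

    import android.media.MediaCodec;

    class EndOfStreamSketch {
        // Buffer-fed encoders (see AudioMediaEncoder): an empty, EOS-flagged input buffer.
        static void signalViaBuffer(MediaCodec codec, int inputIndex, long presentationUs) {
            codec.queueInputBuffer(inputIndex, 0, 0, presentationUs,
                    MediaCodec.BUFFER_FLAG_END_OF_STREAM);
        }

        // Surface-fed encoders (see VideoMediaEncoder.onStop): signal on the codec itself.
        static void signalViaSurface(MediaCodec codec) {
            codec.signalEndOfInputStream();
        }
        // In both cases drainOutput(true) then loops until BUFFER_FLAG_END_OF_STREAM
        // appears in the output BufferInfo, which triggers release().
    }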

@@ -1,6 +1,5 @@
package com.otaliastudios.cameraview;
import android.media.MediaCodec;
import android.media.MediaFormat;
import android.media.MediaMuxer;
import android.os.Build;
@@ -10,13 +9,12 @@ import androidx.annotation.RequiresApi;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
@RequiresApi(api = Build.VERSION_CODES.JELLY_BEAN_MR2)
class MediaEncoderEngine {
private final static String TAG = MediaEncoder.class.getSimpleName();
private final static String TAG = MediaEncoderEngine.class.getSimpleName();
private final static CameraLogger LOG = CameraLogger.create(TAG);
@SuppressWarnings("WeakerAccess")
@@ -24,20 +22,19 @@ class MediaEncoderEngine {
final static int STOP_BY_MAX_DURATION = 1;
final static int STOP_BY_MAX_SIZE = 2;
private WorkerHandler mWorker;
private ArrayList<MediaEncoder> mEncoders;
private MediaMuxer mMediaMuxer;
private int mMediaMuxerStartCount;
private int mStartedEncodersCount;
private int mStoppedEncodersCount;
private boolean mMediaMuxerStarted;
private Controller mController;
private Listener mListener;
private int mStopReason = STOP_BY_USER;
private int mPossibleStopReason;
private final Object mLock = new Object();
private final Object mControllerLock = new Object();
MediaEncoderEngine(@NonNull File file, @NonNull VideoMediaEncoder videoEncoder, @Nullable AudioMediaEncoder audioEncoder,
final int maxDuration, final long maxSize, @Nullable Listener listener) {
mWorker = WorkerHandler.get("EncoderEngine");
mListener = listener;
mController = new Controller();
mEncoders = new ArrayList<>();
@@ -50,51 +47,54 @@ class MediaEncoderEngine {
} catch (IOException e) {
throw new RuntimeException(e);
}
mMediaMuxerStartCount = 0;
mStartedEncodersCount = 0;
mMediaMuxerStarted = false;
mWorker.post(new Runnable() {
@Override
public void run() {
// Trying to convert the size constraints to duration constraints,
// because they are super easy to check.
// This is really naive & probably not accurate, but...
int bitRate = 0;
for (MediaEncoder encoder : mEncoders) {
bitRate += encoder.getBitRate();
}
int bytePerSecond = bitRate / 8;
long sizeMaxDuration = (maxSize / bytePerSecond) * 1000L;
long finalMaxDuration = Long.MAX_VALUE;
if (maxSize > 0 && maxDuration > 0) {
mPossibleStopReason = sizeMaxDuration < maxDuration ? STOP_BY_MAX_SIZE : STOP_BY_MAX_DURATION;
finalMaxDuration = Math.min(sizeMaxDuration, maxDuration);
} else if (maxSize > 0) {
mPossibleStopReason = STOP_BY_MAX_SIZE;
finalMaxDuration = sizeMaxDuration;
} else if (maxDuration > 0) {
mPossibleStopReason = STOP_BY_MAX_DURATION;
finalMaxDuration = maxDuration;
}
LOG.i("Computed a max duration of", (finalMaxDuration / 1000F));
for (MediaEncoder encoder : mEncoders) {
encoder.prepare(mController, finalMaxDuration);
}
}
});
mStoppedEncodersCount = 0;
// Trying to convert the size constraints to duration constraints,
// because they are super easy to check.
// This is really naive & probably not accurate, but...
int bitRate = 0;
for (MediaEncoder encoder : mEncoders) {
bitRate += encoder.getEncodedBitRate();
}
int bytePerSecond = bitRate / 8;
long sizeMaxDuration = (maxSize / bytePerSecond) * 1000L;
long finalMaxDuration = Long.MAX_VALUE;
if (maxSize > 0 && maxDuration > 0) {
mPossibleStopReason = sizeMaxDuration < maxDuration ? STOP_BY_MAX_SIZE : STOP_BY_MAX_DURATION;
finalMaxDuration = Math.min(sizeMaxDuration, maxDuration);
} else if (maxSize > 0) {
mPossibleStopReason = STOP_BY_MAX_SIZE;
finalMaxDuration = sizeMaxDuration;
} else if (maxDuration > 0) {
mPossibleStopReason = STOP_BY_MAX_DURATION;
finalMaxDuration = maxDuration;
}
LOG.w("Computed a max duration of", (finalMaxDuration / 1000F));
for (MediaEncoder encoder : mEncoders) {
encoder.prepare(mController, finalMaxDuration);
}
}
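
The size-to-duration conversion above is easy to sanity check. A sketch of the same arithmetic with illustrative numbers; the class name and the 4 Mbit/s video bitrate are assumptions, not values from this diff:

    class SizeConstraintSketch {
        // maxSize (bytes) and total bit rate (bits/sec) -> max duration (ms),
        // matching the constructor above: (maxSize / bytePerSecond) * 1000.
        static long sizeToDurationMillis(long maxSizeBytes, int totalBitRate) {
            int bytePerSecond = totalBitRate / 8;
            return (maxSizeBytes / bytePerSecond) * 1000L;
        }
        // Example: a 10 MB cap with an assumed 4 Mbit/s video track plus the audio
        // BIT_RATE of 705600 bit/s (44100 samples/sec * 2 bytes * 8 bits) gives
        // 588200 bytes/sec, so sizeToDurationMillis(10_000_000, 4_705_600) = 17000 ms.
    }
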
// Stuff here might be called from multiple threads.
class Controller {
int start(MediaFormat format) {
synchronized (mLock) {
/**
* Request that the muxer should start. This is not guaranteed to be executed:
* we wait for all encoders to call this method, and only then, start the muxer.
* @param format the media format
* @return the encoder track index
*/
int requestStart(MediaFormat format) {
synchronized (mControllerLock) {
if (mMediaMuxerStarted) {
throw new IllegalStateException("Trying to start but muxer started already");
}
int track = mMediaMuxer.addTrack(format);
mMediaMuxerStartCount++;
if (mMediaMuxerStartCount == mEncoders.size()) {
LOG.w("Controller:", "Assigned track", track, "to format", format.getString(MediaFormat.KEY_MIME));
if (++mStartedEncodersCount == mEncoders.size()) {
mMediaMuxer.start();
mMediaMuxerStarted = true;
}
@@ -102,83 +102,124 @@ class MediaEncoderEngine {
}
}
/**
* Whether the muxer is started.
* @return true if muxer was started
*/
boolean isStarted() {
synchronized (mLock) {
synchronized (mControllerLock) {
return mMediaMuxerStarted;
}
}
// Synchronization does not seem needed here.
void write(int track, ByteBuffer encodedData, MediaCodec.BufferInfo info) {
/**
* Writes the given data to the muxer. Should be called after {@link #isStarted()}
* returns true. Note: this seems to be thread safe, no lock.
* TODO cache values if not started yet, then apply later. Read comments in drainOutput().
* Currently they are recycled instantly.
*/
void write(OutputBufferPool pool, OutputBuffer buffer) {
if (!mMediaMuxerStarted) {
throw new IllegalStateException("Trying to write before muxer started");
}
mMediaMuxer.writeSampleData(track, encodedData, info);
// This is a bad idea and causes crashes.
// if (info.presentationTimeUs < mLastTimestampUs) info.presentationTimeUs = mLastTimestampUs;
// mLastTimestampUs = info.presentationTimeUs;
LOG.v("Writing for track", buffer.trackIndex, ". Presentation:", buffer.info.presentationTimeUs);
mMediaMuxer.writeSampleData(buffer.trackIndex, buffer.data, buffer.info);
pool.recycle(buffer);
}
void requestStop() {
synchronized (mLock) {
mMediaMuxerStartCount--;
if (mMediaMuxerStartCount == 0) {
/**
* Requests that the engine stops. This is not executed until all encoders call
* this method, so it is a kind of soft request, just like {@link #requestStart(MediaFormat)}.
* To be used when maxLength / maxSize constraints are reached, for example.
*
* When this succeeds, {@link MediaEncoder#stop()} is called.
*/
void requestStop(int track) {
LOG.i("RequestStop was called for track", track);
synchronized (mControllerLock) {
if (--mStartedEncodersCount == 0) {
mStopReason = mPossibleStopReason;
stop();
}
}
}
}
void start() {
mWorker.post(new Runnable() {
@Override
public void run() {
for (MediaEncoder encoder : mEncoders) {
encoder.start();
/**
* Notifies that the encoder was stopped. After this is called by all encoders,
* we will actually stop the muxer.
*/
void requestRelease(int track) {
LOG.i("requestRelease was called for track", track);
synchronized (mControllerLock) {
if (++mStoppedEncodersCount == mEncoders.size()) {
release();
}
}
});
}
}
void notify(final String event, final Object data) {
mWorker.post(new Runnable() {
@Override
public void run() {
for (MediaEncoder encoder : mEncoders) {
encoder.notify(event, data);
}
}
});
final void start() {
for (MediaEncoder encoder : mEncoders) {
encoder.start();
}
}
void stop() {
mWorker.post(new Runnable() {
@Override
public void run() {
for (MediaEncoder encoder : mEncoders) {
encoder.stop();
}
for (MediaEncoder encoder : mEncoders) {
encoder.release();
}
Exception error = null;
if (mMediaMuxer != null) {
// stop() throws an exception if you haven't fed it any data.
// But also in other occasions. So this is a signal that something
// went wrong, and we propagate that to the listener.
try {
mMediaMuxer.stop();
mMediaMuxer.release();
} catch (Exception e) {
error = e;
}
mMediaMuxer = null;
}
if (mListener != null) mListener.onEncoderStop(mStopReason, error);
mStopReason = STOP_BY_USER;
mListener = null;
mMediaMuxerStartCount = 0;
mMediaMuxerStarted = false;
@SuppressWarnings("SameParameterValue")
final void notify(final String event, final Object data) {
for (MediaEncoder encoder : mEncoders) {
encoder.notify(event, data);
}
}
/**
* This just asks the encoder to stop. We will wait for them to call {@link Controller#requestRelease(int)}
* to actually stop the muxer, as there might be async stuff going on.
*/
final void stop() {
for (MediaEncoder encoder : mEncoders) {
encoder.stop();
}
}
private void release() {
Exception error = null;
if (mMediaMuxer != null) {
// stop() throws an exception if you haven't fed it any data.
// But also on other occasions. So this is a signal that something
// went wrong, and we propagate that to the listener.
try {
mMediaMuxer.stop();
mMediaMuxer.release();
} catch (Exception e) {
error = e;
}
});
mMediaMuxer = null;
}
if (mListener != null) {
mListener.onEncoderStop(mStopReason, error);
mListener = null;
}
mStopReason = STOP_BY_USER;
mStartedEncodersCount = 0;
mStoppedEncodersCount = 0;
mMediaMuxerStarted = false;
}
@NonNull
VideoMediaEncoder getVideoEncoder() {
return (VideoMediaEncoder) mEncoders.get(0);
}
@Nullable
AudioMediaEncoder getAudioEncoder() {
if (mEncoders.size() > 1) {
return (AudioMediaEncoder) mEncoders.get(1);
} else {
return null;
}
}
interface Listener {

@@ -0,0 +1,11 @@
package com.otaliastudios.cameraview;
import android.media.MediaCodec;
import java.nio.ByteBuffer;
class OutputBuffer {
MediaCodec.BufferInfo info;
int trackIndex;
ByteBuffer data;
}

@@ -0,0 +1,18 @@
package com.otaliastudios.cameraview;
import android.media.MediaCodec;
class OutputBufferPool extends Pool<OutputBuffer> {
OutputBufferPool(final int trackIndex) {
super(Integer.MAX_VALUE, new Factory<OutputBuffer>() {
@Override
public OutputBuffer create() {
OutputBuffer buffer = new OutputBuffer();
buffer.trackIndex = trackIndex;
buffer.info = new MediaCodec.BufferInfo();
return buffer;
}
});
}
}

@@ -0,0 +1,89 @@
package com.otaliastudios.cameraview;
import java.util.concurrent.LinkedBlockingQueue;
import androidx.annotation.CallSuper;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
class Pool<T> {
private static final String TAG = Pool.class.getSimpleName();
private static final CameraLogger LOG = CameraLogger.create(TAG);
private int maxPoolSize;
private int activeCount;
private LinkedBlockingQueue<T> mQueue;
private Factory<T> factory;
interface Factory<T> {
T create();
}
Pool(int maxPoolSize, Factory<T> factory) {
this.maxPoolSize = maxPoolSize;
this.mQueue = new LinkedBlockingQueue<>(maxPoolSize);
this.factory = factory;
}
boolean canGet() {
return count() < maxPoolSize;
}
@Nullable
T get() {
T buffer = mQueue.poll();
if (buffer != null) {
activeCount++; // poll() removed it from the cached queue; count it as active.
LOG.v("GET: Reusing recycled item.", this);
return buffer;
}
if (!canGet()) {
LOG.v("GET: Returning null. Too much items requested.", this);
return null;
}
activeCount++;
LOG.v("GET: Creating a new item.", this);
return factory.create();
}
void recycle(@NonNull T item) {
LOG.v("RECYCLE: Recycling item.", this);
if (--activeCount < 0) {
throw new IllegalStateException("Trying to recycle an item which makes activeCount < 0." +
"This means that this or some previous items being recycled were not coming from " +
"this pool, or some item was recycled more than once. " + this);
}
if (!mQueue.offer(item)) {
throw new IllegalStateException("Trying to recycle an item while the queue is full. " +
"This means that this or some previous items being recycled were not coming from " +
"this pool, or some item was recycled more than once. " + this);
}
}
@NonNull
@Override
public String toString() {
return getClass().getSimpleName() + " -- count:" + count() + ", active:" + activeCount() + ", cached:" + cachedCount();
}
final int count() {
return activeCount() + cachedCount();
}
final int activeCount() {
return activeCount;
}
final int cachedCount() {
return mQueue.size();
}
@CallSuper
void clear() {
mQueue.clear();
}
}
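
A quick walkthrough of the counting invariant above, count() == activeCount() + cachedCount(), using the Pool just defined; the byte[] factory and class name are for illustration only:

    class PoolInvariantsSketch {
        static void walkthrough() {
            Pool<byte[]> pool = new Pool<>(2, new Pool.Factory<byte[]>() {
                @Override
                public byte[] create() {
                    return new byte[16];
                }
            });
            byte[] a = pool.get();   // created: active=1, cached=0
            byte[] b = pool.get();   // created: active=2, cached=0
            byte[] c = pool.get();   // null: count() == maxPoolSize, canGet() is false
            pool.recycle(a);         // active=1, cached=1
            byte[] d = pool.get();   // reuses a's array: active=2, cached=0
        }
    }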

@@ -3,6 +3,8 @@ package com.otaliastudios.cameraview;
import android.opengl.EGLContext;
import android.opengl.Matrix;
import android.os.Build;
import android.widget.TextView;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.annotation.RequiresApi;
@@ -15,10 +17,6 @@ class TextureMediaEncoder extends VideoMediaEncoder<TextureMediaEncoder.Config>
final static String FRAME_EVENT = "frame";
static class Frame {
float[] transform;
long timestamp;
}
static class Config extends VideoMediaEncoder.Config {
int textureId;
float scaleX;
@@ -44,15 +42,40 @@ class TextureMediaEncoder extends VideoMediaEncoder<TextureMediaEncoder.Config>
private EglCore mEglCore;
private EglWindowSurface mWindow;
private EglViewport mViewport;
private Pool<TextureFrame> mFramePool = new Pool<>(100, new Pool.Factory<TextureFrame>() {
@Override
public TextureFrame create() {
return new TextureFrame();
}
});
TextureMediaEncoder(@NonNull Config config) {
super(config);
}
static class TextureFrame {
private TextureFrame() {}
// Nanoseconds, in no meaningful time-base. Should be for offsets only.
// Typically coming from SurfaceTexture.getTimestamp().
long timestamp;
float[] transform = new float[16];
}
@NonNull
TextureFrame acquireFrame() {
if (!mFramePool.canGet()) {
throw new RuntimeException("Need more frames than this! Please increase the pool size.");
} else {
//noinspection ConstantConditions
return mFramePool.get();
}
}
@EncoderThread
@Override
void prepare(@NonNull MediaEncoderEngine.Controller controller, long maxLengthMillis) {
super.prepare(controller, maxLengthMillis);
void onPrepare(@NonNull MediaEncoderEngine.Controller controller, long maxLengthMillis) {
super.onPrepare(controller, maxLengthMillis);
mEglCore = new EglCore(mConfig.eglContext, EglCore.FLAG_RECORDABLE);
mWindow = new EglWindowSurface(mEglCore, mSurface, true);
mWindow.makeCurrent(); // drawing will happen on the InputWindowSurface, which
@@ -62,8 +85,57 @@ class TextureMediaEncoder extends VideoMediaEncoder<TextureMediaEncoder.Config>
@EncoderThread
@Override
void release() {
super.release();
void onStart() {
super.onStart();
// Nothing to do here. Waiting for the first frame.
}
@EncoderThread
@Override
void onEvent(@NonNull String event, @Nullable Object data) {
if (!event.equals(FRAME_EVENT)) return;
TextureFrame frame = (TextureFrame) data;
if (frame == null) return; // Should not happen
if (frame.timestamp == 0 || mFrameNum < 0) {
// The first condition comes from grafika.
// The second condition means we were asked to stop.
mFramePool.recycle(frame);
return;
}
mFrameNum++;
LOG.v("Incoming frame timestamp:", frame.timestamp);
// We must scale this matrix like GlCameraPreview does, because it might have some cropping.
// Scaling takes place with respect to the (0, 0, 0) point, so we must apply a Translation to compensate.
float[] transform = frame.transform;
float scaleX = mConfig.scaleX;
float scaleY = mConfig.scaleY;
float scaleTranslX = (1F - scaleX) / 2F;
float scaleTranslY = (1F - scaleY) / 2F;
Matrix.translateM(transform, 0, scaleTranslX, scaleTranslY, 0);
Matrix.scaleM(transform, 0, scaleX, scaleY, 1);
// We also must rotate this matrix. In GlCameraPreview it is not needed because it is a live
// stream, but the output video must be correctly rotated based on the device rotation at the moment.
// Rotation also takes place with respect to the origin (the Z axis), so we must
// translate to origin, rotate, then back to where we were.
Matrix.translateM(transform, 0, 0.5F, 0.5F, 0);
Matrix.rotateM(transform, 0, mConfig.transformRotation, 0, 0, 1);
Matrix.translateM(transform, 0, -0.5F, -0.5F, 0);
drainOutput(false);
// Future note: passing scale values to the viewport? They are scaleX and scaleY,
// but flipped based on the mConfig.scaleFlipped boolean.
mViewport.drawFrame(mConfig.textureId, transform);
mWindow.setPresentationTime(frame.timestamp);
mWindow.swapBuffers();
mFramePool.recycle(frame);
}
@Override
void onRelease() {
mFramePool.clear();
if (mWindow != null) {
mWindow.release();
mWindow = null;
@@ -77,58 +149,4 @@ class TextureMediaEncoder extends VideoMediaEncoder<TextureMediaEncoder.Config>
mEglCore = null;
}
}
@EncoderThread
@Override
void start() {
super.start();
// Nothing to do here. Waiting for the first frame.
}
@EncoderThread
@Override
void notify(@NonNull String event, @Nullable Object data) {
if (event.equals(FRAME_EVENT)) {
Frame frame = (Frame) data;
// Seeing this after device is toggled off/on with power button. The
// first frame back has a zero timestamp.
// MPEG4Writer thinks this is cause to abort() in native code, so it's very
// important that we just ignore the frame.
if (frame.timestamp == 0) return;
if (mFrameNum < 0) return;
mFrameNum++;
int arg1 = (int) (frame.timestamp >> 32);
int arg2 = (int) frame.timestamp;
long timestamp = (((long) arg1) << 32) | (((long) arg2) & 0xffffffffL);
float[] transform = frame.transform;
// We must scale this matrix like GlCameraPreview does, because it might have some cropping.
// Scaling takes place with respect to the (0, 0, 0) point, so we must apply a Translation to compensate.
float scaleX = mConfig.scaleX;
float scaleY = mConfig.scaleY;
float scaleTranslX = (1F - scaleX) / 2F;
float scaleTranslY = (1F - scaleY) / 2F;
Matrix.translateM(transform, 0, scaleTranslX, scaleTranslY, 0);
Matrix.scaleM(transform, 0, scaleX, scaleY, 1);
// We also must rotate this matrix. In GlCameraPreview it is not needed because it is a live
// stream, but the output video, must be correctly rotated based on the device rotation at the moment.
// Rotation also takes place with respect to the origin (the Z axis), so we must
// translate to origin, rotate, then back to where we were.
Matrix.translateM(transform, 0, 0.5F, 0.5F, 0);
Matrix.rotateM(transform, 0, mConfig.transformRotation, 0, 0, 1);
Matrix.translateM(transform, 0, -0.5F, -0.5F, 0);
drain(false);
// Future note: passing scale values to the viewport? They are scaleX and scaleY,
// but flipped based on the mConfig.scaleFlipped boolean.
mViewport.drawFrame(mConfig.textureId, transform);
mWindow.setPresentationTime(timestamp);
mWindow.swapBuffers();
}
}
}

@@ -51,10 +51,15 @@ abstract class VideoMediaEncoder<C extends VideoMediaEncoder.Config> extends Med
mConfig = config;
}
@NonNull
@Override
String getName() {
return "VideoEncoder";
}
@EncoderThread
@Override
void prepare(@NonNull MediaEncoderEngine.Controller controller, long maxLengthMillis) {
super.prepare(controller, maxLengthMillis);
void onPrepare(@NonNull MediaEncoderEngine.Controller controller, long maxLengthMillis) {
MediaFormat format = MediaFormat.createVideoFormat(mConfig.mimeType, mConfig.width, mConfig.height);
// Set some properties. Failing to specify some of these can cause the MediaCodec
@@ -62,6 +67,7 @@ abstract class VideoMediaEncoder<C extends VideoMediaEncoder.Config> extends Med
format.setInteger(MediaFormat.KEY_COLOR_FORMAT, MediaCodecInfo.CodecCapabilities.COLOR_FormatSurface);
format.setInteger(MediaFormat.KEY_BIT_RATE, mConfig.bitRate);
format.setInteger(MediaFormat.KEY_FRAME_RATE, mConfig.frameRate);
format.setInteger(MediaFormat.KEY_FRAME_RATE, 6); // TODO
format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, 2);
format.setInteger("rotation-degrees", mConfig.rotation);
@@ -79,20 +85,21 @@ abstract class VideoMediaEncoder<C extends VideoMediaEncoder.Config> extends Med
@EncoderThread
@Override
void start() {
void onStart() {
// Nothing to do here. Waiting for the first frame.
mFrameNum = 0;
}
@EncoderThread
@Override
void stop() {
void onStop() {
mFrameNum = -1;
drain(true);
signalEndOfInputStream();
drainOutput(true);
}
@Override
int getBitRate() {
int getEncodedBitRate() {
return mConfig.bitRate;
}
}

@@ -58,8 +58,13 @@ class SnapshotVideoRecorder extends VideoRecorder implements GlCameraPreview.Ren
@Override
public void onRendererFrame(@NonNull SurfaceTexture surfaceTexture, float scaleX, float scaleY) {
if (mCurrentState == STATE_NOT_RECORDING && mDesiredState == STATE_RECORDING) {
// Set default options
if (mResult.videoBitRate <= 0) mResult.videoBitRate = DEFAULT_VIDEO_BITRATE;
if (mResult.videoFrameRate <= 0) mResult.videoFrameRate = DEFAULT_VIDEO_FRAMERATE;
if (mResult.audioBitRate <= 0) mResult.audioBitRate = DEFAULT_AUDIO_BITRATE;
// Video. Ensure width and height are divisible by 2, as I have read somewhere.
Size size = mResult.getSize();
// Ensure width and height are divisible by 2, as I have read somewhere.
int width = size.getWidth();
int height = size.getHeight();
width = width % 2 == 0 ? width : width + 1;
@@ -70,9 +75,6 @@ class SnapshotVideoRecorder extends VideoRecorder implements GlCameraPreview.Ren
case H_264: type = "video/avc"; break; // MediaFormat.MIMETYPE_VIDEO_AVC:
case DEVICE_DEFAULT: type = "video/avc"; break;
}
if (mResult.videoBitRate <= 0) mResult.videoBitRate = DEFAULT_VIDEO_BITRATE;
if (mResult.audioBitRate <= 0) mResult.audioBitRate = DEFAULT_AUDIO_BITRATE;
if (mResult.videoFrameRate <= 0) mResult.videoFrameRate = DEFAULT_VIDEO_FRAMERATE;
LOG.w("Creating frame encoder. Rotation:", mResult.rotation);
TextureMediaEncoder.Config config = new TextureMediaEncoder.Config(width, height,
mResult.videoBitRate,
@@ -84,10 +86,14 @@ class SnapshotVideoRecorder extends VideoRecorder implements GlCameraPreview.Ren
EGL14.eglGetCurrentContext()
);
TextureMediaEncoder videoEncoder = new TextureMediaEncoder(config);
// Audio
AudioMediaEncoder audioEncoder = null;
if (mResult.audio == Audio.ON) {
audioEncoder = new AudioMediaEncoder(new AudioMediaEncoder.Config(mResult.audioBitRate));
}
// Engine
mEncoderEngine = new MediaEncoderEngine(mResult.file, videoEncoder, audioEncoder,
mResult.maxDuration, mResult.maxSize, SnapshotVideoRecorder.this);
mEncoderEngine.start();
@@ -96,11 +102,11 @@ class SnapshotVideoRecorder extends VideoRecorder implements GlCameraPreview.Ren
}
if (mCurrentState == STATE_RECORDING) {
TextureMediaEncoder.Frame frame = new TextureMediaEncoder.Frame();
frame.timestamp = surfaceTexture.getTimestamp();
frame.transform = new float[16]; // TODO would be cool to avoid this at every frame. But it's not easy.
surfaceTexture.getTransformMatrix(frame.transform);
mEncoderEngine.notify(TextureMediaEncoder.FRAME_EVENT, frame);
TextureMediaEncoder textureEncoder = (TextureMediaEncoder) mEncoderEngine.getVideoEncoder();
TextureMediaEncoder.TextureFrame textureFrame = textureEncoder.acquireFrame();
textureFrame.timestamp = surfaceTexture.getTimestamp();
surfaceTexture.getTransformMatrix(textureFrame.transform);
mEncoderEngine.notify(TextureMediaEncoder.FRAME_EVENT, textureFrame);
}
if (mCurrentState == STATE_RECORDING && mDesiredState == STATE_NOT_RECORDING) {
@@ -113,7 +119,6 @@ class SnapshotVideoRecorder extends VideoRecorder implements GlCameraPreview.Ren
}
@EncoderThread
@Override
public void onEncoderStop(int stopReason, @Nullable Exception e) {
// If something failed, undo the result, since this is the mechanism

@@ -22,6 +22,7 @@ abstract class VideoRecorder {
abstract void stop();
@SuppressWarnings("WeakerAccess")
protected void dispatchResult() {
if (mListener != null) {
mListener.onVideoResult(mResult);

@@ -2,6 +2,8 @@ package com.otaliastudios.cameraview;
import android.os.Handler;
import android.os.HandlerThread;
import android.os.Looper;
import androidx.annotation.NonNull;
import java.lang.ref.WeakReference;
@@ -63,16 +65,22 @@ class WorkerHandler {
}
@NonNull
public Thread getThread() {
public HandlerThread getThread() {
return mThread;
}
@NonNull
public Looper getLooper() {
return mThread.getLooper();
}
static void destroy() {
for (String key : sCache.keySet()) {
WeakReference<WorkerHandler> ref = sCache.get(key);
WorkerHandler handler = ref.get();
if (handler != null && handler.getThread().isAlive()) {
handler.getThread().interrupt();
// handler.getThread().quit();
}
ref.clear();
}

@@ -2,6 +2,7 @@ package com.otaliastudios.cameraview.demo;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.net.Uri;
import android.os.Bundle;
import androidx.annotation.NonNull;
import com.google.android.material.bottomsheet.BottomSheetBehavior;
