Estimate video bit rate instead of ugly default

pull/506/head
Mattia Iavarone 6 years ago
parent d4555b5b92
commit da6a0a6299
  1. cameraview/src/main/java/com/otaliastudios/cameraview/video/SnapshotVideoRecorder.java (10 changes)
  2. cameraview/src/main/java/com/otaliastudios/cameraview/video/encoding/AudioMediaEncoder.java (122 changes)
  3. cameraview/src/main/java/com/otaliastudios/cameraview/video/encoding/AudioTimestamp.java (53 changes)

@@ -38,9 +38,15 @@ public class SnapshotVideoRecorder extends VideoRecorder implements RendererFram
     private static final CameraLogger LOG = CameraLogger.create(TAG);

     private static final int DEFAULT_VIDEO_FRAMERATE = 30;
-    private static final int DEFAULT_VIDEO_BITRATE = 1000000;
     private static final int DEFAULT_AUDIO_BITRATE = 64000;

+    // https://stackoverflow.com/a/5220554/4288782
+    // Assuming low motion, we don't want to put this too high for default usage,
+    // advanced users are still free to change this for each video.
+    private static int estimateVideoBitRate(@NonNull Size size, int frameRate) {
+        return (int) (0.07F * 1F * size.getWidth() * size.getHeight() * frameRate);
+    }
+
     private static final int STATE_RECORDING = 0;
     private static final int STATE_NOT_RECORDING = 1;
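For a feel of what the new default produces, here is a standalone sketch of the same arithmetic (the 0.07 base factor and the 1x low-motion multiplier come from the StackOverflow answer linked in the comment; the class and printouts below are illustrative, not library code):

    // Standalone sketch mirroring estimateVideoBitRate() above.
    public class BitRateEstimateDemo {

        // 0.07F: base factor from https://stackoverflow.com/a/5220554/4288782.
        // 1F: motion rank (1 = low motion; higher-motion content would use 2 or 4).
        static int estimateVideoBitRate(int width, int height, int frameRate) {
            return (int) (0.07F * 1F * width * height * frameRate);
        }

        public static void main(String[] args) {
            // 1920 * 1080 * 30 * 0.07 ≈ 4,354,560 bps, roughly 4.35 Mbps
            System.out.println(estimateVideoBitRate(1920, 1080, 30));
            // 640 * 480 * 30 * 0.07 ≈ 645,120 bps, below the old 1 Mbps flat default
            System.out.println(estimateVideoBitRate(640, 480, 30));
        }
    }

So high resolutions now get a sensible default instead of the starved 1 Mbps, and small resolutions no longer waste bits.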
@@ -101,8 +107,8 @@ public class SnapshotVideoRecorder extends VideoRecorder implements RendererFram
         LOG.i("Starting the encoder engine.");

         // Set default options
-        if (mResult.videoBitRate <= 0) mResult.videoBitRate = DEFAULT_VIDEO_BITRATE;
         if (mResult.videoFrameRate <= 0) mResult.videoFrameRate = DEFAULT_VIDEO_FRAMERATE;
+        if (mResult.videoBitRate <= 0) mResult.videoBitRate = estimateVideoBitRate(mResult.size, mResult.videoFrameRate);
         if (mResult.audioBitRate <= 0) mResult.audioBitRate = DEFAULT_AUDIO_BITRATE;

         // Video. Ensure width and height are divisible by 2, as I have read somewhere.
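The "divisible by 2" note typically stems from 4:2:0 chroma subsampling, which needs even dimensions. One common way to force that, sketched here rather than quoted from the library, is to clear the lowest bit:

    // Hypothetical helper, not the library's code: round a dimension down to even.
    static int toEven(int dimension) {
        return dimension & ~1; // 1081 -> 1080, 720 stays 720
    }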

@@ -3,7 +3,6 @@ package com.otaliastudios.cameraview.video.encoding;
 import android.annotation.SuppressLint;
 import android.media.AudioFormat;
 import android.media.AudioRecord;
-import android.media.AudioTimestamp;
 import android.media.MediaCodec;
 import android.media.MediaCodecInfo;
 import android.media.MediaFormat;
@@ -58,18 +57,11 @@ public class AudioMediaEncoder extends MediaEncoder {
     // the actual recording. It's better to reduce this and skip recording at all.
     private static final int BUFFER_POOL_MAX_SIZE = 80;

-    private static long bytesToUs(int bytes) {
-        return (1000000L * bytes) / BYTE_RATE;
-    }
-
-    private static long bytesToUs(long bytes) {
-        return (1000000L * bytes) / BYTE_RATE;
-    }
-
     private boolean mRequestStop = false;
     private AudioEncodingHandler mEncoder;
     private AudioRecordingThread mRecorder;
     private ByteBufferPool mByteBufferPool;
+    private final AudioTimestamp mTimestamp;
     private Config mConfig;

     public static class Config {
@@ -86,6 +78,7 @@ public class AudioMediaEncoder extends MediaEncoder {
     public AudioMediaEncoder(@NonNull Config config) {
         super("AudioEncoder");
         mConfig = config.copy();
+        mTimestamp = new AudioTimestamp();
     }

     @EncoderThread
@@ -146,7 +139,7 @@ public class AudioMediaEncoder extends MediaEncoder {
         private long mLastTimeUs;
         private long mFirstTimeUs = Long.MIN_VALUE;

-        AudioRecordingThread() {
+        private AudioRecordingThread() {
             final int minBufferSize = AudioRecord.getMinBufferSize(SAMPLING_FREQUENCY, CHANNELS, ENCODING);
             int bufferSize = FRAME_SIZE * 25; // Make this bigger so we don't skip frames.
             while (bufferSize < minBufferSize) {
@@ -212,7 +205,7 @@ public class AudioMediaEncoder extends MediaEncoder {
          */
         private void sleep() {
             try {
-                Thread.sleep(bytesToUs(FRAME_SIZE) / 1000);
+                Thread.sleep(AudioTimestamp.bytesToUs(FRAME_SIZE, BYTE_RATE) / 1000);
             } catch (InterruptedException ignore) {}
         }
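The bytesToUs conversion that moved into AudioTimestamp is plain proportionality: durationUs = 1,000,000 * bytes / byteRate. Assuming 16-bit mono PCM at 44.1 kHz (an assumption for illustration; the real SAMPLING_FREQUENCY, CHANNELS and FRAME_SIZE constants are defined elsewhere in this class), a quick check:

    // Assumed configuration: 44100 Hz * 1 channel * 2 bytes/sample = 88200 bytes/s.
    int byteRate = 44100 * 1 * 2;
    // A hypothetical 2048-byte read covers (1000000 * 2048) / 88200 ≈ 23219 us, about 23 ms.
    long readUs = (1000000L * 2048) / byteRate;
    // One full second's worth of bytes maps back to exactly 1,000,000 us.
    long secondUs = (1000000L * byteRate) / byteRate;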
@@ -225,7 +218,7 @@ public class AudioMediaEncoder extends MediaEncoder {
          * @param endOfStream end of stream?
          */
         private void increaseTime(int readBytes, boolean endOfStream) {
-            mLastTimeUs = onIncreaseTime(readBytes);
+            mLastTimeUs = mTimestamp.increaseUs(readBytes, BYTE_RATE);
             if (mFirstTimeUs == Long.MIN_VALUE) {
                 mFirstTimeUs = mLastTimeUs;
             }
@@ -236,76 +229,6 @@ public class AudioMediaEncoder extends MediaEncoder {
             }
         }
-
-        /**
-         * We have different implementations here, using the last one
-         * that looks better.
-         * @param readBytes bytes read
-         * @return the new presentation time
-         */
-        private long onIncreaseTime(int readBytes) {
-            return onIncreaseTime3(readBytes);
-        }
-
-        /**
-         * This method simply assumes that we read everything without losing a single US.
-         * It will use System.nanoTime() just once, as the starting point.
-         * Of course we don't as there are things going on in this thread.
-         */
-        @SuppressWarnings("unused")
-        private long onIncreaseTime1(int readBytes) {
-            return mLastTimeUs + bytesToUs(readBytes);
-        }
-
-        /**
-         * Just for testing, this method will use Api 24 method to retrieve the timestamp.
-         * This way we let the platform choose instead of making assumptions.
-         */
-        @SuppressWarnings("unused")
-        @RequiresApi(24)
-        private long onIncreaseTime2(int readBytes) {
-            if (mApi24Timestamp == null) {
-                mApi24Timestamp = new AudioTimestamp();
-            }
-            mAudioRecord.getTimestamp(mApi24Timestamp, AudioTimestamp.TIMEBASE_MONOTONIC);
-            return mApi24Timestamp.nanoTime / 1000;
-        }
-
-        private AudioTimestamp mApi24Timestamp;
-
-        /**
-         * This method looks like an improvement over {@link #onIncreaseTime1(int)} as it
-         * accounts for the current time as well. Adapted & improved from Kickflip.
-         *
-         * This creates regular timestamps unless we accumulate a lot of delay (greater than
-         * twice the buffer duration), in which case it creates a gap and starts again trying
-         * to be regular from the new point.
-         */
-        private long onIncreaseTime3(int readBytes) {
-            long bufferDurationUs = bytesToUs(readBytes);
-            long bufferEndTimeUs = System.nanoTime() / 1000; // now
-            long bufferStartTimeUs = bufferEndTimeUs - bufferDurationUs;
-            // If this is the first time, the base time is the buffer start time.
-            if (mBytesSinceBaseTime == 0) mBaseTimeUs = bufferStartTimeUs;
-            // Recompute time assuming that we are respecting the sampling frequency.
-            // This puts the time at the end of last read buffer, which means, where we
-            // should be if we had no delay / missed buffers.
-            long correctedTimeUs = mBaseTimeUs + bytesToUs(mBytesSinceBaseTime);
-            long correctionUs = bufferStartTimeUs - correctedTimeUs;
-            // However, if the correction is too big (> 2*bufferDurationUs), reset to this point.
-            // This is triggered if we lose buffers and are recording/encoding at a slower rate.
-            if (correctionUs >= 2L * bufferDurationUs) {
-                mBaseTimeUs = bufferStartTimeUs;
-                mBytesSinceBaseTime = readBytes;
-                return mBaseTimeUs;
-            } else {
-                mBytesSinceBaseTime += readBytes;
-                return correctedTimeUs;
-            }
-        }
-
-        private long mBaseTimeUs;
-        private long mBytesSinceBaseTime;
     }

     /**
@@ -315,16 +238,16 @@ public class AudioMediaEncoder extends MediaEncoder {
      * For example, a stop() must become onStop() soon, can't wait for all this draining.
      */
     @SuppressLint("HandlerLeak")
-    class AudioEncodingHandler extends Handler {
+    private class AudioEncodingHandler extends Handler {

         private InputBufferPool mInputBufferPool = new InputBufferPool();
         private LinkedBlockingQueue<InputBuffer> mPendingOps = new LinkedBlockingQueue<>();

-        AudioEncodingHandler() {
+        private AudioEncodingHandler() {
             super(WorkerHandler.get("AudioEncodingHandler").getLooper());
         }

-        void sendInputBuffer(ByteBuffer buffer, long presentationTimeUs, boolean endOfStream) {
+        private void sendInputBuffer(ByteBuffer buffer, long presentationTimeUs, boolean endOfStream) {
             sendMessage(obtainMessage(
                     endOfStream ? 1 : 0,
                     (int) (presentationTimeUs >> 32),
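sendInputBuffer ships the 64-bit timestamp through a Handler Message by splitting it across the two 32-bit int arguments, presumably recombined on the receiving side. A minimal round-trip sketch (variable names are illustrative, not the encoder's):

    long timestampUs = 1234567890123L;
    // Split: high half in one int argument, low half in the other.
    int arg1 = (int) (timestampUs >> 32);
    int arg2 = (int) timestampUs;
    // Recombine: mask the low half to undo sign extension.
    long restored = (((long) arg1) << 32) | (((long) arg2) & 0xFFFFFFFFL);
    // restored == timestampUs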
@@ -363,7 +286,7 @@ public class AudioMediaEncoder extends MediaEncoder {
         private void performPendingOp(InputBuffer buffer) {
             LOG.i("encoding thread - performing pending operation for timestamp:", buffer.timestamp, "- encoding.");
-            buffer.data.put(buffer.source); // TODO this copy is prob. the worst part here for performance
+            buffer.data.put(buffer.source); // NOTE: this copy is prob. the worst part here for performance
             mByteBufferPool.recycle(buffer.source);
             mPendingOps.remove(buffer);
             encodeInputBuffer(buffer);
@@ -374,31 +297,8 @@ public class AudioMediaEncoder extends MediaEncoder {
             // NOTE: can consider calling this drainOutput on yet another thread, which would let us
             // use an even smaller BUFFER_POOL_MAX_SIZE without losing audio frames. But this way
             // we can accumulate delay on this new thread without noticing (no pool getting empty).
-            if (true) {
-                drainOutput(eos);
-                if (eos) WorkerHandler.get("AudioEncodingHandler").getThread().interrupt();
-            } else {
-                // Testing the option above.
-                WorkerHandler.get("AudioEncodingDrainer").remove(drainRunnable);
-                WorkerHandler.get("AudioEncodingDrainer").remove(drainRunnableEos);
-                WorkerHandler.get("AudioEncodingDrainer").post(eos ? drainRunnableEos : drainRunnable);
-            }
+            drainOutput(eos);
+            if (eos) WorkerHandler.get("AudioEncodingHandler").getThread().interrupt();
         }
-
-        private final Runnable drainRunnable = new Runnable() {
-            @Override
-            public void run() {
-                drainOutput(false);
-            }
-        };
-
-        private final Runnable drainRunnableEos = new Runnable() {
-            @Override
-            public void run() {
-                drainOutput(true);
-                WorkerHandler.get("AudioEncodingHandler").getThread().interrupt();
-                WorkerHandler.get("AudioEncodingDrainer").getThread().interrupt();
-            }
-        };
     }
 }

@@ -0,0 +1,53 @@
+package com.otaliastudios.cameraview.video.encoding;
+
+/**
+ * Computes timestamps for audio frames.
+ * Video frames do not need this since the timestamp comes from
+ * the surface texture.
+ */
+class AudioTimestamp {
+
+    static long bytesToUs(long bytes, int byteRate) {
+        return (1000000L * bytes) / byteRate;
+    }
+
+    private long mBaseTimeUs;
+    private long mBytesSinceBaseTime;
+
+    /**
+     * This method accounts for the current time and proved to be the most reliable among
+     * the ones tested.
+     *
+     * This creates regular timestamps unless we accumulate a lot of delay (greater than
+     * twice the buffer duration), in which case it creates a gap and starts again trying
+     * to be regular from the new point.
+     *
+     * Returns timestamps in the {@link System#nanoTime()} reference.
+     */
+    @SuppressWarnings("SameParameterValue")
+    long increaseUs(int readBytes, int byteRate) {
+        long bufferDurationUs = bytesToUs((long) readBytes, byteRate);
+        long bufferEndTimeUs = System.nanoTime() / 1000; // now
+        long bufferStartTimeUs = bufferEndTimeUs - bufferDurationUs;
+
+        // If this is the first time, the base time is the buffer start time.
+        if (mBytesSinceBaseTime == 0) mBaseTimeUs = bufferStartTimeUs;
+
+        // Recompute time assuming that we are respecting the sampling frequency.
+        // This puts the time at the end of the last read buffer, which means, where we
+        // should be if we had no delay / missed buffers.
+        long correctedTimeUs = mBaseTimeUs + bytesToUs(mBytesSinceBaseTime, byteRate);
+        long correctionUs = bufferStartTimeUs - correctedTimeUs;
+
+        // However, if the correction is too big (> 2*bufferDurationUs), reset to this point.
+        // This is triggered if we lose buffers and are recording/encoding at a slower rate.
+        if (correctionUs >= 2L * bufferDurationUs) {
+            mBaseTimeUs = bufferStartTimeUs;
+            mBytesSinceBaseTime = readBytes;
+            return mBaseTimeUs;
+        } else {
+            mBytesSinceBaseTime += readBytes;
+            return correctedTimeUs;
+        }
+    }
+}
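A minimal usage sketch of the new class, written from the same package since it is package-private; the byte rate and read size below are assumptions for illustration, not the encoder's actual constants:

    AudioTimestamp timestamp = new AudioTimestamp();
    int byteRate = 88200;  // assumed: 44100 Hz, mono, 16-bit PCM
    int readBytes = 2048;  // assumed AudioRecord.read() chunk size
    for (int i = 0; i < 5; i++) {
        // Each call returns the presentation time (in us, System.nanoTime() base)
        // for the chunk that was just read: regularly spaced by ~23219 us each,
        // unless the reader falls behind by more than two chunk durations,
        // in which case the base time resets and a gap appears.
        long presentationUs = timestamp.increaseUs(readBytes, byteRate);
        System.out.println("chunk " + i + " -> " + presentationUs + " us");
    }

Extracting this into its own class also lets the recording thread drop the three experimental onIncreaseTime variants deleted above and keep only the strategy that proved reliable.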