Write zeros when we have gaps

pull/506/head
Mattia Iavarone 6 years ago
parent 63feab4965
commit 4e4041ab07
  1. cameraview/src/main/java/com/otaliastudios/cameraview/video/encoding/AudioMediaEncoder.java (110 changed lines)
  2. cameraview/src/main/java/com/otaliastudios/cameraview/video/encoding/AudioTimestamp.java (26 changed lines)
  3. cameraview/src/main/java/com/otaliastudios/cameraview/video/encoding/MediaEncoderEngine.java (31 changed lines)

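Before the diffs, a minimal self-contained sketch of the idea in this commit: when the audio timestamp falls behind, compute how many whole frames fit into the gap and push that many zeroed buffers with advancing presentation times, so the encoded track stays continuous. The class and its encode() stand-in are hypothetical, not part of the patch; the constants and the bytes-to-microseconds conversion mirror the mono config that AudioMediaEncoder uses below.

import java.nio.ByteBuffer;

public class GapFillSketch {

    private static final int FRAME_SIZE = 1024 * 2; // bytes/frame (1024 samples * 2 bytes, mono)
    private static final int BYTE_RATE = 44100 * 2; // byte/sec (44100 samples/sec * 2 bytes, mono)

    // Same conversion as AudioTimestamp.bytesToUs().
    private static long bytesToUs(long bytes, int byteRate) {
        return (1000000L * bytes) / byteRate;
    }

    // Fill a detected gap with zeroed frames so the encoder sees continuous audio.
    private static void fillGap(long gapUs, long gapStartUs) {
        long frameUs = bytesToUs(FRAME_SIZE, BYTE_RATE); // ~23219 us per frame here
        int gaps = (int) (gapUs / frameUs);
        for (int i = 0; i < gaps; i++) {
            // allocateDirect() guarantees zero-initialized content, which is
            // also why the patch keeps a single mZeroBuffer around as a zero source.
            ByteBuffer zeroBuffer = ByteBuffer.allocateDirect(FRAME_SIZE);
            encode(zeroBuffer, gapStartUs + i * frameUs);
        }
    }

    // Hypothetical stand-in for AudioEncodingHandler.sendInputBuffer().
    private static void encode(ByteBuffer frame, long presentationTimeUs) {
        System.out.println("zero frame (" + frame.remaining() + " bytes) at " + presentationTimeUs + "us");
    }

    public static void main(String[] args) {
        // Pretend the recorder fell ~70ms behind: expect three ~23.2ms zero frames.
        fillGap(70_000L, 5_000_000L);
    }
}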
@@ -19,6 +19,8 @@ import androidx.annotation.RequiresApi;
import java.io.IOException;
import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.Map;
import java.util.concurrent.LinkedBlockingQueue;
/**
@@ -37,11 +39,14 @@ public class AudioMediaEncoder extends MediaEncoder {
// the recorder thread. It's not safe to make this very large, or we can end up encoding
// long after the actual recording ends. It's better to keep it small and drop frames instead.
private static final int BUFFER_POOL_MAX_SIZE = 80;
+private static final boolean DEBUG_PERFORMANCE = true;
+private static final boolean USE_SEPARATE_ENCODING_THREAD = true; // not clear how much benefit this brings
private boolean mRequestStop = false;
private AudioEncodingHandler mEncoder;
private AudioRecordingThread mRecorder;
private ByteBufferPool mByteBufferPool;
+private ByteBuffer mZeroBuffer;
private final AudioTimestamp mTimestamp;
private Config mConfig;
@@ -51,7 +56,7 @@ public class AudioMediaEncoder extends MediaEncoder {
public static class Config {
// Configurable options
-public int bitRate;
+public int bitRate; // ENCODED bit rate
public int channels = 1;
// Not configurable options (for now)
@@ -59,7 +64,7 @@ public class AudioMediaEncoder extends MediaEncoder {
private final int encoding = AudioFormat.ENCODING_PCM_16BIT; // Determines the SAMPLE_SIZE
// The 44.1KHz frequency is the only setting guaranteed to be available on all devices.
private final int samplingFrequency = 44100; // samples/sec
-private final int sampleSize = 2; // byte/sample/channel
+private final int sampleSize = 2; // byte/sample/channel [16bit]
private final int byteRatePerChannel = samplingFrequency * sampleSize; // byte/sec/channel
private final int frameSizePerChannel = 1024; // bytes/frame/channel [AAC constant]
@@ -71,12 +76,12 @@ public class AudioMediaEncoder extends MediaEncoder {
return config;
}
-private int byteRate() {
+private int byteRate() { // RAW byte rate
return byteRatePerChannel * channels; // byte/sec
}
@SuppressWarnings("unused")
-private int bitRate() {
+private int bitRate() { // RAW bit rate
return byteRate() * 8; // bit/sec
}
@@ -111,7 +116,7 @@ public class AudioMediaEncoder extends MediaEncoder {
final MediaFormat audioFormat = MediaFormat.createAudioFormat(mConfig.mimeType, mConfig.samplingFrequency, mConfig.channels);
audioFormat.setInteger(MediaFormat.KEY_AAC_PROFILE, MediaCodecInfo.CodecProfileLevel.AACObjectLC);
audioFormat.setInteger(MediaFormat.KEY_CHANNEL_MASK, mConfig.audioFormatChannels());
-audioFormat.setInteger(MediaFormat.KEY_BIT_RATE, mConfig.bitRate);
+audioFormat.setInteger(MediaFormat.KEY_BIT_RATE, mConfig.bitRate); // TODO multiply by channels?
audioFormat.setInteger(MediaFormat.KEY_CHANNEL_COUNT, mConfig.channels);
try {
mMediaCodec = MediaCodec.createEncoderByType(mConfig.mimeType);
@@ -121,6 +126,7 @@ public class AudioMediaEncoder extends MediaEncoder {
mMediaCodec.configure(audioFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
mMediaCodec.start();
mByteBufferPool = new ByteBufferPool(mConfig.frameSize(), BUFFER_POOL_MAX_SIZE);
+mZeroBuffer = ByteBuffer.allocateDirect(mConfig.frameSize());
}
@EncoderThread
@@ -162,7 +168,8 @@
private long mFirstTimeUs = Long.MIN_VALUE;
private AudioRecordingThread() {
-final int minBufferSize = AudioRecord.getMinBufferSize(mConfig.samplingFrequency, mConfig.channels, mConfig.encoding);
+final int minBufferSize = AudioRecord.getMinBufferSize(mConfig.samplingFrequency,
+        mConfig.channels, mConfig.encoding);
int bufferSize = mConfig.frameSize() * 25; // Make this bigger so we don't skip frames.
while (bufferSize < minBufferSize) {
bufferSize += mConfig.frameSize(); // Unlikely I think.
@@ -206,7 +213,20 @@
}
} else {
mCurrentBuffer.clear();
-mReadBytes = mAudioRecord.read(mCurrentBuffer, mConfig.frameSize());
+// When stereo, we read twice the data here and AudioRecord will fill the buffer
+// with left and right bytes. https://stackoverflow.com/q/20594750/4288782
+if (DEBUG_PERFORMANCE) {
+    float before = System.nanoTime() / 1000000F;
+    mReadBytes = mAudioRecord.read(mCurrentBuffer, mConfig.frameSize());
+    float after = System.nanoTime() / 1000000F;
+    float delayMillis = after - before;
+    float durationMillis = AudioTimestamp.bytesToUs(mReadBytes, mConfig.byteRate()) / 1000F;
+    LOG.v("read thread - reading took:", delayMillis,
+            "should be:", durationMillis,
+            "delta:", delayMillis - durationMillis);
+} else {
+    mReadBytes = mAudioRecord.read(mCurrentBuffer, mConfig.frameSize());
+}
LOG.i("read thread - eos:", endOfStream, "- Read new audio frame. Bytes:", mReadBytes);
if (mReadBytes > 0) { // Good read: increase PTS.
increaseTime(mReadBytes, endOfStream);
@@ -227,9 +247,9 @@
*/
private void sleep() {
try {
-Thread.sleep(AudioTimestamp.bytesToUs(
+Thread.sleep(AudioTimestamp.bytesToMillis(
        mConfig.frameSize() * 6,
-        mConfig.byteRate()) / 1000);
+        mConfig.byteRate()));
} catch (InterruptedException ignore) {}
}
@@ -247,13 +267,31 @@
mFirstTimeUs = mLastTimeUs;
// Compute the first frame milliseconds as well.
notifyFirstFrameMillis(System.currentTimeMillis()
-        - AudioTimestamp.bytesToUs(readBytes, mConfig.byteRate()) / 1000L);
+        - AudioTimestamp.bytesToMillis(readBytes, mConfig.byteRate()));
}
boolean didReachMaxLength = (mLastTimeUs - mFirstTimeUs) > getMaxLengthMillis() * 1000L;
if (didReachMaxLength && !endOfStream) {
LOG.w("read thread - this frame reached the maxLength! deltaUs:", mLastTimeUs - mFirstTimeUs);
notifyMaxLengthReached();
}
+int gaps = mTimestamp.getGapCount(mConfig.frameSize(), mConfig.byteRate());
+if (gaps > 0) {
+    long gapStart = mTimestamp.getGapStartUs(mLastTimeUs);
+    long frameUs = AudioTimestamp.bytesToUs(mConfig.frameSize(), mConfig.byteRate());
+    LOG.w("read thread - GAPS: trying to add", gaps, "zeroed buffers");
+    for (int i = 0; i < gaps; i++) {
+        ByteBuffer zeroBuffer = mByteBufferPool.get();
+        if (zeroBuffer == null) {
+            LOG.e("read thread - GAPS: aborting because we have no free buffer.");
+            break;
+        }
+        mZeroBuffer.clear(); // rewind the shared zero source, or only the first copy gets zeros
+        zeroBuffer.position(0);
+        zeroBuffer.put(mZeroBuffer);
+        zeroBuffer.clear();
+        mEncoder.sendInputBuffer(zeroBuffer, gapStart, false);
+        gapStart += frameUs;
+    }
+}
}
}
@@ -275,19 +313,26 @@
}
// Just to debug performance.
-// private int mSendCount = 0;
-// private int mExecuteCount = 0;
-// private long mAvgSendDelay = 0;
-// private long mAvgExecuteDelay = 0;
-// private Map<Long, Long> mSendStartMap = new HashMap<>();
+private int mSendCount = 0;
+private int mExecuteCount = 0;
+private long mAvgSendDelay = 0;
+private long mAvgExecuteDelay = 0;
+private Map<Long, Long> mSendStartMap = new HashMap<>();
private void sendInputBuffer(ByteBuffer buffer, long presentationTimeUs, boolean endOfStream) {
-// mSendStartMap.put(presentationTimeUs, System.nanoTime() / 1000000);
-sendMessage(obtainMessage(
+if (DEBUG_PERFORMANCE) {
+    mSendStartMap.put(presentationTimeUs, System.nanoTime() / 1000000);
+}
+Message message = obtainMessage(
endOfStream ? 1 : 0,
(int) (presentationTimeUs >> 32),
(int) (presentationTimeUs),
-        buffer));
+        buffer);
+if (USE_SEPARATE_ENCODING_THREAD) {
+    sendMessage(message);
+} else {
+    handleMessage(message);
+}
}
@Override
@@ -298,11 +343,15 @@
LOG.i("encoding thread - got buffer. timestamp:", timestamp, "eos:", endOfStream);
// Performance logging
-// long sendEnd = System.nanoTime() / 1000000;
-// long sendStart = mSendStartMap.remove(timestamp);
-// mAvgSendDelay = ((mAvgSendDelay * mSendCount) + (sendEnd - sendStart)) / (++mSendCount);
-// LOG.v("send delay millis:", sendEnd - sendStart, "average:", mAvgSendDelay);
-// long executeStart = System.nanoTime() / 1000000;
+long executeStart;
+if (DEBUG_PERFORMANCE) {
+    long sendEnd = System.nanoTime() / 1000000;
+    //noinspection ConstantConditions
+    long sendStart = mSendStartMap.remove(timestamp);
+    mAvgSendDelay = ((mAvgSendDelay * mSendCount) + (sendEnd - sendStart)) / (++mSendCount);
+    LOG.v("send delay millis:", sendEnd - sendStart, "average:", mAvgSendDelay);
+    executeStart = System.nanoTime() / 1000000;
+}
// Actual work
ByteBuffer buffer = (ByteBuffer) msg.obj;
@@ -314,7 +363,6 @@
inputBuffer.length = readBytes;
inputBuffer.isEndOfStream = endOfStream;
mPendingOps.add(inputBuffer);
LOG.i("encoding thread - performing", mPendingOps.size(), "pending operations.");
while ((inputBuffer = mPendingOps.peek()) != null) {
if (endOfStream) {
@@ -327,9 +375,11 @@
}
}
-// long executeEnd = System.nanoTime() / 1000000;
-// mAvgExecuteDelay = ((mAvgExecuteDelay * mExecuteCount) + (executeEnd - executeStart)) / (++mExecuteCount);
-// LOG.v("execute delay millis:", executeEnd - executeStart, "average:", mAvgExecuteDelay);
+if (DEBUG_PERFORMANCE) {
+    long executeEnd = System.nanoTime() / 1000000;
+    mAvgExecuteDelay = ((mAvgExecuteDelay * mExecuteCount) + (executeEnd - executeStart)) / (++mExecuteCount);
+    LOG.v("execute delay millis:", executeEnd - executeStart, "average:", mAvgExecuteDelay);
+}
}
private void performPendingOp(InputBuffer buffer) {
@@ -348,8 +398,12 @@
drainOutput(eos);
if (eos) {
// Not sure we want this: WorkerHandler.get("AudioEncodingHandler").getThread().interrupt();
// LOG.e("EXECUTE DELAY MILLIS:", mAvgExecuteDelay);
// LOG.e("SEND DELAY MILLIS:", mAvgSendDelay);
if (DEBUG_PERFORMANCE) {
// With MONO, the count is about 370. With STEREO, the count is about 220.
// This reflects the count difference that we see in engine.write (MONO:200 STEREO:100).
LOG.e("EXECUTE DELAY MILLIS:", mAvgExecuteDelay, "COUNT:", mExecuteCount);
LOG.e("SEND DELAY MILLIS:", mAvgSendDelay, "COUNT:", mSendCount);
}
}
}
}

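Aside: the DEBUG_PERFORMANCE bookkeeping above maintains a running average of the send/execute delays without storing past samples. A tiny self-contained sketch of that update rule; the class name is hypothetical, while the update line mirrors the one in handleMessage():

public class RunningAverage {

    private long average = 0;
    private int count = 0;

    // Folds a new sample into the mean without keeping past samples around.
    long add(long sampleMillis) {
        average = ((average * count) + sampleMillis) / (++count);
        return average;
    }

    public static void main(String[] args) {
        RunningAverage sendDelay = new RunningAverage();
        System.out.println(sendDelay.add(10)); // 10
        System.out.println(sendDelay.add(20)); // 15
        System.out.println(sendDelay.add(30)); // 20
    }
}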
@@ -1,18 +1,30 @@
package com.otaliastudios.cameraview.video.encoding;
+import android.util.Log;
/**
* Computes timestamps for audio frames.
* Video frames do not need this since the timestamp comes from
* the surface texture.
+ *
+ * This is independent of the channel count, as long as the read bytes include
+ * all channels and the byte rate accounts for them as well. If channels is 2,
+ * both values are doubled and we behave the same.
*/
class AudioTimestamp {
@SuppressWarnings("WeakerAccess")
static long bytesToUs(long bytes, int byteRate) {
return (1000000L * bytes) / byteRate;
}
+static long bytesToMillis(long bytes, int byteRate) {
+    return (1000L * bytes) / byteRate;
+}
private long mBaseTimeUs;
private long mBytesSinceBaseTime;
+private long mGapUs;
/**
* This method accounts for the current time and proved to be the most reliable among
@@ -44,10 +56,24 @@ class AudioTimestamp {
if (correctionUs >= 2L * bufferDurationUs) {
mBaseTimeUs = bufferStartTimeUs;
mBytesSinceBaseTime = readBytes;
+mGapUs = correctionUs;
return mBaseTimeUs;
} else {
+mGapUs = 0;
mBytesSinceBaseTime += readBytes;
return correctedTimeUs;
}
}
+// When non-zero, this is guaranteed to be at least 2, since 2 is the
+// constant we use in the correction check above.
+int getGapCount(int frameBytes, int byteRate) {
+    if (mGapUs == 0) return 0;
+    long durationUs = bytesToUs((long) frameBytes, byteRate);
+    return (int) (mGapUs / durationUs);
+}
+
+long getGapStartUs(long lastTimeUs) {
+    return lastTimeUs - mGapUs;
+}
}

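To make the conversions above concrete, a worked example using the mono config from AudioMediaEncoder (44100 samples/sec, 16 bit). This scratch class is only an illustration, not part of the patch:

public class TimestampMath {

    public static void main(String[] args) {
        int byteRate = 44100 * 2;   // byte/sec for one 16-bit channel
        int frameBytes = 1024 * 2;  // byte/frame for one channel (AAC constant * sample size)

        long frameUs = (1000000L * frameBytes) / byteRate; // bytesToUs -> 23219 us
        long frameMs = (1000L * frameBytes) / byteRate;    // bytesToMillis -> 23 ms
        System.out.println("one frame = " + frameUs + "us = " + frameMs + "ms");

        // A gap is only recorded when the correction is at least two buffer
        // durations, so when getGapCount() is non-zero it returns at least 2.
        long gapUs = 2 * frameUs + 1000; // example: just over two frames behind
        System.out.println("zero frames to insert: " + (gapUs / frameUs)); // 2
    }
}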
@@ -1,8 +1,10 @@
package com.otaliastudios.cameraview.video.encoding;
+import android.annotation.SuppressLint;
import android.media.MediaFormat;
import android.media.MediaMuxer;
import android.os.Build;
+import android.text.format.DateFormat;
import com.otaliastudios.cameraview.CameraLogger;
@@ -13,6 +15,10 @@ import androidx.annotation.RequiresApi;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
/**
* The entry point for encoding video files.
@@ -73,13 +79,14 @@ public class MediaEncoderEngine {
private final static String TAG = MediaEncoderEngine.class.getSimpleName();
private final static CameraLogger LOG = CameraLogger.create(TAG);
+private static final boolean DEBUG_PERFORMANCE = true;
@SuppressWarnings("WeakerAccess")
public final static int END_BY_USER = 0;
public final static int END_BY_MAX_DURATION = 1;
public final static int END_BY_MAX_SIZE = 2;
private ArrayList<MediaEncoder> mEncoders;
private List<MediaEncoder> mEncoders;
private MediaMuxer mMediaMuxer;
private int mStartedEncodersCount;
private int mReleasedEncodersCount;
@@ -286,6 +293,9 @@
}
}
+@SuppressLint("UseSparseArrays")
+private Map<Integer, Integer> mDebugCount = new HashMap<>();
/**
* Writes the given data to the muxer. Should be called after {@link #isStarted()}
* returns true. Note: this seems to be thread safe, no lock.
@@ -294,7 +304,24 @@
if (!mMediaMuxerStarted) {
throw new IllegalStateException("Trying to write before muxer started");
}
LOG.v("write:", "Writing OutputBuffer - track:", buffer.trackIndex, "presentation:", buffer.info.presentationTimeUs);
if (DEBUG_PERFORMANCE) {
// When AUDIO = mono, this is called about twice the time. (200 vs 100 for 5 sec).
Integer count = mDebugCount.get(buffer.trackIndex);
mDebugCount.put(buffer.trackIndex, count == null ? 1 : ++count);
Calendar calendar = Calendar.getInstance();
calendar.setTimeInMillis(buffer.info.presentationTimeUs / 1000);
LOG.v("write:", "Writing into muxer -",
"track:", buffer.trackIndex,
"presentation:", buffer.info.presentationTimeUs,
"readable:", calendar.get(Calendar.SECOND) + ":" + calendar.get(Calendar.MILLISECOND),
"count:", count);
} else {
LOG.v("write:", "Writing into muxer -",
"track:", buffer.trackIndex,
"presentation:", buffer.info.presentationTimeUs);
}
mMediaMuxer.writeSampleData(buffer.trackIndex, buffer.data, buffer.info);
pool.recycle(buffer);
}

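Aside: the write() instrumentation above counts writes per muxer track and prints a human-readable timestamp through Calendar. The same bookkeeping in a standalone sketch, with hypothetical names and a plain microsecond split instead of Calendar:

import java.util.HashMap;
import java.util.Map;

public class TrackWriteCounter {

    private final Map<Integer, Integer> counts = new HashMap<>();

    // Counts samples written per muxer track and prints a readable timestamp.
    int onWrite(int trackIndex, long presentationTimeUs) {
        Integer previous = counts.get(trackIndex);
        int count = previous == null ? 1 : previous + 1;
        counts.put(trackIndex, count);
        long millis = presentationTimeUs / 1000;
        System.out.println("track " + trackIndex + " write #" + count
                + " at " + (millis / 1000) + ":" + (millis % 1000));
        return count;
    }

    public static void main(String[] args) {
        TrackWriteCounter counter = new TrackWriteCounter();
        counter.onWrite(0, 23_219);    // audio frame #1 -> 0:23
        counter.onWrite(0, 46_439);    // audio frame #2 -> 0:46
        counter.onWrite(1, 16_666);    // video frame #1 -> 0:16
    }
}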