From 3f876044a4657bb14c51c83a7099b03e7be5df57 Mon Sep 17 00:00:00 2001
From: Mattia Iavarone
Date: Tue, 16 Jul 2019 14:20:15 -0300
Subject: [PATCH] Improve comments

---
 .../video/encoding/AudioMediaEncoder.java | 15 ++++++--
 .../video/encoding/AudioTimestamp.java    | 36 ++++++++++++++----
 2 files changed, 40 insertions(+), 11 deletions(-)

diff --git a/cameraview/src/main/java/com/otaliastudios/cameraview/video/encoding/AudioMediaEncoder.java b/cameraview/src/main/java/com/otaliastudios/cameraview/video/encoding/AudioMediaEncoder.java
index 7cd2f16b..7619a3b4 100644
--- a/cameraview/src/main/java/com/otaliastudios/cameraview/video/encoding/AudioMediaEncoder.java
+++ b/cameraview/src/main/java/com/otaliastudios/cameraview/video/encoding/AudioMediaEncoder.java
@@ -103,7 +103,7 @@ public class AudioMediaEncoder extends MediaEncoder {
     public AudioMediaEncoder(@NonNull Config config) {
         super("AudioEncoder");
         mConfig = config.copy();
-        mTimestamp = new AudioTimestamp();
+        mTimestamp = new AudioTimestamp(mConfig.byteRate());
         // These two were in onPrepare() but it's better to do warm-up here
         // since thread and looper creation is expensive.
         mEncoder = new AudioEncodingHandler();
@@ -262,19 +262,28 @@ public class AudioMediaEncoder extends MediaEncoder {
          * @param endOfStream end of stream?
          */
         private void increaseTime(int readBytes, boolean endOfStream) {
-            mLastTimeUs = mTimestamp.increaseUs(readBytes, mConfig.byteRate());
+            // Get the latest frame timestamp.
+            mLastTimeUs = mTimestamp.increaseUs(readBytes);
             if (mFirstTimeUs == Long.MIN_VALUE) {
                 mFirstTimeUs = mLastTimeUs;
                 // Compute the first frame milliseconds as well.
                 notifyFirstFrameMillis(System.currentTimeMillis()
                         - AudioTimestamp.bytesToMillis(readBytes, mConfig.byteRate()));
             }
+
+            // Check whether we have reached the max length.
             boolean didReachMaxLength = (mLastTimeUs - mFirstTimeUs) > getMaxLengthMillis() * 1000L;
             if (didReachMaxLength && !endOfStream) {
                 LOG.w("read thread - this frame reached the maxLength! deltaUs:", mLastTimeUs - mFirstTimeUs);
                 notifyMaxLengthReached();
             }
-            int gaps = mTimestamp.getGapCount(mConfig.frameSize(), mConfig.byteRate());
+
+            // Add zeros if we have big gaps. Even if the timestamps are correct, when there are
+            // gaps between them the encoder might shrink all timestamps so that the audio becomes
+            // continuous, which results in a fast-forwarded video.
+            // Adding zeros does not fix the gaps themselves - the audio will still be distorted.
+            // But at least we get a video with the correct playback speed.
+            int gaps = mTimestamp.getGapCount(mConfig.frameSize());
             if (gaps > 0) {
                 long gapStart = mTimestamp.getGapStartUs(mLastTimeUs);
                 long frameUs = AudioTimestamp.bytesToUs(mConfig.frameSize(), mConfig.byteRate());
diff --git a/cameraview/src/main/java/com/otaliastudios/cameraview/video/encoding/AudioTimestamp.java b/cameraview/src/main/java/com/otaliastudios/cameraview/video/encoding/AudioTimestamp.java
index e25917e1..8a4039c6 100644
--- a/cameraview/src/main/java/com/otaliastudios/cameraview/video/encoding/AudioTimestamp.java
+++ b/cameraview/src/main/java/com/otaliastudios/cameraview/video/encoding/AudioTimestamp.java
@@ -10,10 +10,12 @@ import android.util.Log;
  * This is independent from the channels count, as long as the read bytes include
  * all channels and the byte rate accounts for this as well.
  * If channels is 2, both values will be doubled and we behave the same.
+ *
+ * This class also keeps track of gaps between frames.
+ * This can be used, for example, to write zeros instead of nothing.
  */
 class AudioTimestamp {
 
-    @SuppressWarnings("WeakerAccess")
     static long bytesToUs(long bytes, int byteRate) {
         return (1000000L * bytes) / byteRate;
     }
@@ -22,10 +24,15 @@ class AudioTimestamp {
         return (1000L * bytes) / byteRate;
     }
 
+    private int mByteRate;
     private long mBaseTimeUs;
     private long mBytesSinceBaseTime;
     private long mGapUs;
 
+    AudioTimestamp(int byteRate) {
+        mByteRate = byteRate;
+    }
+
     /**
      * This method accounts for the current time and proved to be the most reliable among
      * the ones tested.
@@ -37,8 +44,8 @@ class AudioTimestamp {
      * Returns timestamps in the {@link System#nanoTime()} reference.
      */
     @SuppressWarnings("SameParameterValue")
-    long increaseUs(int readBytes, int byteRate) {
-        long bufferDurationUs = bytesToUs((long) readBytes, byteRate);
+    long increaseUs(int readBytes) {
+        long bufferDurationUs = bytesToUs((long) readBytes, mByteRate);
         long bufferEndTimeUs = System.nanoTime() / 1000; // now
         long bufferStartTimeUs = bufferEndTimeUs - bufferDurationUs;
 
@@ -48,7 +55,7 @@ class AudioTimestamp {
         // Recompute time assuming that we are respecting the sampling frequency.
         // This puts the time at the end of last read buffer, which means, where we
         // should be if we had no delay / missed buffers.
-        long correctedTimeUs = mBaseTimeUs + bytesToUs(mBytesSinceBaseTime, byteRate);
+        long correctedTimeUs = mBaseTimeUs + bytesToUs(mBytesSinceBaseTime, mByteRate);
         long correctionUs = bufferStartTimeUs - correctedTimeUs;
 
         // However, if the correction is too big (> 2*bufferDurationUs), reset to this point.
@@ -65,14 +72,27 @@ class AudioTimestamp {
         }
     }
 
-    // This is guaranteed to be > 1 (actually > 2, since 2 is the constant we
-    // use in the correction check).
-    int getGapCount(int frameBytes, int byteRate) {
+    /**
+     * Returns the number of gaps (meaning, missing frames) assuming that each
+     * frame has frameBytes size. Possibly 0.
+     *
+     * @param frameBytes size of standard frame
+     * @return number of gaps
+     */
+    int getGapCount(int frameBytes) {
         if (mGapUs == 0) return 0;
-        long durationUs = bytesToUs((long) frameBytes, byteRate);
+        long durationUs = bytesToUs((long) frameBytes, mByteRate);
         return (int) (mGapUs / durationUs);
     }
 
+    /**
+     * Returns the timestamp of the first missing frame.
+     * Should be called only after {@link #getGapCount(int)} returns something
+     * greater than zero.
+     *
+     * @param lastTimeUs the last real frame timestamp
+     * @return the first missing frame timestamp
+     */
     long getGapStartUs(long lastTimeUs) {
         return lastTimeUs - mGapUs;
     }
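
For reference, here is a minimal, self-contained sketch of the zero-filling idea the new
comments describe: getGapCount() tells us how many frames are missing, getGapStartUs()
where the silence should start, and we write one zero-filled buffer per missing frame so
timestamps stay continuous. This is not part of the patch; the GapFillingSketch class and
the writeFrame() sink are hypothetical stand-ins (the real code hands buffers to the
encoder instead of printing), and bytesToUs() just mirrors the AudioTimestamp helper.

import java.nio.ByteBuffer;

public class GapFillingSketch {

    // Microseconds represented by `bytes` of audio at the given byte rate,
    // same formula as AudioTimestamp.bytesToUs().
    static long bytesToUs(long bytes, int byteRate) {
        return (1000000L * bytes) / byteRate;
    }

    // Writes `gaps` frames of zeros (silence), starting at gapStartUs and advancing
    // by one frame duration each time, so the muxed stream keeps a continuous
    // timeline and the correct playback speed.
    static void fillGaps(int gaps, long gapStartUs, int frameBytes, int byteRate) {
        long frameUs = bytesToUs(frameBytes, byteRate);
        long timeUs = gapStartUs;
        for (int i = 0; i < gaps; i++) {
            // allocate() zero-initializes the buffer, so this is already silence.
            ByteBuffer silence = ByteBuffer.allocate(frameBytes);
            writeFrame(silence, timeUs); // hypothetical sink, see note above
            timeUs += frameUs;
        }
    }

    // Stand-in for handing a buffer and its timestamp to the encoder.
    static void writeFrame(ByteBuffer buffer, long timestampUs) {
        System.out.println(buffer.remaining() + " silent bytes at " + timestampUs + "us");
    }

    public static void main(String[] args) {
        // Example: 44100 Hz, 16-bit mono -> byteRate = 88200; 1024-byte frames,
        // 3 missing frames starting at the 1-second mark.
        fillGaps(3, 1_000_000L, 1024, 88200);
    }
}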