Commit e6ce6764 by andrewlewis Committed by Oliver Woodman

Clean up DefaultAudioSink

This is a first step towards factoring out position tracking functionality.

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=189027731
parent 0b182d18
...@@ -55,8 +55,12 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -55,8 +55,12 @@ public final class DefaultAudioSink implements AudioSink {
*/ */
public static final class InvalidAudioTrackTimestampException extends RuntimeException { public static final class InvalidAudioTrackTimestampException extends RuntimeException {
/** @param message The detail message for this exception. */ /**
public InvalidAudioTrackTimestampException(String message) { * Creates a new invalid timestamp exception with the specified message.
*
* @param message The detail message for this exception.
*/
private InvalidAudioTrackTimestampException(String message) {
super(message); super(message);
} }
...@@ -188,8 +192,8 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -188,8 +192,8 @@ public final class DefaultAudioSink implements AudioSink {
private boolean isInputPcm; private boolean isInputPcm;
private boolean shouldConvertHighResIntPcmToFloat; private boolean shouldConvertHighResIntPcmToFloat;
private int inputSampleRate; private int inputSampleRate;
private int sampleRate; private int outputSampleRate;
private int channelConfig; private int outputChannelConfig;
private @C.Encoding int outputEncoding; private @C.Encoding int outputEncoding;
private AudioAttributes audioAttributes; private AudioAttributes audioAttributes;
private boolean processingEnabled; private boolean processingEnabled;
...@@ -271,8 +275,7 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -271,8 +275,7 @@ public final class DefaultAudioSink implements AudioSink {
releasingConditionVariable = new ConditionVariable(true); releasingConditionVariable = new ConditionVariable(true);
if (Util.SDK_INT >= 18) { if (Util.SDK_INT >= 18) {
try { try {
getLatencyMethod = getLatencyMethod = AudioTrack.class.getMethod("getLatency", (Class<?>[]) null);
AudioTrack.class.getMethod("getLatency", (Class<?>[]) null);
} catch (NoSuchMethodException e) { } catch (NoSuchMethodException e) {
// There's no guarantee this method exists. Do nothing. // There's no guarantee this method exists. Do nothing.
} }
...@@ -305,6 +308,8 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -305,6 +308,8 @@ public final class DefaultAudioSink implements AudioSink {
playbackParametersCheckpoints = new ArrayDeque<>(); playbackParametersCheckpoints = new ArrayDeque<>();
} }
// AudioSink implementation.
@Override @Override
public void setListener(Listener listener) { public void setListener(Listener listener) {
this.listener = listener; this.listener = listener;
...@@ -312,7 +317,7 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -312,7 +317,7 @@ public final class DefaultAudioSink implements AudioSink {
@Override @Override
public boolean isEncodingSupported(@C.Encoding int encoding) { public boolean isEncodingSupported(@C.Encoding int encoding) {
if (isEncodingPcm(encoding)) { if (Util.isEncodingPcm(encoding)) {
// AudioTrack supports 16-bit integer PCM output in all platform API versions, and float // AudioTrack supports 16-bit integer PCM output in all platform API versions, and float
// output from platform API version 21 only. Other integer PCM encodings are resampled by this // output from platform API version 21 only. Other integer PCM encodings are resampled by this
// sink to 16-bit PCM. // sink to 16-bit PCM.
...@@ -338,9 +343,10 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -338,9 +343,10 @@ public final class DefaultAudioSink implements AudioSink {
long positionUs; long positionUs;
if (audioTimestampSet) { if (audioTimestampSet) {
// Calculate the speed-adjusted position using the timestamp (which may be in the future). // Calculate the speed-adjusted position using the timestamp (which may be in the future).
long elapsedSinceTimestampUs = systemClockUs - (audioTrackUtil.getTimestampNanoTime() / 1000); long elapsedSinceTimestampUs = systemClockUs - audioTrackUtil.getTimestampSystemTimeUs();
long elapsedSinceTimestampFrames = durationUsToFrames(elapsedSinceTimestampUs); long elapsedSinceTimestampFrames = durationUsToFrames(elapsedSinceTimestampUs);
long elapsedFrames = audioTrackUtil.getTimestampFramePosition() + elapsedSinceTimestampFrames; long elapsedFrames =
audioTrackUtil.getTimestampPositionFrames() + elapsedSinceTimestampFrames;
positionUs = framesToDurationUs(elapsedFrames); positionUs = framesToDurationUs(elapsedFrames);
} else { } else {
if (playheadOffsetCount == 0) { if (playheadOffsetCount == 0) {
...@@ -369,7 +375,7 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -369,7 +375,7 @@ public final class DefaultAudioSink implements AudioSink {
this.inputSampleRate = inputSampleRate; this.inputSampleRate = inputSampleRate;
int channelCount = inputChannelCount; int channelCount = inputChannelCount;
int sampleRate = inputSampleRate; int sampleRate = inputSampleRate;
isInputPcm = isEncodingPcm(inputEncoding); isInputPcm = Util.isEncodingPcm(inputEncoding);
shouldConvertHighResIntPcmToFloat = shouldConvertHighResIntPcmToFloat =
enableConvertHighResIntPcmToFloat enableConvertHighResIntPcmToFloat
&& isEncodingSupported(C.ENCODING_PCM_32BIT) && isEncodingSupported(C.ENCODING_PCM_32BIT)
...@@ -448,8 +454,11 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -448,8 +454,11 @@ public final class DefaultAudioSink implements AudioSink {
channelConfig = AudioFormat.CHANNEL_OUT_STEREO; channelConfig = AudioFormat.CHANNEL_OUT_STEREO;
} }
if (!flush && isInitialized() && outputEncoding == encoding && this.sampleRate == sampleRate if (!flush
&& this.channelConfig == channelConfig) { && isInitialized()
&& outputEncoding == encoding
&& outputSampleRate == sampleRate
&& outputChannelConfig == channelConfig) {
// We already have an audio track with the correct sample rate, channel config and encoding. // We already have an audio track with the correct sample rate, channel config and encoding.
return; return;
} }
...@@ -457,12 +466,11 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -457,12 +466,11 @@ public final class DefaultAudioSink implements AudioSink {
reset(); reset();
this.processingEnabled = processingEnabled; this.processingEnabled = processingEnabled;
this.sampleRate = sampleRate; outputSampleRate = sampleRate;
this.channelConfig = channelConfig; outputChannelConfig = channelConfig;
outputEncoding = encoding; outputEncoding = encoding;
if (isInputPcm) { outputPcmFrameSize =
outputPcmFrameSize = Util.getPcmFrameSize(outputEncoding, channelCount); isInputPcm ? Util.getPcmFrameSize(outputEncoding, channelCount) : C.LENGTH_UNSET;
}
if (specifiedBufferSize != 0) { if (specifiedBufferSize != 0) {
bufferSize = specifiedBufferSize; bufferSize = specifiedBufferSize;
} else if (isInputPcm) { } else if (isInputPcm) {
...@@ -550,6 +558,7 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -550,6 +558,7 @@ public final class DefaultAudioSink implements AudioSink {
audioTrackUtil.reconfigure(audioTrack, needsPassthroughWorkarounds()); audioTrackUtil.reconfigure(audioTrack, needsPassthroughWorkarounds());
setVolumeInternal(); setVolumeInternal();
hasData = false; hasData = false;
latencyUs = 0;
} }
@Override @Override
...@@ -967,7 +976,6 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -967,7 +976,6 @@ public final class DefaultAudioSink implements AudioSink {
avSyncHeader = null; avSyncHeader = null;
bytesUntilNextAvSync = 0; bytesUntilNextAvSync = 0;
startMediaTimeState = START_NOT_SET; startMediaTimeState = START_NOT_SET;
latencyUs = 0;
resetSyncParams(); resetSyncParams();
if (audioTrack.getPlayState() == PLAYSTATE_PLAYING) { if (audioTrack.getPlayState() == PLAYSTATE_PLAYING) {
audioTrack.pause(); audioTrack.pause();
...@@ -1067,15 +1075,15 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -1067,15 +1075,15 @@ public final class DefaultAudioSink implements AudioSink {
// The AudioTrack hasn't output anything yet. // The AudioTrack hasn't output anything yet.
return; return;
} }
long systemClockUs = System.nanoTime() / 1000; long systemTimeUs = System.nanoTime() / 1000;
if (systemClockUs - lastPlayheadSampleTimeUs >= MIN_PLAYHEAD_OFFSET_SAMPLE_INTERVAL_US) { if (systemTimeUs - lastPlayheadSampleTimeUs >= MIN_PLAYHEAD_OFFSET_SAMPLE_INTERVAL_US) {
// Take a new sample and update the smoothed offset between the system clock and the playhead. // Take a new sample and update the smoothed offset between the system clock and the playhead.
playheadOffsets[nextPlayheadOffsetIndex] = playbackPositionUs - systemClockUs; playheadOffsets[nextPlayheadOffsetIndex] = playbackPositionUs - systemTimeUs;
nextPlayheadOffsetIndex = (nextPlayheadOffsetIndex + 1) % MAX_PLAYHEAD_OFFSET_COUNT; nextPlayheadOffsetIndex = (nextPlayheadOffsetIndex + 1) % MAX_PLAYHEAD_OFFSET_COUNT;
if (playheadOffsetCount < MAX_PLAYHEAD_OFFSET_COUNT) { if (playheadOffsetCount < MAX_PLAYHEAD_OFFSET_COUNT) {
playheadOffsetCount++; playheadOffsetCount++;
} }
lastPlayheadSampleTimeUs = systemClockUs; lastPlayheadSampleTimeUs = systemTimeUs;
smoothedPlayheadOffsetUs = 0; smoothedPlayheadOffsetUs = 0;
for (int i = 0; i < playheadOffsetCount; i++) { for (int i = 0; i < playheadOffsetCount; i++) {
smoothedPlayheadOffsetUs += playheadOffsets[i] / playheadOffsetCount; smoothedPlayheadOffsetUs += playheadOffsets[i] / playheadOffsetCount;
...@@ -1088,31 +1096,52 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -1088,31 +1096,52 @@ public final class DefaultAudioSink implements AudioSink {
return; return;
} }
if (systemClockUs - lastTimestampSampleTimeUs >= MIN_TIMESTAMP_SAMPLE_INTERVAL_US) { if (systemTimeUs - lastTimestampSampleTimeUs >= MIN_TIMESTAMP_SAMPLE_INTERVAL_US) {
audioTimestampSet = audioTrackUtil.updateTimestamp(); audioTimestampSet = audioTrackUtil.updateTimestamp();
if (audioTimestampSet) { if (audioTimestampSet) {
// Perform sanity checks on the timestamp. // Perform sanity checks on the timestamp.
long audioTimestampUs = audioTrackUtil.getTimestampNanoTime() / 1000; long audioTimestampSystemTimeUs = audioTrackUtil.getTimestampSystemTimeUs();
long audioTimestampFramePosition = audioTrackUtil.getTimestampFramePosition(); long audioTimestampPositionFrames = audioTrackUtil.getTimestampPositionFrames();
if (audioTimestampUs < resumeSystemTimeUs) { if (audioTimestampSystemTimeUs < resumeSystemTimeUs) {
// The timestamp corresponds to a time before the track was most recently resumed. // The timestamp corresponds to a time before the track was most recently resumed.
audioTimestampSet = false; audioTimestampSet = false;
} else if (Math.abs(audioTimestampUs - systemClockUs) > MAX_AUDIO_TIMESTAMP_OFFSET_US) { } else if (Math.abs(audioTimestampSystemTimeUs - systemTimeUs)
> MAX_AUDIO_TIMESTAMP_OFFSET_US) {
// The timestamp time base is probably wrong. // The timestamp time base is probably wrong.
String message = "Spurious audio timestamp (system clock mismatch): " String message =
+ audioTimestampFramePosition + ", " + audioTimestampUs + ", " + systemClockUs + ", " "Spurious audio timestamp (system clock mismatch): "
+ playbackPositionUs + ", " + getSubmittedFrames() + ", " + getWrittenFrames(); + audioTimestampPositionFrames
+ ", "
+ audioTimestampSystemTimeUs
+ ", "
+ systemTimeUs
+ ", "
+ playbackPositionUs
+ ", "
+ getSubmittedFrames()
+ ", "
+ getWrittenFrames();
if (failOnSpuriousAudioTimestamp) { if (failOnSpuriousAudioTimestamp) {
throw new InvalidAudioTrackTimestampException(message); throw new InvalidAudioTrackTimestampException(message);
} }
Log.w(TAG, message); Log.w(TAG, message);
audioTimestampSet = false; audioTimestampSet = false;
} else if (Math.abs(framesToDurationUs(audioTimestampFramePosition) - playbackPositionUs) } else if (Math.abs(framesToDurationUs(audioTimestampPositionFrames) - playbackPositionUs)
> MAX_AUDIO_TIMESTAMP_OFFSET_US) { > MAX_AUDIO_TIMESTAMP_OFFSET_US) {
// The timestamp frame position is probably wrong. // The timestamp frame position is probably wrong.
String message = "Spurious audio timestamp (frame position mismatch): " String message =
+ audioTimestampFramePosition + ", " + audioTimestampUs + ", " + systemClockUs + ", " "Spurious audio timestamp (frame position mismatch): "
+ playbackPositionUs + ", " + getSubmittedFrames() + ", " + getWrittenFrames(); + audioTimestampPositionFrames
+ ", "
+ audioTimestampSystemTimeUs
+ ", "
+ systemTimeUs
+ ", "
+ playbackPositionUs
+ ", "
+ getSubmittedFrames()
+ ", "
+ getWrittenFrames();
if (failOnSpuriousAudioTimestamp) { if (failOnSpuriousAudioTimestamp) {
throw new InvalidAudioTrackTimestampException(message); throw new InvalidAudioTrackTimestampException(message);
} }
...@@ -1138,7 +1167,7 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -1138,7 +1167,7 @@ public final class DefaultAudioSink implements AudioSink {
getLatencyMethod = null; getLatencyMethod = null;
} }
} }
lastTimestampSampleTimeUs = systemClockUs; lastTimestampSampleTimeUs = systemTimeUs;
} }
} }
...@@ -1151,11 +1180,11 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -1151,11 +1180,11 @@ public final class DefaultAudioSink implements AudioSink {
} }
private long framesToDurationUs(long frameCount) { private long framesToDurationUs(long frameCount) {
return (frameCount * C.MICROS_PER_SECOND) / sampleRate; return (frameCount * C.MICROS_PER_SECOND) / outputSampleRate;
} }
private long durationUsToFrames(long durationUs) { private long durationUsToFrames(long durationUs) {
return (durationUs * sampleRate) / C.MICROS_PER_SECOND; return (durationUs * outputSampleRate) / C.MICROS_PER_SECOND;
} }
private long getSubmittedFrames() { private long getSubmittedFrames() {
...@@ -1203,12 +1232,25 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -1203,12 +1232,25 @@ public final class DefaultAudioSink implements AudioSink {
} else { } else {
int streamType = Util.getStreamTypeForAudioUsage(audioAttributes.usage); int streamType = Util.getStreamTypeForAudioUsage(audioAttributes.usage);
if (audioSessionId == C.AUDIO_SESSION_ID_UNSET) { if (audioSessionId == C.AUDIO_SESSION_ID_UNSET) {
audioTrack = new AudioTrack(streamType, sampleRate, channelConfig, outputEncoding, audioTrack =
bufferSize, MODE_STREAM); new AudioTrack(
streamType,
outputSampleRate,
outputChannelConfig,
outputEncoding,
bufferSize,
MODE_STREAM);
} else { } else {
// Re-attach to the same audio session. // Re-attach to the same audio session.
audioTrack = new AudioTrack(streamType, sampleRate, channelConfig, outputEncoding, audioTrack =
bufferSize, MODE_STREAM, audioSessionId); new AudioTrack(
streamType,
outputSampleRate,
outputChannelConfig,
outputEncoding,
bufferSize,
MODE_STREAM,
audioSessionId);
} }
} }
...@@ -1220,7 +1262,7 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -1220,7 +1262,7 @@ public final class DefaultAudioSink implements AudioSink {
// The track has already failed to initialize, so it wouldn't be that surprising if release // The track has already failed to initialize, so it wouldn't be that surprising if release
// were to fail too. Swallow the exception. // were to fail too. Swallow the exception.
} }
throw new InitializationException(state, sampleRate, channelConfig, bufferSize); throw new InitializationException(state, outputSampleRate, outputChannelConfig, bufferSize);
} }
return audioTrack; return audioTrack;
} }
...@@ -1237,11 +1279,12 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -1237,11 +1279,12 @@ public final class DefaultAudioSink implements AudioSink {
} else { } else {
attributes = audioAttributes.getAudioAttributesV21(); attributes = audioAttributes.getAudioAttributesV21();
} }
AudioFormat format = new AudioFormat.Builder() AudioFormat format =
.setChannelMask(channelConfig) new AudioFormat.Builder()
.setEncoding(outputEncoding) .setChannelMask(outputChannelConfig)
.setSampleRate(sampleRate) .setEncoding(outputEncoding)
.build(); .setSampleRate(outputSampleRate)
.build();
int audioSessionId = this.audioSessionId != C.AUDIO_SESSION_ID_UNSET ? this.audioSessionId int audioSessionId = this.audioSessionId != C.AUDIO_SESSION_ID_UNSET ? this.audioSessionId
: AudioManager.AUDIO_SESSION_ID_GENERATE; : AudioManager.AUDIO_SESSION_ID_GENERATE;
return new AudioTrack(attributes, format, bufferSize, MODE_STREAM, audioSessionId); return new AudioTrack(attributes, format, bufferSize, MODE_STREAM, audioSessionId);
...@@ -1262,12 +1305,6 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -1262,12 +1305,6 @@ public final class DefaultAudioSink implements AudioSink {
: toIntPcmAvailableAudioProcessors; : toIntPcmAvailableAudioProcessors;
} }
private static boolean isEncodingPcm(@C.Encoding int encoding) {
return encoding == C.ENCODING_PCM_8BIT || encoding == C.ENCODING_PCM_16BIT
|| encoding == C.ENCODING_PCM_24BIT || encoding == C.ENCODING_PCM_32BIT
|| encoding == C.ENCODING_PCM_FLOAT;
}
private static int getFramesPerEncodedSample(@C.Encoding int encoding, ByteBuffer buffer) { private static int getFramesPerEncodedSample(@C.Encoding int encoding, ByteBuffer buffer) {
if (encoding == C.ENCODING_DTS || encoding == C.ENCODING_DTS_HD) { if (encoding == C.ENCODING_DTS || encoding == C.ENCODING_DTS_HD) {
return DtsUtil.parseDtsAudioSampleCount(buffer); return DtsUtil.parseDtsAudioSampleCount(buffer);
...@@ -1480,8 +1517,8 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -1480,8 +1517,8 @@ public final class DefaultAudioSink implements AudioSink {
} }
/** /**
* Updates the values returned by {@link #getTimestampNanoTime()} and * Updates the values returned by {@link #getTimestampSystemTimeUs()} and {@link
* {@link #getTimestampFramePosition()}. * #getTimestampPositionFrames()}.
* *
* @return Whether the timestamp values were updated. * @return Whether the timestamp values were updated.
*/ */
...@@ -1490,31 +1527,31 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -1490,31 +1527,31 @@ public final class DefaultAudioSink implements AudioSink {
} }
/** /**
* Returns the {@link android.media.AudioTimestamp#nanoTime} obtained during the most recent * Returns the system time in microseconds of the last {@link AudioTimestamp}, obtained during
* call to {@link #updateTimestamp()} that returned true. * the most recent call to {@link #updateTimestamp()} that returned true.
* *
* @return The nanoTime obtained during the most recent call to {@link #updateTimestamp()} that * @return The system time in microseconds of the last {@link AudioTimestamp}, obtained during
* returned true. * the most recent call to {@link #updateTimestamp()} that returned true.
* @throws UnsupportedOperationException If the implementation does not support audio timestamp * @throws UnsupportedOperationException If the implementation does not support audio timestamp
* queries. {@link #updateTimestamp()} will always return false in this case. * queries. {@link #updateTimestamp()} will always return false in this case.
*/ */
public long getTimestampNanoTime() { public long getTimestampSystemTimeUs() {
// Should never be called if updateTimestamp() returned false. // Should never be called if updateTimestamp() returned false.
throw new UnsupportedOperationException(); throw new UnsupportedOperationException();
} }
/** /**
* Returns the {@link android.media.AudioTimestamp#framePosition} obtained during the most * Returns the position in frames of the last {@link AudioTimestamp}, obtained during the most
* recent call to {@link #updateTimestamp()} that returned true. The value is adjusted so that * recent call to {@link #updateTimestamp()} that returned true. The value is adjusted so that
* wrap around only occurs if the value exceeds {@link Long#MAX_VALUE} (which in practice will * wrap around only occurs if the value exceeds {@link Long#MAX_VALUE} (which in practice will
* never happen). * never happen).
* *
* @return The framePosition obtained during the most recent call to {@link #updateTimestamp()} * @return The position in frames of the last {@link AudioTimestamp}, obtained during the most
* that returned true. * recent call to {@link #updateTimestamp()} that returned true.
* @throws UnsupportedOperationException If the implementation does not support audio timestamp * @throws UnsupportedOperationException If the implementation does not support audio timestamp
* queries. {@link #updateTimestamp()} will always return false in this case. * queries. {@link #updateTimestamp()} will always return false in this case.
*/ */
public long getTimestampFramePosition() { public long getTimestampPositionFrames() {
// Should never be called if updateTimestamp() returned false. // Should never be called if updateTimestamp() returned false.
throw new UnsupportedOperationException(); throw new UnsupportedOperationException();
} }
...@@ -1526,9 +1563,9 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -1526,9 +1563,9 @@ public final class DefaultAudioSink implements AudioSink {
private final AudioTimestamp audioTimestamp; private final AudioTimestamp audioTimestamp;
private long rawTimestampFramePositionWrapCount; private long rawTimestampPositionFramesWrapCount;
private long lastRawTimestampFramePosition; private long lastRawTimestampPositionFrames;
private long lastTimestampFramePosition; private long lastTimestampPositionFrames;
public AudioTrackUtilV19() { public AudioTrackUtilV19() {
audioTimestamp = new AudioTimestamp(); audioTimestamp = new AudioTimestamp();
...@@ -1537,34 +1574,35 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -1537,34 +1574,35 @@ public final class DefaultAudioSink implements AudioSink {
@Override @Override
public void reconfigure(AudioTrack audioTrack, boolean needsPassthroughWorkaround) { public void reconfigure(AudioTrack audioTrack, boolean needsPassthroughWorkaround) {
super.reconfigure(audioTrack, needsPassthroughWorkaround); super.reconfigure(audioTrack, needsPassthroughWorkaround);
rawTimestampFramePositionWrapCount = 0; rawTimestampPositionFramesWrapCount = 0;
lastRawTimestampFramePosition = 0; lastRawTimestampPositionFrames = 0;
lastTimestampFramePosition = 0; lastTimestampPositionFrames = 0;
} }
@Override @Override
public boolean updateTimestamp() { public boolean updateTimestamp() {
boolean updated = audioTrack.getTimestamp(audioTimestamp); boolean updated = audioTrack.getTimestamp(audioTimestamp);
if (updated) { if (updated) {
long rawFramePosition = audioTimestamp.framePosition; long rawPositionFrames = audioTimestamp.framePosition;
if (lastRawTimestampFramePosition > rawFramePosition) { if (lastRawTimestampPositionFrames > rawPositionFrames) {
// The value must have wrapped around. // The value must have wrapped around.
rawTimestampFramePositionWrapCount++; rawTimestampPositionFramesWrapCount++;
} }
lastRawTimestampFramePosition = rawFramePosition; lastRawTimestampPositionFrames = rawPositionFrames;
lastTimestampFramePosition = rawFramePosition + (rawTimestampFramePositionWrapCount << 32); lastTimestampPositionFrames =
rawPositionFrames + (rawTimestampPositionFramesWrapCount << 32);
} }
return updated; return updated;
} }
@Override @Override
public long getTimestampNanoTime() { public long getTimestampSystemTimeUs() {
return audioTimestamp.nanoTime; return audioTimestamp.nanoTime / 1000;
} }
@Override @Override
public long getTimestampFramePosition() { public long getTimestampPositionFrames() {
return lastTimestampFramePosition; return lastTimestampPositionFrames;
} }
} }
......
...@@ -942,6 +942,20 @@ public final class Util { ...@@ -942,6 +942,20 @@ public final class Util {
} }
/** /**
* Returns whether {@code encoding} is one of the PCM encodings.
*
* @param encoding The encoding of the audio data.
* @return Whether the encoding is one of the PCM encodings.
*/
public static boolean isEncodingPcm(@C.Encoding int encoding) {
  // Enumerate the PCM encodings explicitly; any other encoding is compressed/passthrough.
  switch (encoding) {
    case C.ENCODING_PCM_8BIT:
    case C.ENCODING_PCM_16BIT:
    case C.ENCODING_PCM_24BIT:
    case C.ENCODING_PCM_32BIT:
    case C.ENCODING_PCM_FLOAT:
      return true;
    default:
      return false;
  }
}
/**
* Returns whether {@code encoding} is high resolution (&gt; 16-bit) integer PCM. * Returns whether {@code encoding} is high resolution (&gt; 16-bit) integer PCM.
* *
* @param encoding The encoding of the audio data. * @param encoding The encoding of the audio data.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment