Commit 3e6cf42f by andrewlewis Committed by Andrew Lewis

Factor out default audio sink configuration

This cleanup is in preparation for draining audio processors on reconfiguration
to a format that doesn't require a new AudioTrack to be created.

PiperOrigin-RevId: 233990983
parent 12ed18c7
...@@ -242,18 +242,10 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -242,18 +242,10 @@ public final class DefaultAudioSink implements AudioSink {
/** Used to keep the audio session active on pre-V21 builds (see {@link #initialize()}). */ /** Used to keep the audio session active on pre-V21 builds (see {@link #initialize()}). */
@Nullable private AudioTrack keepSessionIdAudioTrack; @Nullable private AudioTrack keepSessionIdAudioTrack;
private Configuration configuration;
private AudioTrack audioTrack; private AudioTrack audioTrack;
private boolean isInputPcm;
private boolean shouldConvertHighResIntPcmToFloat;
private int inputSampleRate;
private int outputSampleRate;
private int outputChannelConfig;
private @C.Encoding int outputEncoding;
private AudioAttributes audioAttributes;
private boolean processingEnabled;
private boolean canApplyPlaybackParameters;
private int bufferSize;
private AudioAttributes audioAttributes;
@Nullable private PlaybackParameters afterDrainPlaybackParameters; @Nullable private PlaybackParameters afterDrainPlaybackParameters;
private PlaybackParameters playbackParameters; private PlaybackParameters playbackParameters;
private long playbackParametersOffsetUs; private long playbackParametersOffsetUs;
...@@ -262,10 +254,8 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -262,10 +254,8 @@ public final class DefaultAudioSink implements AudioSink {
@Nullable private ByteBuffer avSyncHeader; @Nullable private ByteBuffer avSyncHeader;
private int bytesUntilNextAvSync; private int bytesUntilNextAvSync;
private int pcmFrameSize;
private long submittedPcmBytes; private long submittedPcmBytes;
private long submittedEncodedFrames; private long submittedEncodedFrames;
private int outputPcmFrameSize;
private long writtenPcmBytes; private long writtenPcmBytes;
private long writtenEncodedFrames; private long writtenEncodedFrames;
private int framesPerEncodedSample; private int framesPerEncodedSample;
...@@ -397,7 +387,7 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -397,7 +387,7 @@ public final class DefaultAudioSink implements AudioSink {
return CURRENT_POSITION_NOT_SET; return CURRENT_POSITION_NOT_SET;
} }
long positionUs = audioTrackPositionTracker.getCurrentPositionUs(sourceEnded); long positionUs = audioTrackPositionTracker.getCurrentPositionUs(sourceEnded);
positionUs = Math.min(positionUs, framesToDurationUs(getWrittenFrames())); positionUs = Math.min(positionUs, configuration.framesToDurationUs(getWrittenFrames()));
return startMediaTimeUs + applySkipping(applySpeedup(positionUs)); return startMediaTimeUs + applySkipping(applySpeedup(positionUs));
} }
...@@ -411,23 +401,7 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -411,23 +401,7 @@ public final class DefaultAudioSink implements AudioSink {
int trimStartFrames, int trimStartFrames,
int trimEndFrames) int trimEndFrames)
throws ConfigurationException { throws ConfigurationException {
boolean flush = false; if (Util.SDK_INT < 21 && inputChannelCount == 8 && outputChannels == null) {
this.inputSampleRate = inputSampleRate;
int channelCount = inputChannelCount;
int sampleRate = inputSampleRate;
isInputPcm = Util.isEncodingLinearPcm(inputEncoding);
shouldConvertHighResIntPcmToFloat =
enableConvertHighResIntPcmToFloat
&& supportsOutput(channelCount, C.ENCODING_PCM_FLOAT)
&& Util.isEncodingHighResolutionIntegerPcm(inputEncoding);
if (isInputPcm) {
pcmFrameSize = Util.getPcmFrameSize(inputEncoding, channelCount);
}
@C.Encoding int encoding = inputEncoding;
boolean processingEnabled = isInputPcm && inputEncoding != C.ENCODING_PCM_FLOAT;
canApplyPlaybackParameters = processingEnabled && !shouldConvertHighResIntPcmToFloat;
if (Util.SDK_INT < 21 && channelCount == 8 && outputChannels == null) {
// AudioTrack doesn't support 8 channel output before Android L. Discard the last two (side) // AudioTrack doesn't support 8 channel output before Android L. Discard the last two (side)
// channels to give a 6 channel stream that is supported. // channels to give a 6 channel stream that is supported.
outputChannels = new int[6]; outputChannels = new int[6];
...@@ -436,10 +410,24 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -436,10 +410,24 @@ public final class DefaultAudioSink implements AudioSink {
} }
} }
boolean isInputPcm = Util.isEncodingLinearPcm(inputEncoding);
boolean processingEnabled = isInputPcm && inputEncoding != C.ENCODING_PCM_FLOAT;
int sampleRate = inputSampleRate;
int channelCount = inputChannelCount;
@C.Encoding int encoding = inputEncoding;
boolean shouldConvertHighResIntPcmToFloat =
enableConvertHighResIntPcmToFloat
&& supportsOutput(inputChannelCount, C.ENCODING_PCM_FLOAT)
&& Util.isEncodingHighResolutionIntegerPcm(inputEncoding);
AudioProcessor[] availableAudioProcessors =
shouldConvertHighResIntPcmToFloat
? toFloatPcmAvailableAudioProcessors
: toIntPcmAvailableAudioProcessors;
boolean flush = false;
if (processingEnabled) { if (processingEnabled) {
trimmingAudioProcessor.setTrimFrameCount(trimStartFrames, trimEndFrames); trimmingAudioProcessor.setTrimFrameCount(trimStartFrames, trimEndFrames);
channelMappingAudioProcessor.setChannelMap(outputChannels); channelMappingAudioProcessor.setChannelMap(outputChannels);
for (AudioProcessor audioProcessor : getAvailableAudioProcessors()) { for (AudioProcessor audioProcessor : availableAudioProcessors) {
try { try {
flush |= audioProcessor.configure(sampleRate, channelCount, encoding); flush |= audioProcessor.configure(sampleRate, channelCount, encoding);
} catch (AudioProcessor.UnhandledFormatException e) { } catch (AudioProcessor.UnhandledFormatException e) {
...@@ -453,53 +441,39 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -453,53 +441,39 @@ public final class DefaultAudioSink implements AudioSink {
} }
} }
int channelConfig = getChannelConfig(channelCount, isInputPcm); int outputChannelConfig = getChannelConfig(channelCount, isInputPcm);
if (channelConfig == AudioFormat.CHANNEL_INVALID) { if (outputChannelConfig == AudioFormat.CHANNEL_INVALID) {
throw new ConfigurationException("Unsupported channel count: " + channelCount); throw new ConfigurationException("Unsupported channel count: " + channelCount);
} }
if (!flush int inputPcmFrameSize =
&& isInitialized() isInputPcm ? Util.getPcmFrameSize(inputEncoding, inputChannelCount) : C.LENGTH_UNSET;
&& outputEncoding == encoding int outputPcmFrameSize =
&& outputSampleRate == sampleRate isInputPcm ? Util.getPcmFrameSize(encoding, channelCount) : C.LENGTH_UNSET;
&& outputChannelConfig == channelConfig) { boolean canApplyPlaybackParameters = processingEnabled && !shouldConvertHighResIntPcmToFloat;
// We already have an audio track with the correct sample rate, channel config and encoding. Configuration pendingConfiguration =
return; new Configuration(
} isInputPcm,
inputPcmFrameSize,
inputSampleRate,
outputPcmFrameSize,
sampleRate,
outputChannelConfig,
encoding,
specifiedBufferSize,
processingEnabled,
canApplyPlaybackParameters,
availableAudioProcessors);
if (flush || configuration == null || !pendingConfiguration.canReuseAudioTrack(configuration)) {
flush(); flush();
this.processingEnabled = processingEnabled;
outputSampleRate = sampleRate;
outputChannelConfig = channelConfig;
outputEncoding = encoding;
outputPcmFrameSize =
isInputPcm ? Util.getPcmFrameSize(outputEncoding, channelCount) : C.LENGTH_UNSET;
bufferSize = specifiedBufferSize != 0 ? specifiedBufferSize : getDefaultBufferSize();
}
private int getDefaultBufferSize() {
if (isInputPcm) {
int minBufferSize =
AudioTrack.getMinBufferSize(outputSampleRate, outputChannelConfig, outputEncoding);
Assertions.checkState(minBufferSize != ERROR_BAD_VALUE);
int multipliedBufferSize = minBufferSize * BUFFER_MULTIPLICATION_FACTOR;
int minAppBufferSize = (int) durationUsToFrames(MIN_BUFFER_DURATION_US) * outputPcmFrameSize;
int maxAppBufferSize = (int) Math.max(minBufferSize,
durationUsToFrames(MAX_BUFFER_DURATION_US) * outputPcmFrameSize);
return Util.constrainValue(multipliedBufferSize, minAppBufferSize, maxAppBufferSize);
} else {
int rate = getMaximumEncodedRateBytesPerSecond(outputEncoding);
if (outputEncoding == C.ENCODING_AC3) {
rate *= AC3_BUFFER_MULTIPLICATION_FACTOR;
}
return (int) (PASSTHROUGH_BUFFER_DURATION_US * rate / C.MICROS_PER_SECOND);
} }
configuration = pendingConfiguration;
} }
private void setupAudioProcessors() { private void setupAudioProcessors() {
AudioProcessor[] audioProcessors = configuration.availableAudioProcessors;
ArrayList<AudioProcessor> newAudioProcessors = new ArrayList<>(); ArrayList<AudioProcessor> newAudioProcessors = new ArrayList<>();
for (AudioProcessor audioProcessor : getAvailableAudioProcessors()) { for (AudioProcessor audioProcessor : audioProcessors) {
if (audioProcessor.isActive()) { if (audioProcessor.isActive()) {
newAudioProcessors.add(audioProcessor); newAudioProcessors.add(audioProcessor);
} else { } else {
...@@ -528,7 +502,9 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -528,7 +502,9 @@ public final class DefaultAudioSink implements AudioSink {
// initialization of the audio track to fail. // initialization of the audio track to fail.
releasingConditionVariable.block(); releasingConditionVariable.block();
audioTrack = initializeAudioTrack(); audioTrack =
Assertions.checkNotNull(configuration)
.buildAudioTrack(tunneling, audioAttributes, audioSessionId);
int audioSessionId = audioTrack.getAudioSessionId(); int audioSessionId = audioTrack.getAudioSessionId();
if (enablePreV21AudioSessionWorkaround) { if (enablePreV21AudioSessionWorkaround) {
if (Util.SDK_INT < 21) { if (Util.SDK_INT < 21) {
...@@ -551,13 +527,16 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -551,13 +527,16 @@ public final class DefaultAudioSink implements AudioSink {
} }
playbackParameters = playbackParameters =
canApplyPlaybackParameters configuration.canApplyPlaybackParameters
? audioProcessorChain.applyPlaybackParameters(playbackParameters) ? audioProcessorChain.applyPlaybackParameters(playbackParameters)
: PlaybackParameters.DEFAULT; : PlaybackParameters.DEFAULT;
setupAudioProcessors(); setupAudioProcessors();
audioTrackPositionTracker.setAudioTrack( audioTrackPositionTracker.setAudioTrack(
audioTrack, outputEncoding, outputPcmFrameSize, bufferSize); audioTrack,
configuration.outputEncoding,
configuration.outputPcmFrameSize,
configuration.bufferSize);
setVolumeInternal(); setVolumeInternal();
if (auxEffectInfo.effectId != AuxEffectInfo.NO_AUX_EFFECT_ID) { if (auxEffectInfo.effectId != AuxEffectInfo.NO_AUX_EFFECT_ID) {
...@@ -606,9 +585,9 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -606,9 +585,9 @@ public final class DefaultAudioSink implements AudioSink {
return true; return true;
} }
if (!isInputPcm && framesPerEncodedSample == 0) { if (!configuration.isInputPcm && framesPerEncodedSample == 0) {
// If this is the first encoded sample, calculate the sample size in frames. // If this is the first encoded sample, calculate the sample size in frames.
framesPerEncodedSample = getFramesPerEncodedSample(outputEncoding, buffer); framesPerEncodedSample = getFramesPerEncodedSample(configuration.outputEncoding, buffer);
if (framesPerEncodedSample == 0) { if (framesPerEncodedSample == 0) {
// We still don't know the number of frames per sample, so drop the buffer. // We still don't know the number of frames per sample, so drop the buffer.
// For TrueHD this can occur after some seek operations, as not every sample starts with // For TrueHD this can occur after some seek operations, as not every sample starts with
...@@ -631,7 +610,7 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -631,7 +610,7 @@ public final class DefaultAudioSink implements AudioSink {
new PlaybackParametersCheckpoint( new PlaybackParametersCheckpoint(
newPlaybackParameters, newPlaybackParameters,
Math.max(0, presentationTimeUs), Math.max(0, presentationTimeUs),
framesToDurationUs(getWrittenFrames()))); configuration.framesToDurationUs(getWrittenFrames())));
// Update the set of active audio processors to take into account the new parameters. // Update the set of active audio processors to take into account the new parameters.
setupAudioProcessors(); setupAudioProcessors();
} }
...@@ -643,7 +622,7 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -643,7 +622,7 @@ public final class DefaultAudioSink implements AudioSink {
// Sanity check that presentationTimeUs is consistent with the expected value. // Sanity check that presentationTimeUs is consistent with the expected value.
long expectedPresentationTimeUs = long expectedPresentationTimeUs =
startMediaTimeUs startMediaTimeUs
+ inputFramesToDurationUs( + configuration.inputFramesToDurationUs(
getSubmittedFrames() - trimmingAudioProcessor.getTrimmedFrameCount()); getSubmittedFrames() - trimmingAudioProcessor.getTrimmedFrameCount());
if (startMediaTimeState == START_IN_SYNC if (startMediaTimeState == START_IN_SYNC
&& Math.abs(expectedPresentationTimeUs - presentationTimeUs) > 200000) { && Math.abs(expectedPresentationTimeUs - presentationTimeUs) > 200000) {
...@@ -663,7 +642,7 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -663,7 +642,7 @@ public final class DefaultAudioSink implements AudioSink {
} }
} }
if (isInputPcm) { if (configuration.isInputPcm) {
submittedPcmBytes += buffer.remaining(); submittedPcmBytes += buffer.remaining();
} else { } else {
submittedEncodedFrames += framesPerEncodedSample; submittedEncodedFrames += framesPerEncodedSample;
...@@ -672,7 +651,7 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -672,7 +651,7 @@ public final class DefaultAudioSink implements AudioSink {
inputBuffer = buffer; inputBuffer = buffer;
} }
if (processingEnabled) { if (configuration.processingEnabled) {
processBuffers(presentationTimeUs); processBuffers(presentationTimeUs);
} else { } else {
writeBuffer(inputBuffer, presentationTimeUs); writeBuffer(inputBuffer, presentationTimeUs);
...@@ -769,11 +748,11 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -769,11 +748,11 @@ public final class DefaultAudioSink implements AudioSink {
throw new WriteException(bytesWritten); throw new WriteException(bytesWritten);
} }
if (isInputPcm) { if (configuration.isInputPcm) {
writtenPcmBytes += bytesWritten; writtenPcmBytes += bytesWritten;
} }
if (bytesWritten == bytesRemaining) { if (bytesWritten == bytesRemaining) {
if (!isInputPcm) { if (!configuration.isInputPcm) {
writtenEncodedFrames += framesPerEncodedSample; writtenEncodedFrames += framesPerEncodedSample;
} }
outputBuffer = null; outputBuffer = null;
...@@ -798,7 +777,8 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -798,7 +777,8 @@ public final class DefaultAudioSink implements AudioSink {
private boolean drainAudioProcessorsToEndOfStream() throws WriteException { private boolean drainAudioProcessorsToEndOfStream() throws WriteException {
boolean audioProcessorNeedsEndOfStream = false; boolean audioProcessorNeedsEndOfStream = false;
if (drainingAudioProcessorIndex == C.INDEX_UNSET) { if (drainingAudioProcessorIndex == C.INDEX_UNSET) {
drainingAudioProcessorIndex = processingEnabled ? 0 : activeAudioProcessors.length; drainingAudioProcessorIndex =
configuration.processingEnabled ? 0 : activeAudioProcessors.length;
audioProcessorNeedsEndOfStream = true; audioProcessorNeedsEndOfStream = true;
} }
while (drainingAudioProcessorIndex < activeAudioProcessors.length) { while (drainingAudioProcessorIndex < activeAudioProcessors.length) {
...@@ -837,7 +817,7 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -837,7 +817,7 @@ public final class DefaultAudioSink implements AudioSink {
@Override @Override
public PlaybackParameters setPlaybackParameters(PlaybackParameters playbackParameters) { public PlaybackParameters setPlaybackParameters(PlaybackParameters playbackParameters) {
if (isInitialized() && !canApplyPlaybackParameters) { if (configuration != null && !configuration.canApplyPlaybackParameters) {
this.playbackParameters = PlaybackParameters.DEFAULT; this.playbackParameters = PlaybackParameters.DEFAULT;
return this.playbackParameters; return this.playbackParameters;
} }
...@@ -1060,99 +1040,27 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -1060,99 +1040,27 @@ public final class DefaultAudioSink implements AudioSink {
} }
private long applySkipping(long positionUs) { private long applySkipping(long positionUs) {
return positionUs + framesToDurationUs(audioProcessorChain.getSkippedOutputFrameCount()); return positionUs
+ configuration.framesToDurationUs(audioProcessorChain.getSkippedOutputFrameCount());
} }
private boolean isInitialized() { private boolean isInitialized() {
return audioTrack != null; return audioTrack != null;
} }
private long inputFramesToDurationUs(long frameCount) {
return (frameCount * C.MICROS_PER_SECOND) / inputSampleRate;
}
private long framesToDurationUs(long frameCount) {
return (frameCount * C.MICROS_PER_SECOND) / outputSampleRate;
}
private long durationUsToFrames(long durationUs) {
return (durationUs * outputSampleRate) / C.MICROS_PER_SECOND;
}
private long getSubmittedFrames() { private long getSubmittedFrames() {
return isInputPcm ? (submittedPcmBytes / pcmFrameSize) : submittedEncodedFrames; return configuration.isInputPcm
? (submittedPcmBytes / configuration.inputPcmFrameSize)
: submittedEncodedFrames;
} }
private long getWrittenFrames() { private long getWrittenFrames() {
return isInputPcm ? (writtenPcmBytes / outputPcmFrameSize) : writtenEncodedFrames; return configuration.isInputPcm
} ? (writtenPcmBytes / configuration.outputPcmFrameSize)
: writtenEncodedFrames;
private AudioTrack initializeAudioTrack() throws InitializationException {
AudioTrack audioTrack;
if (Util.SDK_INT >= 21) {
audioTrack = createAudioTrackV21();
} else {
int streamType = Util.getStreamTypeForAudioUsage(audioAttributes.usage);
if (audioSessionId == C.AUDIO_SESSION_ID_UNSET) {
audioTrack =
new AudioTrack(
streamType,
outputSampleRate,
outputChannelConfig,
outputEncoding,
bufferSize,
MODE_STREAM);
} else {
// Re-attach to the same audio session.
audioTrack =
new AudioTrack(
streamType,
outputSampleRate,
outputChannelConfig,
outputEncoding,
bufferSize,
MODE_STREAM,
audioSessionId);
}
}
int state = audioTrack.getState();
if (state != STATE_INITIALIZED) {
try {
audioTrack.release();
} catch (Exception e) {
// The track has already failed to initialize, so it wouldn't be that surprising if release
// were to fail too. Swallow the exception.
}
throw new InitializationException(state, outputSampleRate, outputChannelConfig, bufferSize);
}
return audioTrack;
}
@TargetApi(21)
private AudioTrack createAudioTrackV21() {
android.media.AudioAttributes attributes;
if (tunneling) {
attributes = new android.media.AudioAttributes.Builder()
.setContentType(android.media.AudioAttributes.CONTENT_TYPE_MOVIE)
.setFlags(android.media.AudioAttributes.FLAG_HW_AV_SYNC)
.setUsage(android.media.AudioAttributes.USAGE_MEDIA)
.build();
} else {
attributes = audioAttributes.getAudioAttributesV21();
}
AudioFormat format =
new AudioFormat.Builder()
.setChannelMask(outputChannelConfig)
.setEncoding(outputEncoding)
.setSampleRate(outputSampleRate)
.build();
int audioSessionId = this.audioSessionId != C.AUDIO_SESSION_ID_UNSET ? this.audioSessionId
: AudioManager.AUDIO_SESSION_ID_GENERATE;
return new AudioTrack(attributes, format, bufferSize, MODE_STREAM, audioSessionId);
} }
private AudioTrack initializeKeepSessionIdAudioTrack(int audioSessionId) { private static AudioTrack initializeKeepSessionIdAudioTrack(int audioSessionId) {
int sampleRate = 4000; // Equal to private AudioTrack.MIN_SAMPLE_RATE. int sampleRate = 4000; // Equal to private AudioTrack.MIN_SAMPLE_RATE.
int channelConfig = AudioFormat.CHANNEL_OUT_MONO; int channelConfig = AudioFormat.CHANNEL_OUT_MONO;
@C.PcmEncoding int encoding = C.ENCODING_PCM_16BIT; @C.PcmEncoding int encoding = C.ENCODING_PCM_16BIT;
...@@ -1161,12 +1069,6 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -1161,12 +1069,6 @@ public final class DefaultAudioSink implements AudioSink {
MODE_STATIC, audioSessionId); MODE_STATIC, audioSessionId);
} }
private AudioProcessor[] getAvailableAudioProcessors() {
return shouldConvertHighResIntPcmToFloat
? toFloatPcmAvailableAudioProcessors
: toIntPcmAvailableAudioProcessors;
}
private static int getChannelConfig(int channelCount, boolean isInputPcm) { private static int getChannelConfig(int channelCount, boolean isInputPcm) {
if (Util.SDK_INT <= 28 && !isInputPcm) { if (Util.SDK_INT <= 28 && !isInputPcm) {
// In passthrough mode the channel count used to configure the audio track doesn't affect how // In passthrough mode the channel count used to configure the audio track doesn't affect how
...@@ -1288,9 +1190,7 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -1288,9 +1190,7 @@ public final class DefaultAudioSink implements AudioSink {
audioTrack.setStereoVolume(volume, volume); audioTrack.setStereoVolume(volume, volume);
} }
/** /** Stores playback parameters with the position and media time at which they apply. */
* Stores playback parameters with the position and media time at which they apply.
*/
private static final class PlaybackParametersCheckpoint { private static final class PlaybackParametersCheckpoint {
private final PlaybackParameters playbackParameters; private final PlaybackParameters playbackParameters;
...@@ -1371,4 +1271,159 @@ public final class DefaultAudioSink implements AudioSink { ...@@ -1371,4 +1271,159 @@ public final class DefaultAudioSink implements AudioSink {
} }
} }
} }
/** Stores configuration relating to the audio format. */
private static final class Configuration {

  /** Whether the input is linear PCM (as opposed to an encoded passthrough format). */
  public final boolean isInputPcm;
  /** Size of one input PCM frame in bytes, or {@link C#LENGTH_UNSET} if the input is not PCM. */
  public final int inputPcmFrameSize;
  /** Sample rate of the input, in Hz. */
  public final int inputSampleRate;
  /** Size of one output PCM frame in bytes, or {@link C#LENGTH_UNSET} if the input is not PCM. */
  public final int outputPcmFrameSize;
  /** Sample rate of the output passed to the platform {@link AudioTrack}, in Hz. */
  public final int outputSampleRate;
  /** Channel configuration constant passed to the platform {@link AudioTrack}. */
  public final int outputChannelConfig;
  /** Encoding passed to the platform {@link AudioTrack}. */
  @C.Encoding public final int outputEncoding;
  /**
   * {@link AudioTrack} buffer size in bytes: the caller-specified size if non-zero, otherwise a
   * default computed from the output format (see {@link #getDefaultBufferSize()}).
   */
  public final int bufferSize;
  /** Whether the audio processor chain is applied to the stream. */
  public final boolean processingEnabled;
  /** Whether playback parameters (speed/pitch) can be applied via the processor chain. */
  public final boolean canApplyPlaybackParameters;
  /** The audio processors available for use with this configuration. */
  public final AudioProcessor[] availableAudioProcessors;

  /**
   * Creates a configuration.
   *
   * <p>All parameters are stored as-is except {@code specifiedBufferSize}: if it is 0 the buffer
   * size is computed from the output format instead.
   *
   * @param specifiedBufferSize The caller-requested buffer size in bytes, or 0 to use a default.
   */
  public Configuration(
      boolean isInputPcm,
      int inputPcmFrameSize,
      int inputSampleRate,
      int outputPcmFrameSize,
      int outputSampleRate,
      int outputChannelConfig,
      int outputEncoding,
      int specifiedBufferSize,
      boolean processingEnabled,
      boolean canApplyPlaybackParameters,
      AudioProcessor[] availableAudioProcessors) {
    this.isInputPcm = isInputPcm;
    this.inputPcmFrameSize = inputPcmFrameSize;
    this.inputSampleRate = inputSampleRate;
    this.outputPcmFrameSize = outputPcmFrameSize;
    this.outputSampleRate = outputSampleRate;
    this.outputChannelConfig = outputChannelConfig;
    this.outputEncoding = outputEncoding;
    // A buffer size of 0 means "not specified"; fall back to a size derived from the format.
    this.bufferSize = specifiedBufferSize != 0 ? specifiedBufferSize : getDefaultBufferSize();
    this.processingEnabled = processingEnabled;
    this.canApplyPlaybackParameters = canApplyPlaybackParameters;
    this.availableAudioProcessors = availableAudioProcessors;
  }

  /**
   * Returns whether the {@link AudioTrack} created for {@code audioTrackConfiguration} can be
   * reused for this configuration, which is the case when the output encoding, sample rate and
   * channel configuration all match (buffer size and processor state are not compared here).
   */
  public boolean canReuseAudioTrack(Configuration audioTrackConfiguration) {
    return audioTrackConfiguration.outputEncoding == outputEncoding
        && audioTrackConfiguration.outputSampleRate == outputSampleRate
        && audioTrackConfiguration.outputChannelConfig == outputChannelConfig;
  }

  /** Returns the duration in microseconds of {@code frameCount} frames at the input sample rate. */
  public long inputFramesToDurationUs(long frameCount) {
    return (frameCount * C.MICROS_PER_SECOND) / inputSampleRate;
  }

  /**
   * Returns the duration in microseconds of {@code frameCount} frames at the output sample rate.
   */
  public long framesToDurationUs(long frameCount) {
    return (frameCount * C.MICROS_PER_SECOND) / outputSampleRate;
  }

  /** Returns the number of output frames corresponding to {@code durationUs} microseconds. */
  public long durationUsToFrames(long durationUs) {
    return (durationUs * outputSampleRate) / C.MICROS_PER_SECOND;
  }

  /**
   * Builds an {@link AudioTrack} for this configuration, releasing it and throwing if it fails to
   * reach {@code STATE_INITIALIZED}.
   *
   * @param tunneling Whether to create a track in tunneling (HW A/V sync) mode; only used on
   *     API 21+ via {@link #createAudioTrackV21}.
   * @param audioAttributes Attributes for the track (mapped to a stream type pre-21).
   * @param audioSessionId The session to attach to, or {@link C#AUDIO_SESSION_ID_UNSET} to let
   *     the platform pick one.
   * @throws InitializationException If the created track does not initialize successfully.
   */
  public AudioTrack buildAudioTrack(
      boolean tunneling, AudioAttributes audioAttributes, int audioSessionId)
      throws InitializationException {
    AudioTrack audioTrack;
    if (Util.SDK_INT >= 21) {
      audioTrack = createAudioTrackV21(tunneling, audioAttributes, audioSessionId);
    } else {
      // Pre-21 there is no AudioAttributes-based constructor; map usage to a legacy stream type.
      int streamType = Util.getStreamTypeForAudioUsage(audioAttributes.usage);
      if (audioSessionId == C.AUDIO_SESSION_ID_UNSET) {
        audioTrack =
            new AudioTrack(
                streamType,
                outputSampleRate,
                outputChannelConfig,
                outputEncoding,
                bufferSize,
                MODE_STREAM);
      } else {
        // Re-attach to the same audio session.
        audioTrack =
            new AudioTrack(
                streamType,
                outputSampleRate,
                outputChannelConfig,
                outputEncoding,
                bufferSize,
                MODE_STREAM,
                audioSessionId);
      }
    }
    int state = audioTrack.getState();
    if (state != STATE_INITIALIZED) {
      try {
        audioTrack.release();
      } catch (Exception e) {
        // The track has already failed to initialize, so it wouldn't be that surprising if
        // release were to fail too. Swallow the exception.
      }
      throw new InitializationException(state, outputSampleRate, outputChannelConfig, bufferSize);
    }
    return audioTrack;
  }

  /**
   * Creates an {@link AudioTrack} using the API 21+ {@link android.media.AudioAttributes}-based
   * constructor. In tunneling mode fixed movie/media attributes with {@code FLAG_HW_AV_SYNC} are
   * used instead of the caller-supplied attributes.
   */
  @TargetApi(21)
  private AudioTrack createAudioTrackV21(
      boolean tunneling, AudioAttributes audioAttributes, int audioSessionId) {
    android.media.AudioAttributes attributes;
    if (tunneling) {
      attributes =
          new android.media.AudioAttributes.Builder()
              .setContentType(android.media.AudioAttributes.CONTENT_TYPE_MOVIE)
              .setFlags(android.media.AudioAttributes.FLAG_HW_AV_SYNC)
              .setUsage(android.media.AudioAttributes.USAGE_MEDIA)
              .build();
    } else {
      attributes = audioAttributes.getAudioAttributesV21();
    }
    AudioFormat format =
        new AudioFormat.Builder()
            .setChannelMask(outputChannelConfig)
            .setEncoding(outputEncoding)
            .setSampleRate(outputSampleRate)
            .build();
    return new AudioTrack(
        attributes,
        format,
        bufferSize,
        MODE_STREAM,
        audioSessionId != C.AUDIO_SESSION_ID_UNSET
            ? audioSessionId
            : AudioManager.AUDIO_SESSION_ID_GENERATE);
  }

  /**
   * Returns a default buffer size in bytes for this configuration. For PCM input this is the
   * platform minimum buffer size multiplied by {@code BUFFER_MULTIPLICATION_FACTOR}, constrained
   * to lie between app-defined minimum and maximum buffer durations. For passthrough it is sized
   * to hold {@code PASSTHROUGH_BUFFER_DURATION_US} at the encoding's maximum byte rate.
   */
  private int getDefaultBufferSize() {
    if (isInputPcm) {
      int minBufferSize =
          AudioTrack.getMinBufferSize(outputSampleRate, outputChannelConfig, outputEncoding);
      Assertions.checkState(minBufferSize != ERROR_BAD_VALUE);
      int multipliedBufferSize = minBufferSize * BUFFER_MULTIPLICATION_FACTOR;
      int minAppBufferSize =
          (int) durationUsToFrames(MIN_BUFFER_DURATION_US) * outputPcmFrameSize;
      int maxAppBufferSize =
          (int)
              Math.max(
                  minBufferSize, durationUsToFrames(MAX_BUFFER_DURATION_US) * outputPcmFrameSize);
      return Util.constrainValue(multipliedBufferSize, minAppBufferSize, maxAppBufferSize);
    } else {
      int rate = getMaximumEncodedRateBytesPerSecond(outputEncoding);
      if (outputEncoding == C.ENCODING_AC3) {
        // AC-3 passthrough uses a larger buffer; presumably to smooth bursty delivery — see
        // AC3_BUFFER_MULTIPLICATION_FACTOR in the outer class (NOTE(review): confirm rationale).
        rate *= AC3_BUFFER_MULTIPLICATION_FACTOR;
      }
      return (int) (PASSTHROUGH_BUFFER_DURATION_US * rate / C.MICROS_PER_SECOND);
    }
  }
}
} }
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment