retain stereo on dead audio resampling

This commit is contained in:
Zach Pomerantz 2016-10-04 17:50:34 -07:00
parent 4974c88880
commit 972a611d03
3 changed files with 28 additions and 27 deletions

View file

@@ -1152,9 +1152,9 @@ bool AudioClient::outputLocalInjector(bool isStereo, AudioInjector* injector) {
}
void AudioClient::outputFormatChanged() {
int outputFormatChannelCountTimesSampleRate = _outputFormat.channelCount() * _outputFormat.sampleRate();
_outputFrameSize = AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL * outputFormatChannelCountTimesSampleRate / _desiredOutputFormat.sampleRate();
_receivedAudioStream.outputFormatChanged(outputFormatChannelCountTimesSampleRate);
_outputFrameSize = (AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL * _outputFormat.channelCount() * _outputFormat.sampleRate()) /
_desiredOutputFormat.sampleRate();
_receivedAudioStream.outputFormatChanged(_outputFormat.sampleRate(), _outputFormat.channelCount());
}
bool AudioClient::switchInputToAudioDevice(const QAudioDeviceInfo& inputDeviceInfo) {

View file

@@ -17,27 +17,27 @@ static const int STEREO_FACTOR = 2;
MixedProcessedAudioStream::MixedProcessedAudioStream(int numFrameSamples, int numFramesCapacity, int numStaticJitterFrames)
: InboundAudioStream(numFrameSamples, numFramesCapacity, numStaticJitterFrames) {}
void MixedProcessedAudioStream::outputFormatChanged(int outputFormatChannelCountTimesSampleRate) {
_outputFormatChannelsTimesSampleRate = outputFormatChannelCountTimesSampleRate;
int deviceOutputFrameSize = networkToDeviceSamples(AudioConstants::NETWORK_FRAME_SAMPLES_STEREO);
_ringBuffer.resizeForFrameSize(deviceOutputFrameSize);
void MixedProcessedAudioStream::outputFormatChanged(int sampleRate, int channelCount) {
_outputSampleRate = sampleRate;
_outputChannelCount = channelCount;
int deviceOutputFrameFrames = networkToDeviceFrames(AudioConstants::NETWORK_FRAME_SAMPLES_STEREO / STEREO_FACTOR);
int deviceOutputFrameSamples = deviceOutputFrameFrames * STEREO_FACTOR;
_ringBuffer.resizeForFrameSize(deviceOutputFrameSamples);
}
int MixedProcessedAudioStream::writeDroppableSilentSamples(int silentSamples) {
int deviceSilentSamplesWritten = InboundAudioStream::writeDroppableSilentSamples(networkToDeviceSamples(silentSamples));
emit addedSilence(deviceToNetworkSamples(deviceSilentSamplesWritten) / STEREO_FACTOR);
int deviceSilentFrames = networkToDeviceFrames(silentSamples / STEREO_FACTOR);
int deviceSilentSamples = deviceSilentFrames * STEREO_FACTOR;
int deviceSilentSamplesWritten = InboundAudioStream::writeDroppableSilentSamples(deviceSilentSamples);
emit addedSilence(deviceToNetworkFrames(deviceSilentSamplesWritten / STEREO_FACTOR));
return deviceSilentSamplesWritten;
}
int MixedProcessedAudioStream::writeLastFrameRepeatedWithFade(int samples) {
int deviceSamplesWritten = InboundAudioStream::writeLastFrameRepeatedWithFade(networkToDeviceSamples(samples));
emit addedLastFrameRepeatedWithFade(deviceToNetworkSamples(deviceSamplesWritten) / STEREO_FACTOR);
int deviceFrames = networkToDeviceFrames(samples / STEREO_FACTOR);
int deviceSamples = deviceFrames * STEREO_FACTOR;
int deviceSamplesWritten = InboundAudioStream::writeLastFrameRepeatedWithFade(deviceSamples);
emit addedLastFrameRepeatedWithFade(deviceToNetworkFrames(deviceSamplesWritten / STEREO_FACTOR));
return deviceSamplesWritten;
}
@@ -60,12 +60,12 @@ int MixedProcessedAudioStream::parseAudioData(PacketType type, const QByteArray&
return packetAfterStreamProperties.size();
}
int MixedProcessedAudioStream::networkToDeviceSamples(int networkSamples) {
return (quint64)networkSamples * (quint64)_outputFormatChannelsTimesSampleRate / (quint64)(STEREO_FACTOR
* AudioConstants::SAMPLE_RATE);
int MixedProcessedAudioStream::networkToDeviceFrames(int networkFrames) {
return ((quint64)networkFrames * _outputChannelCount * _outputSampleRate) /
(quint64)(STEREO_FACTOR * AudioConstants::SAMPLE_RATE);
}
int MixedProcessedAudioStream::deviceToNetworkSamples(int deviceSamples) {
return (quint64)deviceSamples * (quint64)(STEREO_FACTOR * AudioConstants::SAMPLE_RATE)
/ (quint64)_outputFormatChannelsTimesSampleRate;
int MixedProcessedAudioStream::deviceToNetworkFrames(int deviceFrames) {
return (quint64)deviceFrames * (quint64)(STEREO_FACTOR * AudioConstants::SAMPLE_RATE) /
(_outputSampleRate * _outputChannelCount);
}

View file

@@ -30,7 +30,7 @@ signals:
void processSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer);
public:
void outputFormatChanged(int outputFormatChannelCountTimesSampleRate);
void outputFormatChanged(int sampleRate, int channelCount);
protected:
int writeDroppableSilentSamples(int silentSamples) override;
@@ -38,11 +38,12 @@ protected:
int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties) override;
private:
int networkToDeviceSamples(int networkSamples);
int deviceToNetworkSamples(int deviceSamples);
int networkToDeviceFrames(int networkFrames);
int deviceToNetworkFrames(int deviceFrames);
private:
int _outputFormatChannelsTimesSampleRate;
quint64 _outputSampleRate;
quint64 _outputChannelCount;
};
#endif // hifi_MixedProcessedAudioStream_h