revert back to using QByteArray for processReceivedAudio()

This commit is contained in:
ZappoMan 2014-04-15 13:16:52 -07:00
parent 74828a3215
commit cd23b95b42
2 changed files with 13 additions and 13 deletions

View file

@@ -652,8 +652,7 @@ void Audio::addReceivedAudioToBuffer(const QByteArray& audioByteArray) {
if (_audioOutput) { if (_audioOutput) {
// Audio output must exist and be correctly set up if we're going to process received audio // Audio output must exist and be correctly set up if we're going to process received audio
_ringBuffer.parseData(audioByteArray); processReceivedAudio(audioByteArray);
processReceivedAudio(_ringBuffer);
} }
Application::getInstance()->getBandwidthMeter()->inputStream(BandwidthMeter::AUDIO).updateValue(audioByteArray.size()); Application::getInstance()->getBandwidthMeter()->inputStream(BandwidthMeter::AUDIO).updateValue(audioByteArray.size());
@@ -753,23 +752,24 @@ void Audio::toggleAudioNoiseReduction() {
_noiseGateEnabled = !_noiseGateEnabled; _noiseGateEnabled = !_noiseGateEnabled;
} }
void Audio::processReceivedAudio(AudioRingBuffer& ringBuffer) { void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
_ringBuffer.parseData(audioByteArray);
float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float) _outputFormat.sampleRate()) float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float) _outputFormat.sampleRate())
* (_desiredOutputFormat.channelCount() / (float) _outputFormat.channelCount()); * (_desiredOutputFormat.channelCount() / (float) _outputFormat.channelCount());
if (!ringBuffer.isStarved() && _audioOutput && _audioOutput->bytesFree() == _audioOutput->bufferSize()) { if (!_ringBuffer.isStarved() && _audioOutput && _audioOutput->bytesFree() == _audioOutput->bufferSize()) {
// we don't have any audio data left in the output buffer // we don't have any audio data left in the output buffer
// we just starved // we just starved
//qDebug() << "Audio output just starved."; //qDebug() << "Audio output just starved.";
ringBuffer.setIsStarved(true); _ringBuffer.setIsStarved(true);
_numFramesDisplayStarve = 10; _numFramesDisplayStarve = 10;
} }
// if there is anything in the ring buffer, decide what to do // if there is anything in the ring buffer, decide what to do
if (ringBuffer.samplesAvailable() > 0) { if (_ringBuffer.samplesAvailable() > 0) {
int numNetworkOutputSamples = ringBuffer.samplesAvailable(); int numNetworkOutputSamples = _ringBuffer.samplesAvailable();
int numDeviceOutputSamples = numNetworkOutputSamples / networkOutputToOutputRatio; int numDeviceOutputSamples = numNetworkOutputSamples / networkOutputToOutputRatio;
QByteArray outputBuffer; QByteArray outputBuffer;
@@ -777,13 +777,13 @@ void Audio::processReceivedAudio(AudioRingBuffer& ringBuffer) {
int numSamplesNeededToStartPlayback = NETWORK_BUFFER_LENGTH_SAMPLES_STEREO + (_jitterBufferSamples * 2); int numSamplesNeededToStartPlayback = NETWORK_BUFFER_LENGTH_SAMPLES_STEREO + (_jitterBufferSamples * 2);
if (!ringBuffer.isNotStarvedOrHasMinimumSamples(numSamplesNeededToStartPlayback)) { if (!_ringBuffer.isNotStarvedOrHasMinimumSamples(numSamplesNeededToStartPlayback)) {
// We are still waiting for enough samples to begin playback // We are still waiting for enough samples to begin playback
// qDebug() << numNetworkOutputSamples << " samples so far, waiting for " << numSamplesNeededToStartPlayback; // qDebug() << numNetworkOutputSamples << " samples so far, waiting for " << numSamplesNeededToStartPlayback;
} else { } else {
// We are either already playing back, or we have enough audio to start playing back. // We are either already playing back, or we have enough audio to start playing back.
//qDebug() << "pushing " << numNetworkOutputSamples; //qDebug() << "pushing " << numNetworkOutputSamples;
ringBuffer.setIsStarved(false); _ringBuffer.setIsStarved(false);
int16_t* ringBufferSamples = new int16_t[numNetworkOutputSamples]; int16_t* ringBufferSamples = new int16_t[numNetworkOutputSamples];
if (_processSpatialAudio) { if (_processSpatialAudio) {
@@ -791,7 +791,7 @@ void Audio::processReceivedAudio(AudioRingBuffer& ringBuffer) {
QByteArray buffer; QByteArray buffer;
buffer.resize(numNetworkOutputSamples * sizeof(int16_t)); buffer.resize(numNetworkOutputSamples * sizeof(int16_t));
ringBuffer.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples); _ringBuffer.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples);
// Accumulate direct transmission of audio from sender to receiver // Accumulate direct transmission of audio from sender to receiver
if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncudeOriginal)) { if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncudeOriginal)) {
addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples); addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples);
@@ -814,7 +814,7 @@ void Audio::processReceivedAudio(AudioRingBuffer& ringBuffer) {
// copy the samples we'll resample from the ring buffer - this also // copy the samples we'll resample from the ring buffer - this also
// pushes the read pointer of the ring buffer forwards // pushes the read pointer of the ring buffer forwards
ringBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples); _ringBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples);
} }

View file

@@ -187,7 +187,7 @@ private:
void addProceduralSounds(int16_t* monoInput, int numSamples); void addProceduralSounds(int16_t* monoInput, int numSamples);
// Process received audio // Process received audio
void processReceivedAudio(AudioRingBuffer& ringBuffer); void processReceivedAudio(const QByteArray& audioByteArray);
bool switchInputToAudioDevice(const QAudioDeviceInfo& inputDeviceInfo); bool switchInputToAudioDevice(const QAudioDeviceInfo& inputDeviceInfo);
bool switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo); bool switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo);