mirror of https://github.com/overte-org/overte.git (synced 2025-04-23 01:13:32 +02:00)

commit 059007c99c (parent 473cbf2afe)

    cleaned up code (removed old code that was commented out)

4 changed files with 70 additions and 169 deletions
@@ -499,14 +499,6 @@ void AudioMixer::run() {
     int framesSinceCutoffEvent = TRAILING_AVERAGE_FRAMES;
 
     while (!_isFinished) {
-
-        /*foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
-            if (node->getLinkedData()) {
-                ((AudioMixerClientData*) node->getLinkedData())->checkBuffersBeforeFrameSend(_sourceUnattenuatedZone,
-                                                                                             _listenerUnattenuatedZone);
-            }
-        }*/
-
         const float STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.10f;
         const float BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.20f;
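Note: the two constants kept by this hunk drive the mixer's adaptive load handling. A standalone sketch of how trailing sleep-percentage thresholds like these are typically consumed (illustrative only; the real AudioMixer decision logic lives outside this hunk):

    #include <cstdio>

    // Illustrative classification of mixer load from the fraction of each
    // frame budget spent sleeping, averaged over trailing frames.
    enum MixerLoadHint { STRUGGLING, STEADY, CAN_BACK_OFF };

    MixerLoadHint classifyLoad(float sleepPercentage) {
        const float STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.10f;
        const float BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.20f;
        if (sleepPercentage < STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD) {
            return STRUGGLING;   // almost no idle time left in the frame budget
        }
        if (sleepPercentage > BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD) {
            return CAN_BACK_OFF; // comfortable headroom
        }
        return STEADY;
    }

    int main() {
        printf("%d\n", classifyLoad(0.05f)); // prints 0 (STRUGGLING)
        return 0;
    }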
@@ -599,13 +591,6 @@ void AudioMixer::run() {
                 ++_sumListeners;
             }
         }
-        /*
-        // push forward the next output pointers for any audio buffers we used
-        foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
-            if (node->getLinkedData()) {
-                ((AudioMixerClientData*) node->getLinkedData())->pushBuffersAfterFrameSend();
-            }
-        }*/
 
         ++_numStatFrames;
 
@@ -98,87 +98,6 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
     return 0;
 }
 
-/*void AudioMixerClientData::checkBuffersBeforeFrameSend(AABox* checkSourceZone, AABox* listenerZone) {
-    for (int i = 0; i < _ringBuffers.size(); i++) {
-        if (_ringBuffers[i]->shouldBeAddedToMix()) {
-            // this is a ring buffer that is ready to go
-            // set its flag so we know to push its buffer when all is said and done
-            _ringBuffers[i]->setWillBeAddedToMix(true);
-
-            // calculate the average loudness for the next NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL
-            // that would be mixed in
-            _ringBuffers[i]->updateNextOutputTrailingLoudness();
-
-            if (checkSourceZone && checkSourceZone->contains(_ringBuffers[i]->getPosition())) {
-                _ringBuffers[i]->setListenerUnattenuatedZone(listenerZone);
-            } else {
-                _ringBuffers[i]->setListenerUnattenuatedZone(NULL);
-            }
-        }
-    }
-}
-
-void AudioMixerClientData::pushBuffersAfterFrameSend() {
-
-    QList<PositionalAudioRingBuffer*>::iterator i = _ringBuffers.begin();
-    while (i != _ringBuffers.end()) {
-        // this was a used buffer, push the output pointer forwards
-        PositionalAudioRingBuffer* audioBuffer = *i;
-
-        const int INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD = 100;
-
-        if (audioBuffer->willBeAddedToMix()) {
-            audioBuffer->shiftReadPosition(audioBuffer->getSamplesPerFrame());
-            audioBuffer->setWillBeAddedToMix(false);
-        } else if (audioBuffer->getType() == PositionalAudioRingBuffer::Injector
-                   && audioBuffer->hasStarted() && audioBuffer->isStarved()
-                   && audioBuffer->getConsecutiveNotMixedCount() > INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD) {
-            // this is an empty audio buffer that has starved, safe to delete
-            // also delete its sequence number stats
-            QUuid streamIdentifier = ((InjectedAudioRingBuffer*)audioBuffer)->getStreamIdentifier();
-            _incomingInjectedAudioSequenceNumberStatsMap.remove(streamIdentifier);
-            delete audioBuffer;
-            i = _ringBuffers.erase(i);
-            continue;
-        }
-        i++;
-    }
-}*/
-
-/*AudioStreamStats AudioMixerClientData::getAudioStreamStatsOfStream(const PositionalAudioRingBuffer* ringBuffer) const {
-
-    AudioStreamStats streamStats;
-
-    streamStats._streamType = ringBuffer->getType();
-    if (streamStats._streamType == PositionalAudioRingBuffer::Injector) {
-        streamStats._streamIdentifier = ((InjectedAudioRingBuffer*)ringBuffer)->getStreamIdentifier();
-        const SequenceNumberStats& sequenceNumberStats = _incomingInjectedAudioSequenceNumberStatsMap[streamStats._streamIdentifier];
-        streamStats._packetStreamStats = sequenceNumberStats.getStats();
-        streamStats._packetStreamWindowStats = sequenceNumberStats.getStatsForHistoryWindow();
-    } else {
-        streamStats._packetStreamStats = _incomingAvatarAudioSequenceNumberStats.getStats();
-        streamStats._packetStreamWindowStats = _incomingAvatarAudioSequenceNumberStats.getStatsForHistoryWindow();
-    }
-
-    const MovingMinMaxAvg<quint64>& timeGapStats = ringBuffer->getInterframeTimeGapStatsForStatsPacket();
-    streamStats._timeGapMin = timeGapStats.getMin();
-    streamStats._timeGapMax = timeGapStats.getMax();
-    streamStats._timeGapAverage = timeGapStats.getAverage();
-    streamStats._timeGapWindowMin = timeGapStats.getWindowMin();
-    streamStats._timeGapWindowMax = timeGapStats.getWindowMax();
-    streamStats._timeGapWindowAverage = timeGapStats.getWindowAverage();
-
-    streamStats._ringBufferFramesAvailable = ringBuffer->framesAvailable();
-    streamStats._ringBufferFramesAvailableAverage = ringBuffer->getFramesAvailableAverage();
-    streamStats._ringBufferDesiredJitterBufferFrames = ringBuffer->getDesiredJitterBufferFrames();
-    streamStats._ringBufferStarveCount = ringBuffer->getStarveCount();
-    streamStats._ringBufferConsecutiveNotMixedCount = ringBuffer->getConsecutiveNotMixedCount();
-    streamStats._ringBufferOverflowCount = ringBuffer->getOverflowCount();
-    streamStats._ringBufferSilentFramesDropped = ringBuffer->getSilentFramesDropped();
-
-    return streamStats;
-}*/
-
 void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode) {
     char packet[MAX_PACKET_SIZE];
     NodeList* nodeList = NodeList::getInstance();
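Note: the deleted pushBuffersAfterFrameSend() used the standard erase-while-iterating idiom: erase() returns an iterator to the next element, and the continue skips the increment for erased entries. A minimal self-contained illustration of the same idiom, with std::list standing in for QList<PositionalAudioRingBuffer*>:

    #include <list>

    struct Buffer { bool starved; };

    void pruneStarved(std::list<Buffer*>& buffers) {
        std::list<Buffer*>::iterator i = buffers.begin();
        while (i != buffers.end()) {
            if ((*i)->starved) {
                delete *i;
                i = buffers.erase(i); // erase() yields the next valid iterator
                continue;             // skip the ++i below
            }
            ++i;
        }
    }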
@@ -899,6 +899,11 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
     // parse audio data
     _ringBuffer.parseData(audioByteArray);
 
+    pushAudioToOutput();
+}
+
+
+void Audio::pushAudioToOutput() {
 
     if (_audioOutput->bytesFree() == _audioOutput->bufferSize()) {
         // the audio output has no samples to play. set the downstream audio to starved so that it
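Note: the net effect of this hunk is that processReceivedAudio() now only parses the incoming packet and hands playback off to the newly extracted pushAudioToOutput(). The resulting control flow, sketched with details elided:

    void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
        // parse audio data into the output ring buffer
        _ringBuffer.parseData(audioByteArray);

        // extracted below: drains buffered frames toward the output device
        pushAudioToOutput();
    }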
@@ -906,10 +911,8 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
         _ringBuffer.setToStarved();
     }
 
-
-    float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float) _outputFormat.sampleRate())
-        * (_desiredOutputFormat.channelCount() / (float) _outputFormat.channelCount());
+    float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float)_outputFormat.sampleRate())
+        * (_desiredOutputFormat.channelCount() / (float)_outputFormat.channelCount());
 
     int numFramesToPush;
     if (Menu::getInstance()->isOptionChecked(MenuOption::DisableQAudioOutputOverflowCheck)) {
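Note: networkOutputToOutputRatio above converts between the network and device audio formats as (network rate / device rate) * (network channels / device channels). A self-contained worked example with illustrative numbers (24 kHz stereo network audio played on a 48 kHz stereo device; these rates are assumptions, not values from the codebase):

    #include <cassert>

    int main() {
        // ratio = (24000 / 48000) * (2 / 2) = 0.5
        float networkOutputToOutputRatio = (24000 / (float) 48000) * (2 / (float) 2);

        // a 960-sample network chunk therefore needs 1920 device samples
        int numNetworkOutputSamples = 960;
        int numDeviceOutputSamples = (int) (numNetworkOutputSamples / networkOutputToOutputRatio);
        assert(numDeviceOutputSamples == 1920);
        return 0;
    }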
@@ -925,90 +928,81 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
     AudioRingBuffer::ConstIterator ringBufferNextOutput;
     if (numFramesToPush > 0 && _ringBuffer.popFrames(&ringBufferNextOutput, numFramesToPush, false)) {
 
-        /*int numSamplesNeededToStartPlayback = std::min(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO + (_jitterBufferSamples * 2),
-                                                _ringBuffer.getSampleCapacity());
-
-        if (!_ringBuffer.isNotStarvedOrHasMinimumSamples(numSamplesNeededToStartPlayback)) {
-            // We are still waiting for enough samples to begin playback
-            // qDebug() << numNetworkOutputSamples << " samples so far, waiting for " << numSamplesNeededToStartPlayback;
-            _consecutiveNotMixedCount++;
-        } else {*/
         int numNetworkOutputSamples = numFramesToPush * NETWORK_BUFFER_LENGTH_SAMPLES_STEREO;
         int numDeviceOutputSamples = numNetworkOutputSamples / networkOutputToOutputRatio;
 
-            QByteArray outputBuffer;
-            outputBuffer.resize(numDeviceOutputSamples * sizeof(int16_t));
+        QByteArray outputBuffer;
+        outputBuffer.resize(numDeviceOutputSamples * sizeof(int16_t));
 
-            int16_t* ringBufferSamples = new int16_t[numNetworkOutputSamples];
-            if (_processSpatialAudio) {
-                unsigned int sampleTime = _spatialAudioStart;
-                QByteArray buffer;
-                buffer.resize(numNetworkOutputSamples * sizeof(int16_t));
+        int16_t* ringBufferSamples = new int16_t[numNetworkOutputSamples];
+        if (_processSpatialAudio) {
+            unsigned int sampleTime = _spatialAudioStart;
+            QByteArray buffer;
+            buffer.resize(numNetworkOutputSamples * sizeof(int16_t));
 
-                ringBufferNextOutput.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples);
+            ringBufferNextOutput.readSamples((int16_t*)buffer.data(), numNetworkOutputSamples);
 
-                // Accumulate direct transmission of audio from sender to receiver
-                if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) {
-                    emit preProcessOriginalInboundAudio(sampleTime, buffer, _desiredOutputFormat);
-                    addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples);
-                }
-
-                // Send audio off for spatial processing
-                emit processInboundAudio(sampleTime, buffer, _desiredOutputFormat);
-
-                // copy the samples we'll resample from the spatial audio ring buffer - this also
-                // pushes the read pointer of the spatial audio ring buffer forwards
-                _spatialAudioRingBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples);
-
-                // Advance the start point for the next packet of audio to arrive
-                _spatialAudioStart += numNetworkOutputSamples / _desiredOutputFormat.channelCount();
-            } else {
-                // copy the samples we'll resample from the ring buffer - this also
-                // pushes the read pointer of the ring buffer forwards
-                ringBufferNextOutput.readSamples(ringBufferSamples, numNetworkOutputSamples);
-            }
+            // Accumulate direct transmission of audio from sender to receiver
+            if (Menu::getInstance()->isOptionChecked(MenuOption::AudioSpatialProcessingIncludeOriginal)) {
+                emit preProcessOriginalInboundAudio(sampleTime, buffer, _desiredOutputFormat);
+                addSpatialAudioToBuffer(sampleTime, buffer, numNetworkOutputSamples);
+            }
+
+            // Send audio off for spatial processing
+            emit processInboundAudio(sampleTime, buffer, _desiredOutputFormat);
+
+            // copy the samples we'll resample from the spatial audio ring buffer - this also
+            // pushes the read pointer of the spatial audio ring buffer forwards
+            _spatialAudioRingBuffer.readSamples(ringBufferSamples, numNetworkOutputSamples);
+
+            // Advance the start point for the next packet of audio to arrive
+            _spatialAudioStart += numNetworkOutputSamples / _desiredOutputFormat.channelCount();
+        } else {
+            // copy the samples we'll resample from the ring buffer - this also
+            // pushes the read pointer of the ring buffer forwards
+            ringBufferNextOutput.readSamples(ringBufferSamples, numNetworkOutputSamples);
+        }
 
-            // copy the packet from the RB to the output
-            linearResampling(ringBufferSamples,
-                             (int16_t*) outputBuffer.data(),
-                             numNetworkOutputSamples,
-                             numDeviceOutputSamples,
-                             _desiredOutputFormat, _outputFormat);
+        // copy the packet from the RB to the output
+        linearResampling(ringBufferSamples,
+                         (int16_t*)outputBuffer.data(),
+                         numNetworkOutputSamples,
+                         numDeviceOutputSamples,
+                         _desiredOutputFormat, _outputFormat);
 
-            if (_outputDevice) {
-                _outputDevice->write(outputBuffer);
-            }
+        if (_outputDevice) {
+            _outputDevice->write(outputBuffer);
+        }
 
-            if (_scopeEnabled && !_scopeEnabledPause) {
-                unsigned int numAudioChannels = _desiredOutputFormat.channelCount();
-                int16_t* samples = ringBufferSamples;
-                for (int numSamples = numNetworkOutputSamples / numAudioChannels; numSamples > 0; numSamples -= NETWORK_SAMPLES_PER_FRAME) {
-
-                    unsigned int audioChannel = 0;
-                    addBufferToScope(
-                        _scopeOutputLeft,
-                        _scopeOutputOffset,
-                        samples, audioChannel, numAudioChannels);
-
-                    audioChannel = 1;
-                    addBufferToScope(
-                        _scopeOutputRight,
-                        _scopeOutputOffset,
-                        samples, audioChannel, numAudioChannels);
-
-                    _scopeOutputOffset += NETWORK_SAMPLES_PER_FRAME;
-                    _scopeOutputOffset %= _samplesPerScope;
-                    samples += NETWORK_SAMPLES_PER_FRAME * numAudioChannels;
-                }
-            }
+        if (_scopeEnabled && !_scopeEnabledPause) {
+            unsigned int numAudioChannels = _desiredOutputFormat.channelCount();
+            int16_t* samples = ringBufferSamples;
+            for (int numSamples = numNetworkOutputSamples / numAudioChannels; numSamples > 0; numSamples -= NETWORK_SAMPLES_PER_FRAME) {
+
+                unsigned int audioChannel = 0;
+                addBufferToScope(
+                    _scopeOutputLeft,
+                    _scopeOutputOffset,
+                    samples, audioChannel, numAudioChannels);
+
+                audioChannel = 1;
+                addBufferToScope(
+                    _scopeOutputRight,
+                    _scopeOutputOffset,
+                    samples, audioChannel, numAudioChannels);
+
+                _scopeOutputOffset += NETWORK_SAMPLES_PER_FRAME;
+                _scopeOutputOffset %= _samplesPerScope;
+                samples += NETWORK_SAMPLES_PER_FRAME * numAudioChannels;
+            }
+        }
 
-            delete[] ringBufferSamples;
-        //}
+        delete[] ringBufferSamples;
     }
 }
 
 
 void Audio::processProceduralAudio(int16_t* monoInput, int numSamples) {
 
     // zero out the locally injected audio in preparation for audio procedural sounds
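Note: linearResampling() is what maps numNetworkOutputSamples onto numDeviceOutputSamples in the hunk above. The sketch below shows the general single-channel technique the name refers to; it is not the Audio class implementation, which also converts channel counts between the two QAudioFormats:

    #include <cstdint>
    #include <vector>

    // Generic linear-interpolation resampler for one channel of int16 samples.
    std::vector<int16_t> resampleLinear(const std::vector<int16_t>& in, int outCount) {
        std::vector<int16_t> out(outCount > 0 ? outCount : 0);
        if (in.size() < 2 || outCount < 1) {
            return out;
        }
        // stride through the input so the last output lands near the last input
        float step = (in.size() - 1) / (float) outCount;
        for (int i = 0; i < outCount; i++) {
            float pos = i * step;
            int idx = (int) pos;
            float frac = pos - idx;
            // weighted blend of the two neighboring input samples
            out[i] = (int16_t) (in[idx] * (1.0f - frac) + in[idx + 1] * frac);
        }
        return out;
    }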
@@ -215,6 +215,9 @@ private:
     // Process received audio
     void processReceivedAudio(const QByteArray& audioByteArray);
 
+    // Pushes frames from the output ringbuffer to the audio output device
+    void pushAudioToOutput();
+
     bool switchInputToAudioDevice(const QAudioDeviceInfo& inputDeviceInfo);
     bool switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo);
 