mirror of https://github.com/overte-org/overte.git
cleaned up code (removed old code that was commented out)

commit 059007c99c (parent 473cbf2afe)
4 changed files with 70 additions and 169 deletions

AudioMixer.cpp

@@ -499,14 +499,6 @@ void AudioMixer::run() {
     int framesSinceCutoffEvent = TRAILING_AVERAGE_FRAMES;

     while (!_isFinished) {

-        /*foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
-            if (node->getLinkedData()) {
-                ((AudioMixerClientData*) node->getLinkedData())->checkBuffersBeforeFrameSend(_sourceUnattenuatedZone,
-                                                                                             _listenerUnattenuatedZone);
-            }
-        }*/
-
-
         const float STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.10f;
         const float BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.20f;

@@ -599,13 +591,6 @@ void AudioMixer::run() {
                 ++_sumListeners;
             }
         }
-        /*
-        // push forward the next output pointers for any audio buffers we used
-        foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
-            if (node->getLinkedData()) {
-                ((AudioMixerClientData*) node->getLinkedData())->pushBuffersAfterFrameSend();
-            }
-        }*/

         ++_numStatFrames;


AudioMixerClientData.cpp

@@ -98,87 +98,6 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
     return 0;
 }

-/*void AudioMixerClientData::checkBuffersBeforeFrameSend(AABox* checkSourceZone, AABox* listenerZone) {
-    for (int i = 0; i < _ringBuffers.size(); i++) {
-        if (_ringBuffers[i]->shouldBeAddedToMix()) {
-            // this is a ring buffer that is ready to go
-            // set its flag so we know to push its buffer when all is said and done
-            _ringBuffers[i]->setWillBeAddedToMix(true);
-
-            // calculate the average loudness for the next NETWORK_BUFFER_LENGTH_SAMPLES_PER_CHANNEL
-            // that would be mixed in
-            _ringBuffers[i]->updateNextOutputTrailingLoudness();
-
-            if (checkSourceZone && checkSourceZone->contains(_ringBuffers[i]->getPosition())) {
-                _ringBuffers[i]->setListenerUnattenuatedZone(listenerZone);
-            } else {
-                _ringBuffers[i]->setListenerUnattenuatedZone(NULL);
-            }
-        }
-    }
-}
-
-void AudioMixerClientData::pushBuffersAfterFrameSend() {
-    QList<PositionalAudioRingBuffer*>::iterator i = _ringBuffers.begin();
-    while (i != _ringBuffers.end()) {
-        // this was a used buffer, push the output pointer forwards
-        PositionalAudioRingBuffer* audioBuffer = *i;
-
-        const int INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD = 100;
-
-        if (audioBuffer->willBeAddedToMix()) {
-            audioBuffer->shiftReadPosition(audioBuffer->getSamplesPerFrame());
-            audioBuffer->setWillBeAddedToMix(false);
-        } else if (audioBuffer->getType() == PositionalAudioRingBuffer::Injector
-                && audioBuffer->hasStarted() && audioBuffer->isStarved()
-                && audioBuffer->getConsecutiveNotMixedCount() > INJECTOR_CONSECUTIVE_NOT_MIXED_THRESHOLD) {
-            // this is an empty audio buffer that has starved, safe to delete
-            // also delete its sequence number stats
-            QUuid streamIdentifier = ((InjectedAudioRingBuffer*)audioBuffer)->getStreamIdentifier();
-            _incomingInjectedAudioSequenceNumberStatsMap.remove(streamIdentifier);
-            delete audioBuffer;
-            i = _ringBuffers.erase(i);
-            continue;
-        }
-        i++;
-    }
-}*/
-
-/*AudioStreamStats AudioMixerClientData::getAudioStreamStatsOfStream(const PositionalAudioRingBuffer* ringBuffer) const {
-    AudioStreamStats streamStats;
-
-    streamStats._streamType = ringBuffer->getType();
-    if (streamStats._streamType == PositionalAudioRingBuffer::Injector) {
-        streamStats._streamIdentifier = ((InjectedAudioRingBuffer*)ringBuffer)->getStreamIdentifier();
-        const SequenceNumberStats& sequenceNumberStats = _incomingInjectedAudioSequenceNumberStatsMap[streamStats._streamIdentifier];
-        streamStats._packetStreamStats = sequenceNumberStats.getStats();
-        streamStats._packetStreamWindowStats = sequenceNumberStats.getStatsForHistoryWindow();
-    } else {
-        streamStats._packetStreamStats = _incomingAvatarAudioSequenceNumberStats.getStats();
-        streamStats._packetStreamWindowStats = _incomingAvatarAudioSequenceNumberStats.getStatsForHistoryWindow();
-    }
-
-    const MovingMinMaxAvg<quint64>& timeGapStats = ringBuffer->getInterframeTimeGapStatsForStatsPacket();
-    streamStats._timeGapMin = timeGapStats.getMin();
-    streamStats._timeGapMax = timeGapStats.getMax();
-    streamStats._timeGapAverage = timeGapStats.getAverage();
-    streamStats._timeGapWindowMin = timeGapStats.getWindowMin();
-    streamStats._timeGapWindowMax = timeGapStats.getWindowMax();
-    streamStats._timeGapWindowAverage = timeGapStats.getWindowAverage();
-
-    streamStats._ringBufferFramesAvailable = ringBuffer->framesAvailable();
-    streamStats._ringBufferFramesAvailableAverage = ringBuffer->getFramesAvailableAverage();
-    streamStats._ringBufferDesiredJitterBufferFrames = ringBuffer->getDesiredJitterBufferFrames();
-    streamStats._ringBufferStarveCount = ringBuffer->getStarveCount();
-    streamStats._ringBufferConsecutiveNotMixedCount = ringBuffer->getConsecutiveNotMixedCount();
-    streamStats._ringBufferOverflowCount = ringBuffer->getOverflowCount();
-    streamStats._ringBufferSilentFramesDropped = ringBuffer->getSilentFramesDropped();
-
-    return streamStats;
-}*/
-
 void AudioMixerClientData::sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode) {
     char packet[MAX_PACKET_SIZE];
     NodeList* nodeList = NodeList::getInstance();
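
Aside on the removed pushBuffersAfterFrameSend(): it used the erase-while-iterating idiom for QList, where erase() returns the iterator to the next element and the loop only advances manually when nothing was erased. A minimal, self-contained sketch of the same idiom follows; it uses plain ints rather than the project's ring-buffer types and is purely illustrative.

// Erase-while-iterating over a QList of owned pointers (illustrative stand-in types).
#include <QList>
#include <QDebug>
#include <QtAlgorithms>

int main() {
    QList<int*> buffers;
    buffers << new int(1) << new int(0) << new int(3);   // 0 stands in for a "starved" buffer

    QList<int*>::iterator i = buffers.begin();
    while (i != buffers.end()) {
        if (**i == 0) {                  // same shape as the starved-injector branch above
            delete *i;                   // release the buffer before dropping the pointer
            i = buffers.erase(i);        // erase() hands back the iterator to the next element
            continue;                    // do not advance a second time
        }
        ++i;                             // only advance when nothing was erased
    }

    qDebug() << buffers.size();          // 2
    qDeleteAll(buffers);                 // clean up the survivors
    return 0;
}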

Audio.cpp

@@ -899,6 +899,11 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) {

     // parse audio data
     _ringBuffer.parseData(audioByteArray);

+    pushAudioToOutput();
+}
+
+void Audio::pushAudioToOutput() {
+
     if (_audioOutput->bytesFree() == _audioOutput->bufferSize()) {
         // the audio output has no samples to play. set the downstream audio to starved so that it
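
Note: the hunk above, together with the Audio.h change at the end of this diff, splits the receive path in two: parsing stays in processReceivedAudio() while everything that feeds the audio output device moves into the new pushAudioToOutput(). The sketch below shows only that call-flow shape; RingBuffer and the function bodies are stand-ins, not the project's actual classes.

// Self-contained sketch of the split, with stand-in types (illustrative only).
#include <iostream>
#include <string>
#include <vector>

struct RingBuffer {
    std::vector<char> samples;
    void parseData(const std::string& packet) {
        // pretend each byte of the network packet decodes to one sample
        samples.insert(samples.end(), packet.begin(), packet.end());
    }
};

static RingBuffer ringBuffer;

void pushAudioToOutput() {
    // device-facing stage: the real method checks for starvation, sizes the output
    // using networkOutputToOutputRatio, resamples, and writes to the audio device
    std::cout << "pushing " << ringBuffer.samples.size() << " buffered samples" << std::endl;
    ringBuffer.samples.clear();
}

void processReceivedAudio(const std::string& audioByteArray) {
    ringBuffer.parseData(audioByteArray);   // parsing stage stays here
    pushAudioToOutput();                    // output stage is now a separate function
}

int main() {
    processReceivedAudio("abcd");           // prints: pushing 4 buffered samples
    return 0;
}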

@@ -906,10 +911,8 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
         _ringBuffer.setToStarved();
     }

-
-    float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float) _outputFormat.sampleRate())
-        * (_desiredOutputFormat.channelCount() / (float) _outputFormat.channelCount());
-
+    float networkOutputToOutputRatio = (_desiredOutputFormat.sampleRate() / (float)_outputFormat.sampleRate())
+        * (_desiredOutputFormat.channelCount() / (float)_outputFormat.channelCount());

     int numFramesToPush;
     if (Menu::getInstance()->isOptionChecked(MenuOption::DisableQAudioOutputOverflowCheck)) {

@@ -925,15 +928,6 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
     AudioRingBuffer::ConstIterator ringBufferNextOutput;
     if (numFramesToPush > 0 && _ringBuffer.popFrames(&ringBufferNextOutput, numFramesToPush, false)) {

-        /*int numSamplesNeededToStartPlayback = std::min(NETWORK_BUFFER_LENGTH_SAMPLES_STEREO + (_jitterBufferSamples * 2),
-                                                         _ringBuffer.getSampleCapacity());
-
-        if (!_ringBuffer.isNotStarvedOrHasMinimumSamples(numSamplesNeededToStartPlayback)) {
-            // We are still waiting for enough samples to begin playback
-            // qDebug() << numNetworkOutputSamples << " samples so far, waiting for " << numSamplesNeededToStartPlayback;
-            _consecutiveNotMixedCount++;
-        } else {*/
-
         int numNetworkOutputSamples = numFramesToPush * NETWORK_BUFFER_LENGTH_SAMPLES_STEREO;
         int numDeviceOutputSamples = numNetworkOutputSamples / networkOutputToOutputRatio;

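
For a worked example of the conversion above: networkOutputToOutputRatio maps a sample count in the network/mixer format onto the number of samples the output device needs. The sample rates, channel counts, and the value of NETWORK_BUFFER_LENGTH_SAMPLES_STEREO below are assumptions chosen for round numbers, not values taken from this commit.

// Illustrative arithmetic only; the formats and the constant are assumed, not the project's real values.
#include <iostream>

int main() {
    const float desiredSampleRate = 24000.0f;  // assumed network-side (desired) output rate
    const float deviceSampleRate  = 48000.0f;  // assumed audio device rate
    const float desiredChannels   = 2.0f;      // stereo on both sides
    const float deviceChannels    = 2.0f;

    // same formula as the hunk above: rate ratio times channel ratio
    float networkOutputToOutputRatio = (desiredSampleRate / deviceSampleRate)
        * (desiredChannels / deviceChannels);                                              // 0.5

    const int NETWORK_BUFFER_LENGTH_SAMPLES_STEREO = 512;  // assumed frame size
    int numFramesToPush = 3;                                // hypothetical frame count
    int numNetworkOutputSamples = numFramesToPush * NETWORK_BUFFER_LENGTH_SAMPLES_STEREO;  // 1536
    int numDeviceOutputSamples = numNetworkOutputSamples / networkOutputToOutputRatio;     // 3072

    std::cout << numDeviceOutputSamples << std::endl;       // twice as many device samples
    return 0;
}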

@@ -972,7 +966,7 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) {

         // copy the packet from the RB to the output
         linearResampling(ringBufferSamples,
-                         (int16_t*) outputBuffer.data(),
+                         (int16_t*)outputBuffer.data(),
                          numNetworkOutputSamples,
                          numDeviceOutputSamples,
                          _desiredOutputFormat, _outputFormat);

@@ -1005,10 +999,10 @@ void Audio::processReceivedAudio(const QByteArray& audioByteArray) {
         }

         delete[] ringBufferSamples;
-        //}
     }
 }

+
 void Audio::processProceduralAudio(int16_t* monoInput, int numSamples) {

     // zero out the locally injected audio in preparation for audio procedural sounds

Audio.h

@@ -215,6 +215,9 @@ private:
     // Process received audio
     void processReceivedAudio(const QByteArray& audioByteArray);

+    // Pushes frames from the output ringbuffer to the audio output device
+    void pushAudioToOutput();
+
     bool switchInputToAudioDevice(const QAudioDeviceInfo& inputDeviceInfo);
     bool switchOutputToAudioDevice(const QAudioDeviceInfo& outputDeviceInfo);
