Mirror of https://github.com/overte-org/overte.git
Audiomixer now working (added call to updateNextOutputTrailingLoudness())

commit 83ba4b9a1c (parent 3d22a11e28)
5 changed files with 39 additions and 21 deletions
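For context before the diff: the mixer (first hunk below) skips any stream whose getNextOutputTrailingLoudness() reads 0.0f, and the fix in this commit is to have PositionalAudioRingBuffer override parseData() so the trailing loudness is refreshed right after each packet is parsed, presumably so that value no longer sits at a stale zero. A minimal standalone sketch of that call path, using simplified stand-in types rather than the real Qt/Overte classes:

// Minimal sketch (not the real classes): the base stream parses a packet into
// a buffer, and the positional subclass overrides parseData() so the trailing
// loudness is refreshed right after every parsed packet.
#include <cmath>
#include <cstdio>
#include <vector>

class InboundAudioStreamSketch {
public:
    virtual ~InboundAudioStreamSketch() = default;

    // stand-in for InboundAudioStream::parseData(const QByteArray&)
    virtual int parseData(const std::vector<short>& packet) {
        _samples.insert(_samples.end(), packet.begin(), packet.end());
        return static_cast<int>(packet.size());
    }

protected:
    std::vector<short> _samples;   // stand-in for the ring buffer
};

class PositionalAudioRingBufferSketch : public InboundAudioStreamSketch {
public:
    // the override this commit adds: parse first, then refresh the loudness
    int parseData(const std::vector<short>& packet) override {
        int bytesRead = InboundAudioStreamSketch::parseData(packet);
        updateNextOutputTrailingLoudness();
        return bytesRead;
    }

    float getNextOutputTrailingLoudness() const { return _trailingLoudness; }

    void updateNextOutputTrailingLoudness() {
        // stand-in for smoothing _ringBuffer.getNextOutputFrameLoudness()
        float sum = 0.0f;
        for (short s : _samples) {
            sum += std::fabs(static_cast<float>(s));
        }
        float frameLoudness = _samples.empty() ? 0.0f : sum / _samples.size();
        _trailingLoudness = 0.9f * _trailingLoudness + 0.1f * frameLoudness;
    }

private:
    float _trailingLoudness = 0.0f;
};

int main() {
    PositionalAudioRingBufferSketch stream;
    stream.parseData({1000, -2000, 3000});
    // without the override this would stay 0.0 and the mixer would skip the stream
    std::printf("trailing loudness: %f\n", stream.getNextOutputTrailingLoudness());
    return 0;
}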
@@ -98,19 +98,15 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf
     // if the frame to be mixed is silent, don't mix it
     if (bufferToAdd->getNextOutputTrailingLoudness() == 0.0f) {
         bufferToAdd->popFrames(1);
-        printf("trailing loudness too soft: not mixing!\n");
         return;
     }

     // get pointer to frame to be mixed. If the stream cannot provide a frame (is starved), bail
     AudioRingBuffer::ConstIterator nextOutputStart;
     if (!bufferToAdd->popFrames(&nextOutputStart, 1)) {
-        printf("stream is starved! not mixing!\n");
         return;
     }

-    printf("mixing stream\n");
-
     float bearingRelativeAngleToSource = 0.0f;
     float attenuationCoefficient = 1.0f;
     int numSamplesDelay = 0;
@@ -221,7 +217,7 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf



-    if (!bufferToAdd->isStereo() && shouldAttenuate) {
+    if (!bufferToAdd->isStereo() && shouldAttenuate && false) {
         // this is a mono buffer, which means it gets full attenuation and spatialization

         // if the bearing relative angle to source is > 0 then the delayed channel is the right one
@@ -269,7 +265,20 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf
             }
         }
     } else {
-        // this is a stereo buffer or an unattenuated buffer, don't perform spatialization
+
+        int stereoDivider = bufferToAdd->isStereo() ? 1 : 2;
+
+        if (!shouldAttenuate) {
+            attenuationCoefficient = 1.0f;
+        }
+
+        for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s++) {
+            _clientSamples[s] = glm::clamp(_clientSamples[s] + (int)(nextOutputStart[s / stereoDivider] * attenuationCoefficient),
+                                           MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
+        }
+
+
+        /*// this is a stereo buffer or an unattenuated buffer, don't perform spatialization
         for (int s = 0; s < NETWORK_BUFFER_LENGTH_SAMPLES_STEREO; s += 4) {

             int stereoDivider = bufferToAdd->isStereo() ? 1 : 2;
@@ -293,7 +302,7 @@ void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuf
                                              + (int) (nextOutputStart[(s / stereoDivider) + (3 / stereoDivider)]
                                                       * attenuationCoefficient),
                                              MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
-        }
+        }*/
     }
 }

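The rewritten else branch replaces the old four-samples-per-iteration loop (now commented out) with a single pass: each interleaved output sample s pulls source sample s / stereoDivider (so a mono frame feeds both channels), scales it by attenuationCoefficient (pinned to 1.0f when attenuation is off), accumulates into _clientSamples, and clamps the result. Note that the earlier hunk also appends && false to the mono condition, so for now every buffer takes this non-spatialized path. A standalone sketch of the accumulate-and-clamp pattern, with illustrative constants standing in for NETWORK_BUFFER_LENGTH_SAMPLES_STEREO and the MIN/MAX sample bounds (assumed here to be the int16 limits):

// Sketch of the accumulate-and-clamp mix loop with stand-in names and sizes.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
    const int MIN_SAMPLE_VALUE = INT16_MIN;        // assumed bound
    const int MAX_SAMPLE_VALUE = INT16_MAX;        // assumed bound
    const int NUM_STEREO_SAMPLES = 8;              // stand-in for NETWORK_BUFFER_LENGTH_SAMPLES_STEREO

    std::vector<int16_t> mixBuffer(NUM_STEREO_SAMPLES, 0);        // stand-in for _clientSamples
    std::vector<int16_t> source = {30000, -30000, 10000, -10000}; // one mono frame
    bool sourceIsStereo = false;
    float attenuationCoefficient = 1.0f;           // forced to 1.0f when !shouldAttenuate

    // mono sources use stereoDivider == 2, so s / 2 repeats each source index
    // for the left and right interleaved output slots
    int stereoDivider = sourceIsStereo ? 1 : 2;

    // accumulate the same frame twice to show the clamp engaging at the bounds
    for (int pass = 0; pass < 2; pass++) {
        for (int s = 0; s < NUM_STEREO_SAMPLES; s++) {
            int mixed = mixBuffer[s] + (int)(source[s / stereoDivider] * attenuationCoefficient);
            mixBuffer[s] = (int16_t)std::clamp(mixed, MIN_SAMPLE_VALUE, MAX_SAMPLE_VALUE);
        }
    }

    for (int s = 0; s < NUM_STEREO_SAMPLES; s++) {
        std::printf("%d ", mixBuffer[s]);
    }
    std::printf("\n");
    return 0;
}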
@@ -318,6 +327,8 @@ void AudioMixer::prepareMixForListeningNode(Node* node) {

                 if (*otherNode != *node || otherNodeBuffer->shouldLoopbackForNode()) {
                     addBufferToMixForListeningNodeWithBuffer(otherNodeBuffer, nodeRingBuffer);
+                } else {
+                    otherNodeBuffer->popFrames(1);
                 }
             }
         }
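The added else branch pops and discards one frame from streams that are not mixed for this listener (the listener's own stream when loopback is off), presumably so every stream still consumes exactly one frame per mix pass and stays in step with the others. A minimal sketch of that mix-or-discard pattern, using hypothetical stand-in types rather than the real Node and ring-buffer classes:

// Sketch: every stream advances one frame per mix tick, mixed or not.
#include <cstdio>
#include <deque>
#include <vector>

struct StreamSketch {
    std::deque<int> frames;             // queued frames (stand-in for the ring buffer)
    bool isListenerOwnStream = false;   // stand-in for (*otherNode == *node) without loopback

    bool popFrame(int* frame) {
        if (frames.empty()) {
            return false;               // starved: nothing to pop
        }
        if (frame) {
            *frame = frames.front();
        }
        frames.pop_front();
        return true;
    }
};

int main() {
    std::vector<StreamSketch> streams(2);
    streams[0].frames = {10, 20, 30};
    streams[1].frames = {40, 50, 60};
    streams[1].isListenerOwnStream = true;

    int mix = 0;
    for (StreamSketch& stream : streams) {
        int frame = 0;
        if (!stream.isListenerOwnStream) {
            if (stream.popFrame(&frame)) {
                mix += frame;           // addBufferToMixForListeningNodeWithBuffer(...)
            }
        } else {
            stream.popFrame(nullptr);   // the new else branch: discard so the stream keeps pace
        }
    }
    std::printf("mix = %d\n", mix);
    return 0;
}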
@@ -86,7 +86,6 @@ int InboundAudioStream::parseData(const QByteArray& packet) {
     }

     if (_isStarved && _ringBuffer.samplesAvailable() >= _desiredJitterBufferFrames * _ringBuffer.getNumFrameSamples()) {
-        printf("\nstream refilled from starve!\n");
         _isStarved = false;
     }

@@ -130,26 +129,25 @@ bool InboundAudioStream::popFrames(AudioRingBuffer::ConstIterator* nextOutput, i
 }

 bool InboundAudioStream::shouldPop(int numSamples, bool starveOnFail) {
-    printf("\nshouldPop()\n");
-
     if (_isStarved) {
-        printf("\t we're starved, not popping\n");
+        // we're still refilling; don't mix
        _consecutiveNotMixedCount++;
        return false;
     }

     if (_ringBuffer.samplesAvailable() >= numSamples) {
-        printf("have requested samples and not starved, popping\n");
+        // we have enough samples to pop, so we're good to mix
        _hasStarted = true;
        return true;
-    } else {
-        if (starveOnFail) {
-            printf("don't have enough samples; starved!\n");
-            setToStarved();
-            _consecutiveNotMixedCount++;
-        }
-        return false;
     }
+
+    // we don't have enough samples, so set this stream to starve
+    // if starveOnFail is true
+    if (starveOnFail) {
+        setToStarved();
+        _consecutiveNotMixedCount++;
+    }
+    return false;
 }

 void InboundAudioStream::setToStarved() {
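Taken together with the refill check in parseData() above, the rewritten shouldPop() gives the stream a simple hysteresis: an underflow with starveOnFail marks the stream starved, and the flag clears only once samplesAvailable() reaches _desiredJitterBufferFrames * getNumFrameSamples(). A standalone sketch of that behavior with illustrative numbers (the real frame size and desired frame count come from the jitter-buffer settings):

// Sketch of the starve/refill hysteresis; numbers are illustrative only.
#include <cstdio>

struct StreamStateSketch {
    int samplesAvailable = 0;
    bool isStarved = true;
    int frameSamples = 240;              // stand-in for _ringBuffer.getNumFrameSamples()
    int desiredJitterBufferFrames = 3;   // stand-in for _desiredJitterBufferFrames

    // modeled on InboundAudioStream::parseData(): buffer the packet, then
    // clear the starve flag only once the desired backlog has built up
    void onPacket(int samples) {
        samplesAvailable += samples;
        if (isStarved && samplesAvailable >= desiredJitterBufferFrames * frameSamples) {
            isStarved = false;
        }
    }

    // modeled on InboundAudioStream::shouldPop(); the real code pops via
    // popFrames(), consuming here just keeps the sketch self-contained
    bool shouldPop(int numSamples, bool starveOnFail) {
        if (isStarved) {
            return false;                // still refilling; don't mix
        }
        if (samplesAvailable >= numSamples) {
            samplesAvailable -= numSamples;
            return true;
        }
        if (starveOnFail) {
            isStarved = true;            // underflow: wait for a full refill
        }
        return false;
    }
};

int main() {
    StreamStateSketch stream;
    for (int tick = 0; tick < 6; tick++) {
        stream.onPacket(240);            // one frame arrives per tick
        bool mixed = stream.shouldPop(240, true);
        std::printf("tick %d: available=%d starved=%d mixed=%d\n",
                    tick, stream.samplesAvailable, stream.isStarved, mixed);
    }
    return 0;
}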
@@ -51,7 +51,7 @@ public:
    void resetSequenceNumberStats() { _incomingSequenceNumberStats.reset(); }


-    int parseData(const QByteArray& packet);
+    virtual int parseData(const QByteArray& packet);

    bool popFrames(int numFrames, bool starveOnFail = true);
    bool popFrames(int16_t* dest, int numFrames, bool starveOnFail = true);
@@ -62,6 +62,7 @@ public:

    /// this function should be called once per second to ensure the seq num stats history spans ~30 seconds
    AudioStreamStats updateSeqHistoryAndGetAudioStreamStats();

    virtual AudioStreamStats getAudioStreamStats() const;

    int getCalculatedDesiredJitterBufferFrames() const;
@@ -34,6 +34,12 @@ PositionalAudioRingBuffer::PositionalAudioRingBuffer(PositionalAudioRingBuffer::
 {
 }

+int PositionalAudioRingBuffer::parseData(const QByteArray& packet) {
+    int bytesRead = InboundAudioStream::parseData(packet);
+    updateNextOutputTrailingLoudness();
+    return bytesRead;
+}
+
 void PositionalAudioRingBuffer::updateNextOutputTrailingLoudness() {
     float nextLoudness = _ringBuffer.getNextOutputFrameLoudness();

@@ -28,7 +28,9 @@ public:
    };

    PositionalAudioRingBuffer(PositionalAudioRingBuffer::Type type, bool isStereo = false, bool dynamicJitterBuffers = false);

+    int parseData(const QByteArray& packet);
+
    virtual AudioStreamStats getAudioStreamStats() const;

    void updateNextOutputTrailingLoudness();