Forgot some code in checkBuffersBeforeFrameSend
Corrected updateLastPopOutputTrailingLoudness behavior to match what the old code did.
Commit ccedb1bd20 (parent aead7a6823)
7 changed files with 64 additions and 51 deletions
AudioMixer.cpp

```diff
@@ -113,7 +113,7 @@ void AudioMixer::addStreamToMixForListeningNodeWithStream(PositionalAudioStream*
         distanceBetween = EPSILON;
     }
 
-    if (streamToAdd->getNextOutputTrailingLoudness() / distanceBetween <= _minAudibilityThreshold) {
+    if (streamToAdd->getLastPopOutputTrailingLoudness() / distanceBetween <= _minAudibilityThreshold) {
        // according to mixer performance we have decided this does not get to be mixed in
        // bail out
        return;
```
```diff
@@ -284,7 +284,7 @@ void AudioMixer::prepareMixForListeningNode(Node* node) {
 
             if ((*otherNode != *node || otherNodeStream->shouldLoopbackForNode())
                 && otherNodeStream->lastPopSucceeded()
-                && otherNodeStream->getNextOutputTrailingLoudness() > 0.0f) {
+                && otherNodeStream->getLastPopOutputTrailingLoudness() > 0.0f) {
 
                 addStreamToMixForListeningNodeWithStream(otherNodeStream, nodeAudioStream);
             }
```
```diff
@@ -544,16 +544,16 @@ void AudioMixer::run() {
        }
 
        foreach (const SharedNodePointer& node, nodeList->getNodeHash()) {
-            if (node->getActiveSocket() && node->getLinkedData()) {
+            if (node->getLinkedData()) {
 
                AudioMixerClientData* nodeData = (AudioMixerClientData*)node->getLinkedData();
 
-                // request a frame from each audio stream. a pointer to the popped data is stored as a member
-                // in InboundAudioStream. That's how the popped audio data will be read for mixing
-                nodeData->audioStreamsPopFrameForMixing();
+                // this function will request a frame from each audio stream.
+                // a pointer to the popped data is stored as a member in InboundAudioStream.
+                // That's how the popped audio data will be read for mixing (but only if the pop was successful)
+                nodeData->checkBuffersBeforeFrameSend(_sourceUnattenuatedZone, _listenerUnattenuatedZone);
 
-                if (node->getType() == NodeType::Agent
-                    && ((AudioMixerClientData*)node->getLinkedData())->getAvatarAudioStream()) {
+                if (node->getType() == NodeType::Agent && node->getActiveSocket()
+                    && nodeData->getAvatarAudioStream()) {
 
                    prepareMixForListeningNode(node.data());
```
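Taken together, the AudioMixer changes split buffer bookkeeping from mixing: checkBuffersBeforeFrameSend() now runs for every node that has linked data, a mix is prepared only for Agent nodes that also have an active socket, and the audibility cull reads the loudness of the frame that was actually popped. A minimal sketch of that cull follows, with plain parameters standing in for the stream getter and the mixer's members (the helper name is illustrative, not from the codebase):

```cpp
// Sketch only: the audibility check from addStreamToMixForListeningNodeWithStream().
// The diff bails out when loudness / distance <= _minAudibilityThreshold; this helper
// returns the complementary "is audible" result.
bool streamIsAudibleSketch(float lastPopTrailingLoudness, float distanceBetween, float minAudibilityThreshold) {
    // loudness is attenuated roughly as 1/distance; anything at or below the
    // threshold is considered too quiet to be worth mixing
    return (lastPopTrailingLoudness / distanceBetween) > minAudibilityThreshold;
}
```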
AudioMixerClientData.cpp

```diff
@@ -98,10 +98,23 @@ int AudioMixerClientData::parseData(const QByteArray& packet) {
     return 0;
 }
 
-void AudioMixerClientData::audioStreamsPopFrameForMixing() {
+void AudioMixerClientData::checkBuffersBeforeFrameSend(AABox* checkSourceZone, AABox* listenerZone) {
     QHash<QUuid, PositionalAudioStream*>::ConstIterator i;
     for (i = _audioStreams.constBegin(); i != _audioStreams.constEnd(); i++) {
-        i.value()->popFrames(1);
+        PositionalAudioStream* stream = i.value();
+        if (stream->popFrames(1)) {
+            // this is a ring buffer that is ready to go
+
+            // calculate the trailing avg loudness for the next frame
+            // that would be mixed in
+            stream->updateLastPopOutputTrailingLoudness();
+
+            if (checkSourceZone && checkSourceZone->contains(stream->getPosition())) {
+                stream->setListenerUnattenuatedZone(listenerZone);
+            } else {
+                stream->setListenerUnattenuatedZone(NULL);
+            }
+        }
     }
 }
 
```
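checkBuffersBeforeFrameSend() now guards all per-stream work on popFrames(1) succeeding, so the trailing loudness is only refreshed for a frame that was actually popped, and the unattenuated-zone tag is reassigned at the same point. The zone decision itself is small enough to restate; the sketch below uses a hypothetical Box/Vec3 pair in place of the engine's AABox and position type:

```cpp
// Sketch only: the zone-tagging decision inside checkBuffersBeforeFrameSend().
// Box and Vec3 are hypothetical stand-ins, not the engine's AABox/position types.
struct Vec3 { float x, y, z; };

struct Box {
    Vec3 corner;
    Vec3 size;
    bool contains(const Vec3& p) const {
        return p.x >= corner.x && p.x <= corner.x + size.x
            && p.y >= corner.y && p.y <= corner.y + size.y
            && p.z >= corner.z && p.z <= corner.z + size.z;
    }
};

// A stream whose source sits inside the unattenuated-source zone keeps the listener
// zone as its tag; any other stream has the tag cleared so it attenuates normally.
Box* pickListenerZone(Box* checkSourceZone, Box* listenerZone, const Vec3& streamPosition) {
    if (checkSourceZone && checkSourceZone->contains(streamPosition)) {
        return listenerZone;
    }
    return nullptr;
}
```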
AudioMixerClientData.h

```diff
@@ -27,7 +27,7 @@ public:
 
     int parseData(const QByteArray& packet);
 
-    void audioStreamsPopFrameForMixing();
+    void checkBuffersBeforeFrameSend(AABox* checkSourceZone, AABox* listenerZone);
 
     void removeDeadInjectedStreams();
 
```
AudioRingBuffer.cpp

```diff
@@ -218,17 +218,26 @@ int16_t* AudioRingBuffer::shiftedPositionAccomodatingWrap(int16_t* position, int
     }
 }
 
-float AudioRingBuffer::getNextOutputFrameLoudness() const {
+float AudioRingBuffer::getFrameLoudness(const int16_t* frameStart) const {
     float loudness = 0.0f;
-    int16_t* sampleAt = _nextOutput;
-    int16_t* _bufferLastAt = _buffer + _sampleCapacity - 1;
-    if (samplesAvailable() >= _numFrameSamples) {
+    const int16_t* sampleAt = frameStart;
+    const int16_t* _bufferLastAt = _buffer + _sampleCapacity - 1;
     for (int i = 0; i < _numFrameSamples; ++i) {
         loudness += fabsf(*sampleAt);
         sampleAt = sampleAt == _bufferLastAt ? _buffer : sampleAt + 1;
-        }
-        loudness /= _numFrameSamples;
-        loudness /= MAX_SAMPLE_VALUE;
     }
+    loudness /= _numFrameSamples;
+    loudness /= MAX_SAMPLE_VALUE;
     return loudness;
 }
+
+float AudioRingBuffer::getFrameLoudness(ConstIterator frameStart) const {
+    return getFrameLoudness(&(*frameStart));
+}
+
+float AudioRingBuffer::getNextOutputFrameLoudness() const {
+    return getFrameLoudness(_nextOutput);
+}
```
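getFrameLoudness() computes a frame's loudness as the mean absolute sample value divided by MAX_SAMPLE_VALUE, and the refactor lets that same math start from any frame position (raw pointer or ConstIterator) instead of only _nextOutput; note that the old samplesAvailable() guard is gone, so the caller is now responsible for passing a valid frame. A self-contained sketch of the same calculation over a flat array (no ring-buffer wrap), assuming MAX_SAMPLE_VALUE is the int16 maximum of 32767:

```cpp
#include <cmath>
#include <cstdint>

// Sketch only: the mean-absolute-value loudness from getFrameLoudness(), minus the
// wrap-around handling. MAX_SAMPLE_VALUE is assumed here to be 32767 (int16 max).
float frameLoudnessSketch(const int16_t* frameStart, int numFrameSamples) {
    const float MAX_SAMPLE_VALUE = 32767.0f;
    float loudness = 0.0f;
    for (int i = 0; i < numFrameSamples; ++i) {
        loudness += std::fabs(static_cast<float>(frameStart[i]));
    }
    loudness /= numFrameSamples;    // average absolute sample value
    loudness /= MAX_SAMPLE_VALUE;   // normalize into roughly [0, 1]
    return loudness;
}
```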
AudioRingBuffer.h

```diff
@@ -71,6 +71,10 @@ public:
     int getOverflowCount() const { return _overflowCount; } /// how many times has the ring buffer has overwritten old data
 
     int addSilentFrame(int numSilentSamples);
+
+private:
+    float getFrameLoudness(const int16_t* frameStart) const;
+
 protected:
     // disallow copying of AudioRingBuffer objects
     AudioRingBuffer(const AudioRingBuffer&);
```
```diff
@@ -106,7 +110,7 @@ public:
 
         bool operator==(const ConstIterator& rhs) { return _at == rhs._at; }
         bool operator!=(const ConstIterator& rhs) { return _at != rhs._at; }
-        int16_t operator*() { return *_at; }
+        const int16_t& operator*() { return *_at; }
 
         ConstIterator& operator=(const ConstIterator& rhs) {
             _capacity = rhs._capacity;
```
```diff
@@ -138,7 +142,7 @@ public:
             return tmp;
         }
 
-        int16_t operator[] (int i) {
+        const int16_t& operator[] (int i) {
             return *atShiftedBy(i);
         }
 
```
```diff
@@ -175,6 +179,8 @@ public:
     };
 
     ConstIterator nextOutput() const { return ConstIterator(_buffer, _sampleCapacity, _nextOutput); }
+
+    float getFrameLoudness(ConstIterator frameStart) const;
 };
 
 #endif // hifi_AudioRingBuffer_h
```
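The ConstIterator tweaks are what make the new getFrameLoudness(ConstIterator) overload work: operator*() and operator[] now return const int16_t& instead of a copy, so &(*frameStart) in AudioRingBuffer.cpp yields a pointer into the ring buffer rather than the address of a temporary. A minimal, hypothetical illustration of the difference:

```cpp
#include <cstdint>

// Sketch only: why returning a reference from operator*() matters for &(*it).
// ByValueIter and ByRefIter are hypothetical stand-ins, not the real ConstIterator.
struct ByValueIter {
    const int16_t* _at;
    int16_t operator*() const { return *_at; }        // returns a copy (an rvalue)
};

struct ByRefIter {
    const int16_t* _at;
    const int16_t& operator*() const { return *_at; } // returns a reference into the buffer
};

int main() {
    int16_t samples[4] = { 10, 20, 30, 40 };
    ByRefIter it{ samples };
    const int16_t* frameStart = &(*it);  // fine: points at samples[0]
    // With ByValueIter, &(*it) would take the address of a temporary and fail to compile,
    // which is exactly what getFrameLoudness(&(*frameStart)) needs to avoid.
    return *frameStart == 10 ? 0 : 1;
}
```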
PositionalAudioStream.cpp

```diff
@@ -29,32 +29,26 @@ PositionalAudioStream::PositionalAudioStream(PositionalAudioStream::Type type, b
     _orientation(0.0f, 0.0f, 0.0f, 0.0f),
     _shouldLoopbackForNode(false),
     _isStereo(isStereo),
-    _nextOutputTrailingLoudness(0.0f),
+    _lastPopOutputTrailingLoudness(0.0f),
     _listenerUnattenuatedZone(NULL)
 {
 }
 
-int PositionalAudioStream::parseData(const QByteArray& packet) {
-    int bytesRead = InboundAudioStream::parseData(packet);
-    updateNextOutputTrailingLoudness();
-    return bytesRead;
-}
-
-void PositionalAudioStream::updateNextOutputTrailingLoudness() {
-    float nextLoudness = _ringBuffer.getNextOutputFrameLoudness();
+void PositionalAudioStream::updateLastPopOutputTrailingLoudness() {
+    float lastPopLoudness = _ringBuffer.getFrameLoudness(_lastPopOutput);
 
     const int TRAILING_AVERAGE_FRAMES = 100;
     const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
     const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;
     const float LOUDNESS_EPSILON = 0.000001f;
 
-    if (nextLoudness >= _nextOutputTrailingLoudness) {
-        _nextOutputTrailingLoudness = nextLoudness;
+    if (lastPopLoudness >= _lastPopOutputTrailingLoudness) {
+        _lastPopOutputTrailingLoudness = lastPopLoudness;
     } else {
-        _nextOutputTrailingLoudness = (_nextOutputTrailingLoudness * PREVIOUS_FRAMES_RATIO) + (CURRENT_FRAME_RATIO * nextLoudness);
+        _lastPopOutputTrailingLoudness = (_lastPopOutputTrailingLoudness * PREVIOUS_FRAMES_RATIO) + (CURRENT_FRAME_RATIO * lastPopLoudness);
 
-        if (_nextOutputTrailingLoudness < LOUDNESS_EPSILON) {
-            _nextOutputTrailingLoudness = 0;
+        if (_lastPopOutputTrailingLoudness < LOUDNESS_EPSILON) {
+            _lastPopOutputTrailingLoudness = 0;
         }
     }
 }
```
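The trailing loudness is an asymmetric moving average: a louder frame replaces the trailing value immediately, a quieter frame only pulls it down with a 1/TRAILING_AVERAGE_FRAMES weight, and values below LOUDNESS_EPSILON snap to zero so the mixer's audibility cull can skip silent streams. The same update rule as a standalone function (illustrative, not the class method):

```cpp
// Sketch only: the update performed by updateLastPopOutputTrailingLoudness(), written
// as a pure function of the previous trailing value and the loudness of the frame just popped.
float trailingLoudnessSketch(float trailing, float lastPopLoudness) {
    const int TRAILING_AVERAGE_FRAMES = 100;
    const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
    const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;
    const float LOUDNESS_EPSILON = 0.000001f;

    if (lastPopLoudness >= trailing) {
        return lastPopLoudness;                 // attack: jump straight to the louder value
    }
    // release: slow decay toward the quieter value
    trailing = trailing * PREVIOUS_FRAMES_RATIO + lastPopLoudness * CURRENT_FRAME_RATIO;
    if (trailing < LOUDNESS_EPSILON) {
        trailing = 0.0f;                        // treat near-silence as silence
    }
    return trailing;
}
```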
PositionalAudioStream.h

```diff
@@ -29,12 +29,10 @@ public:
 
     PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo = false, bool dynamicJitterBuffers = false);
 
-    int parseData(const QByteArray& packet);
-
     virtual AudioStreamStats getAudioStreamStats() const;
 
-    void updateNextOutputTrailingLoudness();
-    float getNextOutputTrailingLoudness() const { return _nextOutputTrailingLoudness; }
+    void updateLastPopOutputTrailingLoudness();
+    float getLastPopOutputTrailingLoudness() const { return _lastPopOutputTrailingLoudness; }
 
     bool shouldLoopbackForNode() const { return _shouldLoopbackForNode; }
     bool isStereo() const { return _isStereo; }
```
```diff
@@ -50,13 +48,6 @@ protected:
     PositionalAudioStream(const PositionalAudioStream&);
     PositionalAudioStream& operator= (const PositionalAudioStream&);
 
-    /// parses the info between the seq num and the audio data in the network packet and calculates
-    /// how many audio samples this packet contains
-    virtual int parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) = 0;
-
-    /// parses the audio data in the network packet
-    virtual int parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) = 0;
-
     int parsePositionalData(const QByteArray& positionalByteArray);
 
 protected:
```
```diff
@@ -67,7 +58,7 @@ protected:
     bool _shouldLoopbackForNode;
     bool _isStereo;
 
-    float _nextOutputTrailingLoudness;
+    float _lastPopOutputTrailingLoudness;
     AABox* _listenerUnattenuatedZone;
 };
 
```