Don't send reverb data as often

parent 6df37b6d4a
commit d4ec337cae

3 changed files with 68 additions and 24 deletions
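
In rough terms, the mixer previously wrote a bool plus, for listeners inside a reverb zone, two floats into every mixed-audio packet; with this change it writes a one-byte bitset in every packet and appends the two floats only when the reverb settings change or on about 1% of packets (CHANCE_OF_SEND). A back-of-the-envelope sketch of the steady-state header cost follows; it is not part of the commit and assumes sizeof(bool) == 1 and unchanged zone settings:

#include <cstdio>

// Hypothetical check: bytes of reverb header per mixed-audio packet for a
// listener inside a reverb zone, before and after this commit.
int main() {
    const double CHANCE_OF_SEND = 0.01;                          // same constant as in AudioMixer::run()
    const double oldBytes = sizeof(bool) + 2 * sizeof(float);    // bool + reverbTime + wetLevel, every packet
    const double newBytes = sizeof(unsigned char)                // one-byte bitset, every packet
                          + CHANCE_OF_SEND * 2 * sizeof(float);  // full reverb params only ~1% of the time
    std::printf("old: %.0f bytes/packet, new (steady state): %.2f bytes/packet\n", oldBytes, newBytes);
    return 0;
}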
|
AudioMixer.cpp

@@ -731,30 +731,56 @@ void AudioMixer::run() {
     dataAt += sizeof(quint16);
 
     // Pack stream properties
-    bool inAZone = false;
+    bool hasReverb = false;
+    float reverbTime;
+    float wetLevel;
+
     // find reverb properties
     for (int i = 0; i < _zoneReverbSettings.size(); ++i) {
         AudioMixerClientData* data = static_cast<AudioMixerClientData*>(node->getLinkedData());
-        glm::vec3 streamPosition = data->getAvatarAudioStream()->getPosition();
+        AvatarAudioStream* stream = data->getAvatarAudioStream();
+        glm::vec3 streamPosition = stream->getPosition();
         if (_audioZones[_zoneReverbSettings[i].zone].contains(streamPosition)) {
-            bool hasReverb = true;
-            float reverbTime = _zoneReverbSettings[i].reverbTime;
-            float wetLevel = _zoneReverbSettings[i].wetLevel;
-
-            memcpy(dataAt, &hasReverb, sizeof(bool));
-            dataAt += sizeof(bool);
-            memcpy(dataAt, &reverbTime, sizeof(float));
-            dataAt += sizeof(float);
-            memcpy(dataAt, &wetLevel, sizeof(float));
-            dataAt += sizeof(float);
-
-            inAZone = true;
+            hasReverb = true;
+            reverbTime = _zoneReverbSettings[i].reverbTime;
+            wetLevel = _zoneReverbSettings[i].wetLevel;
             break;
         }
     }
-    if (!inAZone) {
-        bool hasReverb = false;
-        memcpy(dataAt, &hasReverb, sizeof(bool));
-        dataAt += sizeof(bool);
+
+    AvatarAudioStream* stream = nodeData->getAvatarAudioStream();
+    bool dataChanged = (stream->hasReverb() != hasReverb) ||
+                       (stream->hasReverb() && (stream->getRevebTime() != reverbTime ||
+                                                stream->getWetLevel() != wetLevel));
+    // Update stream
+    if (hasReverb) {
+        stream->setReverb(reverbTime, wetLevel);
+    } else {
+        stream->clearReverb();
+    }
+
+    // Send at change or every so often
+    float CHANCE_OF_SEND = 0.01;
+    bool sendData = dataChanged || (randFloat() < CHANCE_OF_SEND);
+
+    unsigned char bitset = 0;
+    if (sendData) {
+        setAtBit(bitset, HAS_DATA_BIT);
+        if (hasReverb) {
+            setAtBit(bitset, HAS_REVERB_BIT);
+        }
+
+        memcpy(dataAt, &bitset, sizeof(unsigned char));
+        dataAt += sizeof(unsigned char);
+
+        if (hasReverb) {
+            memcpy(dataAt, &reverbTime, sizeof(float));
+            dataAt += sizeof(float);
+            memcpy(dataAt, &wetLevel, sizeof(float));
+            dataAt += sizeof(float);
+        }
+    } else {
+        memcpy(dataAt, &bitset, sizeof(unsigned char));
+        dataAt += sizeof(unsigned char);
     }
 
     // pack mixed audio samples
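
The packing logic above boils down to a small wire format: a one-byte bitset carrying HAS_DATA_BIT and HAS_REVERB_BIT, optionally followed by reverbTime and wetLevel as two floats. The standalone sketch below mirrors that layout; setAtBitLocal/oneAtBitLocal are hypothetical stand-ins for the SharedUtil helpers setAtBit/oneAtBit used in the diff, whose exact bit-indexing convention is assumed here rather than taken from the source:

#include <cassert>
#include <cstring>

// Sketch of the reverb header layout written by AudioMixer::run() after this change.
static void setAtBitLocal(unsigned char& byte, int bitIndex) { byte |= (1 << bitIndex); }
static bool oneAtBitLocal(unsigned char byte, int bitIndex) { return (byte >> bitIndex) & 1; }

const int HAS_DATA_BIT = 0;    // same values as the new constants in InboundAudioStream.h
const int HAS_REVERB_BIT = 1;

// Packs the header into dataAt and returns the number of bytes written:
// one byte for a "no data" packet, one or nine bytes for a "data" packet.
static int packReverbHeader(unsigned char* dataAt, bool sendData, bool hasReverb,
                            float reverbTime, float wetLevel) {
    unsigned char* start = dataAt;
    unsigned char bitset = 0;
    if (sendData) {
        setAtBitLocal(bitset, HAS_DATA_BIT);
        if (hasReverb) { setAtBitLocal(bitset, HAS_REVERB_BIT); }
    }
    std::memcpy(dataAt, &bitset, sizeof(unsigned char));
    dataAt += sizeof(unsigned char);
    if (sendData && hasReverb) {
        std::memcpy(dataAt, &reverbTime, sizeof(float));
        dataAt += sizeof(float);
        std::memcpy(dataAt, &wetLevel, sizeof(float));
        dataAt += sizeof(float);
    }
    return static_cast<int>(dataAt - start);
}

int main() {
    unsigned char buffer[16];
    int written = packReverbHeader(buffer, true, true, 2.5f, 0.3f);
    assert(written == 1 + 2 * static_cast<int>(sizeof(float)));
    assert(oneAtBitLocal(buffer[0], HAS_DATA_BIT) && oneAtBitLocal(buffer[0], HAS_REVERB_BIT));
    return 0;
}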
InboundAudioStream.cpp

@@ -13,6 +13,7 @@
 
 #include "InboundAudioStream.h"
 #include "PacketHeaders.h"
+#include <SharedUtil.h>
 
 const int STARVE_HISTORY_CAPACITY = 50;
 
@@ -83,6 +84,12 @@ void InboundAudioStream::clearBuffer() {
     _currentJitterBufferFrames = 0;
 }
 
+void InboundAudioStream::setReverb(float reverbTime, float wetLevel) {
+    _hasReverb = true;
+    _reverbTime = reverbTime;
+    _wetLevel = wetLevel;
+}
+
 void InboundAudioStream::perSecondCallbackForUpdatingStats() {
     _incomingSequenceNumberStats.pushStatsToHistory();
     _timeGapStatsForDesiredCalcOnTooManyStarves.currentIntervalComplete();
@@ -165,14 +172,19 @@ int InboundAudioStream::parseData(const QByteArray& packet) {
 int InboundAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
     int read = 0;
     if (type == PacketTypeMixedAudio) {
-        memcpy(&_hasReverb, packetAfterSeqNum.data() + read, sizeof(bool));
-        read += sizeof(bool);
-
-        if (_hasReverb) {
-            memcpy(&_reverbTime, packetAfterSeqNum.data() + read, sizeof(float));
-            read += sizeof(float);
-            memcpy(&_wetLevel, packetAfterSeqNum.data() + read, sizeof(float));
-            read += sizeof(float);
+        char bitset;
+        memcpy(&bitset, packetAfterSeqNum.data() + read, sizeof(char));
+        read += sizeof(char);
+
+        bool hasData = oneAtBit(bitset, HAS_DATA_BIT);
+        if (hasData) {
+            _hasReverb = oneAtBit(bitset, HAS_REVERB_BIT);
+            if (_hasReverb) {
+                memcpy(&_reverbTime, packetAfterSeqNum.data() + read, sizeof(float));
+                read += sizeof(float);
+                memcpy(&_wetLevel, packetAfterSeqNum.data() + read, sizeof(float));
+                read += sizeof(float);
+            }
         }
     }
 
@@ -206,7 +218,7 @@ int InboundAudioStream::writeDroppableSilentSamples(int silentSamples) {
 
         _framesAvailableStat.reset();
     }
 
     int ret = _ringBuffer.addSilentSamples(silentSamples - numSilentFramesToDrop * samplesPerFrame);
 
     return ret;
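
On the receive side, parseStreamProperties now touches _hasReverb, _reverbTime and _wetLevel only when HAS_DATA_BIT is set; packets carrying an empty bitset leave the last parsed values in place, which is what lets the mixer resend the full reverb data only on change or as an occasional refresh. A toy model of that hold-last-state behavior (illustrative only, not the real InboundAudioStream class):

#include <cstdio>

// Toy receive-side state: a "no data" header leaves previously parsed values alone.
struct ReverbState {
    bool hasReverb = false;
    float reverbTime = 0.0f;
    float wetLevel = 0.0f;

    void parseHeader(bool hasData, bool reverbFlag, float time, float wet) {
        if (!hasData) { return; }             // bitset only: keep last known state
        hasReverb = reverbFlag;
        if (hasReverb) { reverbTime = time; wetLevel = wet; }
    }
};

int main() {
    ReverbState state;
    state.parseHeader(true, true, 2.5f, 0.3f);    // full update when the zone settings change
    state.parseHeader(false, false, 0.0f, 0.0f);  // typical packet: bitset only, state unchanged
    std::printf("hasReverb=%d reverbTime=%.1f wetLevel=%.1f\n",
                state.hasReverb, state.reverbTime, state.wetLevel);
    return 0;
}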
InboundAudioStream.h

@@ -45,6 +45,10 @@ const int DEFAULT_WINDOW_SECONDS_FOR_DESIRED_CALC_ON_TOO_MANY_STARVES = 50;
 const int DEFAULT_WINDOW_SECONDS_FOR_DESIRED_REDUCTION = 10;
 const bool DEFAULT_REPETITION_WITH_FADE = true;
 
+// Mixed Audio bitset
+const int HAS_DATA_BIT = 0; // 1st bit
+const int HAS_REVERB_BIT = 1; // 2nd bit
+
 class InboundAudioStream : public NodeData {
     Q_OBJECT
 public:
@@ -158,6 +162,8 @@ public:
     bool hasReverb() const { return _hasReverb; }
     float getRevebTime() const { return _reverbTime; }
     float getWetLevel() const { return _wetLevel; }
+    void setReverb(float reverbTime, float wetLevel);
+    void clearReverb() { _hasReverb = false; }
 
 public slots:
     /// This function should be called every second for all the stats to function properly. If dynamic jitter buffers