Mirror of https://github.com/lubosz/overte.git, synced 2025-04-24 07:13:57 +02:00
Stream reverb settings from mixer to interface
This commit is contained in:
parent 86b87770c2
commit 37b47b52d3
4 changed files with 70 additions and 10 deletions
AudioMixer.cpp

@@ -428,8 +428,8 @@ int AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData* l
 }
 
 int AudioMixer::prepareMixForListeningNode(Node* node) {
-    AvatarAudioStream* nodeAudioStream = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioStream();
-    AudioMixerClientData* listenerNodeData = (AudioMixerClientData*)node->getLinkedData();
+    AvatarAudioStream* nodeAudioStream = static_cast<AudioMixerClientData*>(node->getLinkedData())->getAvatarAudioStream();
+    AudioMixerClientData* listenerNodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
 
     // zero out the client mix for this node
     memset(_preMixSamples, 0, sizeof(_preMixSamples));
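This first hunk only swaps C-style casts for static_cast. As a quick aside on why that matters (the stand-in types below are illustrative, not the mixer's own classes): static_cast rejects a downcast between unrelated types at compile time, while a C-style cast silently reinterprets the pointer.

// Illustrative stand-in types, not the mixer's own classes.
struct NodeData {};
struct AudioMixerClientData : NodeData {};
struct Unrelated {};

void castExample(NodeData* linkedData) {
    // OK: downcast within a known hierarchy, checked against the type system.
    AudioMixerClientData* data = static_cast<AudioMixerClientData*>(linkedData);
    // Unrelated* bad = static_cast<Unrelated*>(linkedData); // rejected by the compiler
    // Unrelated* ugly = (Unrelated*)linkedData;             // accepted, undefined behavior
    (void)data; // silence unused-variable warnings
}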
@@ -730,6 +730,30 @@ void AudioMixer::run() {
             memcpy(dataAt, &sequence, sizeof(quint16));
             dataAt += sizeof(quint16);
 
+            // Pack stream properties
+            for (int i = 0; i < _zoneReverbSettings.size(); ++i) {
+                glm::vec3 streamPosition = static_cast<AudioMixerClientData*>(node->getLinkedData())->getAvatarAudioStream()->getPosition();
+                if (_audioZones[_zoneReverbSettings[i].zone].contains(streamPosition)) {
+                    bool hasReverb = true;
+                    float reverbTime = _zoneReverbSettings[i].reverbTime;
+                    float wetLevel = _zoneReverbSettings[i].wetLevel;
+
+                    memcpy(dataAt, &hasReverb, sizeof(bool));
+                    dataAt += sizeof(bool);
+                    memcpy(dataAt, &reverbTime, sizeof(float));
+                    dataAt += sizeof(float);
+                    memcpy(dataAt, &wetLevel, sizeof(float));
+                    dataAt += sizeof(float);
+
+                    qDebug() << "Out" << sequence << reverbTime << wetLevel;
+                } else {
+                    bool hasReverb = false;
+                    memcpy(dataAt, &hasReverb, sizeof(bool));
+                    dataAt += sizeof(bool);
+                }
+            }
+
+
             // pack mixed audio samples
             memcpy(dataAt, _mixSamples, NETWORK_BUFFER_LENGTH_BYTES_STEREO);
             dataAt += NETWORK_BUFFER_LENGTH_BYTES_STEREO;
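In effect, the mixer now tags each mixed-audio packet with the reverb settings of the zone containing the listener's stream. A sketch of the writer side, assuming a single configured reverb zone and sizeof(bool) == 1 (the wire size of bool is implementation-defined, so both ends must be built with compatible ABIs). One caution worth noting: the loop above emits one flag per zone with no break, while the parser further down reads exactly one flag, so the two sides only line up in the single-zone case.

#include <cstring>

// Hypothetical helper showing the byte layout written after the sequence number:
//   [quint16 sequence][bool hasReverb][float reverbTime][float wetLevel][int16_t mix...]
char* packReverb(char* dataAt, bool hasReverb, float reverbTime, float wetLevel) {
    memcpy(dataAt, &hasReverb, sizeof(bool));
    dataAt += sizeof(bool);
    if (hasReverb) {
        // the two parameters are only present when the flag is set
        memcpy(dataAt, &reverbTime, sizeof(float));
        dataAt += sizeof(float);
        memcpy(dataAt, &wetLevel, sizeof(float));
        dataAt += sizeof(float);
    }
    return dataAt; // caller continues packing the mixed samples here
}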
Audio.cpp

@@ -787,7 +787,6 @@ void Audio::handleAudioInput() {
     NodeList* nodeList = NodeList::getInstance();
     SharedNodePointer audioMixer = nodeList->soloNodeOfType(NodeType::AudioMixer);
 
-
     if (_recorder && _recorder.data()->isRecording()) {
         _recorder.data()->record(reinterpret_cast<char*>(networkAudioSamples), numNetworkBytes);
     }
@@ -907,12 +906,10 @@ void Audio::addLastFrameRepeatedWithFadeToScope(int samplesPerChannel) {
 }
 
 void Audio::processReceivedSamples(const QByteArray& inputBuffer, QByteArray& outputBuffer) {
-
     const int numNetworkOutputSamples = inputBuffer.size() / sizeof(int16_t);
     const int numDeviceOutputSamples = numNetworkOutputSamples * (_outputFormat.sampleRate() * _outputFormat.channelCount())
         / (_desiredOutputFormat.sampleRate() * _desiredOutputFormat.channelCount());
-
 
     outputBuffer.resize(numDeviceOutputSamples * sizeof(int16_t));
 
     const int16_t* receivedSamples;
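The numDeviceOutputSamples computation scales the decoded sample count by the ratio of (sample rate x channel count) between the device format and the network format. A worked example with assumed numbers (the real values come from _outputFormat and _desiredOutputFormat at runtime):

#include <cassert>

int main() {
    const int numNetworkOutputSamples = 512;             // e.g. one decoded network buffer
    const int outputRate = 48000, outputChannels = 2;    // assumed device format
    const int desiredRate = 24000, desiredChannels = 2;  // assumed network format

    // 512 * (48000 * 2) / (24000 * 2) = 512 * 2 = 1024 device samples
    const int numDeviceOutputSamples = numNetworkOutputSamples
        * (outputRate * outputChannels) / (desiredRate * desiredChannels);
    assert(numDeviceOutputSamples == 1024);
    return 0;
}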
@@ -952,13 +949,28 @@ void Audio::processReceivedSamples(const QByteArray& inputBuffer, QByteArray& ou
                                            numDeviceOutputSamples,
                                            _desiredOutputFormat, _outputFormat);
 
-    if(_reverb) {
+    if (_receivedAudioStream.hasReverb()) {
+        bool reverbChanged = false;
+
+        if (_reverbOptions.getReverbTime() != _receivedAudioStream.getRevebTime()) {
+            _reverbOptions.setReverbTime(_receivedAudioStream.getRevebTime());
+            reverbChanged = true;
+        }
+        if (_reverbOptions.getWetLevel() != _receivedAudioStream.getWetLevel()) {
+            _reverbOptions.setWetLevel(_receivedAudioStream.getWetLevel());
+            reverbChanged = true;
+        }
+        if (reverbChanged) {
+            initGverb();
+        }
+    }
+
+    if(_reverb || _receivedAudioStream.hasReverb()) {
         addReverb((int16_t*)outputBuffer.data(), numDeviceOutputSamples, _outputFormat);
     }
 }
 
 void Audio::addReceivedAudioToStream(const QByteArray& audioByteArray) {
-
     if (_audioOutput) {
         // Audio output must exist and be correctly set up if we're going to process received audio
         _receivedAudioStream.parseData(audioByteArray);
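The exact != comparisons above act as a change detector rather than a float-tolerance hazard: the values arrive bit-for-bit via memcpy, so inequality means a genuinely new setting was streamed, and initGverb() rebuilds the reverb engine only then instead of on every frame. A minimal standalone sketch of that pattern (hypothetical helper, not Audio's API):

struct ReverbParams {
    float reverbTime;
    float wetLevel;
};

// Returns true when `received` differs from `current`; the caller only
// re-initializes the (comparatively expensive) reverb engine when it does.
bool applyIfChanged(ReverbParams& current, const ReverbParams& received) {
    bool changed = false;
    if (current.reverbTime != received.reverbTime) {
        current.reverbTime = received.reverbTime;
        changed = true;
    }
    if (current.wetLevel != received.wetLevel) {
        current.wetLevel = received.wetLevel;
        changed = true;
    }
    return changed;
}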
InboundAudioStream.cpp

@@ -44,7 +44,8 @@ InboundAudioStream::InboundAudioStream(int numFrameSamples, int numFramesCapacit
     _framesAvailableStat(),
     _currentJitterBufferFrames(0),
     _timeGapStatsForStatsPacket(0, STATS_FOR_STATS_PACKET_WINDOW_SECONDS),
-    _repetitionWithFade(settings._repetitionWithFade)
+    _repetitionWithFade(settings._repetitionWithFade),
+    _hasReverb(false)
 {
 }
@@ -162,9 +163,23 @@ int InboundAudioStream::parseData(const QByteArray& packet) {
 }
 
 int InboundAudioStream::parseStreamProperties(PacketType type, const QByteArray& packetAfterSeqNum, int& numAudioSamples) {
+    int read = 0;
+    if (type == PacketTypeMixedAudio) {
+        memcpy(&_hasReverb, packetAfterSeqNum.data() + read, sizeof(bool));
+        read += sizeof(bool);
+
+        if (_hasReverb) {
+            memcpy(&_reverbTime, packetAfterSeqNum.data() + read, sizeof(float));
+            read += sizeof(float);
+            memcpy(&_wetLevel, packetAfterSeqNum.data() + read, sizeof(float));
+            read += sizeof(float);
+            qDebug() << "In" << _reverbTime << _wetLevel;
+        }
+    }
+
     // mixed audio packets do not have any info between the seq num and the audio data.
-    numAudioSamples = packetAfterSeqNum.size() / sizeof(int16_t);
-    return 0;
+    numAudioSamples = (packetAfterSeqNum.size() - read) / sizeof(int16_t);
+    return read;
 }
 
 int InboundAudioStream::parseAudioData(PacketType type, const QByteArray& packetAfterStreamProperties, int numAudioSamples) {
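The parser mirrors the mixer's memcpy sequence byte for byte, and the returned read count keeps the property bytes out of the audio-sample count (the retained comment about "no info between the seq num and the audio data" now reads stale for reverb-tagged packets). A self-contained round-trip check of the property block, under the same single-zone assumption as above:

#include <cassert>
#include <cstring>

int main() {
    char packet[sizeof(bool) + 2 * sizeof(float)];
    char* dataAt = packet;

    // Write the block the way the mixer does.
    bool hasReverb = true;
    float reverbTime = 0.5f, wetLevel = 0.2f;
    memcpy(dataAt, &hasReverb, sizeof(bool));   dataAt += sizeof(bool);
    memcpy(dataAt, &reverbTime, sizeof(float)); dataAt += sizeof(float);
    memcpy(dataAt, &wetLevel, sizeof(float));   dataAt += sizeof(float);

    // Read it back the way parseStreamProperties does.
    int read = 0;
    bool inHasReverb = false;
    float inReverbTime = 0.0f, inWetLevel = 0.0f;
    memcpy(&inHasReverb, packet + read, sizeof(bool));
    read += sizeof(bool);
    if (inHasReverb) {
        memcpy(&inReverbTime, packet + read, sizeof(float));
        read += sizeof(float);
        memcpy(&inWetLevel, packet + read, sizeof(float));
        read += sizeof(float);
    }
    // `read` is the property byte count excluded from the sample count.
    assert(read == (int)sizeof(packet));
    assert(inReverbTime == reverbTime && inWetLevel == wetLevel);
    return 0;
}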
InboundAudioStream.h

@@ -154,6 +154,10 @@
     int getOverflowCount() const { return _ringBuffer.getOverflowCount(); }
 
     int getPacketsReceived() const { return _incomingSequenceNumberStats.getReceived(); }
 
+    bool hasReverb() const { return _hasReverb; }
+    float getRevebTime() const { return _reverbTime; }
+    float getWetLevel() const { return _wetLevel; }
+
 public slots:
     /// This function should be called every second for all the stats to function properly. If dynamic jitter buffers
@@ -243,6 +247,11 @@
     MovingMinMaxAvg<quint64> _timeGapStatsForStatsPacket;
 
     bool _repetitionWithFade;
 
+    // Reverb properties
+    bool _hasReverb;
+    float _reverbTime;
+    float _wetLevel;
+
 };
 
 float calculateRepeatedFrameFadeFactor(int indexOfRepeat);