From 1b129a43b59876c15fdb4de41c1877bd38a960a0 Mon Sep 17 00:00:00 2001
From: Stephen Birarda
Date: Mon, 21 Oct 2013 14:27:55 -0700
Subject: [PATCH] initial changes to allow for multiple buffers per client

---
 assignment-client/src/audio/AudioMixer.cpp | 409 +++++++++---------
 assignment-client/src/audio/AudioMixer.h   |  14 +
 .../src/audio/AudioMixerClientData.cpp     |  27 ++
 .../src/audio/AudioMixerClientData.h       |   7 +-
 .../src/audio/AvatarAudioRingBuffer.h      |   2 +-
 libraries/shared/src/Node.cpp              |   4 -
 libraries/shared/src/Node.h                |   4 +-
 7 files changed, 254 insertions(+), 213 deletions(-)

diff --git a/assignment-client/src/audio/AudioMixer.cpp b/assignment-client/src/audio/AudioMixer.cpp
index a35a8c9c04..5ee507265e 100644
--- a/assignment-client/src/audio/AudioMixer.cpp
+++ b/assignment-client/src/audio/AudioMixer.cpp
@@ -48,8 +48,6 @@
 
 #include "AudioMixer.h"
 
-const unsigned short MIXER_LISTEN_PORT = 55443;
-
 const short JITTER_BUFFER_MSECS = 12;
 const short JITTER_BUFFER_SAMPLES = JITTER_BUFFER_MSECS * (SAMPLE_RATE / 1000.0);
 
@@ -70,6 +68,201 @@ AudioMixer::AudioMixer(const unsigned char* dataBuffer, int numBytes) : Assignme
 
 }
 
+void AudioMixer::addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuffer* bufferToAdd,
+                                                          AvatarAudioRingBuffer* listeningNodeBuffer) {
+    float bearingRelativeAngleToSource = 0.0f;
+    float attenuationCoefficient = 1.0f;
+    int numSamplesDelay = 0;
+    float weakChannelAmplitudeRatio = 1.0f;
+
+    const int PHASE_DELAY_AT_90 = 20;
+
+    static stk::StkFrames stkFrameBuffer(BUFFER_LENGTH_SAMPLES_PER_CHANNEL, 1);
+
+    stk::TwoPole* otherNodeTwoPole = NULL;
+
+    if (bufferToAdd != listeningNodeBuffer) {
+        // if the two buffer pointers do not match then these are different buffers
+
+        glm::vec3 listenerPosition = listeningNodeBuffer->getPosition();
+        glm::vec3 relativePosition = bufferToAdd->getPosition() - listeningNodeBuffer->getPosition();
+        glm::quat inverseOrientation = glm::inverse(listeningNodeBuffer->getOrientation());
+
+        float distanceSquareToSource = glm::dot(relativePosition, relativePosition);
+        float radius = 0.0f;
+
+        if (bufferToAdd->getType() == PositionalAudioRingBuffer::Injector) {
+            InjectedAudioRingBuffer* injectedBuffer = (InjectedAudioRingBuffer*) bufferToAdd;
+            radius = injectedBuffer->getRadius();
+            attenuationCoefficient *= injectedBuffer->getAttenuationRatio();
+        }
+
+        if (radius == 0 || (distanceSquareToSource > radius * radius)) {
+            // this is either not a spherical source, or the listener is outside the sphere
+
+            if (radius > 0) {
+                // this is a spherical source - the distance used for the coefficient
+                // needs to be the distance to the closest point on the sphere's boundary
+
+                // override the distance to the node with the distance to the point on the
+                // boundary of the sphere
+                distanceSquareToSource -= (radius * radius);
+
+            } else {
+                // calculate the angle of delivery for off-axis attenuation
+                glm::vec3 rotatedListenerPosition = glm::inverse(bufferToAdd->getOrientation()) * relativePosition;
+
+                float angleOfDelivery = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f),
+                                                   glm::normalize(rotatedListenerPosition));
+
+                const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
+                const float OFF_AXIS_ATTENUATION_FORMULA_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;
+
+                float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION +
+                    (OFF_AXIS_ATTENUATION_FORMULA_STEP * (angleOfDelivery / 90.0f));
+
+                // multiply the current attenuation coefficient by the calculated off axis coefficient
+                attenuationCoefficient *= offAxisCoefficient;
+            }
+
+            glm::vec3 rotatedSourcePosition = inverseOrientation * relativePosition;
+
+            const float DISTANCE_SCALE = 2.5f;
+            const float GEOMETRIC_AMPLITUDE_SCALAR = 0.3f;
+            const float DISTANCE_LOG_BASE = 2.5f;
+            const float DISTANCE_SCALE_LOG = logf(DISTANCE_SCALE) / logf(DISTANCE_LOG_BASE);
+
+            // calculate the distance coefficient using the distance to this node
+            float distanceCoefficient = powf(GEOMETRIC_AMPLITUDE_SCALAR,
+                                             DISTANCE_SCALE_LOG +
+                                             (0.5f * logf(distanceSquareToSource) / logf(DISTANCE_LOG_BASE)) - 1);
+            distanceCoefficient = std::min(1.0f, distanceCoefficient);
+
+            // multiply the current attenuation coefficient by the distance coefficient
+            attenuationCoefficient *= distanceCoefficient;
+
+            // project the rotated source position vector onto the XZ plane
+            rotatedSourcePosition.y = 0.0f;
+
+            // produce an oriented angle about the y-axis
+            bearingRelativeAngleToSource = glm::orientedAngle(glm::vec3(0.0f, 0.0f, -1.0f),
+                                                              glm::normalize(rotatedSourcePosition),
+                                                              glm::vec3(0.0f, 1.0f, 0.0f));
+
+            const float PHASE_AMPLITUDE_RATIO_AT_90 = 0.5;
+
+            // figure out the number of samples of delay and the ratio of the amplitude
+            // in the weak channel for audio spatialization
+            float sinRatio = fabsf(sinf(glm::radians(bearingRelativeAngleToSource)));
+            numSamplesDelay = PHASE_DELAY_AT_90 * sinRatio;
+            weakChannelAmplitudeRatio = 1 - (PHASE_AMPLITUDE_RATIO_AT_90 * sinRatio);
+
+            // grab the TwoPole object for this source, add it if it doesn't exist
+            TwoPoleNodeMap& nodeTwoPoles = listeningNodeBuffer->getTwoPoles();
+            TwoPoleNodeMap::iterator twoPoleIterator = nodeTwoPoles.find(bufferToAdd);
+
+            if (twoPoleIterator == nodeTwoPoles.end()) {
+                // set up the TwoPole filter for this source for this client
+                otherNodeTwoPole = nodeTwoPoles[bufferToAdd] = new stk::TwoPole;
+            } else {
+                otherNodeTwoPole = twoPoleIterator->second;
+            }
+
+            // calculate the resonance for this TwoPole based on angle to source
+            float TWO_POLE_CUT_OFF_FREQUENCY = 800.0f;
+            float TWO_POLE_MAX_FILTER_STRENGTH = 0.4f;
+
+            otherNodeTwoPole->setResonance(TWO_POLE_CUT_OFF_FREQUENCY,
+                                           TWO_POLE_MAX_FILTER_STRENGTH
+                                           * fabsf(bearingRelativeAngleToSource) / 180.0f,
+                                           true);
+        }
+    }
+
+    int16_t* sourceBuffer = bufferToAdd->getNextOutput();
+
+    int16_t* goodChannel = (bearingRelativeAngleToSource > 0.0f)
+        ? _clientSamples
+        : _clientSamples + BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
+    int16_t* delayedChannel = (bearingRelativeAngleToSource > 0.0f)
+        ? _clientSamples + BUFFER_LENGTH_SAMPLES_PER_CHANNEL
+        : _clientSamples;
+
+    int16_t* delaySamplePointer = bufferToAdd->getNextOutput() == bufferToAdd->getBuffer()
+        ? bufferToAdd->getBuffer() + RING_BUFFER_LENGTH_SAMPLES - numSamplesDelay
+        : bufferToAdd->getNextOutput() - numSamplesDelay;
+
+    for (int s = 0; s < BUFFER_LENGTH_SAMPLES_PER_CHANNEL; s++) {
+        // load up the stkFrameBuffer with this source's samples
+        stkFrameBuffer[s] = (stk::StkFloat) sourceBuffer[s];
+    }
+
+    // perform the TwoPole effect on the stkFrameBuffer
+    if (otherNodeTwoPole) {
+        otherNodeTwoPole->tick(stkFrameBuffer);
+    }
+
+    for (int s = 0; s < BUFFER_LENGTH_SAMPLES_PER_CHANNEL; s++) {
+        if (s < numSamplesDelay) {
+            // pull the earlier sample for the delayed channel
+            int earlierSample = delaySamplePointer[s] * attenuationCoefficient * weakChannelAmplitudeRatio;
+
+            delayedChannel[s] = glm::clamp(delayedChannel[s] + earlierSample,
+                                           MIN_SAMPLE_VALUE,
+                                           MAX_SAMPLE_VALUE);
+        }
+
+        int16_t currentSample = stkFrameBuffer[s] * attenuationCoefficient;
+
+        goodChannel[s] = glm::clamp(goodChannel[s] + currentSample,
+                                    MIN_SAMPLE_VALUE,
+                                    MAX_SAMPLE_VALUE);
+
+        if (s + numSamplesDelay < BUFFER_LENGTH_SAMPLES_PER_CHANNEL) {
+            int sumSample = delayedChannel[s + numSamplesDelay]
+                + (currentSample * weakChannelAmplitudeRatio);
+            delayedChannel[s + numSamplesDelay] = glm::clamp(sumSample,
+                                                             MIN_SAMPLE_VALUE,
+                                                             MAX_SAMPLE_VALUE);
+        }
+
+        if (s >= BUFFER_LENGTH_SAMPLES_PER_CHANNEL - PHASE_DELAY_AT_90) {
+            // this could be a delayed sample on the next pass
+            // so store the affected sample back in the ring buffer
+            bufferToAdd->getNextOutput()[s] = (int16_t) stkFrameBuffer[s];
+        }
+    }
+}
+
+void AudioMixer::prepareMixForListeningNode(Node* node) {
+    NodeList* nodeList = NodeList::getInstance();
+
+    AvatarAudioRingBuffer* nodeRingBuffer = ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer();
+
+    // zero out the client mix for this node
+    memset(_clientSamples, 0, sizeof(_clientSamples));
+
+    // loop through all other nodes that have sufficient audio to mix
+    for (NodeList::iterator otherNode = nodeList->begin(); otherNode != nodeList->end(); otherNode++) {
+        if (otherNode->getLinkedData()) {
+
+            AudioMixerClientData* otherNodeClientData = (AudioMixerClientData*) otherNode->getLinkedData();
+
+            // enumerate the audio ring buffers attached to the otherNode and add all that should be added to the mix
+            for (int i = 0; i < otherNodeClientData->getRingBuffers().size(); i++) {
+                PositionalAudioRingBuffer* otherNodeBuffer = otherNodeClientData->getRingBuffers()[i];
+
+                if ((*otherNode != *node
+                     || otherNodeBuffer->getType() != PositionalAudioRingBuffer::Microphone
+                     || nodeRingBuffer->shouldLoopbackForNode())
+                    && otherNodeBuffer->willBeAddedToMix()) {
+                    addBufferToMixForListeningNodeWithBuffer(otherNodeBuffer, nodeRingBuffer);
+                }
+            }
+        }
+    }
+}
+
 void AudioMixer::run() {
     // change the logging target name while this is running
     Logging::setTargetName(AUDIO_MIXER_LOGGING_TARGET_NAME);
@@ -86,7 +279,7 @@ void AudioMixer::run() {
 
     nodeList->startSilentNodeRemovalThread();
 
-    unsigned char* packetData = new unsigned char[MAX_PACKET_SIZE];
+    unsigned char packetData[MAX_PACKET_SIZE] = {};
 
     sockaddr* nodeAddress = new sockaddr;
 
@@ -100,8 +293,6 @@ void AudioMixer::run() {
     unsigned char clientPacket[BUFFER_LENGTH_BYTES_STEREO + numBytesPacketHeader];
     populateTypeAndVersion(clientPacket, PACKET_TYPE_MIXED_AUDIO);
 
-    int16_t clientSamples[BUFFER_LENGTH_SAMPLES_PER_CHANNEL * 2] = {};
-
     gettimeofday(&startTime, NULL);
 
     timeval lastDomainServerCheckIn = {};
@@ -110,8 +301,6 @@ void AudioMixer::run() {
     float sumFrameTimePercentages = 0.0f;
    int numStatCollections = 0;
 
-    stk::StkFrames stkFrameBuffer(BUFFER_LENGTH_SAMPLES_PER_CHANNEL, 1);
-
     // if we'll be sending stats, call the Logstash::socket() method to make it load the logstash IP outside the loop
     if (Logging::shouldSendStats()) {
         Logging::socket();
     }
@@ -147,209 +336,25 @@ void AudioMixer::run() {
         nodeList->possiblyPingInactiveNodes();
 
         for (NodeList::iterator node = nodeList->begin(); node != nodeList->end(); node++) {
-            PositionalAudioRingBuffer* positionalRingBuffer = (PositionalAudioRingBuffer*) node->getLinkedData();
-            if (positionalRingBuffer && positionalRingBuffer->shouldBeAddedToMix(JITTER_BUFFER_SAMPLES)) {
-                // this is a ring buffer that is ready to go
-                // set its flag so we know to push its buffer when all is said and done
-                positionalRingBuffer->setWillBeAddedToMix(true);
+            if (node->getLinkedData()) {
+                ((AudioMixerClientData*) node->getLinkedData())->checkBuffersBeforeFrameSend(JITTER_BUFFER_SAMPLES);
             }
         }
 
         for (NodeList::iterator node = nodeList->begin(); node != nodeList->end(); node++) {
-
-            const int PHASE_DELAY_AT_90 = 20;
-
-            if (node->getType() == NODE_TYPE_AGENT && node->getActiveSocket() && node->getLinkedData()) {
-                AvatarAudioRingBuffer* nodeRingBuffer = (AvatarAudioRingBuffer*) node->getLinkedData();
+            if (node->getType() == NODE_TYPE_AGENT && node->getActiveSocket() && node->getLinkedData()
+                && ((AudioMixerClientData*) node->getLinkedData())->getAvatarAudioRingBuffer()) {
+                prepareMixForListeningNode(&(*node));
 
-                // zero out the client mix for this node
-                memset(clientSamples, 0, sizeof(clientSamples));
-
-                // loop through all other nodes that have sufficient audio to mix
-                for (NodeList::iterator otherNode = nodeList->begin(); otherNode != nodeList->end(); otherNode++) {
-                    if (otherNode->getLinkedData()
-                        && ((PositionalAudioRingBuffer*) otherNode->getLinkedData())->willBeAddedToMix()
-                        && (otherNode != node || (otherNode == node && nodeRingBuffer->shouldLoopbackForNode()))) {
-                        PositionalAudioRingBuffer* otherNodeBuffer = (PositionalAudioRingBuffer*) otherNode->getLinkedData();
-                        // based on our listen mode we will do this mixing...
-
-                        float bearingRelativeAngleToSource = 0.0f;
-                        float attenuationCoefficient = 1.0f;
-                        int numSamplesDelay = 0;
-                        float weakChannelAmplitudeRatio = 1.0f;
-
-                        stk::TwoPole* otherNodeTwoPole = NULL;
-
-                        if (otherNode != node) {
-
-                            glm::vec3 listenerPosition = nodeRingBuffer->getPosition();
-                            glm::vec3 relativePosition = otherNodeBuffer->getPosition() - nodeRingBuffer->getPosition();
-                            glm::quat inverseOrientation = glm::inverse(nodeRingBuffer->getOrientation());
-
-                            float distanceSquareToSource = glm::dot(relativePosition, relativePosition);
-                            float radius = 0.0f;
-
-                            if (otherNode->getType() == NODE_TYPE_AUDIO_INJECTOR) {
-                                InjectedAudioRingBuffer* injectedBuffer = (InjectedAudioRingBuffer*) otherNodeBuffer;
-                                radius = injectedBuffer->getRadius();
-                                attenuationCoefficient *= injectedBuffer->getAttenuationRatio();
-                            }
-
-                            if (radius == 0 || (distanceSquareToSource > radius * radius)) {
-                                // this is either not a spherical source, or the listener is outside the sphere
-
-                                if (radius > 0) {
-                                    // this is a spherical source - the distance used for the coefficient
-                                    // needs to be the closest point on the boundary to the source
-
-                                    // ovveride the distance to the node with the distance to the point on the
-                                    // boundary of the sphere
-                                    distanceSquareToSource -= (radius * radius);
-
-                                } else {
-                                    // calculate the angle delivery for off-axis attenuation
-                                    glm::vec3 rotatedListenerPosition = glm::inverse(otherNodeBuffer->getOrientation())
-                                        * relativePosition;
-
-                                    float angleOfDelivery = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f),
-                                                                       glm::normalize(rotatedListenerPosition));
-
-                                    const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
-                                    const float OFF_AXIS_ATTENUATION_FORMULA_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;
-
-                                    float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION +
-                                        (OFF_AXIS_ATTENUATION_FORMULA_STEP * (angleOfDelivery / 90.0f));
-
-                                    // multiply the current attenuation coefficient by the calculated off axis coefficient
-                                    attenuationCoefficient *= offAxisCoefficient;
-                                }
-
-                                glm::vec3 rotatedSourcePosition = inverseOrientation * relativePosition;
-
-                                const float DISTANCE_SCALE = 2.5f;
-                                const float GEOMETRIC_AMPLITUDE_SCALAR = 0.3f;
-                                const float DISTANCE_LOG_BASE = 2.5f;
-                                const float DISTANCE_SCALE_LOG = logf(DISTANCE_SCALE) / logf(DISTANCE_LOG_BASE);
-
-                                // calculate the distance coefficient using the distance to this node
-                                float distanceCoefficient = powf(GEOMETRIC_AMPLITUDE_SCALAR,
-                                                                 DISTANCE_SCALE_LOG +
-                                                                 (0.5f * logf(distanceSquareToSource) / logf(DISTANCE_LOG_BASE)) - 1);
-                                distanceCoefficient = std::min(1.0f, distanceCoefficient);
-
-                                // multiply the current attenuation coefficient by the distance coefficient
-                                attenuationCoefficient *= distanceCoefficient;
-
-                                // project the rotated source position vector onto the XZ plane
-                                rotatedSourcePosition.y = 0.0f;
-
-                                // produce an oriented angle about the y-axis
-                                bearingRelativeAngleToSource = glm::orientedAngle(glm::vec3(0.0f, 0.0f, -1.0f),
-                                                                                  glm::normalize(rotatedSourcePosition),
-                                                                                  glm::vec3(0.0f, 1.0f, 0.0f));
-
-                                const float PHASE_AMPLITUDE_RATIO_AT_90 = 0.5;
-
-                                // figure out the number of samples of delay and the ratio of the amplitude
-                                // in the weak channel for audio spatialization
-                                float sinRatio = fabsf(sinf(glm::radians(bearingRelativeAngleToSource)));
-                                numSamplesDelay = PHASE_DELAY_AT_90 * sinRatio;
-                                weakChannelAmplitudeRatio = 1 - (PHASE_AMPLITUDE_RATIO_AT_90 * sinRatio);
-
-                                // grab the TwoPole object for this source, add it if it doesn't exist
-                                TwoPoleNodeMap& nodeTwoPoles = nodeRingBuffer->getTwoPoles();
-                                TwoPoleNodeMap::iterator twoPoleIterator = nodeTwoPoles.find(otherNode->getUUID());
-
-                                if (twoPoleIterator == nodeTwoPoles.end()) {
-                                    // setup the freeVerb effect for this source for this client
-                                    otherNodeTwoPole = nodeTwoPoles[otherNode->getUUID()] = new stk::TwoPole;
-                                } else {
-                                    otherNodeTwoPole = twoPoleIterator->second;
-                                }
-
-                                // calculate the reasonance for this TwoPole based on angle to source
-                                float TWO_POLE_CUT_OFF_FREQUENCY = 800.0f;
-                                float TWO_POLE_MAX_FILTER_STRENGTH = 0.4f;
-
-                                otherNodeTwoPole->setResonance(TWO_POLE_CUT_OFF_FREQUENCY,
-                                                               TWO_POLE_MAX_FILTER_STRENGTH
-                                                               * fabsf(bearingRelativeAngleToSource) / 180.0f,
-                                                               true);
-                            }
-                        }
-
-                        int16_t* sourceBuffer = otherNodeBuffer->getNextOutput();
-
-                        int16_t* goodChannel = (bearingRelativeAngleToSource > 0.0f)
-                            ? clientSamples
-                            : clientSamples + BUFFER_LENGTH_SAMPLES_PER_CHANNEL;
-                        int16_t* delayedChannel = (bearingRelativeAngleToSource > 0.0f)
-                            ? clientSamples + BUFFER_LENGTH_SAMPLES_PER_CHANNEL
-                            : clientSamples;
-
-                        int16_t* delaySamplePointer = otherNodeBuffer->getNextOutput() == otherNodeBuffer->getBuffer()
-                            ? otherNodeBuffer->getBuffer() + RING_BUFFER_LENGTH_SAMPLES - numSamplesDelay
-                            : otherNodeBuffer->getNextOutput() - numSamplesDelay;
-
-                        for (int s = 0; s < BUFFER_LENGTH_SAMPLES_PER_CHANNEL; s++) {
-                            // load up the stkFrameBuffer with this source's samples
-                            stkFrameBuffer[s] = (stk::StkFloat) sourceBuffer[s];
-                        }
-
-                        // perform the TwoPole effect on the stkFrameBuffer
-                        if (otherNodeTwoPole) {
-                            otherNodeTwoPole->tick(stkFrameBuffer);
-                        }
-
-                        for (int s = 0; s < BUFFER_LENGTH_SAMPLES_PER_CHANNEL; s++) {
-                            if (s < numSamplesDelay) {
-                                // pull the earlier sample for the delayed channel
-                                int earlierSample = delaySamplePointer[s] * attenuationCoefficient * weakChannelAmplitudeRatio;
-
-                                delayedChannel[s] = glm::clamp(delayedChannel[s] + earlierSample,
-                                                               MIN_SAMPLE_VALUE,
-                                                               MAX_SAMPLE_VALUE);
-                            }
-
-                            int16_t currentSample = stkFrameBuffer[s] * attenuationCoefficient;
-
-                            goodChannel[s] = glm::clamp(goodChannel[s] + currentSample,
-                                                        MIN_SAMPLE_VALUE,
-                                                        MAX_SAMPLE_VALUE);
-
-                            if (s + numSamplesDelay < BUFFER_LENGTH_SAMPLES_PER_CHANNEL) {
-                                int sumSample = delayedChannel[s + numSamplesDelay]
-                                    + (currentSample * weakChannelAmplitudeRatio);
-                                delayedChannel[s + numSamplesDelay] = glm::clamp(sumSample,
-                                                                                 MIN_SAMPLE_VALUE,
-                                                                                 MAX_SAMPLE_VALUE);
-                            }
-
-                            if (s >= BUFFER_LENGTH_SAMPLES_PER_CHANNEL - PHASE_DELAY_AT_90) {
-                                // this could be a delayed sample on the next pass
-                                // so store the affected back in the ARB
-                                otherNodeBuffer->getNextOutput()[s] = (int16_t) stkFrameBuffer[s];
-                            }
-                        }
-
-                    }
-                }
-
-                memcpy(clientPacket + numBytesPacketHeader, clientSamples, sizeof(clientSamples));
+                memcpy(clientPacket + numBytesPacketHeader, _clientSamples, sizeof(_clientSamples));
                 nodeList->getNodeSocket()->send(node->getActiveSocket(), clientPacket, sizeof(clientPacket));
             }
         }
 
         // push forward the next output pointers for any audio buffers we used
         for (NodeList::iterator node = nodeList->begin(); node != nodeList->end(); node++) {
-            PositionalAudioRingBuffer* nodeBuffer = (PositionalAudioRingBuffer*) node->getLinkedData();
-            if (nodeBuffer && nodeBuffer->willBeAddedToMix()) {
-                nodeBuffer->setNextOutput(nodeBuffer->getNextOutput() + BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
-
-                if (nodeBuffer->getNextOutput() >= nodeBuffer->getBuffer() + RING_BUFFER_LENGTH_SAMPLES) {
-                    nodeBuffer->setNextOutput(nodeBuffer->getBuffer());
-                }
-                nodeBuffer->setWillBeAddedToMix(false);
+            if (node->getLinkedData()) {
+                ((AudioMixerClientData*) node->getLinkedData())->pushBuffersAfterFrameSend();
             }
         }
 
@@ -367,12 +372,6 @@ void AudioMixer::run() {
                 if (matchingNode) {
                     nodeList->updateNodeWithData(matchingNode, nodeAddress, packetData, receivedBytes);
-
-                    if (packetData[0] != PACKET_TYPE_INJECT_AUDIO
-                        && std::isnan(((PositionalAudioRingBuffer *)matchingNode->getLinkedData())->getOrientation().x)) {
-                        // kill off this node - temporary solution to mixer crash on mac sleep
-                        matchingNode->setAlive(false);
-                    }
                 }
             } else {
                 // let processNodeData handle it.
diff --git a/assignment-client/src/audio/AudioMixer.h b/assignment-client/src/audio/AudioMixer.h
index 565bae180a..4cf0398b9b 100644
--- a/assignment-client/src/audio/AudioMixer.h
+++ b/assignment-client/src/audio/AudioMixer.h
@@ -10,6 +10,10 @@
 #define __hifi__AudioMixer__
 
 #include <Assignment.h>
+#include <AudioRingBuffer.h>
+
+class PositionalAudioRingBuffer;
+class AvatarAudioRingBuffer;
 
 /// Handles assignments of type AudioMixer - mixing streams of audio and re-distributing to various clients.
 class AudioMixer : public Assignment {
@@ -18,6 +22,16 @@ public:
 
     /// runs the audio mixer
     void run();
+private:
+    /// adds one buffer to the mix for a listening node
+    void addBufferToMixForListeningNodeWithBuffer(PositionalAudioRingBuffer* bufferToAdd,
+                                                  AvatarAudioRingBuffer* listeningNodeBuffer);
+
+    /// prepares a mix of all other nodes' audio for one listening Node
+    void prepareMixForListeningNode(Node* node);
+
+
+    int16_t _clientSamples[BUFFER_LENGTH_SAMPLES_PER_CHANNEL * 2];
 };
 
 #endif /* defined(__hifi__AudioMixer__) */
diff --git a/assignment-client/src/audio/AudioMixerClientData.cpp b/assignment-client/src/audio/AudioMixerClientData.cpp
index c26d7129ec..586a33dab5 100644
--- a/assignment-client/src/audio/AudioMixerClientData.cpp
+++ b/assignment-client/src/audio/AudioMixerClientData.cpp
@@ -74,3 +74,30 @@ int AudioMixerClientData::parseData(unsigned char* packetData, int numBytes) {
 
     return 0;
 }
+
+void AudioMixerClientData::checkBuffersBeforeFrameSend(int jitterBufferLengthSamples) {
+    for (int i = 0; i < _ringBuffers.size(); i++) {
+        if (_ringBuffers[i]->shouldBeAddedToMix(jitterBufferLengthSamples)) {
+            // this is a ring buffer that is ready to go
+            // set its flag so we know to push its buffer when all is said and done
+            _ringBuffers[i]->setWillBeAddedToMix(true);
+        }
+    }
+}
+
+void AudioMixerClientData::pushBuffersAfterFrameSend() {
+    for (int i = 0; i < _ringBuffers.size(); i++) {
+        if (_ringBuffers[i]->willBeAddedToMix()) {
+            // this was a used buffer, push the output pointer forwards
+            PositionalAudioRingBuffer* audioBuffer = _ringBuffers[i];
+
+            audioBuffer->setNextOutput(audioBuffer->getNextOutput() + BUFFER_LENGTH_SAMPLES_PER_CHANNEL);
+
+            if (audioBuffer->getNextOutput() >= audioBuffer->getBuffer() + RING_BUFFER_LENGTH_SAMPLES) {
+                audioBuffer->setNextOutput(audioBuffer->getBuffer());
+            }
+
+            audioBuffer->setWillBeAddedToMix(false);
+        }
+    }
+}
diff --git a/assignment-client/src/audio/AudioMixerClientData.h b/assignment-client/src/audio/AudioMixerClientData.h
index acc788296f..05cfa51e5f 100644
--- a/assignment-client/src/audio/AudioMixerClientData.h
+++ b/assignment-client/src/audio/AudioMixerClientData.h
@@ -20,9 +20,12 @@ class AudioMixerClientData : public NodeData {
 public:
     ~AudioMixerClientData();
 
-    int parseData(unsigned char* packetData, int numBytes);
-
+    const std::vector<PositionalAudioRingBuffer*> getRingBuffers() const { return _ringBuffers; }
     AvatarAudioRingBuffer* getAvatarAudioRingBuffer() const;
+
+    int parseData(unsigned char* packetData, int numBytes);
+    void checkBuffersBeforeFrameSend(int jitterBufferLengthSamples);
+    void pushBuffersAfterFrameSend();
 private:
     std::vector<PositionalAudioRingBuffer*> _ringBuffers;
 };
diff --git a/assignment-client/src/audio/AvatarAudioRingBuffer.h b/assignment-client/src/audio/AvatarAudioRingBuffer.h
index a3e39073de..4178cd4da8 100644
--- a/assignment-client/src/audio/AvatarAudioRingBuffer.h
+++ b/assignment-client/src/audio/AvatarAudioRingBuffer.h
@@ -16,7 +16,7 @@
 
 #include "PositionalAudioRingBuffer.h"
 
-typedef std::map<QUuid, stk::TwoPole*> TwoPoleNodeMap;
+typedef std::map<PositionalAudioRingBuffer*, stk::TwoPole*> TwoPoleNodeMap;
 
 class AvatarAudioRingBuffer : public PositionalAudioRingBuffer {
 public:
diff --git a/libraries/shared/src/Node.cpp b/libraries/shared/src/Node.cpp
index 57f27f971a..54caaf2802 100644
--- a/libraries/shared/src/Node.cpp
+++ b/libraries/shared/src/Node.cpp
@@ -122,10 +122,6 @@ void Node::activatePublicSocket() {
     _activeSocket = _publicSocket;
 }
 
-bool Node::operator==(const Node& otherNode) {
-    return matches(otherNode._publicSocket, otherNode._localSocket, otherNode._type);
-}
-
 bool Node::matches(sockaddr* otherPublicSocket, sockaddr* otherLocalSocket, char otherNodeType) {
     // checks if two node objects are the same node (same type + local + public address)
     return _type == otherNodeType
diff --git a/libraries/shared/src/Node.h b/libraries/shared/src/Node.h
index 7e835cda74..434618ae6c 100644
--- a/libraries/shared/src/Node.h
+++ b/libraries/shared/src/Node.h
@@ -29,7 +29,8 @@ public:
     Node(const QUuid& uuid, char type, sockaddr* publicSocket, sockaddr* localSocket);
     ~Node();
 
-    bool operator==(const Node& otherNode);
+    bool operator==(const Node& otherNode) const { return _uuid == otherNode._uuid; }
+    bool operator!=(const Node& otherNode) const { return !(*this == otherNode); }
 
     bool matches(sockaddr* otherPublicSocket, sockaddr* otherLocalSocket, char otherNodeType);
 
@@ -73,6 +74,7 @@ public:
     void unlock() { pthread_mutex_unlock(&_mutex); }
 
     static void printLog(Node const&);
+
 private:
     // privatize copy and assignment operator to disallow Node copying
     Node(const Node &otherNode);