Mirror of https://github.com/lubosz/overte.git (synced 2025-04-08 15:43:24 +02:00)
enumerate a vector of mixable streams for each listener
commit 371de312cc (parent c992150c10)
16 changed files with 646 additions and 446 deletions
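
The diff below replaces the per-node HRTF map with a per-listener vector of mixable streams: packet-processing workers stage newly added streams in a shared concurrent vector, AudioMixer stages removed nodes and removed streams, and each listener's mix pass folds those changes into its own MixableStreamsVector before mixing. What follows is a minimal sketch of that bookkeeping pattern, not the project's actual code; StreamKey, SharedFrameData, and ListenerState are simplified stand-ins for the real NodeIDStreamID, AudioMixerSlave::SharedData, and AudioMixerClientData types, and a plain std::vector stands in for tbb::concurrent_vector.

// Sketch only: simplified stand-in types, assuming single-threaded use per frame.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct StreamKey {
    uint64_t nodeID;
    uint64_t streamID;  // 0 stands in for the "null" (microphone) stream ID
    bool operator==(const StreamKey& other) const {
        return nodeID == other.nodeID && streamID == other.streamID;
    }
};

// Filled by packet-processing workers each frame, consumed by every listener's mix pass.
struct SharedFrameData {
    std::vector<StreamKey> addedStreams;
    std::vector<uint64_t> removedNodes;
    std::vector<StreamKey> removedStreams;
};

struct ListenerState {
    std::vector<StreamKey> mixableStreams;

    void update(const SharedFrameData& shared) {
        // stage newly added streams into this listener's vector
        for (const auto& added : shared.addedStreams) {
            mixableStreams.push_back(added);
        }
        // drop streams whose node was removed, or that were removed individually
        mixableStreams.erase(std::remove_if(mixableStreams.begin(), mixableStreams.end(),
            [&](const StreamKey& key) {
                bool nodeGone = std::find(shared.removedNodes.begin(), shared.removedNodes.end(),
                                          key.nodeID) != shared.removedNodes.end();
                bool streamGone = std::find(shared.removedStreams.begin(), shared.removedStreams.end(),
                                            key) != shared.removedStreams.end();
                return nodeGone || streamGone;
            }), mixableStreams.end());
    }
};

int main() {
    SharedFrameData shared;
    shared.addedStreams = { {1, 0}, {2, 0}, {2, 7} };  // two avatar mics and one injector
    shared.removedNodes = { 1 };                       // node 1 disconnected this frame

    ListenerState listener;
    listener.update(shared);
    std::cout << "mixable streams: " << listener.mixableStreams.size() << "\n";  // prints 2
    return 0;
}
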
@@ -196,17 +196,6 @@ const pair<QString, CodecPluginPointer> AudioMixer::negotiateCodec(vector<QStrin
return make_pair(selectedCodecName, _availableCodecs[selectedCodecName]);
}

void AudioMixer::handleNodeKilled(SharedNodePointer killedNode) {
// enumerate the connected listeners to remove HRTF objects for the disconnected node
auto nodeList = DependencyManager::get<NodeList>();

nodeList->eachNode([&killedNode](const SharedNodePointer& node) {
auto clientData = dynamic_cast<AudioMixerClientData*>(node->getLinkedData());
if (clientData) {
clientData->removeNode(killedNode->getLocalID());
}
});
}

void AudioMixer::handleNodeMuteRequestPacket(QSharedPointer<ReceivedMessage> packet, SharedNodePointer sendingNode) {
auto nodeList = DependencyManager::get<NodeList>();
@@ -225,32 +214,31 @@ void AudioMixer::handleNodeMuteRequestPacket(QSharedPointer<ReceivedMessage> pac
}
}

void AudioMixer::handleNodeKilled(SharedNodePointer killedNode) {
auto clientData = dynamic_cast<AudioMixerClientData*>(killedNode->getLinkedData());
if (clientData) {
// stage the removal of all streams from this node, workers handle when preparing mixes for listeners
_workerSharedData.removedNodes.emplace_back(killedNode->getLocalID());
}
}

void AudioMixer::handleKillAvatarPacket(QSharedPointer<ReceivedMessage> packet, SharedNodePointer sendingNode) {
auto clientData = dynamic_cast<AudioMixerClientData*>(sendingNode->getLinkedData());
if (clientData) {
clientData->removeAgentAvatarAudioStream();
auto nodeList = DependencyManager::get<NodeList>();
nodeList->eachNode([sendingNode](const SharedNodePointer& node){
auto listenerClientData = dynamic_cast<AudioMixerClientData*>(node->getLinkedData());
if (listenerClientData) {
listenerClientData->removeHRTFForStream(sendingNode->getLocalID());
}
});

// stage a removal of the avatar audio stream from this Agent, workers handle when preparing mixes for listeners
_workerSharedData.removedStreams.emplace_back(sendingNode->getUUID(), sendingNode->getLocalID(), QUuid());
}
}

void AudioMixer::removeHRTFsForFinishedInjector(const QUuid& streamID) {
auto injectorClientData = qobject_cast<AudioMixerClientData*>(sender());
if (injectorClientData) {
// enumerate the connected listeners to remove HRTF objects for the disconnected injector
auto nodeList = DependencyManager::get<NodeList>();

nodeList->eachNode([injectorClientData, &streamID](const SharedNodePointer& node){
auto listenerClientData = dynamic_cast<AudioMixerClientData*>(node->getLinkedData());
if (listenerClientData) {
listenerClientData->removeHRTFForStream(injectorClientData->getNodeLocalID(), streamID);
}
});
if (injectorClientData) {
// stage the removal of this stream, workers handle when preparing mixes for listeners
_workerSharedData.removedStreams.emplace_back(injectorClientData->getNodeID(), injectorClientData->getNodeLocalID(),
streamID);
}
}
@@ -370,7 +358,6 @@ AudioMixerClientData* AudioMixer::getOrCreateClientData(Node* node) {
if (!clientData) {
node->setLinkedData(unique_ptr<NodeData> { new AudioMixerClientData(node->getUUID(), node->getLocalID()) });
clientData = dynamic_cast<AudioMixerClientData*>(node->getLinkedData());
clientData->setNodeLocalID(node->getLocalID());
connect(clientData, &AudioMixerClientData::injectorStreamFinished, this, &AudioMixer::removeHRTFsForFinishedInjector);
}
@@ -409,6 +396,30 @@ void AudioMixer::start() {

auto frameTimer = _frameTiming.timer();

// process (node-isolated) audio packets across slave threads
{
auto packetsTimer = _packetsTiming.timer();

// first clear the concurrent vector of added streams that the slaves will add to when they process packets
_workerSharedData.addedStreams.clear();

nodeList->nestedEach([&](NodeList::const_iterator cbegin, NodeList::const_iterator cend) {
_slavePool.processPackets(cbegin, cend);
});
}

// process queued events (networking, global audio packets, &c.)
{
auto eventsTimer = _eventsTiming.timer();

// clear removed nodes and removed streams before we process events that will setup the new set
_workerSharedData.removedNodes.clear();
_workerSharedData.removedStreams.clear();

// since we're a while loop we need to yield to qt's event processing
QCoreApplication::processEvents();
}

nodeList->nestedEach([&](NodeList::const_iterator cbegin, NodeList::const_iterator cend) {
// prepare frames; pop off any new audio from their streams
{

@@ -434,21 +445,6 @@ void AudioMixer::start() {
++frame;
++_numStatFrames;

// process queued events (networking, global audio packets, &c.)
{
auto eventsTimer = _eventsTiming.timer();

// since we're a while loop we need to yield to qt's event processing
QCoreApplication::processEvents();

// process (node-isolated) audio packets across slave threads
{
nodeList->nestedEach([&](NodeList::const_iterator cbegin, NodeList::const_iterator cend) {
auto packetsTimer = _packetsTiming.timer();
_slavePool.processPackets(cbegin, cend);
});
}
}

if (_isFinished) {
// alert qt eventing that this is finished
@@ -86,6 +86,7 @@ private:
// mixing helpers
std::chrono::microseconds timeFrame(p_high_resolution_clock::time_point& timestamp);
void throttle(std::chrono::microseconds frameDuration, int frame);

// pop a frame from any streams on the node
// returns the number of available streams
int prepareFrame(const SharedNodePointer& node, unsigned int frame);

@@ -105,7 +106,7 @@ private:
int _numStatFrames { 0 };
AudioMixerStats _stats;

AudioMixerSlavePool _slavePool;
AudioMixerSlavePool _slavePool { _workerSharedData };

class Timer {
public:

@@ -147,6 +148,7 @@ private:
static std::vector<ZoneSettings> _zoneSettings;
static std::vector<ReverbSettings> _zoneReverbSettings;

AudioMixerSlave::SharedData _workerSharedData;
};

#endif // hifi_AudioMixer_h
@@ -28,7 +28,6 @@
AudioMixerClientData::AudioMixerClientData(const QUuid& nodeID, Node::LocalID nodeLocalID) :
NodeData(nodeID, nodeLocalID),
audioLimiter(AudioConstants::SAMPLE_RATE, AudioConstants::STEREO),
_ignoreZone(*this),
_outgoingMixedAudioSequenceNumber(0),
_downstreamAudioStreamStats()
{

@@ -56,7 +55,7 @@ void AudioMixerClientData::queuePacket(QSharedPointer<ReceivedMessage> message,
_packetQueue.push(message);
}

void AudioMixerClientData::processPackets() {
void AudioMixerClientData::processPackets(ConcurrentAddedStreams& addedStreams) {
SharedNodePointer node = _packetQueue.node;
assert(_packetQueue.empty() || node);
_packetQueue.node.clear();
@@ -69,22 +68,17 @@ void AudioMixerClientData::processPackets() {
case PacketType::MicrophoneAudioWithEcho:
case PacketType::InjectAudio:
case PacketType::SilentAudioFrame: {

if (node->isUpstream()) {
setupCodecForReplicatedAgent(packet);
}

QMutexLocker lock(&getMutex());
parseData(*packet);
processStreamPacket(*packet, addedStreams);

optionallyReplicatePacket(*packet, *node);

break;
}
case PacketType::AudioStreamStats: {
QMutexLocker lock(&getMutex());
parseData(*packet);

break;
}
case PacketType::NegotiateAudioFormat:
@@ -186,29 +180,113 @@ void AudioMixerClientData::parseRequestsDomainListData(ReceivedMessage& message)
void AudioMixerClientData::parsePerAvatarGainSet(ReceivedMessage& message, const SharedNodePointer& node) {
QUuid uuid = node->getUUID();
// parse the UUID from the packet
QUuid avatarUuid = QUuid::fromRfc4122(message.readWithoutCopy(NUM_BYTES_RFC4122_UUID));
QUuid avatarUUID = QUuid::fromRfc4122(message.readWithoutCopy(NUM_BYTES_RFC4122_UUID));
uint8_t packedGain;
message.readPrimitive(&packedGain);
float gain = unpackFloatGainFromByte(packedGain);

if (avatarUuid.isNull()) {
if (avatarUUID.isNull()) {
// set the MASTER avatar gain
setMasterAvatarGain(gain);
qCDebug(audio) << "Setting MASTER avatar gain for " << uuid << " to " << gain;
} else {
// set the per-source avatar gain
auto nodeList = DependencyManager::get<NodeList>();
hrtfForStream(nodeList->nodeWithUUID(avatarUuid)->getLocalID(), QUuid()).setGainAdjustment(gain);
qCDebug(audio) << "Setting avatar gain adjustment for hrtf[" << uuid << "][" << avatarUuid << "] to " << gain;
setGainForAvatar(avatarUUID, gain);
qCDebug(audio) << "Setting avatar gain adjustment for hrtf[" << uuid << "][" << avatarUUID << "] to " << gain;
}
}

void AudioMixerClientData::setGainForAvatar(QUuid nodeID, uint8_t gain) {
auto it = std::find_if(_mixableStreams.cbegin(), _mixableStreams.cend(), [nodeID](const MixableStream& mixableStream){
return mixableStream.nodeStreamID.nodeID == nodeID && mixableStream.nodeStreamID.streamID.isNull();
});

if (it != _mixableStreams.cend()) {
it->hrtf->setGainAdjustment(gain);
}
}

void AudioMixerClientData::parseNodeIgnoreRequest(QSharedPointer<ReceivedMessage> message, const SharedNodePointer& node) {
node->parseIgnoreRequestMessage(message);
auto ignoredNodesPair = node->parseIgnoreRequestMessage(message);

// we have a vector of ignored or unignored node UUIDs - update our internal data structures so that
// streams can be included or excluded next time a mix is being created
if (ignoredNodesPair.second) {
// we have newly ignored nodes, add them to our vector
_newIgnoredNodeIDs.insert(std::end(_newIgnoredNodeIDs),
std::begin(ignoredNodesPair.first), std::end(ignoredNodesPair.first));
} else {
// we have newly unignored nodes, add them to our vector
_newUnignoredNodeIDs.insert(std::end(_newUnignoredNodeIDs),
std::begin(ignoredNodesPair.first), std::end(ignoredNodesPair.first));
}

auto nodeList = DependencyManager::get<NodeList>();
for (auto& nodeID : ignoredNodesPair.first) {
auto otherNode = nodeList->nodeWithUUID(nodeID);
if (otherNode) {
auto otherNodeMixerClientData = static_cast<AudioMixerClientData*>(otherNode->getLinkedData());
if (otherNodeMixerClientData) {
if (ignoredNodesPair.second) {
otherNodeMixerClientData->ignoredByNode(getNodeID());
} else {
otherNodeMixerClientData->unignoredByNode(getNodeID());
}
}
}
}
}

void AudioMixerClientData::ignoredByNode(QUuid nodeID) {
// first add this ID to the concurrent vector for newly ignoring nodes
_newIgnoringNodeIDs.push_back(nodeID);

// now take a lock and on the consistent vector of ignoring nodes and make sure this node is in it
std::lock_guard<std::mutex> lock(_ignoringNodeIDsMutex);
if (std::find(_ignoringNodeIDs.begin(), _ignoringNodeIDs.end(), nodeID) == _ignoringNodeIDs.end()) {
_ignoringNodeIDs.push_back(nodeID);
}
}

void AudioMixerClientData::unignoredByNode(QUuid nodeID) {
// first add this ID to the concurrent vector for newly unignoring nodes
_newUnignoringNodeIDs.push_back(nodeID);

// now take a lock on the consistent vector of ignoring nodes and make sure this node isn't in it
std::lock_guard<std::mutex> lock(_ignoringNodeIDsMutex);
auto it = _ignoringNodeIDs.begin();
while (it != _ignoringNodeIDs.end()) {
if (*it == nodeID) {
it = _ignoringNodeIDs.erase(it);
} else {
++it;
}
}
}

void AudioMixerClientData::clearStagedIgnoreChanges() {
_newIgnoredNodeIDs.clear();
_newUnignoredNodeIDs.clear();
_newIgnoringNodeIDs.clear();
_newUnignoringNodeIDs.clear();
}

void AudioMixerClientData::parseRadiusIgnoreRequest(QSharedPointer<ReceivedMessage> message, const SharedNodePointer& node) {
node->parseIgnoreRadiusRequestMessage(message);
bool enabled;
message->readPrimitive(&enabled);

_isIgnoreRadiusEnabled = enabled;

auto avatarAudioStream = getAvatarAudioStream();

// if we have an avatar audio stream, tell it wether its ignore box should be enabled or disabled
if (avatarAudioStream) {
if (_isIgnoreRadiusEnabled) {
avatarAudioStream->enableIgnoreBox();
} else {
avatarAudioStream->disableIgnoreBox();
}
}
}

AvatarAudioStream* AudioMixerClientData::getAvatarAudioStream() {
@@ -226,40 +304,6 @@ AvatarAudioStream* AudioMixerClientData::getAvatarAudioStream() {
return NULL;
}

AudioHRTF& AudioMixerClientData::hrtfForStream(Node::LocalID nodeID, const QUuid& streamID) {
auto& hrtfVector = _nodeSourcesHRTFMap[nodeID];

auto streamIt = std::find_if(hrtfVector.begin(), hrtfVector.end(), [&streamID](IdentifiedHRTF& identifiedHRTF){
return identifiedHRTF.streamIdentifier == streamID;
});

if (streamIt == hrtfVector.end()) {
hrtfVector.push_back({ streamID, std::unique_ptr<AudioHRTF>(new AudioHRTF) });

return *hrtfVector.back().hrtf;
} else {
return *streamIt->hrtf;
}
}

void AudioMixerClientData::removeHRTFForStream(Node::LocalID nodeID, const QUuid& streamID) {
auto it = _nodeSourcesHRTFMap.find(nodeID);
if (it != _nodeSourcesHRTFMap.end()) {
auto streamIt = std::find_if(it->second.begin(), it->second.end(), [&streamID](IdentifiedHRTF& identifiedHRTF){
return identifiedHRTF.streamIdentifier == streamID;
});

// erase the stream with the given ID from the given node
it->second.erase(streamIt);

// is the map for this node now empty?
// if so we can remove it
if (it->second.size() == 0) {
_nodeSourcesHRTFMap.erase(it);
}
}
}

void AudioMixerClientData::removeAgentAvatarAudioStream() {
QWriteLocker writeLocker { &_streamsLock };
@@ -283,112 +327,127 @@ int AudioMixerClientData::parseData(ReceivedMessage& message) {
message.readPrimitive(&_downstreamAudioStreamStats);

return message.getPosition();
}

} else {
SharedStreamPointer matchingStream;
return 0;
}

bool isMicStream = false;
void AudioMixerClientData::processStreamPacket(ReceivedMessage& message, ConcurrentAddedStreams &addedStreams) {
SharedStreamPointer matchingStream;

if (packetType == PacketType::MicrophoneAudioWithEcho
|| packetType == PacketType::ReplicatedMicrophoneAudioWithEcho
|| packetType == PacketType::MicrophoneAudioNoEcho
|| packetType == PacketType::ReplicatedMicrophoneAudioNoEcho
|| packetType == PacketType::SilentAudioFrame
|| packetType == PacketType::ReplicatedSilentAudioFrame) {
auto packetType = message.getType();
bool newStream = false;

QWriteLocker writeLocker { &_streamsLock };
if (packetType == PacketType::MicrophoneAudioWithEcho
|| packetType == PacketType::ReplicatedMicrophoneAudioWithEcho
|| packetType == PacketType::MicrophoneAudioNoEcho
|| packetType == PacketType::ReplicatedMicrophoneAudioNoEcho
|| packetType == PacketType::SilentAudioFrame
|| packetType == PacketType::ReplicatedSilentAudioFrame) {

auto micStreamIt = std::find_if(_audioStreams.begin(), _audioStreams.end(), [](const SharedStreamPointer& stream){
return stream->getStreamIdentifier().isNull();
});
if (micStreamIt == _audioStreams.end()) {
// we don't have a mic stream yet, so add it
QWriteLocker writeLocker { &_streamsLock };

// hop past the sequence number that leads the packet
message.seek(sizeof(quint16));
auto micStreamIt = std::find_if(_audioStreams.begin(), _audioStreams.end(), [](const SharedStreamPointer& stream){
return stream->getStreamIdentifier().isNull();
});

// pull the codec string from the packet
auto codecString = message.readString();
if (micStreamIt == _audioStreams.end()) {
// we don't have a mic stream yet, so add it

// determine if the stream is stereo or not
bool isStereo;
if (packetType == PacketType::SilentAudioFrame
|| packetType == PacketType::ReplicatedSilentAudioFrame) {
quint16 numSilentSamples;
message.readPrimitive(&numSilentSamples);
isStereo = numSilentSamples == AudioConstants::NETWORK_FRAME_SAMPLES_STEREO;
} else {
quint8 channelFlag;
message.readPrimitive(&channelFlag);
isStereo = channelFlag == 1;
}

auto avatarAudioStream = new AvatarAudioStream(isStereo, AudioMixer::getStaticJitterFrames());
avatarAudioStream->setupCodec(_codec, _selectedCodecName, isStereo ? AudioConstants::STEREO : AudioConstants::MONO);
qCDebug(audio) << "creating new AvatarAudioStream... codec:" << _selectedCodecName << "isStereo:" << isStereo;

connect(avatarAudioStream, &InboundAudioStream::mismatchedAudioCodec,
this, &AudioMixerClientData::handleMismatchAudioFormat);

matchingStream = SharedStreamPointer(avatarAudioStream);
_audioStreams.push_back(matchingStream);
} else {
matchingStream = *micStreamIt;
}

writeLocker.unlock();

isMicStream = true;
} else if (packetType == PacketType::InjectAudio
|| packetType == PacketType::ReplicatedInjectAudio) {
// this is injected audio
// grab the stream identifier for this injected audio
// hop past the sequence number that leads the packet
message.seek(sizeof(quint16));

QUuid streamIdentifier = QUuid::fromRfc4122(message.readWithoutCopy(NUM_BYTES_RFC4122_UUID));
// pull the codec string from the packet
auto codecString = message.readString();

// determine if the stream is stereo or not
bool isStereo;
message.readPrimitive(&isStereo);

QWriteLocker writeLock { &_streamsLock };

auto streamIt = std::find_if(_audioStreams.begin(), _audioStreams.end(), [&streamIdentifier](const SharedStreamPointer& stream) {
return stream->getStreamIdentifier() == streamIdentifier;
});

if (streamIt == _audioStreams.end()) {
// we don't have this injected stream yet, so add it
auto injectorStream = new InjectedAudioStream(streamIdentifier, isStereo, AudioMixer::getStaticJitterFrames());

#if INJECTORS_SUPPORT_CODECS
injectorStream->setupCodec(_codec, _selectedCodecName, isStereo ? AudioConstants::STEREO : AudioConstants::MONO);
qCDebug(audio) << "creating new injectorStream... codec:" << _selectedCodecName << "isStereo:" << isStereo;
#endif

matchingStream = SharedStreamPointer(injectorStream);
_audioStreams.push_back(matchingStream);
if (packetType == PacketType::SilentAudioFrame || packetType == PacketType::ReplicatedSilentAudioFrame) {
quint16 numSilentSamples;
message.readPrimitive(&numSilentSamples);
isStereo = numSilentSamples == AudioConstants::NETWORK_FRAME_SAMPLES_STEREO;
} else {
matchingStream = *streamIt;
quint8 channelFlag;
message.readPrimitive(&channelFlag);
isStereo = channelFlag == 1;
}

writeLock.unlock();
auto avatarAudioStream = new AvatarAudioStream(isStereo, AudioMixer::getStaticJitterFrames());
avatarAudioStream->setupCodec(_codec, _selectedCodecName, isStereo ? AudioConstants::STEREO : AudioConstants::MONO);

if (_isIgnoreRadiusEnabled) {
avatarAudioStream->enableIgnoreBox();
} else {
avatarAudioStream->disableIgnoreBox();
}

qCDebug(audio) << "creating new AvatarAudioStream... codec:" << _selectedCodecName << "isStereo:" << isStereo;

connect(avatarAudioStream, &InboundAudioStream::mismatchedAudioCodec,
this, &AudioMixerClientData::handleMismatchAudioFormat);

matchingStream = SharedStreamPointer(avatarAudioStream);
_audioStreams.push_back(matchingStream);

newStream = true;
} else {
matchingStream = *micStreamIt;
}

// seek to the beginning of the packet so that the next reader is in the right spot
message.seek(0);
writeLocker.unlock();
} else if (packetType == PacketType::InjectAudio
|| packetType == PacketType::ReplicatedInjectAudio) {
// this is injected audio
// grab the stream identifier for this injected audio
message.seek(sizeof(quint16));

// check the overflow count before we parse data
auto overflowBefore = matchingStream->getOverflowCount();
auto parseResult = matchingStream->parseData(message);
QUuid streamIdentifier = QUuid::fromRfc4122(message.readWithoutCopy(NUM_BYTES_RFC4122_UUID));

if (matchingStream->getOverflowCount() > overflowBefore) {
qCDebug(audio) << "Just overflowed on stream from" << message.getSourceID() << "at" << message.getSenderSockAddr();
qCDebug(audio) << "This stream is for" << (isMicStream ? "microphone audio" : "injected audio");
bool isStereo;
message.readPrimitive(&isStereo);

QWriteLocker writeLock { &_streamsLock };

auto streamIt = std::find_if(_audioStreams.begin(), _audioStreams.end(), [&streamIdentifier](const SharedStreamPointer& stream) {
return stream->getStreamIdentifier() == streamIdentifier;
});

if (streamIt == _audioStreams.end()) {
// we don't have this injected stream yet, so add it
auto injectorStream = new InjectedAudioStream(streamIdentifier, isStereo, AudioMixer::getStaticJitterFrames());

#if INJECTORS_SUPPORT_CODECS
injectorStream->setupCodec(_codec, _selectedCodecName, isStereo ? AudioConstants::STEREO : AudioConstants::MONO);
qCDebug(audio) << "creating new injectorStream... codec:" << _selectedCodecName << "isStereo:" << isStereo;
#endif

matchingStream = SharedStreamPointer(injectorStream);
_audioStreams.push_back(matchingStream);

newStream = true;
} else {
matchingStream = *streamIt;
}

return parseResult;
writeLock.unlock();
}

// seek to the beginning of the packet so that the next reader is in the right spot
message.seek(0);

// check the overflow count before we parse data
auto overflowBefore = matchingStream->getOverflowCount();
matchingStream->parseData(message);

if (matchingStream->getOverflowCount() > overflowBefore) {
qCDebug(audio) << "Just overflowed on stream" << matchingStream->getStreamIdentifier()
<< "from" << message.getSourceID();
}

if (newStream) {
// whenever a stream is added, push it to the concurrent vector of streams added this frame
addedStreams.emplace_back(getNodeID(), getNodeLocalID(), matchingStream->getStreamIdentifier(), matchingStream.get());
}
return 0;
}

int AudioMixerClientData::checkBuffersBeforeFrameSend() {
@@ -632,74 +691,6 @@ void AudioMixerClientData::cleanupCodec() {
}
}

AudioMixerClientData::IgnoreZone& AudioMixerClientData::IgnoreZoneMemo::get(unsigned int frame) {
// check for a memoized zone
if (frame != _frame.load(std::memory_order_acquire)) {
AvatarAudioStream* stream = _data.getAvatarAudioStream();

// get the initial dimensions from the stream
glm::vec3 corner = stream ? stream->getAvatarBoundingBoxCorner() : glm::vec3(0);
glm::vec3 scale = stream ? stream->getAvatarBoundingBoxScale() : glm::vec3(0);

// enforce a minimum scale
static const glm::vec3 MIN_IGNORE_BOX_SCALE = glm::vec3(0.3f, 1.3f, 0.3f);
if (glm::any(glm::lessThan(scale, MIN_IGNORE_BOX_SCALE))) {
scale = MIN_IGNORE_BOX_SCALE;
}

// (this is arbitrary number determined empirically for comfort)
const float IGNORE_BOX_SCALE_FACTOR = 2.4f;
scale *= IGNORE_BOX_SCALE_FACTOR;

// create the box (we use a box for the zone for convenience)
AABox box(corner, scale);

// update the memoized zone
// This may be called by multiple threads concurrently,
// so take a lock and only update the memo if this call is first.
// This prevents concurrent updates from invalidating the returned reference
// (contingent on the preconditions listed in the header).
std::lock_guard<std::mutex> lock(_mutex);
if (frame != _frame.load(std::memory_order_acquire)) {
_zone = box;
unsigned int oldFrame = _frame.exchange(frame, std::memory_order_release);
Q_UNUSED(oldFrame);
}
}

return _zone;
}

bool AudioMixerClientData::shouldIgnore(const SharedNodePointer self, const SharedNodePointer node, unsigned int frame) {
// this is symmetric over self / node; if computed, it is cached in the other

AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
if (!nodeData) {
return false;
}

// compute shouldIgnore
bool shouldIgnore = true;
if ( // the nodes are not ignoring each other explicitly (or are but get data regardless)
(!self->isIgnoringNodeWithID(node->getUUID()) ||
(nodeData->getRequestsDomainListData() && node->getCanKick())) &&
(!node->isIgnoringNodeWithID(self->getUUID()) ||
(getRequestsDomainListData() && self->getCanKick()))) {

// if either node is enabling an ignore radius, check their proximity
if ((self->isIgnoreRadiusEnabled() || node->isIgnoreRadiusEnabled())) {
auto& zone = _ignoreZone.get(frame);
auto& nodeZone = nodeData->_ignoreZone.get(frame);
shouldIgnore = zone.touches(nodeZone);
} else {
shouldIgnore = false;
}
}

return shouldIgnore;
}

void AudioMixerClientData::setupCodecForReplicatedAgent(QSharedPointer<ReceivedMessage> message) {
// hop past the sequence number that leads the packet
message->seek(sizeof(quint16));
@@ -14,6 +14,8 @@

#include <queue>

#include <tbb/concurrent_vector.h>

#include <QtCore/QJsonObject>

#include <AABox.h>

@@ -30,6 +32,17 @@
class AudioMixerClientData : public NodeData {
Q_OBJECT
public:
struct AddedStream {
NodeIDStreamID nodeIDStreamID;
PositionalAudioStream* positionalStream;

AddedStream(QUuid nodeID, Node::LocalID localNodeID,
StreamID streamID, PositionalAudioStream* positionalStream) :
nodeIDStreamID(nodeID, localNodeID, streamID), positionalStream(positionalStream) {};
};

using ConcurrentAddedStreams = tbb::concurrent_vector<AddedStream>;

AudioMixerClientData(const QUuid& nodeID, Node::LocalID nodeLocalID);
~AudioMixerClientData();
@@ -37,31 +50,16 @@ public:
using AudioStreamVector = std::vector<SharedStreamPointer>;

void queuePacket(QSharedPointer<ReceivedMessage> packet, SharedNodePointer node);
void processPackets();
void processPackets(ConcurrentAddedStreams& addedStreams);

AudioStreamVector& getAudioStreams() { return _audioStreams; }
AvatarAudioStream* getAvatarAudioStream();

// returns whether self (this data's node) should ignore node, memoized by frame
// precondition: frame is increasing after first call (including overflow wrap)
bool shouldIgnore(SharedNodePointer self, SharedNodePointer node, unsigned int frame);

// the following methods should be called from the AudioMixer assignment thread ONLY
// they are not thread-safe

// returns a new or existing HRTF object for the given stream from the given node
AudioHRTF& hrtfForStream(Node::LocalID nodeID, const QUuid& streamID = QUuid());

// removes an AudioHRTF object for a given stream
void removeHRTFForStream(Node::LocalID nodeID, const QUuid& streamID = QUuid());

// remove all sources and data from this node
void removeNode(Node::LocalID nodeID) { _nodeSourcesHRTFMap.erase(nodeID); }

void removeAgentAvatarAudioStream();

// packet parsers
int parseData(ReceivedMessage& message) override;
void processStreamPacket(ReceivedMessage& message, ConcurrentAddedStreams& addedStreams);
void negotiateAudioFormat(ReceivedMessage& message, const SharedNodePointer& node);
void parseRequestsDomainListData(ReceivedMessage& message);
void parsePerAvatarGainSet(ReceivedMessage& message, const SharedNodePointer& node);

@@ -75,9 +73,6 @@ public:

QJsonObject getAudioStreamStats();

void setNodeLocalID(Node::LocalID localNodeID) { _localNodeID = localNodeID; }
Node::LocalID getNodeLocalID() { return _localNodeID; }

void sendAudioStreamStatsPackets(const SharedNodePointer& destinationNode);

void incrementOutgoingMixedAudioSequenceNumber() { _outgoingMixedAudioSequenceNumber++; }
|
|||
|
||||
void setupCodecForReplicatedAgent(QSharedPointer<ReceivedMessage> message);
|
||||
|
||||
struct MixableStream {
|
||||
float approximateVolume { 0.0f };
|
||||
NodeIDStreamID nodeStreamID;
|
||||
std::unique_ptr<AudioHRTF> hrtf;
|
||||
PositionalAudioStream* positionalStream;
|
||||
bool ignoredByListener { false };
|
||||
bool ignoringListener { false };
|
||||
bool completedSilentRender { false };
|
||||
bool skippedStream { false };
|
||||
|
||||
MixableStream(NodeIDStreamID nodeIDStreamID, PositionalAudioStream* positionalStream) :
|
||||
nodeStreamID(nodeIDStreamID), hrtf(new AudioHRTF), positionalStream(positionalStream) {};
|
||||
MixableStream(QUuid nodeID, Node::LocalID localNodeID, StreamID streamID, PositionalAudioStream* positionalStream) :
|
||||
nodeStreamID(nodeID, localNodeID, streamID), hrtf(new AudioHRTF), positionalStream(positionalStream) {};
|
||||
};
|
||||
|
||||
using MixableStreamsVector = std::vector<MixableStream>;
|
||||
|
||||
MixableStreamsVector& getMixableStreams() { return _mixableStreams; }
|
||||
|
||||
// thread-safe, called from AudioMixerSlave(s) while processing ignore packets for other nodes
|
||||
void ignoredByNode(QUuid nodeID);
|
||||
void unignoredByNode(QUuid nodeID);
|
||||
|
||||
// start of methods called non-concurrently from single AudioMixerSlave mixing for the owning node
|
||||
|
||||
const Node::IgnoredNodeIDs& getNewIgnoredNodeIDs() const { return _newIgnoredNodeIDs; }
|
||||
const Node::IgnoredNodeIDs& getNewUnignoredNodeIDs() const { return _newUnignoredNodeIDs; }
|
||||
|
||||
using ConcurrentIgnoreNodeIDs = tbb::concurrent_vector<QUuid>;
|
||||
const ConcurrentIgnoreNodeIDs& getNewIgnoringNodeIDs() const { return _newIgnoringNodeIDs; }
|
||||
const ConcurrentIgnoreNodeIDs& getNewUnignoringNodeIDs() const { return _newUnignoringNodeIDs; }
|
||||
|
||||
void clearStagedIgnoreChanges();
|
||||
|
||||
const Node::IgnoredNodeIDs& getIgnoringNodeIDs() const { return _ignoringNodeIDs; }
|
||||
|
||||
bool getHasReceivedFirstMix() const { return _hasReceivedFirstMix; }
|
||||
void setHasReceivedFirstMix(bool hasReceivedFirstMix) { _hasReceivedFirstMix = hasReceivedFirstMix; }
|
||||
|
||||
// end of methods called non-concurrently from single AudioMixerSlave
|
||||
|
||||
signals:
|
||||
void injectorStreamFinished(const QUuid& streamIdentifier);
|
||||
|
||||
|
@@ -133,33 +170,9 @@ private:

void optionallyReplicatePacket(ReceivedMessage& packet, const Node& node);

using IgnoreZone = AABox;
class IgnoreZoneMemo {
public:
IgnoreZoneMemo(AudioMixerClientData& data) : _data(data) {}
void setGainForAvatar(QUuid nodeID, uint8_t gain);

// returns an ignore zone, memoized by frame (lockless if the zone is already memoized)
// preconditions:
//  - frame is increasing after first call (including overflow wrap)
//  - there are no references left from calls to getIgnoreZone(frame - 1)
IgnoreZone& get(unsigned int frame);

private:
AudioMixerClientData& _data;
IgnoreZone _zone;
std::atomic<unsigned int> _frame { 0 };
std::mutex _mutex;
};
IgnoreZoneMemo _ignoreZone;

struct IdentifiedHRTF {
QUuid streamIdentifier;
std::unique_ptr<AudioHRTF> hrtf;
};

using HRTFVector = std::vector<IdentifiedHRTF>;
using NodeSourcesHRTFMap = std::unordered_map<Node::LocalID, HRTFVector>;
NodeSourcesHRTFMap _nodeSourcesHRTFMap;
MixableStreamsVector _mixableStreams;

quint16 _outgoingMixedAudioSequenceNumber;
@@ -179,7 +192,20 @@ private:
bool _shouldMuteClient { false };
bool _requestsDomainListData { false };

Node::LocalID _localNodeID;
std::vector<AddedStream> _newAddedStreams;

Node::IgnoredNodeIDs _newIgnoredNodeIDs;
Node::IgnoredNodeIDs _newUnignoredNodeIDs;

tbb::concurrent_vector<QUuid> _newIgnoringNodeIDs;
tbb::concurrent_vector<QUuid> _newUnignoringNodeIDs;

std::mutex _ignoringNodeIDsMutex;
Node::IgnoredNodeIDs _ignoringNodeIDs;

std::atomic_bool _isIgnoreRadiusEnabled { false };

bool _hasReceivedFirstMix { false };
};

#endif // hifi_AudioMixerClientData_h
@@ -46,9 +46,8 @@ void sendMutePacket(const SharedNodePointer& node, AudioMixerClientData&);
void sendEnvironmentPacket(const SharedNodePointer& node, AudioMixerClientData& data);

// mix helpers
inline float approximateGain(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
const glm::vec3& relativePosition);
inline float computeGain(const AudioMixerClientData& listenerNodeData, const AvatarAudioStream& listeningNodeStream,
inline float approximateGain(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd);
inline float computeGain(float masterListenerGain, const AvatarAudioStream& listeningNodeStream,
const PositionalAudioStream& streamToAdd, const glm::vec3& relativePosition, float distance, bool isEcho);
inline float computeAzimuth(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
const glm::vec3& relativePosition);

@@ -56,7 +55,7 @@ inline float computeAzimuth(const AvatarAudioStream& listeningNodeStream, const
void AudioMixerSlave::processPackets(const SharedNodePointer& node) {
AudioMixerClientData* data = (AudioMixerClientData*)node->getLinkedData();
if (data) {
data->processPackets();
data->processPackets(_sharedData.addedStreams);
}
}
@@ -125,6 +124,13 @@ void AudioMixerSlave::mix(const SharedNodePointer& node) {
}
}

template<typename V>
bool containsNodeID(const V& vector, QUuid nodeID) {
return std::any_of(std::begin(vector), std::end(vector), [&nodeID](const QUuid& vectorID){
return vectorID == nodeID;
});
}

bool AudioMixerSlave::prepareMix(const SharedNodePointer& listener) {
AvatarAudioStream* listenerAudioStream = static_cast<AudioMixerClientData*>(listener->getLinkedData())->getAvatarAudioStream();
AudioMixerClientData* listenerData = static_cast<AudioMixerClientData*>(listener->getLinkedData());
@@ -139,87 +145,185 @@ bool AudioMixerSlave::prepareMix(const SharedNodePointer& listener) {
memset(_mixSamples, 0, sizeof(_mixSamples));

bool isThrottling = _throttlingRatio > 0.0f;
std::vector<std::pair<float, SharedNodePointer>> throttledNodes;

typedef void (AudioMixerSlave::*MixFunctor)(
AudioMixerClientData&, Node::LocalID, const AvatarAudioStream&, const PositionalAudioStream&);
auto forAllStreams = [&](const SharedNodePointer& node, AudioMixerClientData* nodeData, MixFunctor mixFunctor) {
auto nodeID = node->getLocalID();
for (auto& streamPair : nodeData->getAudioStreams()) {
(this->*mixFunctor)(*listenerData, nodeID, *listenerAudioStream, *streamPair);
}
};
auto nodeList = DependencyManager::get<NodeList>();

#ifdef HIFI_AUDIO_MIXER_DEBUG
auto mixStart = p_high_resolution_clock::now();
#endif

std::for_each(_begin, _end, [&](const SharedNodePointer& node) {
AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
if (!nodeData) {
return;
}
auto& mixableStreams = listenerData->getMixableStreams();
auto& ignoredNodeIDs = listener->getIgnoredNodeIDs();
auto& ignoringNodeIDs = listenerData->getIgnoringNodeIDs();

if (*node == *listener) {
// only mix the echo, if requested
for (auto& streamPair : nodeData->getAudioStreams()) {
if (streamPair->shouldLoopbackForNode()) {
mixStream(*listenerData, node->getLocalID(), *listenerAudioStream, *streamPair);
// add data for newly created streams to our vector
if (!listenerData->getHasReceivedFirstMix()) {
// when this listener is new, we need to fill its added streams object with all available streams
std::for_each(_begin, _end, [&](const SharedNodePointer& node) {
AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
if (nodeData) {
for (auto& stream : nodeData->getAudioStreams()) {
mixableStreams.emplace_back(node->getUUID(), node->getLocalID(),
stream->getStreamIdentifier(), &(*stream));

// pre-populate ignored and ignoring flags for this stream
mixableStreams.back().ignoredByListener = containsNodeID(ignoredNodeIDs, node->getUUID());
mixableStreams.back().ignoringListener = containsNodeID(ignoringNodeIDs, node->getUUID());
}
}
} else if (!listenerData->shouldIgnore(listener, node, _frame)) {
if (!isThrottling) {
forAllStreams(node, nodeData, &AudioMixerSlave::mixStream);
} else {
});

// compute the node's max relative volume
float nodeVolume = 0.0f;
for (auto& nodeStream : nodeData->getAudioStreams()) {
// flag this listener as having received their first mix so we know we don't need to enumerate all nodes again
listenerData->setHasReceivedFirstMix(true);
} else {
for (const auto& newStream : _sharedData.addedStreams) {
mixableStreams.emplace_back(newStream.nodeIDStreamID, newStream.positionalStream);

// approximate the gain
glm::vec3 relativePosition = nodeStream->getPosition() - listenerAudioStream->getPosition();
float gain = approximateGain(*listenerAudioStream, *nodeStream, relativePosition);

// modify by hrtf gain adjustment
auto& hrtf = listenerData->hrtfForStream(node->getLocalID(), nodeStream->getStreamIdentifier());
gain *= hrtf.getGainAdjustment();

auto streamVolume = nodeStream->getLastPopOutputTrailingLoudness() * gain;
nodeVolume = std::max(streamVolume, nodeVolume);
}

// max-heapify the nodes by relative volume
throttledNodes.push_back({ nodeVolume, node });
std::push_heap(throttledNodes.begin(), throttledNodes.end());
}
}
});

if (isThrottling) {
// pop the loudest nodes off the heap and mix their streams
int numToRetain = (int)(std::distance(_begin, _end) * (1 - _throttlingRatio));
for (int i = 0; i < numToRetain; i++) {
if (throttledNodes.empty()) {
break;
}

std::pop_heap(throttledNodes.begin(), throttledNodes.end());

auto& node = throttledNodes.back().second;
AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
forAllStreams(node, nodeData, &AudioMixerSlave::mixStream);

throttledNodes.pop_back();
}

// throttle the remaining nodes' streams
for (const std::pair<float, SharedNodePointer>& nodePair : throttledNodes) {
auto& node = nodePair.second;
AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
forAllStreams(node, nodeData, &AudioMixerSlave::throttleStream);
// pre-populate ignored and ignoring flags for this stream
mixableStreams.back().ignoredByListener = containsNodeID(ignoredNodeIDs, newStream.nodeIDStreamID.nodeID);
mixableStreams.back().ignoringListener = containsNodeID(ignoringNodeIDs, newStream.nodeIDStreamID.nodeID);
}
}

// grab the unprocessed ignores and unignores from and for this listener
const auto& nodesIgnoredByListener = listenerData->getNewIgnoredNodeIDs();
const auto& nodesUnignoredByListener = listenerData->getNewUnignoredNodeIDs();
const auto& nodesIgnoringListener = listenerData->getNewIgnoringNodeIDs();
const auto& nodesUnignoringListener = listenerData->getNewUnignoringNodeIDs();

// enumerate the available streams
auto it = mixableStreams.begin();
auto end = mixableStreams.end();
while (it != end) {
// check if this node (and therefore all of the node's streams) has been removed
auto& nodeIDStreamID = it->nodeStreamID;
auto matchedRemovedNode = std::find(_sharedData.removedNodes.cbegin(), _sharedData.removedNodes.cend(),
nodeIDStreamID.nodeLocalID);
bool streamRemoved = matchedRemovedNode != _sharedData.removedNodes.cend();

// if the node wasn't removed, check if this stream was specifically removed
if (!streamRemoved) {
auto matchedRemovedStream = std::find(_sharedData.removedStreams.cbegin(), _sharedData.removedStreams.cend(),
nodeIDStreamID);
streamRemoved = matchedRemovedStream != _sharedData.removedStreams.cend();
}

if (streamRemoved) {
// this stream was removed, so swap it with the last item and decrease the end iterator
--end;
std::swap(*it, *end);

// process the it element (which is now the element that was the last item before the swap)
continue;
}

if (it->nodeStreamID.nodeLocalID == listener->getLocalID()) {
// streams from this node should be skipped unless loopback is specifically requested
if (it->positionalStream->shouldLoopbackForNode()) {
it->skippedStream = false;
} else {
it->approximateVolume = 0.0f;
it->skippedStream = true;
it->completedSilentRender = true;

// if we know we're skipping this stream, no more processing is required
// since we don't do silent HRTF renders for echo streams
++it;
continue;
}
} else {
if (it->ignoredByListener && nodesUnignoredByListener.size() > 0) {
// this stream was previously ignored by the listener and we have some unignored streams
// check now if it is one of the unignored streams and flag it as such
it->ignoredByListener = !containsNodeID(nodesUnignoredByListener, nodeIDStreamID.nodeID);

} else if (!it->ignoredByListener && nodesIgnoredByListener.size() > 0) {
// this stream was previously not ignored by the listener and we have some newly ignored streams
// check now if it is one of the ignored streams and flag it as such
it->ignoredByListener = containsNodeID(nodesIgnoredByListener, nodeIDStreamID.nodeID);
}

if (it->ignoringListener && nodesUnignoringListener.size() > 0) {
// this stream was previously ignoring the listener and we have some new un-ignoring nodes
// check now if it is one of the unignoring streams and flag it as such
it->ignoringListener = !containsNodeID(nodesUnignoringListener, nodeIDStreamID.nodeID);
} else if (!it->ignoringListener && nodesIgnoringListener.size() > 0) {
it->ignoringListener = containsNodeID(nodesIgnoringListener, nodeIDStreamID.nodeID);
}

if (it->ignoredByListener
|| (it->ignoringListener && !(listenerData->getRequestsDomainListData() && listener->getCanKick()))) {
// this is a stream ignoring by the listener
// or ignoring the listener (and the listener is not an admin asking for (the poorly named) "domain list" data)
// mark it skipped and move on
it->skippedStream = true;
} else {
it->skippedStream = false;
}

if (!it->skippedStream) {
if ((listenerAudioStream->isIgnoreBoxEnabled() || it->positionalStream->isIgnoreBoxEnabled())
&& listenerAudioStream->getIgnoreBox().touches(it->positionalStream->getIgnoreBox())) {
// the listener is ignoring audio sources within a radius, and this source is in that radius
// so we mark it skipped
it->skippedStream = true;
} else {
it->skippedStream = false;
}
}
}

if (!isThrottling) {
// we aren't throttling, so we already know that we can add this stream to the mix
addStream(*it, *listenerAudioStream, listenerData->getMasterAvatarGain(), false);
} else {
// we're throttling, so we need to update the approximate volume for any un-skipped streams
// unless this is simply for an echo (in which case the approx volume is 1.0)
if (!it->skippedStream) {
if (it->positionalStream != listenerAudioStream) {
// approximate the gain
float gain = approximateGain(*listenerAudioStream, *(it->positionalStream));

// for avatar streams, modify by the set gain adjustment
if (nodeIDStreamID.streamID.isNull()) {
gain *= it->hrtf->getGainAdjustment();
}

it->approximateVolume = it->positionalStream->getLastPopOutputTrailingLoudness() * gain;
} else {
it->approximateVolume = 1.0f;
}
} else {
it->approximateVolume = 0.0f;
}
}

++it;
}

// erase any removed streams that were swapped to the end
mixableStreams.erase(end, mixableStreams.end());

if (isThrottling) {
// since we're throttling, we need to partition the mixable into throttled and unthrottled streams
auto numToRetain = std::distance(_begin, _end) * (1 - _throttlingRatio);
auto throttlePoint = mixableStreams.begin() + numToRetain;

std::nth_element(mixableStreams.begin(), throttlePoint, mixableStreams.end(),
[](const auto& a, const auto& b)
{
return a.approximateVolume > b.approximateVolume;
});

for (auto it = mixableStreams.begin(); it != mixableStreams.end(); ++it) {
// add this stream, it is throttled if it is at or past the throttle iterator in the vector
addStream(*it, *listenerAudioStream, listenerData->getMasterAvatarGain(), it >= throttlePoint);
}
}

// clear the newly ignored, un-ignored, ignoring, and un-ignoring streams now that we've processed them
listenerData->clearStagedIgnoreChanges();

#ifdef HIFI_AUDIO_MIXER_DEBUG
auto mixEnd = p_high_resolution_clock::now();
auto mixTime = std::chrono::duration_cast<std::chrono::nanoseconds>(mixEnd - mixStart);
@@ -242,51 +346,59 @@ bool AudioMixerSlave::prepareMix(const SharedNodePointer& listener) {
return hasAudio;
}

void AudioMixerSlave::throttleStream(AudioMixerClientData& listenerNodeData, Node::LocalID sourceNodeLocalID,
const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd) {
// only throttle this stream to the mix if it has a valid position, we won't know how to mix it otherwise
if (streamToAdd.hasValidPosition()) {
addStream(listenerNodeData, sourceNodeLocalID, listeningNodeStream, streamToAdd, true);
}
}
void AudioMixerSlave::addStream(AudioMixerClientData::MixableStream& mixableStream, AvatarAudioStream& listeningNodeStream,
float masterListenerGain, bool throttle) {

void AudioMixerSlave::mixStream(AudioMixerClientData& listenerNodeData, Node::LocalID sourceNodeLocalID,
const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd) {
// only add the stream to the mix if it has a valid position, we won't know how to mix it otherwise
if (streamToAdd.hasValidPosition()) {
addStream(listenerNodeData, sourceNodeLocalID, listeningNodeStream, streamToAdd, false);
if (mixableStream.skippedStream) {
// any skipped stream gets no processing and no silent render - early return
return;
}
}

void AudioMixerSlave::addStream(AudioMixerClientData& listenerNodeData, Node::LocalID sourceNodeLocalID,
const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
bool throttle) {
++stats.totalMixes;

// to reduce artifacts we call the HRTF functor for every source, even if throttled or silent
auto streamToAdd = mixableStream.positionalStream;

// to reduce artifacts we still call the HRTF functor for every silent or throttled source
// for the first frame where the source becomes throttled or silent
// this ensures the correct tail from last mixed block and the correct spatialization of next first block
if (throttle || mixableStream.skippedStream || streamToAdd->getLastPopOutputLoudness() == 0.0f) {
if (mixableStream.completedSilentRender) {

if (throttle) {
++stats.hrtfThrottleRenders;
}

return;
} else {
mixableStream.completedSilentRender = true;
}
} else if (mixableStream.completedSilentRender) {
// a stream that is no longer throttled or silent should have its silent render flag reset to false
// so that we complete a silent render for the stream next time it is throttled or otherwise goes silent
mixableStream.completedSilentRender = false;
}

// check if this is a server echo of a source back to itself
bool isEcho = (&streamToAdd == &listeningNodeStream);
bool isEcho = (streamToAdd == &listeningNodeStream);

glm::vec3 relativePosition = streamToAdd.getPosition() - listeningNodeStream.getPosition();
glm::vec3 relativePosition = streamToAdd->getPosition() - listeningNodeStream.getPosition();

float distance = glm::max(glm::length(relativePosition), EPSILON);
float gain = computeGain(listenerNodeData, listeningNodeStream, streamToAdd, relativePosition, distance, isEcho);
float gain = computeGain(masterListenerGain, listeningNodeStream, *streamToAdd, relativePosition, distance, isEcho);
float azimuth = isEcho ? 0.0f : computeAzimuth(listeningNodeStream, listeningNodeStream, relativePosition);
const int HRTF_DATASET_INDEX = 1;

if (!streamToAdd.lastPopSucceeded()) {
if (!streamToAdd->lastPopSucceeded()) {
bool forceSilentBlock = true;

if (!streamToAdd.getLastPopOutput().isNull()) {
bool isInjector = dynamic_cast<const InjectedAudioStream*>(&streamToAdd);
if (!streamToAdd->getLastPopOutput().isNull()) {
bool isInjector = dynamic_cast<const InjectedAudioStream*>(streamToAdd);

// in an injector, just go silent - the injector has likely ended
// in other inputs (microphone, &c.), repeat with fade to avoid the harsh jump to silence
if (!isInjector) {
// calculate its fade factor, which depends on how many times it's already been repeated.
float fadeFactor = calculateRepeatedFrameFadeFactor(streamToAdd.getConsecutiveNotMixedCount() - 1);
float fadeFactor = calculateRepeatedFrameFadeFactor(streamToAdd->getConsecutiveNotMixedCount() - 1);
if (fadeFactor > 0.0f) {
// apply the fadeFactor to the gain
gain *= fadeFactor;

@@ -298,13 +410,10 @@ void AudioMixerSlave::addStream(AudioMixerClientData& listenerNodeData, Node::Lo
if (forceSilentBlock) {
// call renderSilent with a forced silent block to reduce artifacts
// (this is not done for stereo streams since they do not go through the HRTF)
if (!streamToAdd.isStereo() && !isEcho) {
// get the existing listener-source HRTF object, or create a new one
auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeLocalID, streamToAdd.getStreamIdentifier());

if (!streamToAdd->isStereo() && !isEcho) {
static int16_t silentMonoBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL] = {};
hrtf.renderSilent(silentMonoBlock, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
mixableStream.hrtf->renderSilent(silentMonoBlock, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

++stats.hrtfSilentRenders;
}

@@ -314,16 +423,15 @@ void AudioMixerSlave::addStream(AudioMixerClientData& listenerNodeData, Node::Lo
}

// grab the stream from the ring buffer
AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd.getLastPopOutput();
AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd->getLastPopOutput();

// stereo sources are not passed through HRTF
if (streamToAdd.isStereo()) {
if (streamToAdd->isStereo()) {

// apply the avatar gain adjustment
auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeLocalID, streamToAdd.getStreamIdentifier());
gain *= hrtf.getGainAdjustment();
gain *= mixableStream.hrtf->getGainAdjustment();

const float scale = 1/32768.0f; // int16_t to float
const float scale = 1 / 32768.0f; // int16_t to float

for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL; i++) {
_mixSamples[2*i+0] += (float)streamPopOutput[2*i+0] * gain * scale;

@@ -349,15 +457,13 @@ void AudioMixerSlave::addStream(AudioMixerClientData& listenerNodeData, Node::Lo
return;
}

// get the existing listener-source HRTF object, or create a new one
auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeLocalID, streamToAdd.getStreamIdentifier());

streamPopOutput.readSamples(_bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

if (streamToAdd.getLastPopOutputLoudness() == 0.0f) {
if (streamToAdd->getLastPopOutputLoudness() == 0.0f || mixableStream.skippedStream) {
// call renderSilent to reduce artifacts
hrtf.renderSilent(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
mixableStream.hrtf->renderSilent(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

++stats.hrtfSilentRenders;
return;

@@ -365,19 +471,14 @@ void AudioMixerSlave::addStream(AudioMixerClientData& listenerNodeData, Node::Lo

if (throttle) {
// call renderSilent with actual frame data and a gain of 0.0f to reduce artifacts
hrtf.renderSilent(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, 0.0f,
AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
mixableStream.hrtf->renderSilent(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, 0.0f,
AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

++stats.hrtfThrottleRenders;
return;
}

if (streamToAdd.getType() == PositionalAudioStream::Injector) {
// apply per-avatar gain to positional audio injectors, which wouldn't otherwise be affected by PAL sliders
hrtf.setGainAdjustment(listenerNodeData.hrtfForStream(sourceNodeLocalID, QUuid()).getGainAdjustment());
}

hrtf.render(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
mixableStream.hrtf->render(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

++stats.hrtfRenders;
@ -489,8 +590,7 @@ void sendEnvironmentPacket(const SharedNodePointer& node, AudioMixerClientData&
|
|||
}
|
||||
}
|
||||
|
||||
float approximateGain(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
|
||||
const glm::vec3& relativePosition) {
|
||||
float approximateGain(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd) {
|
||||
float gain = 1.0f;
|
||||
|
||||
// injector: apply attenuation
|
||||
|
@ -501,13 +601,14 @@ float approximateGain(const AvatarAudioStream& listeningNodeStream, const Positi
|
|||
// avatar: skip attenuation - it is too costly to approximate
|
||||
|
||||
// distance attenuation: approximate, ignore zone-specific attenuations
|
||||
glm::vec3 relativePosition = streamToAdd.getPosition() - listeningNodeStream.getPosition();
|
||||
float distance = glm::length(relativePosition);
|
||||
return gain / distance;
|
||||
|
||||
// avatar: skip master gain - it is constant for all streams
|
||||
}
|
||||
|
||||
float computeGain(const AudioMixerClientData& listenerNodeData, const AvatarAudioStream& listeningNodeStream,
|
||||
float computeGain(float masterListenerGain, const AvatarAudioStream& listeningNodeStream,
|
||||
const PositionalAudioStream& streamToAdd, const glm::vec3& relativePosition, float distance, bool isEcho) {
|
||||
float gain = 1.0f;
|
||||
|
||||
|
@ -530,7 +631,7 @@ float computeGain(const AudioMixerClientData& listenerNodeData, const AvatarAudi
|
|||
gain *= offAxisCoefficient;
|
||||
|
||||
// apply master gain, only to avatars
|
||||
gain *= listenerNodeData.getMasterAvatarGain();
|
||||
gain *= masterListenerGain;
|
||||
}
|
||||
|
||||
auto& audioZones = AudioMixer::getAudioZones();
|
||||
|
|
|
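
With the new signatures, approximateGain() derives the relative position itself and computeGain() takes the listener's master avatar gain as a plain float instead of the whole AudioMixerClientData. A hypothetical call site (the real caller is not shown in this excerpt) could hoist the lookup once per listener:

    // assumed: listenerData is the listener's AudioMixerClientData, which exposes getMasterAvatarGain()
    // (that accessor appears on the old side of the hunk above)
    float masterListenerGain = listenerData.getMasterAvatarGain();
    float gain = computeGain(masterListenerGain, listeningNodeStream, streamToAdd,
                             relativePosition, distance, isEcho);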
@@ -12,23 +12,33 @@
#ifndef hifi_AudioMixerSlave_h
#define hifi_AudioMixerSlave_h
#include <tbb/concurrent_vector.h>
#include <AABox.h>
#include <AudioHRTF.h>
#include <AudioRingBuffer.h>
#include <ThreadedAssignment.h>
#include <UUIDHasher.h>
#include <NodeList.h>
#include <PositionalAudioStream.h>
#include "AudioMixerClientData.h"
#include "AudioMixerStats.h"
class PositionalAudioStream;
class AvatarAudioStream;
class AudioHRTF;
class AudioMixerClientData;
class AudioMixerSlave {
public:
using ConstIter = NodeList::const_iterator;
struct SharedData {
AudioMixerClientData::ConcurrentAddedStreams addedStreams;
std::vector<Node::LocalID> removedNodes;
std::vector<NodeIDStreamID> removedStreams;
};
AudioMixerSlave(SharedData& sharedData) : _sharedData(sharedData) {};
// process packets for a given node (requires no configuration)
void processPackets(const SharedNodePointer& node);

@@ -45,13 +55,8 @@ public:
private:
// create mix, returns true if mix has audio
bool prepareMix(const SharedNodePointer& listener);
void throttleStream(AudioMixerClientData& listenerData, Node::LocalID streamerID,
    const AvatarAudioStream& listenerStream, const PositionalAudioStream& streamer);
void mixStream(AudioMixerClientData& listenerData, Node::LocalID streamerID,
    const AvatarAudioStream& listenerStream, const PositionalAudioStream& streamer);
void addStream(AudioMixerClientData& listenerData, Node::LocalID streamerID,
    const AvatarAudioStream& listenerStream, const PositionalAudioStream& streamer,
    bool throttle);
void addStream(AudioMixerClientData::MixableStream& mixableStream, AvatarAudioStream& listeningNodeStream,
    float masterListenerGain, bool throttle);
// mixing buffers
float _mixSamples[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];

@@ -62,6 +67,8 @@ private:
ConstIter _end;
unsigned int _frame { 0 };
float _throttlingRatio { 0.0f };
SharedData& _sharedData;
};
#endif // hifi_AudioMixerSlave_h
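
SharedData gives the main assignment thread a place to stage added streams, removed nodes, and removed streams for the workers. How a worker drains those vectors is not part of this excerpt; a minimal sketch, assuming each AudioMixerClientData::MixableStream carries a NodeIDStreamID member named nodeStreamID, could look like this:

    #include <algorithm>
    #include <vector>

    // Sketch only: drop every staged removal from a listener's vector of mixable streams.
    // MixableStream::nodeStreamID is an assumption; the real member layout is not shown in this diff.
    void pruneRemovedStreams(std::vector<AudioMixerClientData::MixableStream>& mixableStreams,
                             const std::vector<NodeIDStreamID>& removedStreams) {
        for (const auto& removed : removedStreams) {
            mixableStreams.erase(std::remove_if(mixableStreams.begin(), mixableStreams.end(),
                                                [&](const AudioMixerClientData::MixableStream& stream) {
                                                    return stream.nodeStreamID == removed;
                                                }),
                                 mixableStreams.end());
        }
    }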
@@ -167,7 +167,7 @@ void AudioMixerSlavePool::resize(int numThreads) {
if (numThreads > _numThreads) {
// start new slaves
for (int i = 0; i < numThreads - _numThreads; ++i) {
auto slave = new AudioMixerSlaveThread(*this);
auto slave = new AudioMixerSlaveThread(*this, _workerSharedData);
slave->start();
_slaves.emplace_back(slave);
}
@@ -31,7 +31,8 @@ class AudioMixerSlaveThread : public QThread, public AudioMixerSlave {
using Lock = std::unique_lock<Mutex>;
public:
AudioMixerSlaveThread(AudioMixerSlavePool& pool) : _pool(pool) {}
AudioMixerSlaveThread(AudioMixerSlavePool& pool, AudioMixerSlave::SharedData& sharedData)
    : AudioMixerSlave(sharedData), _pool(pool) {}
void run() override final;

@@ -58,7 +59,8 @@ class AudioMixerSlavePool {
public:
using ConstIter = NodeList::const_iterator;
AudioMixerSlavePool(int numThreads = QThread::idealThreadCount()) { setNumThreads(numThreads); }
AudioMixerSlavePool(AudioMixerSlave::SharedData& sharedData, int numThreads = QThread::idealThreadCount())
    : _workerSharedData(sharedData) { setNumThreads(numThreads); }
~AudioMixerSlavePool() { resize(0); }
// process packets on slave threads

@@ -100,6 +102,8 @@ private:
float _throttlingRatio { 0.0f };
ConstIter _begin;
ConstIter _end;
AudioMixerSlave::SharedData& _workerSharedData;
};
#endif // hifi_AudioMixerSlavePool_h
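
The pool now forwards one SharedData reference to every AudioMixerSlaveThread it spawns in resize(). A hypothetical construction site (the owning object is not part of this excerpt) keeps the staging area alive for at least as long as the pool:

    // the shared staging area must outlive the pool, since slaves hold it by reference
    AudioMixerSlave::SharedData workerSharedData;
    AudioMixerSlavePool slavePool(workerSharedData, 2);   // explicit thread count; defaults to QThread::idealThreadCount()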
@@ -673,7 +673,13 @@ void AvatarMixer::handleNodeIgnoreRequestPacket(QSharedPointer<ReceivedMessage>
void AvatarMixer::handleRadiusIgnoreRequestPacket(QSharedPointer<ReceivedMessage> packet, SharedNodePointer sendingNode) {
auto start = usecTimestampNow();
sendingNode->parseIgnoreRadiusRequestMessage(packet);
bool enabled;
packet->readPrimitive(&enabled);
auto avatarData = getOrCreateClientData(sendingNode);
avatarData->setIsIgnoreRadiusEnabled(enabled);
auto end = usecTimestampNow();
_handleRadiusIgnoreRequestPacketElapsedTime += (end - start);
}
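
The handler above now reads a single bool from the packet and stores it on the mixer's client data instead of on the Node. A hypothetical sender sketch for that payload (the packet type name and the send call are assumptions; only readPrimitive()/writePrimitive() appear in this diff):

    // assumed packet type name and sendPacket() overload - neither is shown in this excerpt
    auto ignorePacket = NLPacket::create(PacketType::RadiusIgnoreRequest, sizeof(bool), true);
    bool enabled = true;                        // turn the ignore radius ("bubble") on
    ignorePacket->writePrimitive(enabled);
    nodeList->sendPacket(std::move(ignorePacket), *mixerNode);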
@@ -227,7 +227,7 @@ void AvatarMixerClientData::ignoreOther(const Node* self, const Node* other) {
addToRadiusIgnoringSet(other->getUUID());
auto killPacket = NLPacket::create(PacketType::KillAvatar, NUM_BYTES_RFC4122_UUID + sizeof(KillAvatarReason), true);
killPacket->write(other->getUUID().toRfc4122());
if (self->isIgnoreRadiusEnabled()) {
if (_isIgnoreRadiusEnabled) {
killPacket->writePrimitive(KillAvatarReason::TheirAvatarEnteredYourBubble);
} else {
killPacket->writePrimitive(KillAvatarReason::YourAvatarEnteredTheirBubble);
@@ -49,6 +49,9 @@ public:
const AvatarData* getConstAvatarData() const { return _avatar.get(); }
AvatarSharedPointer getAvatarSharedPointer() const { return _avatar; }
bool isIgnoreRadiusEnabled() const { return _isIgnoreRadiusEnabled; }
void setIsIgnoreRadiusEnabled(bool enabled) { _isIgnoreRadiusEnabled = enabled; }
uint16_t getLastBroadcastSequenceNumber(const QUuid& nodeUUID) const;
void setLastBroadcastSequenceNumber(const QUuid& nodeUUID, uint16_t sequenceNumber)
    { _lastBroadcastSequenceNumbers[nodeUUID] = sequenceNumber; }

@@ -180,6 +183,8 @@ private:
std::unordered_map<Node::LocalID, TraitsCheckTimestamp> _lastSentTraitsTimestamps;
std::unordered_map<Node::LocalID, AvatarTraits::TraitVersions> _sentTraitVersions;
std::atomic_bool _isIgnoreRadiusEnabled { false };
};
#endif // hifi_AvatarMixerClientData_h
@@ -345,7 +345,7 @@ void AvatarMixerSlave::broadcastAvatarDataToAgent(const SharedNodePointer& node)
} else {
// Check to see if the space bubble is enabled
// Don't bother with these checks if the other avatar has their bubble enabled and we're gettingAnyIgnored
if (destinationNode->isIgnoreRadiusEnabled() || (avatarNode->isIgnoreRadiusEnabled() && !getsAnyIgnored)) {
if (nodeData->isIgnoreRadiusEnabled() || (avatarClientNodeData->isIgnoreRadiusEnabled() && !getsAnyIgnored)) {
// Perform the collision check between the two bounding boxes
const float OTHER_AVATAR_BUBBLE_EXPANSION_FACTOR = 2.4f; // magic number determined empirically
AABox otherNodeBox = computeBubbleBox(avatarClientNodeData->getAvatar(), OTHER_AVATAR_BUBBLE_EXPANSION_FACTOR);
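
The bubble test above expands the other avatar's bounding box by OTHER_AVATAR_BUBBLE_EXPANSION_FACTOR and checks it against the listener's box. The AABox class does the real work; the underlying axis-aligned overlap test amounts to the following plain-glm sketch (for illustration only, not the AABox implementation):

    // true when two axis-aligned boxes, each given by a minimum corner and a scale, overlap on every axis
    bool boxesOverlap(const glm::vec3& cornerA, const glm::vec3& scaleA,
                      const glm::vec3& cornerB, const glm::vec3& scaleB) {
        glm::vec3 maxA = cornerA + scaleA;
        glm::vec3 maxB = cornerB + scaleB;
        return cornerA.x <= maxB.x && maxA.x >= cornerB.x &&
               cornerA.y <= maxB.y && maxA.y >= cornerB.y &&
               cornerA.z <= maxB.z && maxA.z >= cornerB.z;
    }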
@@ -92,6 +92,11 @@ int PositionalAudioStream::parsePositionalData(const QByteArray& positionalByteA
packetStream.readRawData(reinterpret_cast<char*>(&_avatarBoundingBoxCorner), sizeof(_avatarBoundingBoxCorner));
packetStream.readRawData(reinterpret_cast<char*>(&_avatarBoundingBoxScale), sizeof(_avatarBoundingBoxScale));
if (_avatarBoundingBoxCorner != _ignoreBox.getCorner()) {
// if the ignore box corner changes, we need to re-calculate the ignore box
calculateIgnoreBox();
}
// if this node sent us a NaN for first float in orientation then don't consider this good audio and bail
if (glm::isnan(_orientation.x)) {
// NOTE: why would we reset the ring buffer here?

@@ -107,3 +112,29 @@ AudioStreamStats PositionalAudioStream::getAudioStreamStats() const {
streamStats._streamType = _type;
return streamStats;
}
void PositionalAudioStream::calculateIgnoreBox() {
if (_avatarBoundingBoxScale != glm::vec3(0)) {
auto scale = _avatarBoundingBoxScale;
// enforce a minimum scale
static const glm::vec3 MIN_IGNORE_BOX_SCALE = glm::vec3(0.3f, 1.3f, 0.3f);
if (glm::any(glm::lessThan(scale, MIN_IGNORE_BOX_SCALE))) {
scale = MIN_IGNORE_BOX_SCALE;
}
// (this is arbitrary number determined empirically for comfort)
const float IGNORE_BOX_SCALE_FACTOR = 2.4f;
scale *= IGNORE_BOX_SCALE_FACTOR;
// create the box (we use a box for the zone for convenience)
_ignoreBox.setBox(_avatarBoundingBoxCorner, scale);
}
}
void PositionalAudioStream::enableIgnoreBox() {
// re-calculate the ignore box using the latest values
calculateIgnoreBox();
_isIgnoreBoxEnabled = true;
}
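
calculateIgnoreBox() clamps the avatar's bounding-box scale to a minimum, expands it by IGNORE_BOX_SCALE_FACTOR, and rebuilds the box. How a mixer worker consults the box is not shown in this excerpt; a minimal sketch, assuming AABox exposes a contains(point) test, might be:

    // Sketch only: skip a source whose position falls inside the listener's enabled ignore box.
    // AABox::contains(point) is assumed here; the consuming mixer code is not part of this diff.
    bool shouldSkipForIgnoreBox(const PositionalAudioStream& listenerStream, const glm::vec3& sourcePosition) {
        return listenerStream.isIgnoreBoxEnabled() && listenerStream.getIgnoreBox().contains(sourcePosition);
    }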
@@ -19,6 +19,21 @@
const int AUDIOMIXER_INBOUND_RING_BUFFER_FRAME_CAPACITY = 100;
using StreamID = QUuid;
struct NodeIDStreamID {
QUuid nodeID;
Node::LocalID nodeLocalID;
StreamID streamID;
NodeIDStreamID(QUuid nodeID, Node::LocalID nodeLocalID, StreamID streamID)
    : nodeID(nodeID), nodeLocalID(nodeLocalID), streamID(streamID) {};
bool operator==(const NodeIDStreamID& other) const {
return (nodeLocalID == other.nodeLocalID || nodeID == other.nodeID) && streamID == other.streamID;
}
};
class PositionalAudioStream : public InboundAudioStream {
Q_OBJECT
public:

@@ -30,7 +45,7 @@ public:
PositionalAudioStream(PositionalAudioStream::Type type, bool isStereo, int numStaticJitterFrames = -1);
const QUuid DEFAULT_STREAM_IDENTIFIER = QUuid();
virtual const QUuid& getStreamIdentifier() const { return DEFAULT_STREAM_IDENTIFIER; }
virtual const StreamID& getStreamIdentifier() const { return DEFAULT_STREAM_IDENTIFIER; }
virtual void resetStats() override;

@@ -53,6 +68,16 @@ public:
bool hasValidPosition() const { return _hasValidPosition; }
using IgnoreBox = AABox;
// called from single AudioMixerSlave while processing packets for node
void enableIgnoreBox();
void disableIgnoreBox() { _isIgnoreBoxEnabled = false; }
// thread-safe, called from AudioMixerSlave(s) while preparing mixes
bool isIgnoreBoxEnabled() const { return _isIgnoreBoxEnabled; }
const IgnoreBox& getIgnoreBox() const { return _ignoreBox; }
protected:
// disallow copying of PositionalAudioStream objects
PositionalAudioStream(const PositionalAudioStream&);

@@ -61,6 +86,8 @@ protected:
int parsePositionalData(const QByteArray& positionalByteArray);
protected:
void calculateIgnoreBox();
Type _type;
glm::vec3 _position;
glm::quat _orientation;

@@ -80,6 +107,9 @@ protected:
int _frameCounter;
bool _hasValidPosition { false };
bool _isIgnoreBoxEnabled { false };
IgnoreBox _ignoreBox;
};
#endif // hifi_PositionalAudioStream_h
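
NodeIDStreamID identifies a stream by either the node's UUID or its LocalID, plus the injector stream ID (a null QUuid for an avatar's microphone stream). A small worked example of the operator== semantics above (all values are illustrative):

    QUuid nodeUUID = QUuid::createUuid();
    Node::LocalID localID = 42;                  // hypothetical local ID
    QUuid injectorStream = QUuid::createUuid();

    NodeIDStreamID byUUID { nodeUUID, Node::LocalID(), injectorStream };   // local ID unknown here
    NodeIDStreamID byLocal { QUuid(), localID, injectorStream };           // UUID unknown here
    NodeIDStreamID full { nodeUUID, localID, injectorStream };

    bool a = (full == byUUID);    // true  - UUIDs and stream IDs match
    bool b = (full == byLocal);   // true  - local IDs and stream IDs match
    bool c = (byUUID == byLocal); // false - neither node identifier matches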
@@ -96,7 +96,6 @@ Node::Node(const QUuid& uuid, NodeType_t type, const HifiSockAddr& publicSocket,
{
// Update socket's object name
setType(_type);
_ignoreRadiusEnabled = false;
}
void Node::setType(char type) {

@@ -114,9 +113,12 @@ void Node::updateClockSkewUsec(qint64 clockSkewSample) {
_clockSkewUsec = (quint64)_clockSkewMovingPercentile.getValueAtPercentile();
}
void Node::parseIgnoreRequestMessage(QSharedPointer<ReceivedMessage> message) {
Node::NodesIgnoredPair Node::parseIgnoreRequestMessage(QSharedPointer<ReceivedMessage> message) {
bool addToIgnore;
message->readPrimitive(&addToIgnore);
std::vector<QUuid> nodesIgnored;
while (message->getBytesLeftToRead()) {
// parse out the UUID being ignored from the packet
QUuid ignoredUUID = QUuid::fromRfc4122(message->readWithoutCopy(NUM_BYTES_RFC4122_UUID));

@@ -126,7 +128,11 @@ void Node::parseIgnoreRequestMessage(QSharedPointer<ReceivedMessage> message) {
} else {
removeIgnoredNode(ignoredUUID);
}
nodesIgnored.push_back(ignoredUUID);
}
return { nodesIgnored, addToIgnore };
}
void Node::addIgnoredNode(const QUuid& otherNodeID) {

@@ -167,12 +173,6 @@ bool Node::isIgnoringNodeWithID(const QUuid& nodeID) const {
return std::find(_ignoredNodeIDs.begin(), _ignoredNodeIDs.end(), nodeID) != _ignoredNodeIDs.end();
}
void Node::parseIgnoreRadiusRequestMessage(QSharedPointer<ReceivedMessage> message) {
bool enabled;
message->readPrimitive(&enabled);
_ignoreRadiusEnabled = enabled;
}
QDataStream& operator<<(QDataStream& out, const Node& node) {
out << node._type;
out << node._uuid;
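
parseIgnoreRequestMessage() now hands back the UUIDs it processed together with the add/remove flag, so a caller can react to the same set without re-parsing the packet. A hypothetical caller (not part of this excerpt):

    #include <tuple>

    std::vector<QUuid> ignoredIDs;
    bool added;
    std::tie(ignoredIDs, added) = sendingNode->parseIgnoreRequestMessage(message);

    for (const QUuid& ignoredID : ignoredIDs) {
        qDebug() << (added ? "now ignoring" : "no longer ignoring") << ignoredID;
    }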
@@ -81,17 +81,19 @@ public:
bool getCanKick() const { return _permissions.can(NodePermissions::Permission::canKick); }
bool getCanReplaceContent() const { return _permissions.can(NodePermissions::Permission::canReplaceDomainContent); }
void parseIgnoreRequestMessage(QSharedPointer<ReceivedMessage> message);
using NodesIgnoredPair = std::pair<std::vector<QUuid>, bool>;
NodesIgnoredPair parseIgnoreRequestMessage(QSharedPointer<ReceivedMessage> message);
void addIgnoredNode(const QUuid& otherNodeID);
void removeIgnoredNode(const QUuid& otherNodeID);
bool isIgnoringNodeWithID(const QUuid& nodeID) const;
void parseIgnoreRadiusRequestMessage(QSharedPointer<ReceivedMessage> message);
using IgnoredNodeIDs = std::vector<QUuid>;
const IgnoredNodeIDs& getIgnoredNodeIDs() const { return _ignoredNodeIDs; }
friend QDataStream& operator<<(QDataStream& out, const Node& node);
friend QDataStream& operator>>(QDataStream& in, Node& node);
bool isIgnoreRadiusEnabled() const { return _ignoreRadiusEnabled; }
private:
// privatize copy and assignment operator to disallow Node copying
Node(const Node &otherNode);

@@ -109,11 +111,10 @@ private:
MovingPercentile _clockSkewMovingPercentile;
NodePermissions _permissions;
bool _isUpstream { false };
std::vector<QUuid> _ignoredNodeIDs;
IgnoredNodeIDs _ignoredNodeIDs;
mutable QReadWriteLock _ignoredNodeIDSetLock;
std::vector<QString> _replicatedUsernames { };
std::atomic_bool _ignoreRadiusEnabled;
};
Q_DECLARE_METATYPE(Node*)