Mirror of https://github.com/overte-org/overte.git

Commit 585c278400: streamline mix operation
Parent: ac6dd57a30
11 changed files with 409 additions and 276 deletions
AudioMixer.cpp

@@ -301,14 +301,24 @@ void AudioMixer::sendStatsPacket() {
     QJsonObject mixStats;
 
     mixStats["%_hrtf_mixes"] = percentageForMixStats(_stats.hrtfRenders);
-    mixStats["%_hrtf_silent_mixes"] = percentageForMixStats(_stats.hrtfSilentRenders);
-    mixStats["%_hrtf_throttle_mixes"] = percentageForMixStats(_stats.hrtfThrottleRenders);
-    mixStats["%_skipped_throttle_mixes"] = percentageForMixStats(_stats.skippedThrottle);
-    mixStats["%_skipped_silent_mixes"] = percentageForMixStats(_stats.skippedSilent);
-    mixStats["%_skipped_other_mixes"] = percentageForMixStats(_stats.skippedOther);
     mixStats["%_manual_stereo_mixes"] = percentageForMixStats(_stats.manualStereoMixes);
     mixStats["%_manual_echo_mixes"] = percentageForMixStats(_stats.manualEchoMixes);
 
+    mixStats["1_hrtf_renders"] = (int)(_stats.hrtfRenders / (float)_numStatFrames);
+    mixStats["1_hrtf_resets"] = (int)(_stats.hrtfResets / (float)_numStatFrames);
+    mixStats["1_hrtf_updates"] = (int)(_stats.hrtfUpdates / (float)_numStatFrames);
+
+    mixStats["2_skipped_streams"] = (int)(_stats.skipped / (float)_numStatFrames);
+    mixStats["2_inactive_streams"] = (int)(_stats.inactive / (float)_numStatFrames);
+    mixStats["2_active_streams"] = (int)(_stats.active / (float)_numStatFrames);
+
+    mixStats["3_skippped_to_active"] = (int)(_stats.skippedToActive / (float)_numStatFrames);
+    mixStats["3_skippped_to_inactive"] = (int)(_stats.skippedToInactive / (float)_numStatFrames);
+    mixStats["3_inactive_to_skippped"] = (int)(_stats.inactiveToSkipped / (float)_numStatFrames);
+    mixStats["3_inactive_to_active"] = (int)(_stats.inactiveToActive / (float)_numStatFrames);
+    mixStats["3_active_to_skippped"] = (int)(_stats.activeToSkipped / (float)_numStatFrames);
+    mixStats["3_active_to_inactive"] = (int)(_stats.activeToInactive / (float)_numStatFrames);
+
     mixStats["total_mixes"] = _stats.totalMixes;
     mixStats["avg_mixes_per_block"] = _stats.totalMixes / _numStatFrames;
 
@@ -424,12 +434,11 @@ void AudioMixer::start() {
             QCoreApplication::processEvents();
         }
 
+        int numToRetain = nodeList->size() * (1 - _throttlingRatio);
         nodeList->nestedEach([&](NodeList::const_iterator cbegin, NodeList::const_iterator cend) {
             // mix across slave threads
-            {
-                auto mixTimer = _mixTiming.timer();
-                _slavePool.mix(cbegin, cend, frame, _throttlingRatio);
-            }
+            auto mixTimer = _mixTiming.timer();
+            _slavePool.mix(cbegin, cend, frame, numToRetain);
         });
 
         // gather stats
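A note on the throttling parameter change above: AudioMixer::start() now converts the configured throttling ratio into an absolute number of streams to keep, and that count is what the slave pool and the slaves receive. The snippet below is only a worked example of the arithmetic with made-up numbers; it is not code from the commit.

    #include <cassert>

    int main() {
        // stand-ins for nodeList->size() and _throttlingRatio
        int connectedNodes = 200;
        float throttlingRatio = 0.25f;

        // the expression added to AudioMixer::start()
        int numToRetain = connectedNodes * (1 - throttlingRatio);

        // each slave keeps at most this many active streams per listener when throttling
        assert(numToRetain == 150);
        return 0;
    }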
AudioMixerClientData.cpp

@@ -203,11 +203,11 @@ void AudioMixerClientData::parsePerAvatarGainSet(ReceivedMessage& message, const
 }
 
 void AudioMixerClientData::setGainForAvatar(QUuid nodeID, uint8_t gain) {
-    auto it = std::find_if(_mixableStreams.cbegin(), _mixableStreams.cend(), [nodeID](const MixableStream& mixableStream){
+    auto it = std::find_if(_streams.active.cbegin(), _streams.active.cend(), [nodeID](const MixableStream& mixableStream){
         return mixableStream.nodeStreamID.nodeID == nodeID && mixableStream.nodeStreamID.streamID.isNull();
     });
 
-    if (it != _mixableStreams.cend()) {
+    if (it != _streams.active.cend()) {
         it->hrtf->setGainAdjustment(gain);
     }
 }
AudioMixerClientData.h

@@ -105,7 +105,7 @@ public:
     bool shouldMuteClient() { return _shouldMuteClient; }
     void setShouldMuteClient(bool shouldMuteClient) { _shouldMuteClient = shouldMuteClient; }
     glm::vec3 getPosition() { return getAvatarAudioStream() ? getAvatarAudioStream()->getPosition() : glm::vec3(0); }
-    bool getRequestsDomainListData() { return _requestsDomainListData; }
+    bool getRequestsDomainListData() const { return _requestsDomainListData; }
     void setRequestsDomainListData(bool requesting) { _requestsDomainListData = requesting; }
 
     void setupCodecForReplicatedAgent(QSharedPointer<ReceivedMessage> message);
@@ -117,8 +117,6 @@ public:
         PositionalAudioStream* positionalStream;
         bool ignoredByListener { false };
         bool ignoringListener { false };
-        bool completedSilentRender { false };
-        bool skippedStream { false };
 
         MixableStream(NodeIDStreamID nodeIDStreamID, PositionalAudioStream* positionalStream) :
             nodeStreamID(nodeIDStreamID), hrtf(new AudioHRTF), positionalStream(positionalStream) {};
@@ -127,8 +125,13 @@ public:
     };
 
     using MixableStreamsVector = std::vector<MixableStream>;
+    struct Streams {
+        MixableStreamsVector active;
+        MixableStreamsVector inactive;
+        MixableStreamsVector skipped;
+    };
 
-    MixableStreamsVector& getMixableStreams() { return _mixableStreams; }
+    Streams& getStreams() { return _streams; }
 
     // thread-safe, called from AudioMixerSlave(s) while processing ignore packets for other nodes
     void ignoredByNode(QUuid nodeID);
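A note on the data-structure change above: the single _mixableStreams vector becomes three buckets, and a stream is moved between them rather than recreated, so its AudioHRTF (and the gain adjustment stored on it) survives reclassification. The sketch below only illustrates the move-between-buckets pattern; Stream is a hypothetical stand-in for MixableStream and is not part of the commit.

    #include <cstddef>
    #include <utility>
    #include <vector>

    struct Stream { int id; };   // stand-in for AudioMixerClientData::MixableStream

    struct Streams {
        std::vector<Stream> active;    // rendered into the mix every frame
        std::vector<Stream> inactive;  // known to the listener, currently silent
        std::vector<Stream> skipped;   // ignored by / ignoring the listener, not rendered
    };

    // Demoting a stream moves it, so per-stream state travels with it instead of being rebuilt.
    void demoteToInactive(Streams& streams, std::size_t activeIndex) {
        streams.inactive.push_back(std::move(streams.active[activeIndex]));
        streams.active.erase(streams.active.begin() + activeIndex);
    }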
@@ -173,7 +176,7 @@ private:
 
     bool containsValidPosition(ReceivedMessage& message) const;
 
-    MixableStreamsVector _mixableStreams;
+    Streams _streams;
 
     quint16 _outgoingMixedAudioSequenceNumber;
 
AudioMixerSlave.cpp

@@ -36,7 +36,10 @@
 #include "InjectedAudioStream.h"
 #include "AudioHelpers.h"
 
+using namespace std;
 using AudioStreamVector = AudioMixerClientData::AudioStreamVector;
+using MixableStream = AudioMixerClientData::MixableStream;
+using MixableStreamsVector = AudioMixerClientData::MixableStreamsVector;
 
 // packet helpers
 std::unique_ptr<NLPacket> createAudioPacket(PacketType type, int size, quint16 sequence, QString codec);
@@ -60,11 +63,11 @@ void AudioMixerSlave::processPackets(const SharedNodePointer& node) {
     }
 }
 
-void AudioMixerSlave::configureMix(ConstIter begin, ConstIter end, unsigned int frame, float throttlingRatio) {
+void AudioMixerSlave::configureMix(ConstIter begin, ConstIter end, unsigned int frame, int numToRetain) {
     _begin = begin;
     _end = end;
     _frame = frame;
-    _throttlingRatio = throttlingRatio;
+    _numToRetain = numToRetain;
 }
 
 void AudioMixerSlave::mix(const SharedNodePointer& node) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
template<typename V>
|
|
||||||
bool containsNodeID(const V& vector, QUuid nodeID) {
|
template <class Container, class Predicate>
|
||||||
return std::any_of(std::begin(vector), std::end(vector), [&nodeID](const QUuid& vectorID){
|
void erase_if(Container& cont, Predicate&& pred) {
|
||||||
return vectorID == nodeID;
|
auto it = remove_if(begin(cont), end(cont), std::forward<Predicate>(pred));
|
||||||
|
cont.erase(it, end(cont));
|
||||||
|
}
|
||||||
|
|
||||||
|
template <class Container>
|
||||||
|
bool contains(const Container& cont, typename Container::value_type value) {
|
||||||
|
return std::any_of(begin(cont), end(cont), [&value](const auto& element) {
|
||||||
|
return value == element;
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// This class lets you do an erase if in several segments
|
||||||
|
// that use different predicates
|
||||||
|
template <class Container>
|
||||||
|
class SegmentedEraseIf {
|
||||||
|
public:
|
||||||
|
using iterator = typename Container::iterator;
|
||||||
|
|
||||||
|
SegmentedEraseIf(Container& cont) : _cont(cont) {
|
||||||
|
_first = begin(_cont);
|
||||||
|
_it = _first;
|
||||||
|
}
|
||||||
|
~SegmentedEraseIf() {
|
||||||
|
assert(_it == end(_cont));
|
||||||
|
_cont.erase(_first, _it);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <class Predicate>
|
||||||
|
void iterateTo(iterator last, Predicate pred) {
|
||||||
|
while (_it != last) {
|
||||||
|
if (!pred(*_it)) {
|
||||||
|
if (_first != _it) {
|
||||||
|
*_first = move(*_it);
|
||||||
|
}
|
||||||
|
++_first;
|
||||||
|
}
|
||||||
|
++_it;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
iterator _first;
|
||||||
|
iterator _it;
|
||||||
|
Container& _cont;
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
void AudioMixerSlave::addStreams(Node& listener, AudioMixerClientData& listenerData) {
|
void AudioMixerSlave::addStreams(Node& listener, AudioMixerClientData& listenerData) {
|
||||||
auto& ignoredNodeIDs = listener.getIgnoredNodeIDs();
|
auto& ignoredNodeIDs = listener.getIgnoredNodeIDs();
|
||||||
auto& ignoringNodeIDs = listenerData.getIgnoringNodeIDs();
|
auto& ignoringNodeIDs = listenerData.getIgnoringNodeIDs();
|
||||||
|
|
||||||
auto& mixableStreams = listenerData.getMixableStreams();
|
auto& streams = listenerData.getStreams();
|
||||||
|
|
||||||
// add data for newly created streams to our vector
|
// add data for newly created streams to our vector
|
||||||
if (!listenerData.getHasReceivedFirstMix()) {
|
if (!listenerData.getHasReceivedFirstMix()) {
|
||||||
|
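The helpers above replace containsNodeID with a generic contains, add a free-standing erase_if (remove_if plus erase), and introduce SegmentedEraseIf, which compacts a vector in a single pass across consecutive segments that each use their own predicate, erasing the leftover tail in its destructor; prepareMix() uses it for the retained and throttled portions of streams.active. A minimal standalone usage sketch, assuming the templates above are in scope:

    #include <cassert>
    #include <vector>

    int main() {
        std::vector<int> values { 1, 2, 3, 4, 5, 6, 7, 8 };
        {
            SegmentedEraseIf<std::vector<int>> erase(values);

            // first segment (the first four elements): drop even values
            erase.iterateTo(values.begin() + 4, [](int x) { return x % 2 == 0; });

            // remaining segment: drop values greater than 6
            erase.iterateTo(values.end(), [](int x) { return x > 6; });

            // survivors were compacted to the front; the tail is erased in the destructor
        }
        assert((values == std::vector<int>{ 1, 3, 5, 6 }));
        return 0;
    }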
@@ -145,12 +191,20 @@ void AudioMixerSlave::addStreams(Node& listener, AudioMixerClientData& listenerData) {
             AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
             if (nodeData) {
                 for (auto& stream : nodeData->getAudioStreams()) {
-                    mixableStreams.emplace_back(node->getUUID(), node->getLocalID(),
-                                                stream->getStreamIdentifier(), &(*stream));
+                    bool ignoredByListener = contains(ignoredNodeIDs, node->getUUID());
+                    bool ignoringListener = contains(ignoringNodeIDs, node->getUUID());
 
-                    // pre-populate ignored and ignoring flags for this stream
-                    mixableStreams.back().ignoredByListener = containsNodeID(ignoredNodeIDs, node->getUUID());
-                    mixableStreams.back().ignoringListener = containsNodeID(ignoringNodeIDs, node->getUUID());
+                    if (ignoredByListener || ignoringListener) {
+                        streams.skipped.emplace_back(node->getUUID(), node->getLocalID(),
+                                                     stream->getStreamIdentifier(), stream.get());
+
+                        // pre-populate ignored and ignoring flags for this stream
+                        streams.skipped.back().ignoredByListener = ignoredByListener;
+                        streams.skipped.back().ignoringListener = ignoringListener;
+                    } else {
+                        streams.active.emplace_back(node->getUUID(), node->getLocalID(),
+                                                    stream->getStreamIdentifier(), stream.get());
+                    }
                 }
             }
         });
@@ -159,49 +213,94 @@ void AudioMixerSlave::addStreams(Node& listener, AudioMixerClientData& listenerData) {
         listenerData.setHasReceivedFirstMix(true);
     } else {
         for (const auto& newStream : _sharedData.addedStreams) {
-            mixableStreams.emplace_back(newStream.nodeIDStreamID, newStream.positionalStream);
+            bool ignoredByListener = contains(ignoredNodeIDs, newStream.nodeIDStreamID.nodeID);
+            bool ignoringListener = contains(ignoringNodeIDs, newStream.nodeIDStreamID.nodeID);
 
-            // pre-populate ignored and ignoring flags for this stream
-            mixableStreams.back().ignoredByListener = containsNodeID(ignoredNodeIDs, newStream.nodeIDStreamID.nodeID);
-            mixableStreams.back().ignoringListener = containsNodeID(ignoringNodeIDs, newStream.nodeIDStreamID.nodeID);
+            if (ignoredByListener || ignoringListener) {
+                streams.skipped.emplace_back(newStream.nodeIDStreamID, newStream.positionalStream);
+                // pre-populate ignored and ignoring flags for this stream
+                streams.skipped.back().ignoredByListener = ignoredByListener;
+                streams.skipped.back().ignoringListener = ignoringListener;
+            } else {
+                streams.active.emplace_back(newStream.nodeIDStreamID, newStream.positionalStream);
+            }
         }
     }
 }
 
-void AudioMixerSlave::removeStreams(AudioMixerClientData::MixableStreamsVector& mixableStreams) {
-    if (_sharedData.removedNodes.size() > 0) {
-        // enumerate the available streams
-        auto it = mixableStreams.begin();
-        auto end = mixableStreams.end();
-
-        while (it != end) {
-            // check if this node (and therefore all of the node's streams) has been removed
-            auto& nodeIDStreamID = it->nodeStreamID;
-            auto matchedRemovedNode = std::find(_sharedData.removedNodes.cbegin(), _sharedData.removedNodes.cend(),
-                                                nodeIDStreamID.nodeLocalID);
-            bool streamRemoved = matchedRemovedNode != _sharedData.removedNodes.cend();
-
-            // if the node wasn't removed, check if this stream was specifically removed
-            if (!streamRemoved) {
-                auto matchedRemovedStream = std::find(_sharedData.removedStreams.cbegin(), _sharedData.removedStreams.cend(),
-                                                      nodeIDStreamID);
-                streamRemoved = matchedRemovedStream != _sharedData.removedStreams.cend();
-            }
-
-            if (streamRemoved) {
-                // this stream was removed, so swap it with the last item and decrease the end iterator
-                --end;
-                std::swap(*it, *end);
-
-                // process the it element (which is now the element that was the last item before the swap)
-                continue;
-            }
-        }
-
-        // erase any removed streams that were swapped to the end
-        mixableStreams.erase(end, mixableStreams.end());
+bool shouldBeRemoved(const MixableStream& stream, const AudioMixerSlave::SharedData& sharedData) {
+    return (contains(sharedData.removedNodes, stream.nodeStreamID.nodeLocalID) ||
+            contains(sharedData.removedStreams, stream.nodeStreamID));
+};
+
+bool shouldBeInactive(MixableStream& stream) {
+    return (!stream.positionalStream->lastPopSucceeded() ||
+            stream.positionalStream->getLastPopOutputLoudness() == 0.0f);
+};
+
+bool shouldBeSkipped(MixableStream& stream, const Node& listener,
+                     const AvatarAudioStream& listenerAudioStream,
+                     const AudioMixerClientData& listenerData) {
+
+    if (stream.nodeStreamID.nodeLocalID == listener.getLocalID()) {
+        return !stream.positionalStream->shouldLoopbackForNode();
     }
-}
+
+    // grab the unprocessed ignores and unignores from and for this listener
+    const auto& nodesIgnoredByListener = listenerData.getNewIgnoredNodeIDs();
+    const auto& nodesUnignoredByListener = listenerData.getNewUnignoredNodeIDs();
+    const auto& nodesIgnoringListener = listenerData.getNewIgnoringNodeIDs();
+    const auto& nodesUnignoringListener = listenerData.getNewUnignoringNodeIDs();
+
+    // this stream was previously not ignored by the listener and we have some newly ignored streams
+    // check now if it is one of the ignored streams and flag it as such
+    if (stream.ignoredByListener) {
+        stream.ignoredByListener = !contains(nodesUnignoredByListener, stream.nodeStreamID.nodeID);
+    } else {
+        stream.ignoredByListener = contains(nodesIgnoredByListener, stream.nodeStreamID.nodeID);
+    }
+
+    if (stream.ignoringListener) {
+        stream.ignoringListener = !contains(nodesUnignoringListener, stream.nodeStreamID.nodeID);
+    } else {
+        stream.ignoringListener = contains(nodesIgnoringListener, stream.nodeStreamID.nodeID);
+    }
+
+    bool listenerIsAdmin = listenerData.getRequestsDomainListData() && listener.getCanKick();
+    if (stream.ignoredByListener || (stream.ignoringListener && !listenerIsAdmin)) {
+        return true;
+    }
+
+    bool shouldCheckIgnoreBox = (listenerAudioStream.isIgnoreBoxEnabled() ||
+                                 stream.positionalStream->isIgnoreBoxEnabled());
+    if (shouldCheckIgnoreBox &&
+        listenerAudioStream.getIgnoreBox().touches(stream.positionalStream->getIgnoreBox())) {
+        return true;
+    }
+
+    return false;
+};
+
+float approximateVolume(const MixableStream& stream, const AvatarAudioStream* listenerAudioStream) {
+    if (stream.positionalStream->getLastPopOutputTrailingLoudness() == 0.0f) {
+        return 0.0f;
+    }
+
+    if (stream.positionalStream == listenerAudioStream) {
+        return 1.0f;
+    }
+
+    // approximate the gain
+    float gain = approximateGain(*listenerAudioStream, *(stream.positionalStream));
+
+    // for avatar streams, modify by the set gain adjustment
+    if (stream.nodeStreamID.streamID.isNull()) {
+        gain *= stream.hrtf->getGainAdjustment();
+    }
+
+    return stream.positionalStream->getLastPopOutputTrailingLoudness() * gain;
+};
 
 bool AudioMixerSlave::prepareMix(const SharedNodePointer& listener) {
     AvatarAudioStream* listenerAudioStream = static_cast<AudioMixerClientData*>(listener->getLinkedData())->getAvatarAudioStream();
@@ -210,128 +309,154 @@ bool AudioMixerSlave::prepareMix(const SharedNodePointer& listener) {
     // zero out the mix for this listener
     memset(_mixSamples, 0, sizeof(_mixSamples));
 
-    bool isThrottling = _throttlingRatio > 0.0f;
+    bool isThrottling = _numToRetain != -1;
 
-    auto nodeList = DependencyManager::get<NodeList>();
+    auto& streams = listenerData->getStreams();
 
-    auto& mixableStreams = listenerData->getMixableStreams();
-
-    // grab the unprocessed ignores and unignores from and for this listener
-    const auto& nodesIgnoredByListener = listenerData->getNewIgnoredNodeIDs();
-    const auto& nodesUnignoredByListener = listenerData->getNewUnignoredNodeIDs();
-    const auto& nodesIgnoringListener = listenerData->getNewIgnoringNodeIDs();
-    const auto& nodesUnignoringListener = listenerData->getNewUnignoringNodeIDs();
-
-    removeStreams(mixableStreams);
     addStreams(*listener, *listenerData);
 
-    // enumerate the available streams
-    auto it = mixableStreams.begin();
-    while (it != mixableStreams.end()) {
-
-        auto& nodeIDStreamID = it->nodeStreamID;
-        if (it->nodeStreamID.nodeLocalID == listener->getLocalID()) {
-            // streams from this node should be skipped unless loopback is specifically requested
-            if (it->positionalStream->shouldLoopbackForNode()) {
-                it->skippedStream = false;
+    // Process skipped streams
+    erase_if(streams.skipped, [&](MixableStream& stream) {
+        if (shouldBeRemoved(stream, _sharedData)) {
+            return true;
+        }
+
+        if (!shouldBeSkipped(stream, *listener, *listenerAudioStream, *listenerData)) {
+            if (shouldBeInactive(stream)) {
+                streams.inactive.push_back(move(stream));
+                ++stats.skippedToInactive;
             } else {
-                it->approximateVolume = 0.0f;
-                it->skippedStream = true;
-                it->completedSilentRender = true;
-
-                // if we know we're skipping this stream, no more processing is required
-                // since we don't do silent HRTF renders for echo streams
-                ++it;
-                continue;
-            }
-        } else {
-            if (it->ignoredByListener && nodesUnignoredByListener.size() > 0) {
-                // this stream was previously ignored by the listener and we have some unignored streams
-                // check now if it is one of the unignored streams and flag it as such
-                it->ignoredByListener = !containsNodeID(nodesUnignoredByListener, nodeIDStreamID.nodeID);
-
-            } else if (!it->ignoredByListener && nodesIgnoredByListener.size() > 0) {
-                // this stream was previously not ignored by the listener and we have some newly ignored streams
-                // check now if it is one of the ignored streams and flag it as such
-                it->ignoredByListener = containsNodeID(nodesIgnoredByListener, nodeIDStreamID.nodeID);
-            }
-
-            if (it->ignoringListener && nodesUnignoringListener.size() > 0) {
-                // this stream was previously ignoring the listener and we have some new un-ignoring nodes
-                // check now if it is one of the unignoring streams and flag it as such
-                it->ignoringListener = !containsNodeID(nodesUnignoringListener, nodeIDStreamID.nodeID);
-            } else if (!it->ignoringListener && nodesIgnoringListener.size() > 0) {
-                it->ignoringListener = containsNodeID(nodesIgnoringListener, nodeIDStreamID.nodeID);
-            }
-
-            if (it->ignoredByListener
-                || (it->ignoringListener && !(listenerData->getRequestsDomainListData() && listener->getCanKick()))) {
-                // this is a stream ignoring by the listener
-                // or ignoring the listener (and the listener is not an admin asking for (the poorly named) "domain list" data)
-                // mark it skipped and move on
-                it->skippedStream = true;
-            } else {
-                it->skippedStream = false;
-            }
-
-            if (!it->skippedStream) {
-                if ((listenerAudioStream->isIgnoreBoxEnabled() || it->positionalStream->isIgnoreBoxEnabled())
-                    && listenerAudioStream->getIgnoreBox().touches(it->positionalStream->getIgnoreBox())) {
-                    // the listener is ignoring audio sources within a radius, and this source is in that radius
-                    // so we mark it skipped
-                    it->skippedStream = true;
-                } else {
-                    it->skippedStream = false;
-                }
+                streams.active.push_back(move(stream));
+                ++stats.skippedToActive;
             }
+            return true;
         }
 
         if (!isThrottling) {
-            // we aren't throttling, so we already know that we can add this stream to the mix
-            addStream(*it, *listenerAudioStream, listenerData->getMasterAvatarGain(), false);
-        } else {
+            updateHRTFParameters(stream, *listenerAudioStream,
+                                 listenerData->getMasterAvatarGain());
+        }
+        return false;
+    });
+
+    // Process inactive streams
+    erase_if(streams.inactive, [&](MixableStream& stream) {
+        if (shouldBeRemoved(stream, _sharedData)) {
+            return true;
+        }
+
+        if (shouldBeSkipped(stream, *listener, *listenerAudioStream, *listenerData)) {
+            streams.skipped.push_back(move(stream));
+            ++stats.inactiveToSkipped;
+            return true;
+        }
+
+        if (!shouldBeInactive(stream)) {
+            streams.active.push_back(move(stream));
+            ++stats.inactiveToActive;
+            return true;
+        }
+
+        if (!isThrottling) {
+            updateHRTFParameters(stream, *listenerAudioStream,
+                                 listenerData->getMasterAvatarGain());
+        }
+        return false;
+    });
+
+    // Process active streams
+    erase_if(streams.active, [&](MixableStream& stream) {
+        if (shouldBeRemoved(stream, _sharedData)) {
+            return true;
+        }
+
+        if (isThrottling) {
             // we're throttling, so we need to update the approximate volume for any un-skipped streams
             // unless this is simply for an echo (in which case the approx volume is 1.0)
-            if (!it->skippedStream && it->positionalStream->getLastPopOutputTrailingLoudness() > 0.0f) {
-                if (it->positionalStream != listenerAudioStream) {
-                    // approximate the gain
-                    float gain = approximateGain(*listenerAudioStream, *(it->positionalStream));
-
-                    // for avatar streams, modify by the set gain adjustment
-                    if (nodeIDStreamID.streamID.isNull()) {
-                        gain *= it->hrtf->getGainAdjustment();
-                    }
-
-                    it->approximateVolume = it->positionalStream->getLastPopOutputTrailingLoudness() * gain;
-                } else {
-                    it->approximateVolume = 1.0f;
-                }
-            } else {
-                it->approximateVolume = 0.0f;
+            stream.approximateVolume = approximateVolume(stream, listenerAudioStream);
+        } else {
+            if (shouldBeSkipped(stream, *listener, *listenerAudioStream, *listenerData)) {
+                addStream(stream, *listenerAudioStream, 0.0f);
+                streams.skipped.push_back(move(stream));
+                ++stats.activeToSkipped;
+                return true;
+            }
+
+            addStream(stream, *listenerAudioStream, listenerData->getMasterAvatarGain());
+
+            if (shouldBeInactive(stream)) {
+                // To reduce artifacts we still call render to flush the HRTF for every silent
+                // sources on the first frame where the source becomes silent
+                // this ensures the correct tail from last mixed block
+                streams.inactive.push_back(move(stream));
+                ++stats.activeToInactive;
+                return true;
             }
         }
 
-        ++it;
-    }
+        return false;
+    });
 
     if (isThrottling) {
         // since we're throttling, we need to partition the mixable into throttled and unthrottled streams
-        auto numToRetain = std::distance(_begin, _end) * (1 - _throttlingRatio);
-        auto throttlePoint = mixableStreams.begin() + numToRetain;
+        int numToRetain = min(_numToRetain, (int)streams.active.size()); // Make sure we don't overflow
+        auto throttlePoint = begin(streams.active) + numToRetain;
 
-        std::nth_element(mixableStreams.begin(), throttlePoint, mixableStreams.end(),
+        std::nth_element(streams.active.begin(), throttlePoint, streams.active.end(),
                          [](const auto& a, const auto& b)
                          {
                              return a.approximateVolume > b.approximateVolume;
                          });
 
-        for (auto it = mixableStreams.begin(); it != mixableStreams.end(); ++it) {
-            // add this stream, it is throttled if it is at or past the throttle iterator in the vector
-            addStream(*it, *listenerAudioStream, listenerData->getMasterAvatarGain(), it >= throttlePoint);
-        }
+        SegmentedEraseIf<MixableStreamsVector> erase(streams.active);
+        erase.iterateTo(throttlePoint, [&](MixableStream& stream) {
+            if (shouldBeSkipped(stream, *listener, *listenerAudioStream, *listenerData)) {
+                resetHRTFState(stream);
+                streams.skipped.push_back(move(stream));
+                ++stats.activeToSkipped;
+                return true;
+            }
+
+            addStream(stream, *listenerAudioStream, listenerData->getMasterAvatarGain());
+
+            if (shouldBeInactive(stream)) {
+                // To reduce artifacts we still call render to flush the HRTF for every silent
+                // sources on the first frame where the source becomes silent
+                // this ensures the correct tail from last mixed block
+                streams.inactive.push_back(move(stream));
+                ++stats.activeToInactive;
+                return true;
+            }
+
+            return false;
+        });
+        erase.iterateTo(end(streams.active), [&](MixableStream& stream) {
+            // To reduce artifacts we reset the HRTF state for every throttled
+            // sources on the first frame where the source becomes throttled
+            // this ensures at least remove the tail from last mixed block
+            // preventing excessive artifacts on the next first block
+            resetHRTFState(stream);
+
+            if (shouldBeSkipped(stream, *listener, *listenerAudioStream, *listenerData)) {
+                streams.skipped.push_back(move(stream));
+                ++stats.activeToSkipped;
+                return true;
+            }
+
+            if (shouldBeInactive(stream)) {
+                streams.inactive.push_back(move(stream));
+                ++stats.activeToInactive;
+                return true;
+            }
+
+            return false;
+        });
     }
 
+    stats.skipped += streams.skipped.size();
+    stats.inactive += streams.inactive.size();
+    stats.active += streams.active.size();
+
     // clear the newly ignored, un-ignored, ignoring, and un-ignoring streams now that we've processed them
     listenerData->clearStagedIgnoreChanges();
 
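For readers following the prepareMix() rewrite above: each listener's streams now live in one of three buckets, every frame each stream is first checked for removal and then reclassified by two predicates, and every move between buckets increments one of the new transition counters. The sketch below is a compact restatement of that ordering, not code from the commit; the two flags stand in for the predicate results.

    enum class Bucket { Skipped, Inactive, Active };

    // isSkipped ~ shouldBeSkipped(): ignored/ignoring without admin rights, or inside an ignore box
    // isSilent  ~ shouldBeInactive(): last pop failed or popped loudness == 0.0f
    Bucket classify(bool isSkipped, bool isSilent) {
        if (isSkipped) {
            return Bucket::Skipped;
        }
        if (isSilent) {
            return Bucket::Inactive;
        }
        return Bucket::Active;   // mixed, or throttled if it falls past the nth_element cut
    }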
@@ -357,41 +482,13 @@ bool AudioMixerSlave::prepareMix(const SharedNodePointer& listener) {
     return hasAudio;
 }
 
-void AudioMixerSlave::addStream(AudioMixerClientData::MixableStream& mixableStream, AvatarAudioStream& listeningNodeStream,
-                                float masterListenerGain, bool throttle) {
-
-    if (mixableStream.skippedStream) {
-        // any skipped stream gets no processing and no silent render - early return
-        ++stats.skippedOther;
-        return;
-    }
-
+void AudioMixerSlave::addStream(AudioMixerClientData::MixableStream& mixableStream,
+                                AvatarAudioStream& listeningNodeStream,
+                                float masterListenerGain) {
     ++stats.totalMixes;
 
     auto streamToAdd = mixableStream.positionalStream;
 
-    // to reduce artifacts we still call the HRTF functor for every silent or throttled source
-    // for the first frame where the source becomes throttled or silent
-    // this ensures the correct tail from last mixed block and the correct spatialization of next first block
-    if (throttle || streamToAdd->getLastPopOutputLoudness() == 0.0f) {
-        if (mixableStream.completedSilentRender) {
-
-            if (streamToAdd->getLastPopOutputLoudness() == 0.0f) {
-                ++stats.skippedSilent;
-            } else {
-                ++stats.skippedThrottle;
-            }
-
-            return;
-        } else {
-            mixableStream.completedSilentRender = true;
-        }
-    } else if (mixableStream.completedSilentRender) {
-        // a stream that is no longer throttled or silent should have its silent render flag reset to false
-        // so that we complete a silent render for the stream next time it is throttled or otherwise goes silent
-        mixableStream.completedSilentRender = false;
-    }
-
     // check if this is a server echo of a source back to itself
     bool isEcho = (streamToAdd == &listeningNodeStream);
 
@@ -400,6 +497,7 @@ void AudioMixerSlave::addStream(AudioMixerClientData::MixableStream& mixableStream,
     float distance = glm::max(glm::length(relativePosition), EPSILON);
     float gain = computeGain(masterListenerGain, listeningNodeStream, *streamToAdd, relativePosition, distance, isEcho);
     float azimuth = isEcho ? 0.0f : computeAzimuth(listeningNodeStream, listeningNodeStream, relativePosition);
+
 
     const int HRTF_DATASET_INDEX = 1;
 
     if (!streamToAdd->lastPopSucceeded()) {
@@ -426,10 +524,10 @@ void AudioMixerSlave::addStream(AudioMixerClientData::MixableStream& mixableStream,
         // (this is not done for stereo streams since they do not go through the HRTF)
         if (!streamToAdd->isStereo() && !isEcho) {
             static int16_t silentMonoBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL] = {};
-            mixableStream.hrtf->renderSilent(silentMonoBlock, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
+            mixableStream.hrtf->render(silentMonoBlock, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                                              AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
 
-            ++stats.hrtfSilentRenders;
+            ++stats.hrtfRenders;
         }
 
         return;
@@ -453,11 +551,8 @@ void AudioMixerSlave::addStream(AudioMixerClientData::MixableStream& mixableStream,
         }
 
         ++stats.manualStereoMixes;
-        return;
-    }
-
-    // echo sources are not passed through HRTF
-    if (isEcho) {
+    } else if (isEcho) {
+        // echo sources are not passed through HRTF
 
         const float scale = 1/32768.0f; // int16_t to float
 
@@ -468,34 +563,38 @@ void AudioMixerSlave::addStream(AudioMixerClientData::MixableStream& mixableStream,
         }
 
         ++stats.manualEchoMixes;
-        return;
+    } else {
+        streamPopOutput.readSamples(_bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
+
+        mixableStream.hrtf->render(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
+                                   AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
+
+        ++stats.hrtfRenders;
     }
-
-    streamPopOutput.readSamples(_bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
-
-    if (streamToAdd->getLastPopOutputLoudness() == 0.0f) {
-        // call renderSilent to reduce artifacts
-        mixableStream.hrtf->renderSilent(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
-                                         AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
-
-        ++stats.hrtfSilentRenders;
-        return;
-    }
-
-    if (throttle) {
-        // call renderSilent with actual frame data and a gain of 0.0f to reduce artifacts
-        mixableStream.hrtf->renderSilent(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, 0.0f,
-                                         AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
-
-        ++stats.hrtfThrottleRenders;
-        return;
-    }
-
-    mixableStream.hrtf->render(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
-                               AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
-
-    ++stats.hrtfRenders;
+}
+
+void AudioMixerSlave::updateHRTFParameters(AudioMixerClientData::MixableStream& mixableStream,
+                                           AvatarAudioStream& listeningNodeStream,
+                                           float masterListenerGain) {
+    auto streamToAdd = mixableStream.positionalStream;
+
+    // check if this is a server echo of a source back to itself
+    bool isEcho = (streamToAdd == &listeningNodeStream);
+
+    glm::vec3 relativePosition = streamToAdd->getPosition() - listeningNodeStream.getPosition();
+
+    float distance = glm::max(glm::length(relativePosition), EPSILON);
+    float gain = computeGain(masterListenerGain, listeningNodeStream, *streamToAdd, relativePosition, distance, isEcho);
+    float azimuth = isEcho ? 0.0f : computeAzimuth(listeningNodeStream, listeningNodeStream, relativePosition);
+
+    mixableStream.hrtf->setParameterHistory(azimuth, distance, gain);
+
+    ++stats.hrtfUpdates;
+}
+
+void AudioMixerSlave::resetHRTFState(AudioMixerClientData::MixableStream& mixableStream) {
+    mixableStream.hrtf->reset();
+    ++stats.hrtfResets;
 }
 
 std::unique_ptr<NLPacket> createAudioPacket(PacketType type, int size, quint16 sequence, QString codec) {
AudioMixerSlave.h

@@ -44,7 +44,7 @@ public:
     void processPackets(const SharedNodePointer& node);
 
     // configure a round of mixing
-    void configureMix(ConstIter begin, ConstIter end, unsigned int frame, float throttlingRatio);
+    void configureMix(ConstIter begin, ConstIter end, unsigned int frame, int numToRetain);
 
     // mix and broadcast non-ignored streams to the node (requires configuration using configureMix, above)
     // returns true if a mixed packet was sent to the node
@@ -55,10 +55,14 @@ public:
 private:
     // create mix, returns true if mix has audio
     bool prepareMix(const SharedNodePointer& listener);
-    void addStream(AudioMixerClientData::MixableStream& mixableStream, AvatarAudioStream& listeningNodeStream,
-                   float masterListenerGain, bool throttle);
+    void addStream(AudioMixerClientData::MixableStream& mixableStream,
+                   AvatarAudioStream& listeningNodeStream,
+                   float masterListenerGain);
+    void updateHRTFParameters(AudioMixerClientData::MixableStream& mixableStream,
+                              AvatarAudioStream& listeningNodeStream,
+                              float masterListenerGain);
+    void resetHRTFState(AudioMixerClientData::MixableStream& mixableStream);
 
-    void removeStreams(AudioMixerClientData::MixableStreamsVector& mixableStreams);
     void addStreams(Node& listener, AudioMixerClientData& listenerData);
 
     // mixing buffers
@@ -69,7 +73,7 @@ private:
     ConstIter _begin;
     ConstIter _end;
     unsigned int _frame { 0 };
-    float _throttlingRatio { 0.0f };
+    int _numToRetain { -1 };
 
     SharedData& _sharedData;
 };
AudioMixerSlavePool.cpp

@@ -74,13 +74,11 @@ void AudioMixerSlavePool::processPackets(ConstIter begin, ConstIter end) {
     run(begin, end);
 }
 
-void AudioMixerSlavePool::mix(ConstIter begin, ConstIter end, unsigned int frame, float throttlingRatio) {
+void AudioMixerSlavePool::mix(ConstIter begin, ConstIter end, unsigned int frame, int numToRetain) {
     _function = &AudioMixerSlave::mix;
     _configure = [=](AudioMixerSlave& slave) {
-        slave.configureMix(_begin, _end, _frame, _throttlingRatio);
+        slave.configureMix(_begin, _end, frame, numToRetain);
     };
-    _frame = frame;
-    _throttlingRatio = throttlingRatio;
 
     run(begin, end);
 }
AudioMixerSlavePool.h

@@ -67,7 +67,7 @@ public:
     void processPackets(ConstIter begin, ConstIter end);
 
     // mix on slave threads
-    void mix(ConstIter begin, ConstIter end, unsigned int frame, float throttlingRatio);
+    void mix(ConstIter begin, ConstIter end, unsigned int frame, int numToRetain);
 
     // iterate over all slaves
     void each(std::function<void(AudioMixerSlave& slave)> functor);
@@ -98,8 +98,6 @@ private:
 
     // frame state
     Queue _queue;
-    unsigned int _frame { 0 };
-    float _throttlingRatio { 0.0f };
     ConstIter _begin;
     ConstIter _end;
 
AudioMixerStats.cpp

@@ -15,15 +15,27 @@ void AudioMixerStats::reset() {
     sumStreams = 0;
     sumListeners = 0;
     sumListenersSilent = 0;
+
     totalMixes = 0;
+
     hrtfRenders = 0;
-    hrtfSilentRenders = 0;
-    hrtfThrottleRenders = 0;
+    hrtfResets = 0;
+    hrtfUpdates = 0;
+
     manualStereoMixes = 0;
     manualEchoMixes = 0;
-    skippedThrottle = 0;
-    skippedSilent = 0;
-    skippedOther = 0;
+
+    skippedToActive = 0;
+    skippedToInactive = 0;
+    inactiveToSkipped = 0;
+    inactiveToActive = 0;
+    activeToSkipped = 0;
+    activeToInactive = 0;
+
+    skipped = 0;
+    inactive = 0;
+    active = 0;
+
 #ifdef HIFI_AUDIO_MIXER_DEBUG
     mixTime = 0;
 #endif
@@ -33,15 +45,26 @@ void AudioMixerStats::accumulate(const AudioMixerStats& otherStats) {
     sumStreams += otherStats.sumStreams;
     sumListeners += otherStats.sumListeners;
     sumListenersSilent += otherStats.sumListenersSilent;
+
     totalMixes += otherStats.totalMixes;
+
     hrtfRenders += otherStats.hrtfRenders;
-    hrtfSilentRenders += otherStats.hrtfSilentRenders;
-    hrtfThrottleRenders += otherStats.hrtfThrottleRenders;
+    hrtfResets += otherStats.hrtfResets;
+    hrtfUpdates += otherStats.hrtfUpdates;
+
     manualStereoMixes += otherStats.manualStereoMixes;
     manualEchoMixes += otherStats.manualEchoMixes;
-    skippedThrottle += otherStats.skippedThrottle;
-    skippedSilent += otherStats.skippedSilent;
-    skippedOther += otherStats.skippedOther;
+
+    skippedToActive += otherStats.skippedToActive;
+    skippedToInactive += otherStats.skippedToInactive;
+    inactiveToSkipped += otherStats.inactiveToSkipped;
+    inactiveToActive += otherStats.inactiveToActive;
+    activeToSkipped += otherStats.activeToSkipped;
+    activeToInactive += otherStats.activeToInactive;
+
+    skipped += otherStats.skipped;
+    inactive += otherStats.inactive;
+    active += otherStats.active;
 
 #ifdef HIFI_AUDIO_MIXER_DEBUG
     mixTime += otherStats.mixTime;
AudioMixerStats.h

@@ -24,15 +24,22 @@ struct AudioMixerStats {
     int totalMixes { 0 };
 
     int hrtfRenders { 0 };
-    int hrtfSilentRenders { 0 };
-    int hrtfThrottleRenders { 0 };
+    int hrtfResets { 0 };
+    int hrtfUpdates { 0 };
 
     int manualStereoMixes { 0 };
     int manualEchoMixes { 0 };
 
-    int skippedThrottle { 0 };
-    int skippedSilent { 0 };
-    int skippedOther { 0 };
+    int skippedToActive { 0 };
+    int skippedToInactive { 0 };
+    int inactiveToSkipped { 0 };
+    int inactiveToActive { 0 };
+    int activeToSkipped { 0 };
+    int activeToInactive { 0 };
+
+    int skipped { 0 };
+    int inactive { 0 };
+    int active { 0 };
 
 #ifdef HIFI_AUDIO_MIXER_DEBUG
     uint64_t mixTime { 0 };
AudioHRTF.cpp

@@ -1173,20 +1173,5 @@ void AudioHRTF::render(int16_t* input, float* output, int index, float azimuth,
     // crossfade old/new output and accumulate
     crossfade_4x2(bqBuffer, output, crossfadeTable, HRTF_BLOCK);
 
-    _silentState = false;
-}
-
-void AudioHRTF::renderSilent(int16_t* input, float* output, int index, float azimuth, float distance, float gain, int numFrames) {
-
-    // process the first silent block, to flush internal state
-    if (!_silentState) {
-        render(input, output, index, azimuth, distance, gain, numFrames);
-    }
-
-    // new parameters become old
-    _azimuthState = azimuth;
-    _distanceState = distance;
-    _gainState = gain;
-
-    _silentState = true;
+    _resetState = false;
 }
AudioHRTF.h

@@ -47,9 +47,14 @@ public:
     void render(int16_t* input, float* output, int index, float azimuth, float distance, float gain, int numFrames);
 
     //
-    // Fast path when input is known to be silent
+    // Fast path when input is known to be silent and state as been flushed
     //
-    void renderSilent(int16_t* input, float* output, int index, float azimuth, float distance, float gain, int numFrames);
+    void setParameterHistory(float azimuth, float distance, float gain) {
+        // new parameters become old
+        _azimuthState = azimuth;
+        _distanceState = distance;
+        _gainState = gain;
+    }
 
     //
     // HRTF local gain adjustment in amplitude (1.0 == unity)
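With renderSilent() gone, the mixer keeps a silent-but-present source coherent by calling setParameterHistory() (wrapped by updateHRTFParameters() in AudioMixerSlave.cpp above) and flushes throttled sources with reset(), which the _resetState flag now makes safe to call every frame. The sketch below illustrates that division of labor; driveHRTF and its audible flag are hypothetical helpers, only the AudioHRTF calls come from this commit.

    #include <cstdint>
    // assumes the modified AudioHRTF.h is included

    void driveHRTF(AudioHRTF& hrtf, bool audible, int16_t* input, float* mixBuffer,
                   float azimuth, float distance, float gain, int numFrames) {
        const int HRTF_DATASET_INDEX = 1;   // same constant addStream() uses

        if (audible) {
            // normal path: render() clears the reset flag and crossfades from the stored history
            hrtf.render(input, mixBuffer, HRTF_DATASET_INDEX, azimuth, distance, gain, numFrames);
        } else {
            // silent path: keep the parameter history current so the next render() crossfades
            // from up-to-date azimuth/distance/gain instead of stale values
            hrtf.setParameterHistory(azimuth, distance, gain);
        }
    }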
@@ -59,23 +64,25 @@ public:
 
     // clear internal state, but retain settings
     void reset() {
-        // FIR history
-        memset(_firState, 0, sizeof(_firState));
-
-        // integer delay history
-        memset(_delayState, 0, sizeof(_delayState));
-
-        // biquad history
-        memset(_bqState, 0, sizeof(_bqState));
-
-        // parameter history
-        _azimuthState = 0.0f;
-        _distanceState = 0.0f;
-        _gainState = 0.0f;
-
-        // _gainAdjust is retained
-
-        _silentState = true;
+        if (!_resetState) {
+            // FIR history
+            memset(_firState, 0, sizeof(_firState));
+
+            // integer delay history
+            memset(_delayState, 0, sizeof(_delayState));
+
+            // biquad history
+            memset(_bqState, 0, sizeof(_bqState));
+
+            // parameter history
+            _azimuthState = 0.0f;
+            _distanceState = 0.0f;
+            _gainState = 0.0f;
+
+            // _gainAdjust is retained
+
+            _resetState = true;
+        }
     }
 
 private:
@@ -110,7 +117,7 @@ private:
     // global and local gain adjustment
     float _gainAdjust = HRTF_GAIN;
 
-    bool _silentState = true;
+    bool _resetState = true;
 };
 
 #endif // AudioHRTF_h