Merge pull request #9623 from zzmp/audio/should-ignore

Optimize ignore zone checks for audio mixer
Ken Cooke 2017-02-14 06:18:54 -08:00 committed by GitHub
commit bf077e6fdb
6 changed files with 178 additions and 88 deletions
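
In broad strokes, this change moves the pairwise "should this listener ignore that node" test out of AudioMixerSlave and onto AudioMixerClientData: each client's ignore box is now memoized once per mix frame, and the pairwise answer is computed once and cached symmetrically on the peer node, instead of rebuilding both bounding boxes for every listener/node pair every frame. The old HIFI_AUDIO_THROTTLE_DEBUG throttle timer is also replaced by a whole-mix timer behind HIFI_AUDIO_MIXER_DEBUG. Below is a minimal, self-contained sketch of the per-frame memoization pattern; it is simplified (the real memo lives on AudioMixerClientData and returns an AABox built from the avatar's bounding box), and the names FrameMemo/compute are illustrative only.

    #include <atomic>
    #include <mutex>

    // Recompute a value at most once per frame; later callers on the same frame
    // take the lockless fast path (mirrors IgnoreZoneMemo::get in this diff).
    class FrameMemo {
    public:
        int get(unsigned int frame) {
            if (frame != _frame.load(std::memory_order_acquire)) {
                int fresh = compute();                     // stand-in for building the ignore box
                std::lock_guard<std::mutex> lock(_mutex);
                // double-check under the lock so only the first caller publishes this frame
                if (frame != _frame.load(std::memory_order_acquire)) {
                    _value = fresh;
                    _frame.store(frame, std::memory_order_release);
                }
            }
            return _value;
        }

    private:
        int compute() const { return 42; }                 // placeholder for the expensive work
        int _value { 0 };
        std::atomic<unsigned int> _frame { 0 };
        std::mutex _mutex;
    };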

AudioMixer.cpp

@@ -191,8 +191,7 @@ void AudioMixer::handleNodeKilled(SharedNodePointer killedNode) {
nodeList->eachNode([&killedNode](const SharedNodePointer& node) {
auto clientData = dynamic_cast<AudioMixerClientData*>(node->getLinkedData());
if (clientData) {
QUuid killedUUID = killedNode->getUUID();
clientData->removeHRTFsForNode(killedUUID);
clientData->removeNode(killedNode->getUUID());
}
});
}
@@ -325,8 +324,8 @@ void AudioMixer::sendStatsPacket() {
addTiming(_mixTiming, "mix");
addTiming(_eventsTiming, "events");
#ifdef HIFI_AUDIO_THROTTLE_DEBUG
timingStats["ns_per_throttle"] = (_stats.totalMixes > 0) ? (float)(_stats.throttleTime / _stats.totalMixes) : 0;
#ifdef HIFI_AUDIO_MIXER_DEBUG
timingStats["ns_per_mix"] = (_stats.totalMixes > 0) ? (float)(_stats.mixTime / _stats.totalMixes) : 0;
#endif
// call it "avg_..." to keep it higher in the display, sorted alphabetically

AudioMixerClientData.cpp

@@ -26,6 +26,7 @@
AudioMixerClientData::AudioMixerClientData(const QUuid& nodeID) :
NodeData(nodeID),
audioLimiter(AudioConstants::SAMPLE_RATE, AudioConstants::STEREO),
_ignoreZone(*this),
_outgoingMixedAudioSequenceNumber(0),
_downstreamAudioStreamStats()
{
@@ -427,3 +428,99 @@ void AudioMixerClientData::cleanupCodec() {
}
}
}
AudioMixerClientData::IgnoreZone& AudioMixerClientData::IgnoreZoneMemo::get(unsigned int frame) {
// check for a memoized zone
if (frame != _frame.load(std::memory_order_acquire)) {
AvatarAudioStream* stream = _data.getAvatarAudioStream();
// get the initial dimensions from the stream
glm::vec3 corner = stream ? stream->getAvatarBoundingBoxCorner() : glm::vec3(0);
glm::vec3 scale = stream ? stream->getAvatarBoundingBoxScale() : glm::vec3(0);
// enforce a minimum scale
static const glm::vec3 MIN_IGNORE_BOX_SCALE = glm::vec3(0.3f, 1.3f, 0.3f);
if (glm::any(glm::lessThan(scale, MIN_IGNORE_BOX_SCALE))) {
scale = MIN_IGNORE_BOX_SCALE;
}
// quadruple the scale (an arbitrary factor chosen for comfort)
const float IGNORE_BOX_SCALE_FACTOR = 4.0f;
scale *= IGNORE_BOX_SCALE_FACTOR;
// create the box (we use a box for the zone for convenience)
AABox box(corner, scale);
// update the memoized zone
// This may be called by multiple threads concurrently,
// so take a lock and only update the memo if this call is first.
// This prevents concurrent updates from invalidating the returned reference
// (contingent on the preconditions listed in the header).
std::lock_guard<std::mutex> lock(_mutex);
if (frame != _frame.load(std::memory_order_acquire)) {
_zone = box;
unsigned int oldFrame = _frame.exchange(frame, std::memory_order_release);
Q_UNUSED(oldFrame);
// check the precondition
assert(oldFrame == 0 || frame == (oldFrame + 1));
}
}
return _zone;
}
void AudioMixerClientData::IgnoreNodeCache::cache(bool shouldIgnore) {
if (!_isCached) {
_shouldIgnore = shouldIgnore;
_isCached = true;
}
}
bool AudioMixerClientData::IgnoreNodeCache::isCached() {
return _isCached;
}
bool AudioMixerClientData::IgnoreNodeCache::shouldIgnore() {
bool ignore = _shouldIgnore;
_isCached = false;
return ignore;
}
bool AudioMixerClientData::shouldIgnore(const SharedNodePointer self, const SharedNodePointer node, unsigned int frame) {
// this is symmetric over self / node; if computed, it is cached in the other
// check the cache to avoid computation
auto& cache = _nodeSourcesIgnoreMap[node->getUUID()];
if (cache.isCached()) {
return cache.shouldIgnore();
}
AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
if (!nodeData) {
return false;
}
// compute shouldIgnore
bool shouldIgnore = true;
if ( // the nodes are not ignoring each other explicitly (or are but get data regardless)
(!self->isIgnoringNodeWithID(node->getUUID()) ||
(nodeData->getRequestsDomainListData() && node->getCanKick())) &&
(!node->isIgnoringNodeWithID(self->getUUID()) ||
(getRequestsDomainListData() && self->getCanKick()))) {
// if either node is enabling an ignore radius, check their proximity
if ((self->isIgnoreRadiusEnabled() || node->isIgnoreRadiusEnabled())) {
auto& zone = _ignoreZone.get(frame);
auto& nodeZone = nodeData->_ignoreZone.get(frame);
shouldIgnore = zone.touches(nodeZone);
} else {
shouldIgnore = false;
}
}
// cache in node
nodeData->_nodeSourcesIgnoreMap[self->getUUID()].cache(shouldIgnore);
return shouldIgnore;
}
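
The bubble check above is symmetric over the two nodes, so the answer computed while mixing for one listener is stashed on the peer's client data and consumed, exactly once, when the peer's own mix runs in the same frame. A standalone sketch of that consume-once behaviour, mirroring IgnoreNodeCache (the class and method names here are illustrative, not the mixer's API):

    #include <atomic>
    #include <cassert>

    // Consume-once cache: the first read after cache() clears the flag,
    // so a stale pairwise answer is never reused on a later frame.
    class OneShotCache {
    public:
        void cache(bool value) {
            if (!_isCached) {
                _value = value;
                _isCached = true;
            }
        }
        bool isCached() const { return _isCached; }
        bool take() {                       // analogous to IgnoreNodeCache::shouldIgnore()
            bool value = _value;
            _isCached = false;
            return value;
        }

    private:
        std::atomic<bool> _isCached { false };
        bool _value { false };
    };

    int main() {
        OneShotCache cache;
        cache.cache(true);                  // stored while mixing for the peer listener
        assert(cache.isCached());
        assert(cache.take());               // consumed once by this listener's mix...
        assert(!cache.isCached());          // ...so the next frame recomputes from scratch
        return 0;
    }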

AudioMixerClientData.h

@@ -38,18 +38,22 @@ public:
AudioStreamMap getAudioStreams() { QReadLocker readLock { &_streamsLock }; return _audioStreams; }
AvatarAudioStream* getAvatarAudioStream();
// returns whether self (this data's node) should ignore node, memoized by frame
// precondition: frame is monotonically increasing after first call
bool shouldIgnore(SharedNodePointer self, SharedNodePointer node, unsigned int frame);
// the following methods should be called from the AudioMixer assignment thread ONLY
// they are not thread-safe
// returns a new or existing HRTF object for the given stream from the given node
AudioHRTF& hrtfForStream(const QUuid& nodeID, const QUuid& streamID = QUuid()) { return _nodeSourcesHRTFMap[nodeID][streamID]; }
// remove HRTFs for all sources from this node
void removeHRTFsForNode(const QUuid& nodeID) { _nodeSourcesHRTFMap.erase(nodeID); }
// removes an AudioHRTF object for a given stream
void removeHRTFForStream(const QUuid& nodeID, const QUuid& streamID = QUuid());
// remove all sources and data from this node
void removeNode(const QUuid& nodeID) { _nodeSourcesIgnoreMap.unsafe_erase(nodeID); _nodeSourcesHRTFMap.erase(nodeID); }
void removeAgentAvatarAudioStream();
int parseData(ReceivedMessage& message) override;
@@ -86,12 +90,10 @@ public:
bool shouldFlushEncoder() { return _shouldFlushEncoder; }
QString getCodecName() { return _selectedCodecName; }
bool shouldMuteClient() { return _shouldMuteClient; }
void setShouldMuteClient(bool shouldMuteClient) { _shouldMuteClient = shouldMuteClient; }
glm::vec3 getPosition() { return getAvatarAudioStream() ? getAvatarAudioStream()->getPosition() : glm::vec3(0); }
glm::vec3 getAvatarBoundingBoxCorner() { return getAvatarAudioStream() ? getAvatarAudioStream()->getAvatarBoundingBoxCorner() : glm::vec3(0); }
glm::vec3 getAvatarBoundingBoxScale() { return getAvatarAudioStream() ? getAvatarAudioStream()->getAvatarBoundingBoxScale() : glm::vec3(0); }
bool getRequestsDomainListData() { return _requestsDomainListData; }
void setRequestsDomainListData(bool requesting) { _requestsDomainListData = requesting; }
@@ -103,9 +105,48 @@ public slots:
void sendSelectAudioFormat(SharedNodePointer node, const QString& selectedCodecName);
private:
using IgnoreZone = AABox;
QReadWriteLock _streamsLock;
AudioStreamMap _audioStreams; // microphone stream from avatar is stored under key of null UUID
class IgnoreZoneMemo {
public:
IgnoreZoneMemo(AudioMixerClientData& data) : _data(data) {}
// returns an ignore zone, memoized by frame (lockless if the zone is already memoized)
// preconditions:
// - frame is monotonically increasing after first call
// - there are no references left from calls to get(frame - 1)
IgnoreZone& get(unsigned int frame);
private:
AudioMixerClientData& _data;
IgnoreZone _zone;
std::atomic<unsigned int> _frame { 0 };
std::mutex _mutex;
};
IgnoreZoneMemo _ignoreZone;
class IgnoreNodeCache {
public:
// std::atomic is not copyable - always initialize uncached
IgnoreNodeCache() {}
IgnoreNodeCache(const IgnoreNodeCache& other) {}
void cache(bool shouldIgnore);
bool isCached();
bool shouldIgnore();
private:
std::atomic<bool> _isCached { false };
bool _shouldIgnore { false };
};
struct IgnoreNodeCacheHasher { std::size_t operator()(const QUuid& key) const { return qHash(key); } };
using NodeSourcesIgnoreMap = tbb::concurrent_unordered_map<QUuid, IgnoreNodeCache, IgnoreNodeCacheHasher>;
NodeSourcesIgnoreMap _nodeSourcesIgnoreMap;
using HRTFMap = std::unordered_map<QUuid, AudioHRTF>;
using NodeSourcesHRTFMap = std::unordered_map<QUuid, HRTFMap>;
NodeSourcesHRTFMap _nodeSourcesHRTFMap;
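
The per-node ignore cache is read and written from multiple mixer threads, which is why it lives in a tbb::concurrent_unordered_map: concurrent insertion and lookup are safe, but erasure is not, so removal goes through unsafe_erase in removeNode(), presumably only on the cleanup path where no mixer thread is touching the map. A small sketch of wiring a Qt key type into a TBB map through a custom hasher (assumes Qt and TBB headers are available; the names QUuidHasher/IgnoreFlags are illustrative):

    #include <cstddef>
    #include <QUuid>
    #include <tbb/concurrent_unordered_map.h>

    // qHash(QUuid) comes from Qt; adapt it to the std-style hasher TBB expects.
    struct QUuidHasher {
        std::size_t operator()(const QUuid& key) const { return qHash(key); }
    };

    using IgnoreFlags = tbb::concurrent_unordered_map<QUuid, bool, QUuidHasher>;

    void example(IgnoreFlags& flags, const QUuid& id) {
        flags[id] = true;         // operator[] insert/lookup may be called concurrently
        flags.unsafe_erase(id);   // erase is NOT concurrency-safe; callers must serialize it
    }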

AudioMixerSlave.cpp

@@ -46,7 +46,6 @@ void sendMutePacket(const SharedNodePointer& node, AudioMixerClientData&);
void sendEnvironmentPacket(const SharedNodePointer& node, AudioMixerClientData& data);
// mix helpers
inline bool shouldIgnoreNode(const SharedNodePointer& listener, const SharedNodePointer& node);
inline float approximateGain(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
const glm::vec3& relativePosition);
inline float computeGain(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
@@ -126,8 +125,7 @@ bool AudioMixerSlave::prepareMix(const SharedNodePointer& listener) {
typedef void (AudioMixerSlave::*MixFunctor)(
AudioMixerClientData&, const QUuid&, const AvatarAudioStream&, const PositionalAudioStream&);
auto allStreams = [&](const SharedNodePointer& node, MixFunctor mixFunctor) {
AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
auto forAllStreams = [&](const SharedNodePointer& node, AudioMixerClientData* nodeData, MixFunctor mixFunctor) {
auto nodeID = node->getUUID();
for (auto& streamPair : nodeData->getAudioStreams()) {
auto nodeStream = streamPair.second;
@@ -135,10 +133,17 @@ bool AudioMixerSlave::prepareMix(const SharedNodePointer& listener) {
}
};
std::for_each(_begin, _end, [&](const SharedNodePointer& node) {
if (*node == *listener) {
AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
#ifdef HIFI_AUDIO_MIXER_DEBUG
auto mixStart = p_high_resolution_clock::now();
#endif
std::for_each(_begin, _end, [&](const SharedNodePointer& node) {
AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
if (!nodeData) {
return;
}
if (*node == *listener) {
// only mix the echo, if requested
for (auto& streamPair : nodeData->getAudioStreams()) {
auto nodeStream = streamPair.second;
@@ -146,15 +151,10 @@ bool AudioMixerSlave::prepareMix(const SharedNodePointer& listener) {
mixStream(*listenerData, node->getUUID(), *listenerAudioStream, *nodeStream);
}
}
} else if (!shouldIgnoreNode(listener, node)) {
} else if (!listenerData->shouldIgnore(listener, node, _frame)) {
if (!isThrottling) {
allStreams(node, &AudioMixerSlave::mixStream);
forAllStreams(node, nodeData, &AudioMixerSlave::mixStream);
} else {
#ifdef HIFI_AUDIO_THROTTLE_DEBUG
auto throttleStart = p_high_resolution_clock::now();
#endif
AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
auto nodeID = node->getUUID();
// compute the node's max relative volume
@@ -179,13 +179,6 @@ bool AudioMixerSlave::prepareMix(const SharedNodePointer& listener) {
if (!throttledNodes.empty()) {
std::push_heap(throttledNodes.begin(), throttledNodes.end());
}
#ifdef HIFI_AUDIO_THROTTLE_DEBUG
auto throttleEnd = p_high_resolution_clock::now();
uint64_t throttleTime =
std::chrono::duration_cast<std::chrono::nanoseconds>(throttleEnd - throttleStart).count();
stats.throttleTime += throttleTime;
#endif
}
}
});
@@ -201,7 +194,8 @@ bool AudioMixerSlave::prepareMix(const SharedNodePointer& listener) {
std::pop_heap(throttledNodes.begin(), throttledNodes.end());
auto& node = throttledNodes.back().second;
allStreams(node, &AudioMixerSlave::mixStream);
AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
forAllStreams(node, nodeData, &AudioMixerSlave::mixStream);
throttledNodes.pop_back();
}
@@ -209,10 +203,17 @@ bool AudioMixerSlave::prepareMix(const SharedNodePointer& listener) {
// throttle the remaining nodes' streams
for (const std::pair<float, SharedNodePointer>& nodePair : throttledNodes) {
auto& node = nodePair.second;
allStreams(node, &AudioMixerSlave::throttleStream);
AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
forAllStreams(node, nodeData, &AudioMixerSlave::throttleStream);
}
}
#ifdef HIFI_AUDIO_MIXER_DEBUG
auto mixEnd = p_high_resolution_clock::now();
auto mixTime = std::chrono::duration_cast<std::chrono::nanoseconds>(mixEnd - mixStart);
stats.mixTime += mixTime.count();
#endif
// use the per listener AudioLimiter to render the mixed data...
listenerData->audioLimiter.render(_mixSamples, _bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
@@ -452,55 +453,6 @@ void sendEnvironmentPacket(const SharedNodePointer& node, AudioMixerClientData&
}
}
bool shouldIgnoreNode(const SharedNodePointer& listener, const SharedNodePointer& node) {
AudioMixerClientData* listenerData = static_cast<AudioMixerClientData*>(listener->getLinkedData());
AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
// when this is true, the AudioMixer will send Audio data to a client about avatars that have ignored them
bool getsAnyIgnored = listenerData->getRequestsDomainListData() && listener->getCanKick();
bool ignore = true;
if (nodeData &&
// make sure that it isn't being ignored by our listening node
(!listener->isIgnoringNodeWithID(node->getUUID()) || (nodeData->getRequestsDomainListData() && node->getCanKick())) &&
// and that it isn't ignoring our listening node
(!node->isIgnoringNodeWithID(listener->getUUID()) || getsAnyIgnored)) {
// is either node enabling the space bubble / ignore radius?
if ((listener->isIgnoreRadiusEnabled() || node->isIgnoreRadiusEnabled())) {
// define the minimum bubble size
static const glm::vec3 minBubbleSize = glm::vec3(0.3f, 1.3f, 0.3f);
// set up the bounding box for the listener
AABox listenerBox(listenerData->getAvatarBoundingBoxCorner(), listenerData->getAvatarBoundingBoxScale());
if (glm::any(glm::lessThan(listenerData->getAvatarBoundingBoxScale(), minBubbleSize))) {
listenerBox.setScaleStayCentered(minBubbleSize);
}
// set up the bounding box for the node
AABox nodeBox(nodeData->getAvatarBoundingBoxCorner(), nodeData->getAvatarBoundingBoxScale());
// Clamp the size of the bounding box to a minimum scale
if (glm::any(glm::lessThan(nodeData->getAvatarBoundingBoxScale(), minBubbleSize))) {
nodeBox.setScaleStayCentered(minBubbleSize);
}
// quadruple the scale of both bounding boxes
listenerBox.embiggen(4.0f);
nodeBox.embiggen(4.0f);
// perform the collision check between the two bounding boxes
ignore = listenerBox.touches(nodeBox);
} else {
ignore = false;
}
}
return ignore;
}
static const float ATTENUATION_START_DISTANCE = 1.0f;
float approximateGain(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
const glm::vec3& relativePosition) {
float gain = 1.0f;
@@ -556,6 +508,7 @@ float computeGain(const AvatarAudioStream& listeningNodeStream, const Positional
}
// distance attenuation
const float ATTENUATION_START_DISTANCE = 1.0f;
float distance = glm::length(relativePosition);
assert(ATTENUATION_START_DISTANCE > EPSILON);
if (distance >= ATTENUATION_START_DISTANCE) {
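
The per-listener throttle timer (throttleTime under HIFI_AUDIO_THROTTLE_DEBUG) is replaced here by timing the whole mix for a listener (mixTime under HIFI_AUDIO_MIXER_DEBUG); AudioMixer::sendStatsPacket then reports the average as ns_per_mix, as shown at the top of this diff. A rough, self-contained sketch of the accumulation pattern, using std::chrono::high_resolution_clock in place of the project's p_high_resolution_clock alias (which this sketch assumes is a std::chrono-compatible clock):

    #include <chrono>
    #include <cstdint>

    struct MixerStats {
        uint64_t mixTime { 0 };      // nanoseconds spent mixing (debug-instrumented builds only)
        uint64_t totalMixes { 0 };
    };

    void mixOneListener(MixerStats& stats) {
        auto mixStart = std::chrono::high_resolution_clock::now();

        // ... prepare and render the mix for this listener ...

        auto mixEnd = std::chrono::high_resolution_clock::now();
        stats.mixTime += std::chrono::duration_cast<std::chrono::nanoseconds>(mixEnd - mixStart).count();
        ++stats.totalMixes;
    }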

AudioMixerStats.cpp

@@ -20,8 +20,8 @@ void AudioMixerStats::reset() {
hrtfThrottleRenders = 0;
manualStereoMixes = 0;
manualEchoMixes = 0;
#ifdef HIFI_AUDIO_THROTTLE_DEBUG
throttleTime = 0;
#ifdef HIFI_AUDIO_MIXER_DEBUG
mixTime = 0;
#endif
}
@@ -34,7 +34,7 @@ void AudioMixerStats::accumulate(const AudioMixerStats& otherStats) {
hrtfThrottleRenders += otherStats.hrtfThrottleRenders;
manualStereoMixes += otherStats.manualStereoMixes;
manualEchoMixes += otherStats.manualEchoMixes;
#ifdef HIFI_AUDIO_THROTTLE_DEBUG
throttleTime += otherStats.throttleTime;
#ifdef HIFI_AUDIO_MIXER_DEBUG
mixTime += otherStats.mixTime;
#endif
}

AudioMixerStats.h

@@ -12,7 +12,7 @@
#ifndef hifi_AudioMixerStats_h
#define hifi_AudioMixerStats_h
#ifdef HIFI_AUDIO_THROTTLE_DEBUG
#ifdef HIFI_AUDIO_MIXER_DEBUG
#include <cstdint>
#endif
@@ -29,8 +29,8 @@ struct AudioMixerStats {
int manualStereoMixes { 0 };
int manualEchoMixes { 0 };
#ifdef HIFI_AUDIO_THROTTLE_DEBUG
uint64_t throttleTime { 0 };
#ifdef HIFI_AUDIO_MIXER_DEBUG
uint64_t mixTime { 0 };
#endif
void reset();