Mirror of https://github.com/overte-org/overte.git

Merge pull request #9537 from zzmp/audio/throttle-filter

Add injector/avatar attenuation to audio-mixer throttling

Commit ff56eb24c8: 5 changed files with 92 additions and 34 deletions
@@ -316,6 +316,10 @@ void AudioMixer::sendStatsPacket() {
     addTiming(_mixTiming, "mix");
     addTiming(_eventsTiming, "events");

+#ifdef HIFI_AUDIO_THROTTLE_DEBUG
+    timingStats["ns_per_throttle"] = (_stats.totalMixes > 0) ? (float)(_stats.throttleTime / _stats.totalMixes) : 0;
+#endif
+
     // call it "avg_..." to keep it higher in the display, sorted alphabetically
     statsObject["avg_timing_stats"] = timingStats;
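The new stat divides the accumulated throttle time (nanoseconds) by the total number of mixes. Both operands are integers, so the division truncates before the cast to float, which is adequate for a coarse per-mix average, and the whole line only exists in builds that define HIFI_AUDIO_THROTTLE_DEBUG. A minimal standalone sketch of the same calculation, using a simplified stand-in struct rather than the mixer's actual stats type:

```cpp
#include <cstdint>
#include <iostream>

// Simplified stand-in for the mixer's stats; only the fields used here.
struct ThrottleStats {
    uint64_t throttleTime { 0 };   // accumulated nanoseconds spent throttling
    int totalMixes { 0 };          // number of mixes performed
};

int main() {
    ThrottleStats stats { 1500000, 400 };

    // Mirrors the guarded stats line: integer division, then cast for reporting.
    float nsPerThrottle = (stats.totalMixes > 0)
        ? (float)(stats.throttleTime / stats.totalMixes)
        : 0.0f;

    std::cout << "ns_per_throttle: " << nsPerThrottle << "\n";  // 3750
    return 0;
}
```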
@@ -46,10 +46,12 @@ void sendMutePacket(const SharedNodePointer& node, AudioMixerClientData&);
 void sendEnvironmentPacket(const SharedNodePointer& node, AudioMixerClientData& data);

 // mix helpers
-bool shouldIgnoreNode(const SharedNodePointer& listener, const SharedNodePointer& node);
-float gainForSource(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
+inline bool shouldIgnoreNode(const SharedNodePointer& listener, const SharedNodePointer& node);
+inline float approximateGain(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
+        const glm::vec3& relativePosition);
+inline float computeGain(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
         const glm::vec3& relativePosition, bool isEcho);
-float azimuthForSource(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
+inline float computeAzimuth(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
         const glm::vec3& relativePosition);

 void AudioMixerSlave::configure(ConstIter begin, ConstIter end, unsigned int frame, float throttlingRatio) {
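The old gainForSource declaration is split in two: approximateGain, a cheap estimate used only to rank nodes while the mixer is throttling, and computeGain, the full per-stream gain used when actually mixing. The diff itself ranks nodes with a max-heap (std::push_heap appears in a later hunk); the sketch below uses std::partial_sort and hypothetical types purely to illustrate the cheap-estimate-then-full-mix split:

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

// Hypothetical types, for illustration only.
struct Source {
    float estimate;   // cheap approximateGain-style ranking value
};

// Rank by the cheap estimate, then spend the full computeGain/HRTF work
// only on the sources that fit the mixing budget.
void selectAndMix(std::vector<Source>& sources, std::size_t budget) {
    std::size_t keep = std::min(budget, sources.size());
    std::partial_sort(sources.begin(),
                      sources.begin() + static_cast<std::ptrdiff_t>(keep),
                      sources.end(),
                      [](const Source& a, const Source& b) { return a.estimate > b.estimate; });

    for (std::size_t i = 0; i < keep; ++i) {
        // full-quality path would go here: computeGain(), HRTF render, etc.
        (void)sources[i];
    }
}
```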
@@ -126,9 +128,10 @@ bool AudioMixerSlave::prepareMix(const SharedNodePointer& listener) {
         AudioMixerClientData&, const QUuid&, const AvatarAudioStream&, const PositionalAudioStream&);
     auto allStreams = [&](const SharedNodePointer& node, MixFunctor mixFunctor) {
         AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
+        auto nodeID = node->getUUID();
         for (auto& streamPair : nodeData->getAudioStreams()) {
             auto nodeStream = streamPair.second;
-            (this->*mixFunctor)(*listenerData, node->getUUID(), *listenerAudioStream, *nodeStream);
+            (this->*mixFunctor)(*listenerData, nodeID, *listenerAudioStream, *nodeStream);
         }
     };
@@ -147,14 +150,28 @@ bool AudioMixerSlave::prepareMix(const SharedNodePointer& listener) {
             if (!isThrottling) {
                 allStreams(node, &AudioMixerSlave::mixStream);
             } else {
+#ifdef HIFI_AUDIO_THROTTLE_DEBUG
+                auto throttleStart = p_high_resolution_clock::now();
+#endif
+
                 AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
+                auto nodeID = node->getUUID();
+
                 // compute the node's max relative volume
                 float nodeVolume;
                 for (auto& streamPair : nodeData->getAudioStreams()) {
                     auto nodeStream = streamPair.second;
-                    float distance = glm::length(nodeStream->getPosition() - listenerAudioStream->getPosition());
-                    nodeVolume = std::max(nodeStream->getLastPopOutputTrailingLoudness() / distance, nodeVolume);
+
+                    // approximate the gain
+                    glm::vec3 relativePosition = nodeStream->getPosition() - listenerAudioStream->getPosition();
+                    float gain = approximateGain(*listenerAudioStream, *nodeStream, relativePosition);
+
+                    // modify by hrtf gain adjustment
+                    auto& hrtf = listenerData->hrtfForStream(nodeID, nodeStream->getStreamIdentifier());
+                    gain *= hrtf.getGainAdjustment();
+
+                    auto streamVolume = nodeStream->getLastPopOutputTrailingLoudness() * gain;
+                    nodeVolume = std::max(streamVolume, nodeVolume);
                 }

                 // max-heapify the nodes by relative volume
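The ranking metric changes here: instead of raw trailing loudness divided by distance, each stream's loudness is scaled by approximateGain (injector attenuation ratio over distance) and by the listener's per-stream HRTF gain adjustment, and the node's relative volume is the maximum over its streams. A self-contained sketch of that estimate with simplified stand-in types; it initializes the accumulator and clamps the distance to a small epsilon, which the excerpt itself does not show:

```cpp
#include <algorithm>
#include <vector>

// Simplified stand-ins for the per-stream values the mixer reads.
struct StreamEstimate {
    float trailingLoudness;   // stream->getLastPopOutputTrailingLoudness()
    float attenuationRatio;   // injector attenuation ratio; 1.0f for avatar streams
    float distance;           // |stream position - listener position|
    float hrtfGainAdjust;     // hrtf.getGainAdjustment()
};

// Maximum relative volume of one node, mirroring the throttling loop above.
float nodeMaxVolume(const std::vector<StreamEstimate>& streams) {
    float nodeVolume = 0.0f;                                            // explicit accumulator start
    for (const auto& s : streams) {
        float gain = s.attenuationRatio / std::max(s.distance, 1e-6f);  // approximateGain-style falloff
        gain *= s.hrtfGainAdjust;                                       // per-stream HRTF adjustment
        nodeVolume = std::max(s.trailingLoudness * gain, nodeVolume);
    }
    return nodeVolume;
}
```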
@@ -162,6 +179,13 @@ bool AudioMixerSlave::prepareMix(const SharedNodePointer& listener) {
                 if (!throttledNodes.empty()) {
                     std::push_heap(throttledNodes.begin(), throttledNodes.end());
                 }
+
+#ifdef HIFI_AUDIO_THROTTLE_DEBUG
+                auto throttleEnd = p_high_resolution_clock::now();
+                uint64_t throttleTime =
+                    std::chrono::duration_cast<std::chrono::nanoseconds>(throttleEnd - throttleStart).count();
+                stats.throttleTime += throttleTime;
+#endif
             }
         }
     });
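The HIFI_AUDIO_THROTTLE_DEBUG blocks bracket just the ranking work and add the elapsed nanoseconds to stats.throttleTime, which sendStatsPacket later averages into ns_per_throttle. p_high_resolution_clock is presumably the codebase's portable clock wrapper; the sketch below shows the same measurement pattern with std::chrono directly:

```cpp
#include <chrono>
#include <cstdint>

struct Stats { uint64_t throttleTime { 0 }; };

// Stand-in for the heap-building work that sits between the two #ifdef blocks.
void rankNodes() { /* ... */ }

void timedThrottle(Stats& stats) {
    auto throttleStart = std::chrono::high_resolution_clock::now();

    rankNodes();

    auto throttleEnd = std::chrono::high_resolution_clock::now();
    uint64_t throttleTime =
        std::chrono::duration_cast<std::chrono::nanoseconds>(throttleEnd - throttleStart).count();
    stats.throttleTime += throttleTime;
}
```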
@@ -227,9 +251,9 @@ void AudioMixerSlave::addStream(AudioMixerClientData& listenerNodeData, const QU
     glm::vec3 relativePosition = streamToAdd.getPosition() - listeningNodeStream.getPosition();

     float distance = glm::max(glm::length(relativePosition), EPSILON);
-    float gain = gainForSource(listeningNodeStream, streamToAdd, relativePosition, isEcho);
-    float azimuth = isEcho ? 0.0f : azimuthForSource(listeningNodeStream, listeningNodeStream, relativePosition);
-    static const int HRTF_DATASET_INDEX = 1;
+    float gain = computeGain(listeningNodeStream, streamToAdd, relativePosition, isEcho);
+    float azimuth = isEcho ? 0.0f : computeAzimuth(listeningNodeStream, listeningNodeStream, relativePosition);
+    const int HRTF_DATASET_INDEX = 1;

     if (!streamToAdd.lastPopSucceeded()) {
         bool forceSilentBlock = true;
@@ -330,7 +354,7 @@ std::unique_ptr<NLPacket> createAudioPacket(PacketType type, int size, quint16 s
 }

 void sendMixPacket(const SharedNodePointer& node, AudioMixerClientData& data, QByteArray& buffer) {
-    static const int MIX_PACKET_SIZE =
+    const int MIX_PACKET_SIZE =
         sizeof(quint16) + AudioConstants::MAX_CODEC_NAME_LENGTH_ON_WIRE + AudioConstants::NETWORK_FRAME_BYTES_STEREO;
     quint16 sequence = data.getOutgoingSequenceNumber();
     QString codec = data.getCodecName();
@@ -345,7 +369,7 @@ void sendMixPacket(const SharedNodePointer& node, AudioMixerClientData& data, QB
 }

 void sendSilentPacket(const SharedNodePointer& node, AudioMixerClientData& data) {
-    static const int SILENT_PACKET_SIZE =
+    const int SILENT_PACKET_SIZE =
         sizeof(quint16) + AudioConstants::MAX_CODEC_NAME_LENGTH_ON_WIRE + sizeof(quint16);
     quint16 sequence = data.getOutgoingSequenceNumber();
     QString codec = data.getCodecName();
@@ -475,40 +499,54 @@ bool shouldIgnoreNode(const SharedNodePointer& listener, const SharedNodePointer
     return ignore;
 }

-float gainForSource(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
-        const glm::vec3& relativePosition, bool isEcho) {
+static const float ATTENUATION_START_DISTANCE = 1.0f;
+
+float approximateGain(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
+        const glm::vec3& relativePosition) {
     float gain = 1.0f;

-    float distanceBetween = glm::length(relativePosition);
-
-    if (distanceBetween < EPSILON) {
-        distanceBetween = EPSILON;
-    }
-
+    // injector: apply attenuation
     if (streamToAdd.getType() == PositionalAudioStream::Injector) {
         gain *= reinterpret_cast<const InjectedAudioStream*>(&streamToAdd)->getAttenuationRatio();
     }

-    if (!isEcho && (streamToAdd.getType() == PositionalAudioStream::Microphone)) {
-        // source is another avatar, apply fixed off-axis attenuation to make them quieter as they turn away from listener
-        glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd.getOrientation()) * relativePosition;
+    // avatar: skip attenuation - it is too costly to approximate
+
+    // distance attenuation: approximate, ignore zone-specific attenuations
+    // this is a good approximation for streams further than ATTENUATION_START_DISTANCE
+    // those streams closer will be amplified; amplifying close streams is acceptable
+    // when throttling, as close streams are expected to be heard by a user
+    float distance = glm::length(relativePosition);
+    return gain / distance;
+}
+
+float computeGain(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
+        const glm::vec3& relativePosition, bool isEcho) {
+    float gain = 1.0f;
+
+    // injector: apply attenuation
+    if (streamToAdd.getType() == PositionalAudioStream::Injector) {
+        gain *= reinterpret_cast<const InjectedAudioStream*>(&streamToAdd)->getAttenuationRatio();
+
+    // avatar: apply fixed off-axis attenuation to make them quieter as they turn away
+    } else if (!isEcho && (streamToAdd.getType() == PositionalAudioStream::Microphone)) {
+        glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd.getOrientation()) * relativePosition;
         float angleOfDelivery = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f),
                                            glm::normalize(rotatedListenerPosition));

         const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
-        const float OFF_AXIS_ATTENUATION_FORMULA_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;
+        const float OFF_AXIS_ATTENUATION_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;

         float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION +
-            (OFF_AXIS_ATTENUATION_FORMULA_STEP * (angleOfDelivery / PI_OVER_TWO));
+            (angleOfDelivery * (OFF_AXIS_ATTENUATION_STEP / PI_OVER_TWO));

-        // multiply the current attenuation coefficient by the calculated off axis coefficient
         gain *= offAxisCoefficient;
     }

-    float attenuationPerDoublingInDistance = AudioMixer::getAttenuationPerDoublingInDistance();
-    auto& zoneSettings = AudioMixer::getZoneSettings();
     auto& audioZones = AudioMixer::getAudioZones();
+    auto& zoneSettings = AudioMixer::getZoneSettings();
+
+    // find distance attenuation coefficient
+    float attenuationPerDoublingInDistance = AudioMixer::getAttenuationPerDoublingInDistance();
     for (int i = 0; i < zoneSettings.length(); ++i) {
         if (audioZones[zoneSettings[i].source].contains(streamToAdd.getPosition()) &&
             audioZones[zoneSettings[i].listener].contains(listeningNodeStream.getPosition())) {
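Two details in the rewritten gain helpers are easy to miss. First, approximateGain keeps only the injector attenuation ratio and a plain 1/distance falloff, deliberately skipping the off-axis and zone terms; per its comments, slightly amplifying very close streams is acceptable when deciding what to throttle. Second, the off-axis expression is only reassociated: STEP * (angle / PI_OVER_TWO) and angle * (STEP / PI_OVER_TWO) are the same value up to floating-point rounding. A small check of the coefficient, assuming PI_OVER_TWO is π/2 as the name suggests:

```cpp
#include <cassert>
#include <cmath>

// Off-axis coefficient exactly as computeGain writes it; the old and new
// groupings, STEP * (angle / (pi/2)) and angle * (STEP / (pi/2)), are the
// same value up to floating-point rounding.
float offAxisCoefficient(float angleOfDelivery) {
    const float PI_OVER_TWO = 1.5707963267948966f;   // assumed value of the mixer's constant
    const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
    const float OFF_AXIS_ATTENUATION_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;
    return MAX_OFF_AXIS_ATTENUATION +
        (angleOfDelivery * (OFF_AXIS_ATTENUATION_STEP / PI_OVER_TWO));
}

int main() {
    const float PI = 3.14159265358979f;
    assert(std::fabs(offAxisCoefficient(0.0f)      - 0.2f) < 1e-5f);  // angle 0    -> 0.2 (maximum attenuation)
    assert(std::fabs(offAxisCoefficient(PI / 2.0f) - 0.6f) < 1e-5f);  // angle pi/2 -> 0.6
    assert(std::fabs(offAxisCoefficient(PI)        - 1.0f) < 1e-5f);  // angle pi   -> 1.0 (no attenuation)
    return 0;
}
```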
@@ -517,16 +555,17 @@ float gainForSource(const AvatarAudioStream& listeningNodeStream, const Position
         }
     }

-    const float ATTENUATION_BEGINS_AT_DISTANCE = 1.0f;
-    if (distanceBetween >= ATTENUATION_BEGINS_AT_DISTANCE) {
+    // distance attenuation
+    float distance = glm::length(relativePosition);
+    assert(ATTENUATION_START_DISTANCE > EPSILON);
+    if (distance >= ATTENUATION_START_DISTANCE) {

         // translate the zone setting to gain per log2(distance)
         float g = 1.0f - attenuationPerDoublingInDistance;
-        g = (g < EPSILON) ? EPSILON : g;
-        g = (g > 1.0f) ? 1.0f : g;
+        g = glm::clamp(g, EPSILON, 1.0f);

         // calculate the distance coefficient using the distance to this node
-        float distanceCoefficient = fastExp2f(fastLog2f(g) * fastLog2f(distanceBetween/ATTENUATION_BEGINS_AT_DISTANCE));
+        float distanceCoefficient = fastExp2f(fastLog2f(g) * fastLog2f(distance/ATTENUATION_START_DISTANCE));

         // multiply the current attenuation coefficient by the distance coefficient
         gain *= distanceCoefficient;
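The distance coefficient itself is unchanged apart from the renamed constant and the clamp: with g = 1 - attenuationPerDoublingInDistance, fastExp2f(fastLog2f(g) * fastLog2f(distance / ATTENUATION_START_DISTANCE)) is g raised to log2(distance / start), i.e. the gain is multiplied by g for every doubling of distance beyond the start distance. Clamping g into [EPSILON, 1.0] keeps log2(g) non-positive, so the coefficient stays in (0, 1] once distance >= ATTENUATION_START_DISTANCE. A quick numeric check using the standard exp2/log2 in place of the mixer's fast approximations:

```cpp
#include <cassert>
#include <cmath>

// g^(log2(d/d0)) == (d/d0)^(log2 g): attenuate by a factor of g per doubling.
float distanceCoefficient(float g, float distance, float startDistance) {
    return std::exp2(std::log2(g) * std::log2(distance / startDistance));
}

int main() {
    const float ATTENUATION_START_DISTANCE = 1.0f;
    float g = 1.0f - 0.5f;   // e.g. attenuationPerDoublingInDistance = 0.5

    // doubling the distance multiplies the coefficient by g
    float c1 = distanceCoefficient(g, 2.0f, ATTENUATION_START_DISTANCE);   // 0.5
    float c2 = distanceCoefficient(g, 4.0f, ATTENUATION_START_DISTANCE);   // 0.25
    float c3 = distanceCoefficient(g, 8.0f, ATTENUATION_START_DISTANCE);   // 0.125
    assert(std::fabs(c1 - 0.5f)   < 1e-5f);
    assert(std::fabs(c2 - 0.25f)  < 1e-5f);
    assert(std::fabs(c3 - 0.125f) < 1e-5f);
    return 0;
}
```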
@@ -535,7 +574,7 @@ float gainForSource(const AvatarAudioStream& listeningNodeStream, const Position
     return gain;
 }

-float azimuthForSource(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
+float computeAzimuth(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
         const glm::vec3& relativePosition) {
     glm::quat inverseOrientation = glm::inverse(listeningNodeStream.getOrientation());
@@ -20,6 +20,9 @@ void AudioMixerStats::reset() {
     hrtfThrottleRenders = 0;
     manualStereoMixes = 0;
     manualEchoMixes = 0;
+#ifdef HIFI_AUDIO_THROTTLE_DEBUG
+    throttleTime = 0;
+#endif
 }

 void AudioMixerStats::accumulate(const AudioMixerStats& otherStats) {
@@ -31,4 +34,7 @@ void AudioMixerStats::accumulate(const AudioMixerStats& otherStats) {
     hrtfThrottleRenders += otherStats.hrtfThrottleRenders;
     manualStereoMixes += otherStats.manualStereoMixes;
     manualEchoMixes += otherStats.manualEchoMixes;
+#ifdef HIFI_AUDIO_THROTTLE_DEBUG
+    throttleTime += otherStats.throttleTime;
+#endif
 }
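reset() and accumulate() get the same #ifdef treatment as the new field, so throttle time measured by each slave can presumably be rolled up into one stats object before sendStatsPacket divides by totalMixes (both fields appear together in the first hunk). A pared-down sketch of that roll-up; the struct here is a simplified stand-in, not the real AudioMixerStats:

```cpp
#include <cstdint>
#include <iostream>

// Pared-down stand-in for the stats struct: only the fields this roll-up needs.
struct SlaveStats {
    int totalMixes { 0 };
    uint64_t throttleTime { 0 };

    void reset() { totalMixes = 0; throttleTime = 0; }
    void accumulate(const SlaveStats& other) {
        totalMixes += other.totalMixes;
        throttleTime += other.throttleTime;
    }
};

int main() {
    SlaveStats master;
    SlaveStats slaves[2] = { { 100, 250000 }, { 150, 500000 } };

    for (const auto& s : slaves) {
        master.accumulate(s);   // roll per-slave counters into one object
    }

    std::cout << "mixes: " << master.totalMixes            // 250
              << ", throttle ns: " << master.throttleTime  // 750000
              << "\n";

    master.reset();             // cleared for the next stats interval
    return 0;
}
```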
@@ -12,6 +12,10 @@
 #ifndef hifi_AudioMixerStats_h
 #define hifi_AudioMixerStats_h

+#ifdef HIFI_AUDIO_THROTTLE_DEBUG
+#include <cstdint>
+#endif
+
 struct AudioMixerStats {
     int sumStreams { 0 };
     int sumListeners { 0 };
@@ -25,6 +29,10 @@ struct AudioMixerStats {
     int manualStereoMixes { 0 };
     int manualEchoMixes { 0 };

+#ifdef HIFI_AUDIO_THROTTLE_DEBUG
+    uint64_t throttleTime { 0 };
+#endif
+
     void reset();
     void accumulate(const AudioMixerStats& otherStats);
 };
@@ -48,6 +48,7 @@ public:
     // HRTF local gain adjustment in amplitude (1.0 == unity)
     //
     void setGainAdjustment(float gain) { _gainAdjust = HRTF_GAIN * gain; };
+    float getGainAdjustment() { return _gainAdjust; }

 private:
     AudioHRTF(const AudioHRTF&) = delete;
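The one-line getter exposes the value stored by setGainAdjustment, which already includes the HRTF_GAIN scale factor, and the throttling loop multiplies its approximate gain by it directly. A small usage sketch against a stand-in class (HRTF_GAIN's real value is defined elsewhere in AudioHRTF.h; 1.0f below is only a placeholder):

```cpp
#include <iostream>

// Stand-in for the relevant corner of AudioHRTF, to show the getter's contract.
static const float HRTF_GAIN = 1.0f;   // placeholder; the real constant lives in AudioHRTF.h

class FakeHRTF {
public:
    void setGainAdjustment(float gain) { _gainAdjust = HRTF_GAIN * gain; }
    float getGainAdjustment() { return _gainAdjust; }
private:
    float _gainAdjust { HRTF_GAIN };
};

int main() {
    FakeHRTF hrtf;
    hrtf.setGainAdjustment(0.5f);

    // what the throttling loop does with the value:
    float approximateStreamGain = 0.25f;   // e.g. from an approximateGain-style estimate
    float gain = approximateStreamGain * hrtf.getGainAdjustment();

    std::cout << "estimated gain: " << gain << "\n";   // 0.125
    return 0;
}
```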