Merge remote-tracking branch 'refs/remotes/highfidelity/master' into photobooth

commit c078751946
25 changed files with 1349 additions and 757 deletions
4  .gitignore  (vendored)
@@ -4,6 +4,10 @@ CMakeFiles/
CMakeScripts/
cmake_install.cmake
build*/
release*/
debug*/
gprof*/
valgrind*/
ext/
Makefile
*.user
assignment-client/src/audio/AudioMixer.cpp

@@ -9,38 +9,12 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include <errno.h>
#include <fcntl.h>
#include <fstream>
#include <iostream>
#include <math.h>
#include <memory>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <thread>

#ifdef _WIN32
#include <math.h>
#else
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#endif //_WIN32

#include <glm/glm.hpp>
#include <glm/gtx/norm.hpp>
#include <glm/gtx/vector_angle.hpp>

#include <QtCore/QCoreApplication>
#include <QtCore/QJsonArray>
#include <QtCore/QJsonDocument>
#include <QtCore/QJsonObject>
#include <QtCore/QJsonValue>
#include <QtCore/QThread>
#include <QtNetwork/QNetworkRequest>
#include <QtNetwork/QNetworkReply>

#include <LogHandler.h>
#include <NetworkAccessManager.h>
@@ -67,22 +41,20 @@ static const float DEFAULT_NOISE_MUTING_THRESHOLD = 0.003f;
static const QString AUDIO_MIXER_LOGGING_TARGET_NAME = "audio-mixer";
static const QString AUDIO_ENV_GROUP_KEY = "audio_env";
static const QString AUDIO_BUFFER_GROUP_KEY = "audio_buffer";
static const QString AUDIO_THREADING_GROUP_KEY = "audio_threading";

int AudioMixer::_numStaticJitterFrames{ -1 };
bool AudioMixer::_enableFilter = true;

bool AudioMixer::shouldMute(float quietestFrame) {
    return (quietestFrame > _noiseMutingThreshold);
}
float AudioMixer::_noiseMutingThreshold{ DEFAULT_NOISE_MUTING_THRESHOLD };
float AudioMixer::_attenuationPerDoublingInDistance{ DEFAULT_ATTENUATION_PER_DOUBLING_IN_DISTANCE };
float AudioMixer::_trailingSleepRatio{ 1.0f };
float AudioMixer::_performanceThrottlingRatio{ 0.0f };
float AudioMixer::_minAudibilityThreshold{ LOUDNESS_TO_DISTANCE_RATIO / 2.0f };
QHash<QString, AABox> AudioMixer::_audioZones;
QVector<AudioMixer::ZoneSettings> AudioMixer::_zoneSettings;
QVector<AudioMixer::ReverbSettings> AudioMixer::_zoneReverbSettings;

AudioMixer::AudioMixer(ReceivedMessage& message) :
    ThreadedAssignment(message),
    _trailingSleepRatio(1.0f),
    _minAudibilityThreshold(LOUDNESS_TO_DISTANCE_RATIO / 2.0f),
    _performanceThrottlingRatio(0.0f),
    _attenuationPerDoublingInDistance(DEFAULT_ATTENUATION_PER_DOUBLING_IN_DISTANCE),
    _noiseMutingThreshold(DEFAULT_NOISE_MUTING_THRESHOLD)
{
    ThreadedAssignment(message) {
    auto nodeList = DependencyManager::get<NodeList>();
    auto& packetReceiver = nodeList->getPacketReceiver();
@@ -96,405 +68,10 @@ AudioMixer::AudioMixer(ReceivedMessage& message) :
    packetReceiver.registerListener(PacketType::KillAvatar, this, "handleKillAvatarPacket");
    packetReceiver.registerListener(PacketType::NodeMuteRequest, this, "handleNodeMuteRequestPacket");
    packetReceiver.registerListener(PacketType::RadiusIgnoreRequest, this, "handleRadiusIgnoreRequestPacket");

    connect(nodeList.data(), &NodeList::nodeKilled, this, &AudioMixer::handleNodeKilled);
}

const float ATTENUATION_BEGINS_AT_DISTANCE = 1.0f;

const int IEEE754_MANT_BITS = 23;
const int IEEE754_EXPN_BIAS = 127;

//
// for x > 0.0f, returns log2(x)
// for x <= 0.0f, returns large negative value
//
// abs |error| < 8e-3, smooth (exact for x=2^N) for NPOLY=3
// abs |error| < 2e-4, smooth (exact for x=2^N) for NPOLY=5
// rel |error| < 0.4 from precision loss very close to 1.0f
//
static inline float fastlog2(float x) {

    union { float f; int32_t i; } mant, bits = { x };

    // split into mantissa and exponent
    mant.i = (bits.i & ((1 << IEEE754_MANT_BITS) - 1)) | (IEEE754_EXPN_BIAS << IEEE754_MANT_BITS);
    int32_t expn = (bits.i >> IEEE754_MANT_BITS) - IEEE754_EXPN_BIAS;

    mant.f -= 1.0f;

    // polynomial for log2(1+x) over x=[0,1]
    //x = (-0.346555386f * mant.f + 1.346555386f) * mant.f;
    x = (((-0.0821307180f * mant.f + 0.321188984f) * mant.f - 0.677784014f) * mant.f + 1.43872575f) * mant.f;

    return x + expn;
}

//
// for -126 <= x < 128, returns exp2(x)
//
// rel |error| < 3e-3, smooth (exact for x=N) for NPOLY=3
// rel |error| < 9e-6, smooth (exact for x=N) for NPOLY=5
//
static inline float fastexp2(float x) {

    union { float f; int32_t i; } xi;

    // bias such that x > 0
    x += IEEE754_EXPN_BIAS;
    //x = MAX(x, 1.0f);
    //x = MIN(x, 254.9999f);

    // split into integer and fraction
    xi.i = (int32_t)x;
    x -= xi.i;

    // construct exp2(xi) as a float
    xi.i <<= IEEE754_MANT_BITS;

    // polynomial for exp2(x) over x=[0,1]
    //x = (0.339766028f * x + 0.660233972f) * x + 1.0f;
    x = (((0.0135557472f * x + 0.0520323690f) * x + 0.241379763f) * x + 0.693032121f) * x + 1.0f;

    return x * xi.f;
}

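The error bounds quoted in the comments above are easy to spot-check. The following standalone sketch (not part of the commit) compares both approximations against std::log2/std::exp2; the constants and polynomial bodies are copied from the diff, while the harness around them is purely illustrative:

// fastmath_check.cpp -- sanity-check fastlog2/fastexp2 against <cmath>
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

static const int IEEE754_MANT_BITS = 23;
static const int IEEE754_EXPN_BIAS = 127;

// copied from the diff above
static inline float fastlog2(float x) {
    union { float f; int32_t i; } mant, bits = { x };
    mant.i = (bits.i & ((1 << IEEE754_MANT_BITS) - 1)) | (IEEE754_EXPN_BIAS << IEEE754_MANT_BITS);
    int32_t expn = (bits.i >> IEEE754_MANT_BITS) - IEEE754_EXPN_BIAS;
    mant.f -= 1.0f;
    x = (((-0.0821307180f * mant.f + 0.321188984f) * mant.f - 0.677784014f) * mant.f + 1.43872575f) * mant.f;
    return x + expn;
}

// copied from the diff above
static inline float fastexp2(float x) {
    union { float f; int32_t i; } xi;
    x += IEEE754_EXPN_BIAS;
    xi.i = (int32_t)x;
    x -= xi.i;
    xi.i <<= IEEE754_MANT_BITS;
    x = (((0.0135557472f * x + 0.0520323690f) * x + 0.241379763f) * x + 0.693032121f) * x + 1.0f;
    return x * xi.f;
}

int main() {
    // sweep a few decades of input and record the worst deviation
    float worstLog = 0.0f, worstExp = 0.0f;
    for (float x = 0.125f; x < 1024.0f; x *= 1.01f) {
        worstLog = std::max(worstLog, std::fabs(fastlog2(x) - std::log2(x)));
    }
    for (float x = -20.0f; x <= 20.0f; x += 0.01f) {
        worstExp = std::max(worstExp, std::fabs(fastexp2(x) - std::exp2(x)) / std::exp2(x));
    }
    std::printf("max abs fastlog2 error: %g\n", worstLog);
    std::printf("max rel fastexp2 error: %g\n", worstExp);
    return 0;
}
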
float AudioMixer::gainForSource(const PositionalAudioStream& streamToAdd,
        const AvatarAudioStream& listeningNodeStream, const glm::vec3& relativePosition, bool isEcho) {
    float gain = 1.0f;

    float distanceBetween = glm::length(relativePosition);

    if (distanceBetween < EPSILON) {
        distanceBetween = EPSILON;
    }

    if (streamToAdd.getType() == PositionalAudioStream::Injector) {
        gain *= reinterpret_cast<const InjectedAudioStream*>(&streamToAdd)->getAttenuationRatio();
    }

    if (!isEcho && (streamToAdd.getType() == PositionalAudioStream::Microphone)) {
        // source is another avatar, apply fixed off-axis attenuation to make them quieter as they turn away from listener
        glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd.getOrientation()) * relativePosition;

        float angleOfDelivery = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f),
                                           glm::normalize(rotatedListenerPosition));

        const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
        const float OFF_AXIS_ATTENUATION_FORMULA_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;

        float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION +
                                   (OFF_AXIS_ATTENUATION_FORMULA_STEP * (angleOfDelivery / PI_OVER_TWO));

        // multiply the current attenuation coefficient by the calculated off axis coefficient
        gain *= offAxisCoefficient;
    }

    float attenuationPerDoublingInDistance = _attenuationPerDoublingInDistance;
    for (int i = 0; i < _zonesSettings.length(); ++i) {
        if (_audioZones[_zonesSettings[i].source].contains(streamToAdd.getPosition()) &&
            _audioZones[_zonesSettings[i].listener].contains(listeningNodeStream.getPosition())) {
            attenuationPerDoublingInDistance = _zonesSettings[i].coefficient;
            break;
        }
    }

    if (distanceBetween >= ATTENUATION_BEGINS_AT_DISTANCE) {

        // translate the zone setting to gain per log2(distance)
        float g = 1.0f - attenuationPerDoublingInDistance;
        g = (g < EPSILON) ? EPSILON : g;
        g = (g > 1.0f) ? 1.0f : g;

        // calculate the distance coefficient using the distance to this node
        float distanceCoefficient = fastexp2(fastlog2(g) * fastlog2(distanceBetween/ATTENUATION_BEGINS_AT_DISTANCE));

        // multiply the current attenuation coefficient by the distance coefficient
        gain *= distanceCoefficient;
    }

    return gain;
}
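Written out as math (a note, not part of the commit): with g = 1 - attenuationPerDoublingInDistance clamped to (EPSILON, 1] and d_0 = ATTENUATION_BEGINS_AT_DISTANCE, the fastexp2/fastlog2 combination above computes

    \mathrm{distanceCoefficient} = 2^{\log_2 g \,\cdot\, \log_2(d/d_0)} = g^{\log_2(d/d_0)},

so each doubling of distance multiplies the gain by another factor of g. For example, with attenuationPerDoublingInDistance = 0.5 (so g = 0.5, an illustrative value), a source at d = 4 d_0 contributes 0.5^2 = 0.25 of its close-range gain.
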

float AudioMixer::azimuthForSource(const PositionalAudioStream& streamToAdd, const AvatarAudioStream& listeningNodeStream,
        const glm::vec3& relativePosition) {
    glm::quat inverseOrientation = glm::inverse(listeningNodeStream.getOrientation());

    // Compute sample delay for the two ears to create phase panning
    glm::vec3 rotatedSourcePosition = inverseOrientation * relativePosition;

    // project the rotated source position vector onto the XZ plane
    rotatedSourcePosition.y = 0.0f;

    static const float SOURCE_DISTANCE_THRESHOLD = 1e-30f;

    if (glm::length2(rotatedSourcePosition) > SOURCE_DISTANCE_THRESHOLD) {
        // produce an oriented angle about the y-axis
        return glm::orientedAngle(glm::vec3(0.0f, 0.0f, -1.0f), glm::normalize(rotatedSourcePosition), glm::vec3(0.0f, -1.0f, 0.0f));
    } else {
        // there is no distance between listener and source - return no azimuth
        return 0;
    }
}
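A closed form for the return value above (a note, not part of the commit): writing the rotated, Y-projected source direction as (x, 0, z) in the listener's frame, glm::orientedAngle with reference -Z and axis -Y reduces to

    \mathrm{azimuth} = \operatorname{atan2}(x, -z) \in (-\pi, \pi],

assuming glm's sign convention of sign(dot(axis, cross(ref, v))): zero for a source dead ahead, positive toward +X, and plus or minus pi directly behind the listener.
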

void AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData& listenerNodeData,
                                                          const PositionalAudioStream& streamToAdd,
                                                          const QUuid& sourceNodeID,
                                                          const AvatarAudioStream& listeningNodeStream) {

    // to reduce artifacts we calculate the gain and azimuth for every source for this listener
    // even if we are not going to end up mixing in this source

    ++_totalMixes;

    // this ensures that the tail of any previously mixed audio or the first block of new audio sounds correct

    // check if this is a server echo of a source back to itself
    bool isEcho = (&streamToAdd == &listeningNodeStream);

    glm::vec3 relativePosition = streamToAdd.getPosition() - listeningNodeStream.getPosition();

    // figure out the distance between source and listener
    float distance = glm::max(glm::length(relativePosition), EPSILON);

    // figure out the gain for this source at the listener
    float gain = gainForSource(streamToAdd, listeningNodeStream, relativePosition, isEcho);

    // figure out the azimuth to this source at the listener
    float azimuth = isEcho ? 0.0f : azimuthForSource(streamToAdd, listeningNodeStream, relativePosition);

    float repeatedFrameFadeFactor = 1.0f;

    static const int HRTF_DATASET_INDEX = 1;

    if (!streamToAdd.lastPopSucceeded()) {
        bool forceSilentBlock = true;

        if (!streamToAdd.getLastPopOutput().isNull()) {
            bool isInjector = dynamic_cast<const InjectedAudioStream*>(&streamToAdd);

            // in an injector, just go silent - the injector has likely ended
            // in other inputs (microphone, &c.), repeat with fade to avoid the harsh jump to silence

            // we'll repeat the last block until it has a block to mix
            // and we'll gradually fade that repeated block into silence.

            // calculate its fade factor, which depends on how many times it's already been repeated.
            repeatedFrameFadeFactor = calculateRepeatedFrameFadeFactor(streamToAdd.getConsecutiveNotMixedCount() - 1);
            if (!isInjector && repeatedFrameFadeFactor > 0.0f) {
                // apply the repeatedFrameFadeFactor to the gain
                gain *= repeatedFrameFadeFactor;

                forceSilentBlock = false;
            }
        }

        if (forceSilentBlock) {
            // we're deciding not to repeat either since we've already done it enough times or repetition with fade is disabled
            // in this case we will call renderSilent with a forced silent block
            // this ensures the correct tail from the previously mixed block and the correct spatialization of first block
            // of any upcoming audio

            if (!streamToAdd.isStereo() && !isEcho) {
                // get the existing listener-source HRTF object, or create a new one
                auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());

                // this is not done for stereo streams since they do not go through the HRTF
                static int16_t silentMonoBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL] = {};
                hrtf.renderSilent(silentMonoBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                                  AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

                ++_hrtfSilentRenders;
            }

            return;
        }
    }

    // grab the stream from the ring buffer
    AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd.getLastPopOutput();

    if (streamToAdd.isStereo() || isEcho) {
        // this is a stereo source or server echo so we do not pass it through the HRTF
        // simply apply our calculated gain to each sample
        if (streamToAdd.isStereo()) {
            for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
                _mixedSamples[i] += float(streamPopOutput[i] * gain / AudioConstants::MAX_SAMPLE_VALUE);
            }

            ++_manualStereoMixes;
        } else {
            for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; i += 2) {
                auto monoSample = float(streamPopOutput[i / 2] * gain / AudioConstants::MAX_SAMPLE_VALUE);
                _mixedSamples[i] += monoSample;
                _mixedSamples[i + 1] += monoSample;
            }

            ++_manualEchoMixes;
        }

        return;
    }

    // get the existing listener-source HRTF object, or create a new one
    auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());

    static int16_t streamBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL];

    streamPopOutput.readSamples(streamBlock, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

    // if the frame we're about to mix is silent, simply call render silent and move on
    if (streamToAdd.getLastPopOutputLoudness() == 0.0f) {
        // silent frame from source

        // we still need to call renderSilent via the HRTF for mono source
        hrtf.renderSilent(streamBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                          AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        ++_hrtfSilentRenders;

        return;
    }

    if (_performanceThrottlingRatio > 0.0f
        && streamToAdd.getLastPopOutputTrailingLoudness() / glm::length(relativePosition) <= _minAudibilityThreshold) {
        // the mixer is struggling so we're going to drop off some streams

        // we call renderSilent via the HRTF with the actual frame data and a gain of 0.0
        hrtf.renderSilent(streamBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, distance, 0.0f,
                          AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        ++_hrtfStruggleRenders;

        return;
    }

    ++_hrtfRenders;

    // mono stream, call the HRTF with our block and calculated azimuth and gain
    hrtf.render(streamBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
}

bool AudioMixer::prepareMixForListeningNode(Node* node) {
    AvatarAudioStream* nodeAudioStream = static_cast<AudioMixerClientData*>(node->getLinkedData())->getAvatarAudioStream();
    AudioMixerClientData* listenerNodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());

    // zero out the client mix for this node
    memset(_mixedSamples, 0, sizeof(_mixedSamples));

    // loop through all other nodes that have sufficient audio to mix
    DependencyManager::get<NodeList>()->eachNode([&](const SharedNodePointer& otherNode){
        // make sure that we have audio data for this other node
        // and that it isn't being ignored by our listening node
        // and that it isn't ignoring our listening node
        if (otherNode->getLinkedData()
            && !node->isIgnoringNodeWithID(otherNode->getUUID()) && !otherNode->isIgnoringNodeWithID(node->getUUID())) {
            AudioMixerClientData* otherNodeClientData = (AudioMixerClientData*) otherNode->getLinkedData();

            // check to see if we're ignoring in radius
            bool insideIgnoreRadius = false;
            if (node->isIgnoreRadiusEnabled() || otherNode->isIgnoreRadiusEnabled()) {
                AudioMixerClientData* otherData = reinterpret_cast<AudioMixerClientData*>(otherNode->getLinkedData());
                AudioMixerClientData* nodeData = reinterpret_cast<AudioMixerClientData*>(node->getLinkedData());
                float ignoreRadius = glm::min(node->getIgnoreRadius(), otherNode->getIgnoreRadius());
                if (glm::distance(nodeData->getPosition(), otherData->getPosition()) < ignoreRadius) {
                    insideIgnoreRadius = true;
                }
            }

            if (!insideIgnoreRadius) {
                // enumerate the ARBs attached to the otherNode and add all that should be added to mix
                auto streamsCopy = otherNodeClientData->getAudioStreams();
                for (auto& streamPair : streamsCopy) {
                    auto otherNodeStream = streamPair.second;
                    if (*otherNode != *node || otherNodeStream->shouldLoopbackForNode()) {
                        addStreamToMixForListeningNodeWithStream(*listenerNodeData, *otherNodeStream, otherNode->getUUID(),
                                                                 *nodeAudioStream);
                    }
                }
            }
        }
    });

    // use the per listener AudioLimiter to render the mixed data...
    listenerNodeData->audioLimiter.render(_mixedSamples, _clampedSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

    // check for silent audio after the peak limiter has converted the samples
    bool hasAudio = false;
    for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
        if (_clampedSamples[i] != 0) {
            hasAudio = true;
            break;
        }
    }
    return hasAudio;
}

void AudioMixer::sendAudioEnvironmentPacket(SharedNodePointer node) {
    // Send stream properties
    bool hasReverb = false;
    float reverbTime, wetLevel;
    // find reverb properties
    for (int i = 0; i < _zoneReverbSettings.size(); ++i) {
        AudioMixerClientData* data = static_cast<AudioMixerClientData*>(node->getLinkedData());
        glm::vec3 streamPosition = data->getAvatarAudioStream()->getPosition();
        AABox box = _audioZones[_zoneReverbSettings[i].zone];
        if (box.contains(streamPosition)) {
            hasReverb = true;
            reverbTime = _zoneReverbSettings[i].reverbTime;
            wetLevel = _zoneReverbSettings[i].wetLevel;

            break;
        }
    }

    AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
    AvatarAudioStream* stream = nodeData->getAvatarAudioStream();
    bool dataChanged = (stream->hasReverb() != hasReverb) ||
        (stream->hasReverb() && (stream->getRevebTime() != reverbTime ||
                                 stream->getWetLevel() != wetLevel));
    if (dataChanged) {
        // Update stream
        if (hasReverb) {
            stream->setReverb(reverbTime, wetLevel);
        } else {
            stream->clearReverb();
        }
    }

    // Send at change or every so often
    float CHANCE_OF_SEND = 0.01f;
    bool sendData = dataChanged || (randFloat() < CHANCE_OF_SEND);

    if (sendData) {
        auto nodeList = DependencyManager::get<NodeList>();

        unsigned char bitset = 0;

        int packetSize = sizeof(bitset);

        if (hasReverb) {
            packetSize += sizeof(reverbTime) + sizeof(wetLevel);
        }

        auto envPacket = NLPacket::create(PacketType::AudioEnvironment, packetSize);

        if (hasReverb) {
            setAtBit(bitset, HAS_REVERB_BIT);
        }

        envPacket->writePrimitive(bitset);

        if (hasReverb) {
            envPacket->writePrimitive(reverbTime);
            envPacket->writePrimitive(wetLevel);
        }
        nodeList->sendPacket(std::move(envPacket), *node);
    }
}
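For reference, the AudioEnvironment payload assembled above has the following wire layout (a sketch inferred from the writePrimitive calls, not an authoritative protocol spec):

// [ uint8 bitset      ]  HAS_REVERB_BIT set when reverb applies
// [ float reverbTime  ]  present only when HAS_REVERB_BIT is set
// [ float wetLevel    ]  present only when HAS_REVERB_BIT is set
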

void AudioMixer::handleNodeAudioPacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer sendingNode) {
    getOrCreateClientData(sendingNode.data());
    DependencyManager::get<NodeList>()->updateNodeWithDataFromPacket(message, sendingNode);

@@ -668,8 +245,8 @@ void AudioMixer::removeHRTFsForFinishedInjector(const QUuid& streamID) {
}

QString AudioMixer::percentageForMixStats(int counter) {
    if (_totalMixes > 0) {
        float mixPercentage = (float(counter) / _totalMixes) * 100.0f;
    if (_stats.totalMixes > 0) {
        float mixPercentage = (float(counter) / _stats.totalMixes) * 100.0f;
        return QString::number(mixPercentage, 'f', 2);
    } else {
        return QString("0.0");
@@ -683,34 +260,57 @@ void AudioMixer::sendStatsPacket() {
        return;
    }

    // general stats
    statsObject["useDynamicJitterBuffers"] = _numStaticJitterFrames == -1;
    statsObject["trailing_sleep_percentage"] = _trailingSleepRatio * 100.0f;
    statsObject["performance_throttling_ratio"] = _performanceThrottlingRatio;

    statsObject["avg_streams_per_frame"] = (float)_sumStreams / (float)_numStatFrames;
    statsObject["avg_listeners_per_frame"] = (float)_sumListeners / (float)_numStatFrames;
    statsObject["avg_streams_per_frame"] = (float)_stats.sumStreams / (float)_numStatFrames;
    statsObject["avg_listeners_per_frame"] = (float)_stats.sumListeners / (float)_numStatFrames;

    // timing stats
    QJsonObject timingStats;
    uint64_t timing, trailing;

    _sleepTiming.get(timing, trailing);
    timingStats["us_per_sleep"] = (qint64)(timing / _numStatFrames);
    timingStats["us_per_sleep_trailing"] = (qint64)(trailing / _numStatFrames);

    _frameTiming.get(timing, trailing);
    timingStats["us_per_frame"] = (qint64)(timing / _numStatFrames);
    timingStats["us_per_frame_trailing"] = (qint64)(trailing / _numStatFrames);

    _prepareTiming.get(timing, trailing);
    timingStats["us_per_prepare"] = (qint64)(timing / _numStatFrames);
    timingStats["us_per_prepare_trailing"] = (qint64)(trailing / _numStatFrames);

    _mixTiming.get(timing, trailing);
    timingStats["us_per_mix"] = (qint64)(timing / _numStatFrames);
    timingStats["us_per_mix_trailing"] = (qint64)(trailing / _numStatFrames);

    _eventsTiming.get(timing, trailing);
    timingStats["us_per_events"] = (qint64)(timing / _numStatFrames);
    timingStats["us_per_events_trailing"] = (qint64)(trailing / _numStatFrames);

    // call it "avg_..." to keep it higher in the display, sorted alphabetically
    statsObject["avg_timing_stats"] = timingStats;

    // mix stats
    QJsonObject mixStats;
    mixStats["%_hrtf_mixes"] = percentageForMixStats(_hrtfRenders);
    mixStats["%_hrtf_silent_mixes"] = percentageForMixStats(_hrtfSilentRenders);
    mixStats["%_hrtf_struggle_mixes"] = percentageForMixStats(_hrtfStruggleRenders);
    mixStats["%_manual_stereo_mixes"] = percentageForMixStats(_manualStereoMixes);
    mixStats["%_manual_echo_mixes"] = percentageForMixStats(_manualEchoMixes);

    mixStats["total_mixes"] = _totalMixes;
    mixStats["avg_mixes_per_block"] = _totalMixes / _numStatFrames;
    mixStats["%_hrtf_mixes"] = percentageForMixStats(_stats.hrtfRenders);
    mixStats["%_hrtf_silent_mixes"] = percentageForMixStats(_stats.hrtfSilentRenders);
    mixStats["%_hrtf_struggle_mixes"] = percentageForMixStats(_stats.hrtfStruggleRenders);
    mixStats["%_manual_stereo_mixes"] = percentageForMixStats(_stats.manualStereoMixes);
    mixStats["%_manual_echo_mixes"] = percentageForMixStats(_stats.manualEchoMixes);

    mixStats["total_mixes"] = _stats.totalMixes;
    mixStats["avg_mixes_per_block"] = _stats.totalMixes / _numStatFrames;

    statsObject["mix_stats"] = mixStats;

    _sumStreams = 0;
    _sumListeners = 0;
    _hrtfRenders = 0;
    _hrtfSilentRenders = 0;
    _hrtfStruggleRenders = 0;
    _manualStereoMixes = 0;
    _manualEchoMixes = 0;
    _totalMixes = 0;
    _numStatFrames = 0;
    _stats.reset();

    // add stats for each listener
    auto nodeList = DependencyManager::get<NodeList>();
@@ -744,7 +344,7 @@ void AudioMixer::run() {

    // wait until we have the domain-server settings, otherwise we bail
    DomainHandler& domainHandler = DependencyManager::get<NodeList>()->getDomainHandler();
    connect(&domainHandler, &DomainHandler::settingsReceived, this, &AudioMixer::domainSettingsRequestComplete);
    connect(&domainHandler, &DomainHandler::settingsReceived, this, &AudioMixer::start);
    connect(&domainHandler, &DomainHandler::settingsReceiveFail, this, &AudioMixer::domainSettingsRequestFailed);

    ThreadedAssignment::commonInit(AUDIO_MIXER_LOGGING_TARGET_NAME, NodeType::AudioMixer);
@@ -762,202 +362,165 @@ AudioMixerClientData* AudioMixer::getOrCreateClientData(Node* node) {
    return clientData;
}

void AudioMixer::domainSettingsRequestComplete() {
void AudioMixer::start() {
    auto nodeList = DependencyManager::get<NodeList>();

    // prepare the NodeList
    nodeList->addNodeTypeToInterestSet(NodeType::Agent);

    nodeList->linkedDataCreateCallback = [&](Node* node) { getOrCreateClientData(node); };

    DomainHandler& domainHandler = nodeList->getDomainHandler();
    const QJsonObject& settingsObject = domainHandler.getSettingsObject();
    // parse out any AudioMixer settings
    {
        DomainHandler& domainHandler = nodeList->getDomainHandler();
        const QJsonObject& settingsObject = domainHandler.getSettingsObject();
        parseSettingsObject(settingsObject);
    }

    // check the settings object to see if we have anything we can parse out
    parseSettingsObject(settingsObject);
    // manageLoad state
    auto frameTimestamp = p_high_resolution_clock::time_point::min();
    unsigned int framesSinceManagement = std::numeric_limits<int>::max();

    // queue up a connection to start broadcasting mixes now that we're ready to go
    QMetaObject::invokeMethod(this, "broadcastMixes", Qt::QueuedConnection);
}

void AudioMixer::broadcastMixes() {
    const int TRAILING_AVERAGE_FRAMES = 100;
    const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
    const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;

    const float STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.10f;
    const float BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.20f;

    const float RATIO_BACK_OFF = 0.02f;

    auto nodeList = DependencyManager::get<NodeList>();

    auto nextFrameTimestamp = p_high_resolution_clock::now();
    auto timeToSleep = std::chrono::microseconds(0);

    int currentFrame = 1;
    int numFramesPerSecond = (int) ceil(AudioConstants::NETWORK_FRAMES_PER_SEC);
    int framesSinceCutoffEvent = TRAILING_AVERAGE_FRAMES;
    // mix state
    unsigned int frame = 1;

    while (!_isFinished) {
        // manage mixer load
        {
            _trailingSleepRatio = (PREVIOUS_FRAMES_RATIO * _trailingSleepRatio) +
                // ratio of frame spent sleeping / total frame time
                ((CURRENT_FRAME_RATIO * timeToSleep.count()) / (float) AudioConstants::NETWORK_FRAME_USECS);

            bool hasRatioChanged = false;

            if (framesSinceCutoffEvent >= TRAILING_AVERAGE_FRAMES) {
                if (_trailingSleepRatio <= STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD) {
                    qDebug() << "Mixer is struggling";
                    // change our min required loudness to reduce some load
                    _performanceThrottlingRatio = _performanceThrottlingRatio + (0.5f * (1.0f - _performanceThrottlingRatio));
                    hasRatioChanged = true;
                } else if (_trailingSleepRatio >= BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD && _performanceThrottlingRatio != 0) {
                    qDebug() << "Mixer is recovering";
                    // back off the required loudness
                    _performanceThrottlingRatio = std::max(0.0f, _performanceThrottlingRatio - RATIO_BACK_OFF);
                    hasRatioChanged = true;
                }

                if (hasRatioChanged) {
                    // set our min audibility threshold from the new ratio
                    _minAudibilityThreshold = LOUDNESS_TO_DISTANCE_RATIO / (2.0f * (1.0f - _performanceThrottlingRatio));
                    framesSinceCutoffEvent = 0;

                    qDebug() << "Sleeping" << _trailingSleepRatio << "of frame";
                    qDebug() << "Cutoff is" << _performanceThrottlingRatio;
                    qDebug() << "Minimum audibility to be mixed is" << _minAudibilityThreshold;
                }
            }

            if (!hasRatioChanged) {
                ++framesSinceCutoffEvent;
            }
            auto timer = _sleepTiming.timer();
            manageLoad(frameTimestamp, framesSinceManagement);
        }

        // mix
        nodeList->eachNode([&](const SharedNodePointer& node) {
            if (node->getLinkedData()) {
                AudioMixerClientData* nodeData = (AudioMixerClientData*)node->getLinkedData();
        auto timer = _frameTiming.timer();

                // this function will attempt to pop a frame from each audio stream.
                // a pointer to the popped data is stored as a member in InboundAudioStream.
                // That's how the popped audio data will be read for mixing (but only if the pop was successful)
                _sumStreams += nodeData->checkBuffersBeforeFrameSend();
        nodeList->nestedEach([&](NodeList::const_iterator cbegin, NodeList::const_iterator cend) {
            // prepare frames; pop off any new audio from their streams
            {
                auto timer = _prepareTiming.timer();
                std::for_each(cbegin, cend, [&](const SharedNodePointer& node) {
                    _stats.sumStreams += prepareFrame(node, frame);
                });
            }

                // if the stream should be muted, send mute packet
                if (nodeData->getAvatarAudioStream()
                    && (shouldMute(nodeData->getAvatarAudioStream()->getQuietestFrameLoudness())
                        || nodeData->shouldMuteClient())) {
                    auto mutePacket = NLPacket::create(PacketType::NoisyMute, 0);
                    nodeList->sendPacket(std::move(mutePacket), *node);

                    // probably now we just reset the flag, once should do it (?)
                    nodeData->setShouldMuteClient(false);
                }

                if (node->getType() == NodeType::Agent && node->getActiveSocket()
                    && nodeData->getAvatarAudioStream()) {

                    bool mixHasAudio = prepareMixForListeningNode(node.data());

                    std::unique_ptr<NLPacket> mixPacket;

                    if (mixHasAudio || nodeData->shouldFlushEncoder()) {

                        int mixPacketBytes = sizeof(quint16) + AudioConstants::MAX_CODEC_NAME_LENGTH_ON_WIRE
                            + AudioConstants::NETWORK_FRAME_BYTES_STEREO;
                        mixPacket = NLPacket::create(PacketType::MixedAudio, mixPacketBytes);

                        // pack sequence number
                        quint16 sequence = nodeData->getOutgoingSequenceNumber();
                        mixPacket->writePrimitive(sequence);

                        // write the codec
                        QString codecInPacket = nodeData->getCodecName();
                        mixPacket->writeString(codecInPacket);

                        QByteArray encodedBuffer;
                        if (mixHasAudio) {
                            QByteArray decodedBuffer(reinterpret_cast<char*>(_clampedSamples), AudioConstants::NETWORK_FRAME_BYTES_STEREO);
                            nodeData->encode(decodedBuffer, encodedBuffer);
                        } else {
                            // time to flush, which resets the shouldFlush until next time we encode something
                            nodeData->encodeFrameOfZeros(encodedBuffer);
                        }
                        // pack mixed audio samples
                        mixPacket->write(encodedBuffer.constData(), encodedBuffer.size());

                    } else {
                        int silentPacketBytes = sizeof(quint16) + sizeof(quint16) + AudioConstants::MAX_CODEC_NAME_LENGTH_ON_WIRE;
                        mixPacket = NLPacket::create(PacketType::SilentAudioFrame, silentPacketBytes);

                        // pack sequence number
                        quint16 sequence = nodeData->getOutgoingSequenceNumber();
                        mixPacket->writePrimitive(sequence);

                        // write the codec
                        QString codecInPacket = nodeData->getCodecName();
                        mixPacket->writeString(codecInPacket);

                        // pack number of silent audio samples
                        quint16 numSilentSamples = AudioConstants::NETWORK_FRAME_SAMPLES_STEREO;
                        mixPacket->writePrimitive(numSilentSamples);
                    }

                    // Send audio environment
                    sendAudioEnvironmentPacket(node);

                    // send mixed audio packet
                    nodeList->sendPacket(std::move(mixPacket), *node);
                    nodeData->incrementOutgoingMixedAudioSequenceNumber();

                    // send an audio stream stats packet to the client approximately every second
                    ++currentFrame;
                    currentFrame %= numFramesPerSecond;

                    if (nodeData->shouldSendStats(currentFrame)) {
                        nodeData->sendAudioStreamStatsPackets(node);
                    }

                    ++_sumListeners;
                }
            // mix across slave threads
            {
                auto timer = _mixTiming.timer();
                _slavePool.mix(cbegin, cend, frame);
            }
        });

        // gather stats
        _slavePool.each([&](AudioMixerSlave& slave) {
            _stats.accumulate(slave.stats);
            slave.stats.reset();
        });

        ++frame;
        ++_numStatFrames;

        // play nice with qt event-looping
        {
            // since we're a while loop we need to help qt's event processing
            auto timer = _eventsTiming.timer();

            // since we're a while loop we need to yield to qt's event processing
            QCoreApplication::processEvents();

            if (_isFinished) {
                // alert qt that this is finished
                // alert qt eventing that this is finished
                QCoreApplication::sendPostedEvents(this, QEvent::DeferredDelete);
                break;
            }
        }
    }
}

        // sleep until the next frame, if necessary
        {
            nextFrameTimestamp += std::chrono::microseconds(AudioConstants::NETWORK_FRAME_USECS);
void AudioMixer::manageLoad(p_high_resolution_clock::time_point& frameTimestamp, unsigned int& framesSinceCutoffEvent) {
    auto timeToSleep = std::chrono::microseconds(0);

            auto now = p_high_resolution_clock::now();
            timeToSleep = std::chrono::duration_cast<std::chrono::microseconds>(nextFrameTimestamp - now);
    // sleep until the next frame, if necessary
    {
        // advance the next frame
        frameTimestamp += std::chrono::microseconds(AudioConstants::NETWORK_FRAME_USECS);
        auto now = p_high_resolution_clock::now();

            if (timeToSleep.count() < 0) {
                nextFrameTimestamp = now;
                timeToSleep = std::chrono::microseconds(0);
        // calculate sleep
        if (frameTimestamp < now) {
            frameTimestamp = now;
        } else {
            timeToSleep = std::chrono::duration_cast<std::chrono::microseconds>(frameTimestamp - now);
            std::this_thread::sleep_for(timeToSleep);
        }
    }

    // manage mixer load
    {
        const int TRAILING_AVERAGE_FRAMES = 100;
        const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
        const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;

        const float STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.10f;
        const float BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.20f;

        const float RATIO_BACK_OFF = 0.02f;

        _trailingSleepRatio = (PREVIOUS_FRAMES_RATIO * _trailingSleepRatio) +
            // ratio of frame spent sleeping / total frame time
            ((CURRENT_FRAME_RATIO * timeToSleep.count()) / (float) AudioConstants::NETWORK_FRAME_USECS);

        bool hasRatioChanged = false;

        if (framesSinceCutoffEvent >= TRAILING_AVERAGE_FRAMES) {
            if (_trailingSleepRatio <= STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD) {
                qDebug() << "Mixer is struggling";
                // change our min required loudness to reduce some load
                _performanceThrottlingRatio = _performanceThrottlingRatio + (0.5f * (1.0f - _performanceThrottlingRatio));
                hasRatioChanged = true;
            } else if (_trailingSleepRatio >= BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD && _performanceThrottlingRatio != 0) {
                qDebug() << "Mixer is recovering";
                // back off the required loudness
                _performanceThrottlingRatio = std::max(0.0f, _performanceThrottlingRatio - RATIO_BACK_OFF);
                hasRatioChanged = true;
            }

            std::this_thread::sleep_for(timeToSleep);
            if (hasRatioChanged) {
                // set our min audibility threshold from the new ratio
                _minAudibilityThreshold = LOUDNESS_TO_DISTANCE_RATIO / (2.0f * (1.0f - _performanceThrottlingRatio));
                framesSinceCutoffEvent = 0;

                qDebug() << "Sleeping" << _trailingSleepRatio << "of frame";
                qDebug() << "Cutoff is" << _performanceThrottlingRatio;
                qDebug() << "Minimum audibility to be mixed is" << _minAudibilityThreshold;
            }
        }

        if (!hasRatioChanged) {
            ++framesSinceCutoffEvent;
        }
    }
}
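Written out as math (a note, not part of the commit), the load heuristic above is an exponential moving average of the fraction of each frame spent sleeping:

    r \leftarrow \Big(1 - \frac{1}{N}\Big) r + \frac{1}{N} \cdot \frac{t_{\mathrm{sleep}}}{t_{\mathrm{frame}}}, \qquad N = \mathrm{TRAILING\_AVERAGE\_FRAMES} = 100.

Once the 100-frame window has elapsed, r <= 0.10 marks the mixer as struggling and moves the throttling ratio halfway to one, p <- p + 0.5(1 - p); r >= 0.20 relaxes it by p <- max(0, p - 0.02); either event resets the window and re-derives _minAudibilityThreshold as LOUDNESS_TO_DISTANCE_RATIO / (2(1 - p)).
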

int AudioMixer::prepareFrame(const SharedNodePointer& node, unsigned int frame) {
    AudioMixerClientData* data = (AudioMixerClientData*)node->getLinkedData();
    if (data == nullptr) {
        return 0;
    }

    return data->checkBuffersBeforeFrameSend();
}

void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
    if (settingsObject.contains(AUDIO_THREADING_GROUP_KEY)) {
        QJsonObject audioThreadingGroupObject = settingsObject[AUDIO_THREADING_GROUP_KEY].toObject();
        const QString AUTO_THREADS = "auto_threads";
        bool autoThreads = audioThreadingGroupObject[AUTO_THREADS].toBool();
        if (!autoThreads) {
            bool ok;
            const QString NUM_THREADS = "num_threads";
            int numThreads = audioThreadingGroupObject[NUM_THREADS].toString().toInt(&ok);
            if (ok) {
                _slavePool.setNumThreads(numThreads);
            }
        }
    }

    if (settingsObject.contains(AUDIO_BUFFER_GROUP_KEY)) {
        QJsonObject audioBufferGroupObject = settingsObject[AUDIO_BUFFER_GROUP_KEY].toObject();

@@ -1051,14 +614,6 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
        }
    }

    const QString FILTER_KEY = "enable_filter";
    if (audioEnvGroupObject[FILTER_KEY].isBool()) {
        _enableFilter = audioEnvGroupObject[FILTER_KEY].toBool();
    }
    if (_enableFilter) {
        qDebug() << "Filter enabled";
    }

    const QString AUDIO_ZONES = "zones";
    if (audioEnvGroupObject[AUDIO_ZONES].isObject()) {
        const QJsonObject& zones = audioEnvGroupObject[AUDIO_ZONES].toObject();

@@ -1116,7 +671,7 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
                coefficientObject.contains(LISTENER) &&
                coefficientObject.contains(COEFFICIENT)) {

                ZonesSettings settings;
                ZoneSettings settings;

                bool ok;
                settings.source = coefficientObject.value(SOURCE).toString();

@@ -1126,7 +681,7 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
                if (ok && settings.coefficient >= 0.0f && settings.coefficient <= 1.0f &&
                    _audioZones.contains(settings.source) && _audioZones.contains(settings.listener)) {

                    _zonesSettings.push_back(settings);
                    _zoneSettings.push_back(settings);
                    qDebug() << "Added Coefficient:" << settings.source << settings.listener << settings.coefficient;
                }
            }

@@ -1159,6 +714,7 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
                    settings.wetLevel = wetLevel;

                    _zoneReverbSettings.push_back(settings);

                    qDebug() << "Added Reverb:" << zone << reverbTime << wetLevel;
                }
            }

@@ -1166,3 +722,28 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
        }
    }
}

AudioMixer::Timer::Timing::Timing(uint64_t& sum) : _sum(sum) {
    _timing = p_high_resolution_clock::now();
}

AudioMixer::Timer::Timing::~Timing() {
    _sum += std::chrono::duration_cast<std::chrono::microseconds>(p_high_resolution_clock::now() - _timing).count();
}

void AudioMixer::Timer::get(uint64_t& timing, uint64_t& trailing) {
    // update history
    _index = (_index + 1) % TIMER_TRAILING_SECONDS;
    uint64_t oldTiming = _history[_index];
    _history[_index] = _sum;

    // update trailing
    _trailing -= oldTiming;
    _trailing += _sum;

    timing = _sum;
    trailing = _trailing / TIMER_TRAILING_SECONDS;

    // reset _sum
    _sum = 0;
}
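The Timer/Timing pair above is a scope-based accumulator. A minimal standalone sketch of the same RAII pattern (not part of the commit; names are illustrative):

// scoped_timer_sketch.cpp -- the guard stamps the start time on construction,
// and its destructor adds the elapsed microseconds to a shared accumulator.
#include <chrono>
#include <cstdint>
#include <cstdio>
#include <thread>

struct ScopedTimer {
    explicit ScopedTimer(uint64_t& sum)
        : _sum(sum), _start(std::chrono::steady_clock::now()) {}
    ~ScopedTimer() {
        _sum += std::chrono::duration_cast<std::chrono::microseconds>(
                    std::chrono::steady_clock::now() - _start).count();
    }
    uint64_t& _sum;
    std::chrono::steady_clock::time_point _start;
};

int main() {
    uint64_t mixMicros = 0;
    {
        ScopedTimer timer(mixMicros);  // like `auto timer = _mixTiming.timer();`
        std::this_thread::sleep_for(std::chrono::milliseconds(5));
    }   // destructor fires here and adds the elapsed microseconds to mixMicros
    std::printf("accumulated: %llu us\n", (unsigned long long)mixMicros);
    return 0;
}
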
assignment-client/src/audio/AudioMixer.h

@@ -18,31 +18,45 @@
#include <ThreadedAssignment.h>
#include <UUIDHasher.h>

#include "AudioMixerStats.h"
#include "AudioMixerSlavePool.h"

class PositionalAudioStream;
class AvatarAudioStream;
class AudioHRTF;
class AudioMixerClientData;

const int SAMPLE_PHASE_DELAY_AT_90 = 20;

const int READ_DATAGRAMS_STATS_WINDOW_SECONDS = 30;

/// Handles assignments of type AudioMixer - mixing streams of audio and re-distributing to various clients.
class AudioMixer : public ThreadedAssignment {
    Q_OBJECT
public:
    AudioMixer(ReceivedMessage& message);

public slots:
    /// threaded run of assignment
    void run() override;

    void sendStatsPacket() override;
    struct ZoneSettings {
        QString source;
        QString listener;
        float coefficient;
    };
    struct ReverbSettings {
        QString zone;
        float reverbTime;
        float wetLevel;
    };

    static int getStaticJitterFrames() { return _numStaticJitterFrames; }
    static bool shouldMute(float quietestFrame) { return quietestFrame > _noiseMutingThreshold; }
    static float getAttenuationPerDoublingInDistance() { return _attenuationPerDoublingInDistance; }
    static float getMinimumAudibilityThreshold() { return _performanceThrottlingRatio > 0.0f ? _minAudibilityThreshold : 0.0f; }
    static const QHash<QString, AABox>& getAudioZones() { return _audioZones; }
    static const QVector<ZoneSettings>& getZoneSettings() { return _zoneSettings; }
    static const QVector<ReverbSettings>& getReverbSettings() { return _zoneReverbSettings; }

public slots:
    void run() override;
    void sendStatsPacket() override;

private slots:
    void broadcastMixes();
    // packet handlers
    void handleNodeAudioPacket(QSharedPointer<ReceivedMessage> packet, SharedNodePointer sendingNode);
    void handleMuteEnvironmentPacket(QSharedPointer<ReceivedMessage> packet, SharedNodePointer sendingNode);
    void handleNegotiateAudioFormat(QSharedPointer<ReceivedMessage> message, SharedNodePointer sendingNode);
@@ -52,74 +66,66 @@ private slots:
    void handleKillAvatarPacket(QSharedPointer<ReceivedMessage> packet, SharedNodePointer sendingNode);
    void handleNodeMuteRequestPacket(QSharedPointer<ReceivedMessage> packet, SharedNodePointer sendingNode);

    void start();
    void removeHRTFsForFinishedInjector(const QUuid& streamID);

private:
    // mixing helpers
    // check and maybe throttle mixer load by changing audibility threshold
    void manageLoad(p_high_resolution_clock::time_point& frameTimestamp, unsigned int& framesSinceManagement);
    // pop a frame from any streams on the node
    // returns the number of available streams
    int prepareFrame(const SharedNodePointer& node, unsigned int frame);

    AudioMixerClientData* getOrCreateClientData(Node* node);
    void domainSettingsRequestComplete();

    /// adds one stream to the mix for a listening node
    void addStreamToMixForListeningNodeWithStream(AudioMixerClientData& listenerNodeData,
                                                  const PositionalAudioStream& streamToAdd,
                                                  const QUuid& sourceNodeID,
                                                  const AvatarAudioStream& listeningNodeStream);

    float gainForSource(const PositionalAudioStream& streamToAdd, const AvatarAudioStream& listeningNodeStream,
                        const glm::vec3& relativePosition, bool isEcho);
    float azimuthForSource(const PositionalAudioStream& streamToAdd, const AvatarAudioStream& listeningNodeStream,
                           const glm::vec3& relativePosition);

    /// prepares and sends a mix to one Node
    bool prepareMixForListeningNode(Node* node);

    /// Send Audio Environment packet for a single node
    void sendAudioEnvironmentPacket(SharedNodePointer node);

    void perSecondActions();

    QString percentageForMixStats(int counter);

    bool shouldMute(float quietestFrame);

    void parseSettingsObject(const QJsonObject& settingsObject);

    float _trailingSleepRatio;
    float _minAudibilityThreshold;
    float _performanceThrottlingRatio;
    float _attenuationPerDoublingInDistance;
    float _noiseMutingThreshold;
    int _numStatFrames { 0 };
    int _sumStreams { 0 };
    int _sumListeners { 0 };
    int _hrtfRenders { 0 };
    int _hrtfSilentRenders { 0 };
    int _hrtfStruggleRenders { 0 };
    int _manualStereoMixes { 0 };
    int _manualEchoMixes { 0 };
    int _totalMixes { 0 };
    AudioMixerStats _stats;

    QString _codecPreferenceOrder;

    float _mixedSamples[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];
    int16_t _clampedSamples[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];
    AudioMixerSlavePool _slavePool;

    QHash<QString, AABox> _audioZones;
    struct ZonesSettings {
        QString source;
        QString listener;
        float coefficient;
    class Timer {
    public:
        class Timing {
        public:
            Timing(uint64_t& sum);
            ~Timing();
        private:
            p_high_resolution_clock::time_point _timing;
            uint64_t& _sum;
        };

        Timing timer() { return Timing(_sum); }
        void get(uint64_t& timing, uint64_t& trailing);
    private:
        static const int TIMER_TRAILING_SECONDS = 10;

        uint64_t _sum { 0 };
        uint64_t _trailing { 0 };
        uint64_t _history[TIMER_TRAILING_SECONDS] {};
        int _index { 0 };
    };
    QVector<ZonesSettings> _zonesSettings;
    struct ReverbSettings {
        QString zone;
        float reverbTime;
        float wetLevel;
    };
    QVector<ReverbSettings> _zoneReverbSettings;
    Timer _sleepTiming;
    Timer _frameTiming;
    Timer _prepareTiming;
    Timer _mixTiming;
    Timer _eventsTiming;

    static int _numStaticJitterFrames; // -1 denotes dynamic jitter buffering

    static bool _enableFilter;
    static float _noiseMutingThreshold;
    static float _attenuationPerDoublingInDistance;
    static float _trailingSleepRatio;
    static float _performanceThrottlingRatio;
    static float _minAudibilityThreshold;
    static QHash<QString, AABox> _audioZones;
    static QVector<ZoneSettings> _zoneSettings;
    static QVector<ReverbSettings> _zoneReverbSettings;
};

#endif // hifi_AudioMixer_h
519  assignment-client/src/audio/AudioMixerSlave.cpp  (new file)

@@ -0,0 +1,519 @@
//
// AudioMixerSlave.cpp
// assignment-client/src/audio
//
// Created by Zach Pomerantz on 11/22/16.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include <algorithm>

#include <glm/glm.hpp>
#include <glm/gtx/norm.hpp>
#include <glm/gtx/vector_angle.hpp>

#include <LogHandler.h>
#include <NetworkAccessManager.h>
#include <NodeList.h>
#include <Node.h>
#include <OctreeConstants.h>
#include <plugins/PluginManager.h>
#include <plugins/CodecPlugin.h>
#include <udt/PacketHeaders.h>
#include <SharedUtil.h>
#include <StDev.h>
#include <UUID.h>

#include "AudioRingBuffer.h"
#include "AudioMixer.h"
#include "AudioMixerClientData.h"
#include "AvatarAudioStream.h"
#include "InjectedAudioStream.h"

#include "AudioMixerSlave.h"

std::unique_ptr<NLPacket> createAudioPacket(PacketType type, int size, quint16 sequence, QString codec) {
    auto audioPacket = NLPacket::create(type, size);
    audioPacket->writePrimitive(sequence);
    audioPacket->writeString(codec);
    return audioPacket;
}

void sendMixPacket(const SharedNodePointer& node, AudioMixerClientData& data, QByteArray& buffer) {
    static const int MIX_PACKET_SIZE =
        sizeof(quint16) + AudioConstants::MAX_CODEC_NAME_LENGTH_ON_WIRE + AudioConstants::NETWORK_FRAME_BYTES_STEREO;
    quint16 sequence = data.getOutgoingSequenceNumber();
    QString codec = data.getCodecName();
    auto mixPacket = createAudioPacket(PacketType::MixedAudio, MIX_PACKET_SIZE, sequence, codec);

    // pack samples
    mixPacket->write(buffer.constData(), buffer.size());

    // send packet
    DependencyManager::get<NodeList>()->sendPacket(std::move(mixPacket), *node);
    data.incrementOutgoingMixedAudioSequenceNumber();
}
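For reference, both packet types built through createAudioPacket share a common prefix; the layout below is a sketch inferred from the calls above and in sendSilentPacket below, not an authoritative spec:

// [ quint16 sequence ]  per-listener outgoing mixed-audio sequence number
// [ string  codec    ]  codec name, length-prefixed by writeString
// MixedAudio:        [ encoded stereo frame bytes ]
// SilentAudioFrame:  [ quint16 numSilentSamples ]
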

void sendSilentPacket(const SharedNodePointer& node, AudioMixerClientData& data) {
    static const int SILENT_PACKET_SIZE =
        sizeof(quint16) + AudioConstants::MAX_CODEC_NAME_LENGTH_ON_WIRE + sizeof(quint16);
    quint16 sequence = data.getOutgoingSequenceNumber();
    QString codec = data.getCodecName();
    auto mixPacket = createAudioPacket(PacketType::SilentAudioFrame, SILENT_PACKET_SIZE, sequence, codec);

    // pack number of samples
    mixPacket->writePrimitive(AudioConstants::NETWORK_FRAME_SAMPLES_STEREO);

    // send packet
    DependencyManager::get<NodeList>()->sendPacket(std::move(mixPacket), *node);
    data.incrementOutgoingMixedAudioSequenceNumber();
}

void sendEnvironmentPacket(const SharedNodePointer& node, AudioMixerClientData& data) {
    bool hasReverb = false;
    float reverbTime, wetLevel;

    auto& reverbSettings = AudioMixer::getReverbSettings();
    auto& audioZones = AudioMixer::getAudioZones();

    AvatarAudioStream* stream = data.getAvatarAudioStream();
    glm::vec3 streamPosition = stream->getPosition();

    // find reverb properties
    for (int i = 0; i < reverbSettings.size(); ++i) {
        AABox box = audioZones[reverbSettings[i].zone];
        if (box.contains(streamPosition)) {
            hasReverb = true;
            reverbTime = reverbSettings[i].reverbTime;
            wetLevel = reverbSettings[i].wetLevel;
            break;
        }
    }

    // check if data changed
    bool dataChanged = (stream->hasReverb() != hasReverb) ||
        (stream->hasReverb() && (stream->getRevebTime() != reverbTime || stream->getWetLevel() != wetLevel));
    if (dataChanged) {
        // update stream
        if (hasReverb) {
            stream->setReverb(reverbTime, wetLevel);
        } else {
            stream->clearReverb();
        }
    }

    // send packet at change or every so often
    float CHANCE_OF_SEND = 0.01f;
    bool sendData = dataChanged || (randFloat() < CHANCE_OF_SEND);

    if (sendData) {
        // size the packet
        unsigned char bitset = 0;
        int packetSize = sizeof(bitset);
        if (hasReverb) {
            packetSize += sizeof(reverbTime) + sizeof(wetLevel);
        }

        // write the packet
        auto envPacket = NLPacket::create(PacketType::AudioEnvironment, packetSize);
        if (hasReverb) {
            setAtBit(bitset, HAS_REVERB_BIT);
        }
        envPacket->writePrimitive(bitset);
        if (hasReverb) {
            envPacket->writePrimitive(reverbTime);
            envPacket->writePrimitive(wetLevel);
        }

        // send the packet
        DependencyManager::get<NodeList>()->sendPacket(std::move(envPacket), *node);
    }
}

void AudioMixerSlave::configure(ConstIter begin, ConstIter end, unsigned int frame) {
|
||||
_begin = begin;
|
||||
_end = end;
|
||||
_frame = frame;
|
||||
}
|
||||
|
||||
void AudioMixerSlave::mix(const SharedNodePointer& node) {
|
||||
// check that the node is valid
|
||||
AudioMixerClientData* data = (AudioMixerClientData*)node->getLinkedData();
|
||||
if (data == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
auto avatarStream = data->getAvatarAudioStream();
|
||||
if (avatarStream == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
// send mute packet, if necessary
|
||||
if (AudioMixer::shouldMute(avatarStream->getQuietestFrameLoudness()) || data->shouldMuteClient()) {
|
||||
auto mutePacket = NLPacket::create(PacketType::NoisyMute, 0);
|
||||
DependencyManager::get<NodeList>()->sendPacket(std::move(mutePacket), *node);
|
||||
|
||||
// probably now we just reset the flag, once should do it (?)
|
||||
data->setShouldMuteClient(false);
|
||||
}
|
||||
|
||||
// send audio packets, if necessary
|
||||
if (node->getType() == NodeType::Agent && node->getActiveSocket()) {
|
||||
++stats.sumListeners;
|
||||
|
||||
// mix the audio
|
||||
bool mixHasAudio = prepareMix(node);
|
||||
|
||||
// send audio packet
|
||||
if (mixHasAudio || data->shouldFlushEncoder()) {
|
||||
// encode the audio
|
||||
QByteArray encodedBuffer;
|
||||
if (mixHasAudio) {
|
||||
QByteArray decodedBuffer(reinterpret_cast<char*>(_bufferSamples), AudioConstants::NETWORK_FRAME_BYTES_STEREO);
|
||||
data->encode(decodedBuffer, encodedBuffer);
|
||||
} else {
|
||||
// time to flush, which resets the shouldFlush until next time we encode something
|
||||
data->encodeFrameOfZeros(encodedBuffer);
|
||||
}
|
||||
|
||||
sendMixPacket(node, *data, encodedBuffer);
|
||||
} else {
|
||||
sendSilentPacket(node, *data);
|
||||
}
|
||||
|
||||
// send environment packet
|
||||
sendEnvironmentPacket(node, *data);
|
||||
|
||||
// send stats packet (about every second)
|
||||
static const unsigned int NUM_FRAMES_PER_SEC = (int) ceil(AudioConstants::NETWORK_FRAMES_PER_SEC);
|
||||
if (data->shouldSendStats(_frame % NUM_FRAMES_PER_SEC)) {
|
||||
data->sendAudioStreamStatsPackets(node);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool AudioMixerSlave::prepareMix(const SharedNodePointer& node) {
    AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
    AvatarAudioStream* nodeAudioStream = nodeData->getAvatarAudioStream();

    // zero out the client mix for this node
    memset(_mixSamples, 0, sizeof(_mixSamples));

    // loop through all other nodes that have sufficient audio to mix
    std::for_each(_begin, _end, [&](const SharedNodePointer& otherNode){
        // make sure that we have audio data for this other node
        // and that it isn't being ignored by our listening node
        // and that it isn't ignoring our listening node
        AudioMixerClientData* otherData = static_cast<AudioMixerClientData*>(otherNode->getLinkedData());
        if (otherData
            && !node->isIgnoringNodeWithID(otherNode->getUUID()) && !otherNode->isIgnoringNodeWithID(node->getUUID())) {

            // skip this node if the distance between the two is inside either ignore radius
            if (node->isIgnoreRadiusEnabled() || otherNode->isIgnoreRadiusEnabled()) {
                float ignoreRadius = glm::min(node->getIgnoreRadius(), otherNode->getIgnoreRadius());
                if (glm::distance(nodeData->getPosition(), otherData->getPosition()) < ignoreRadius) {
                    return;
                }
            }

            // enumerate the audio streams (ring buffers) attached to the otherNode and add all that should be added to the mix
            auto streamsCopy = otherData->getAudioStreams();
            for (auto& streamPair : streamsCopy) {
                auto otherNodeStream = streamPair.second;
                if (*otherNode != *node || otherNodeStream->shouldLoopbackForNode()) {
                    addStreamToMix(*nodeData, otherNode->getUUID(), *nodeAudioStream, *otherNodeStream);
                }
            }
        }
    });

    // use the per-listener AudioLimiter to render the mixed data...
    nodeData->audioLimiter.render(_mixSamples, _bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

    // check for silent audio after the peak limiter has converted the samples
    bool hasAudio = false;
    for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
        if (_bufferSamples[i] != 0) {
            hasAudio = true;
            break;
        }
    }
    return hasAudio;
}

void AudioMixerSlave::addStreamToMix(AudioMixerClientData& listenerNodeData, const QUuid& sourceNodeID,
        const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd) {
    // to reduce artifacts we calculate the gain and azimuth for every source for this listener,
    // even if we are not going to end up mixing in this source

    ++stats.totalMixes;

    // check if this is a server echo of a source back to itself
    bool isEcho = (&streamToAdd == &listeningNodeStream);

    glm::vec3 relativePosition = streamToAdd.getPosition() - listeningNodeStream.getPosition();

    // figure out the distance between source and listener
    float distance = glm::max(glm::length(relativePosition), EPSILON);

    // figure out the gain for this source at the listener
    float gain = gainForSource(listeningNodeStream, streamToAdd, relativePosition, isEcho);

    // figure out the azimuth to this source at the listener
    float azimuth = isEcho ? 0.0f : azimuthForSource(listeningNodeStream, streamToAdd, relativePosition);

    float repeatedFrameFadeFactor = 1.0f;

    static const int HRTF_DATASET_INDEX = 1;

    if (!streamToAdd.lastPopSucceeded()) {
        bool forceSilentBlock = true;

        if (!streamToAdd.getLastPopOutput().isNull()) {
            bool isInjector = (dynamic_cast<const InjectedAudioStream*>(&streamToAdd) != nullptr);

            // for an injector, just go silent - the injector has likely ended
            // for other inputs (microphone, &c.), repeat with fade to avoid the harsh jump to silence:
            // we'll repeat the last block until the stream has a block to mix,
            // and we'll gradually fade that repeated block into silence.

            // calculate its fade factor, which depends on how many times it's already been repeated.
            repeatedFrameFadeFactor = calculateRepeatedFrameFadeFactor(streamToAdd.getConsecutiveNotMixedCount() - 1);
            if (!isInjector && repeatedFrameFadeFactor > 0.0f) {
                // apply the repeatedFrameFadeFactor to the gain
                gain *= repeatedFrameFadeFactor;

                forceSilentBlock = false;
            }
        }

        if (forceSilentBlock) {
            // we're not going to repeat the block - either this is an injector,
            // or the block has already been repeated and faded out completely.
            // in this case we will call renderSilent with a forced silent block;
            // this ensures the correct tail from the previously mixed block
            // and the correct spatialization of the first block of any upcoming audio

            if (!streamToAdd.isStereo() && !isEcho) {
                // get the existing listener-source HRTF object, or create a new one
                auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());

                // this is not done for stereo streams since they do not go through the HRTF
                static int16_t silentMonoBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL] = {};
                hrtf.renderSilent(silentMonoBlock, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                                  AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

                ++stats.hrtfSilentRenders;
            }

            return;
        }
    }

    // grab the stream from the ring buffer
    AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd.getLastPopOutput();

    if (streamToAdd.isStereo() || isEcho) {
        // this is a stereo source or server echo, so we do not pass it through the HRTF -
        // we simply apply our calculated gain to each sample
        if (streamToAdd.isStereo()) {
            for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
                _mixSamples[i] += float(streamPopOutput[i] * gain / AudioConstants::MAX_SAMPLE_VALUE);
            }

            ++stats.manualStereoMixes;
        } else {
            for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; i += 2) {
                auto monoSample = float(streamPopOutput[i / 2] * gain / AudioConstants::MAX_SAMPLE_VALUE);
                _mixSamples[i] += monoSample;
                _mixSamples[i + 1] += monoSample;
            }

            ++stats.manualEchoMixes;
        }

        return;
    }

    // get the existing listener-source HRTF object, or create a new one
    auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());

    streamPopOutput.readSamples(_bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

    // if the frame we're about to mix is silent, simply call renderSilent and move on
    if (streamToAdd.getLastPopOutputLoudness() == 0.0f) {
        // silent frame from source - we still need to call renderSilent via the HRTF for a mono source
        hrtf.renderSilent(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                          AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        ++stats.hrtfSilentRenders;

        return;
    }

    float audibilityThreshold = AudioMixer::getMinimumAudibilityThreshold();
    if (audibilityThreshold > 0.0f &&
        streamToAdd.getLastPopOutputTrailingLoudness() / glm::length(relativePosition) <= audibilityThreshold) {
        // the mixer is struggling, so we're going to drop off some streams

        // we call renderSilent via the HRTF with the actual frame data and a gain of 0.0
        hrtf.renderSilent(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, 0.0f,
                          AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);

        ++stats.hrtfStruggleRenders;

        return;
    }

    ++stats.hrtfRenders;

    // mono stream - call the HRTF with our block and calculated azimuth and gain
    hrtf.render(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
                AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
}

const int IEEE754_MANT_BITS = 23;
const int IEEE754_EXPN_BIAS = 127;

//
// for x > 0.0f, returns log2(x)
// for x <= 0.0f, returns large negative value
//
// abs |error| < 8e-3, smooth (exact for x=2^N) for NPOLY=3
// abs |error| < 2e-4, smooth (exact for x=2^N) for NPOLY=5
// rel |error| < 0.4 from precision loss very close to 1.0f
//
static inline float fastlog2(float x) {

    union { float f; int32_t i; } mant, bits = { x };

    // split into mantissa and exponent
    mant.i = (bits.i & ((1 << IEEE754_MANT_BITS) - 1)) | (IEEE754_EXPN_BIAS << IEEE754_MANT_BITS);
    int32_t expn = (bits.i >> IEEE754_MANT_BITS) - IEEE754_EXPN_BIAS;

    mant.f -= 1.0f;

    // polynomial for log2(1+x) over x=[0,1]
    //x = (-0.346555386f * mant.f + 1.346555386f) * mant.f;
    x = (((-0.0821307180f * mant.f + 0.321188984f) * mant.f - 0.677784014f) * mant.f + 1.43872575f) * mant.f;

    return x + expn;
}

//
// for -126 <= x < 128, returns exp2(x)
//
// rel |error| < 3e-3, smooth (exact for x=N) for NPOLY=3
// rel |error| < 9e-6, smooth (exact for x=N) for NPOLY=5
//
static inline float fastexp2(float x) {

    union { float f; int32_t i; } xi;

    // bias such that x > 0
    x += IEEE754_EXPN_BIAS;
    //x = MAX(x, 1.0f);
    //x = MIN(x, 254.9999f);

    // split into integer and fraction
    xi.i = (int32_t)x;
    x -= xi.i;

    // construct exp2(xi) as a float
    xi.i <<= IEEE754_MANT_BITS;

    // polynomial for exp2(x) over x=[0,1]
    //x = (0.339766028f * x + 0.660233972f) * x + 1.0f;
    x = (((0.0135557472f * x + 0.0520323690f) * x + 0.241379763f) * x + 0.693032121f) * x + 1.0f;

    return x * xi.f;
}
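
// Editorial aside (not part of this commit): a standalone harness to verify
// the NPOLY=5 error bounds quoted in the comments above. The loop ranges and
// tolerances are illustrative assumptions, not values from this codebase.
#if 0
#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
    // sweep fastlog2 against the standard library
    float maxLog2AbsErr = 0.0f;
    for (float x = 0.125f; x < 64.0f; x += 0.001f) {
        maxLog2AbsErr = std::max(maxLog2AbsErr, std::fabs(fastlog2(x) - std::log2(x)));
    }

    // sweep fastexp2 the same way, measuring relative error
    float maxExp2RelErr = 0.0f;
    for (float x = -20.0f; x < 20.0f; x += 0.001f) {
        maxExp2RelErr = std::max(maxExp2RelErr, std::fabs(fastexp2(x) - std::exp2(x)) / std::exp2(x));
    }

    // expect roughly 2e-4 and 9e-6 per the polynomial comments above
    printf("fastlog2 max abs err: %g\nfastexp2 max rel err: %g\n", maxLog2AbsErr, maxExp2RelErr);
    return 0;
}
#endif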

float AudioMixerSlave::gainForSource(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
        const glm::vec3& relativePosition, bool isEcho) {
    float gain = 1.0f;

    float distanceBetween = glm::length(relativePosition);

    if (distanceBetween < EPSILON) {
        distanceBetween = EPSILON;
    }

    if (streamToAdd.getType() == PositionalAudioStream::Injector) {
        gain *= static_cast<const InjectedAudioStream*>(&streamToAdd)->getAttenuationRatio();
    }

    if (!isEcho && (streamToAdd.getType() == PositionalAudioStream::Microphone)) {
        // source is another avatar - apply fixed off-axis attenuation to make them quieter as they turn away from the listener
        glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd.getOrientation()) * relativePosition;

        // angleOfDelivery is 0 when the source faces directly away from the listener and PI when it faces the listener
        float angleOfDelivery = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f),
                                           glm::normalize(rotatedListenerPosition));

        const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
        const float OFF_AXIS_ATTENUATION_FORMULA_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;

        float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION +
                                   (OFF_AXIS_ATTENUATION_FORMULA_STEP * (angleOfDelivery / PI_OVER_TWO));

        // multiply the current attenuation coefficient by the calculated off-axis coefficient
        gain *= offAxisCoefficient;
    }

    float attenuationPerDoublingInDistance = AudioMixer::getAttenuationPerDoublingInDistance();
    auto& zoneSettings = AudioMixer::getZoneSettings();
    auto& audioZones = AudioMixer::getAudioZones();
    for (int i = 0; i < zoneSettings.length(); ++i) {
        if (audioZones[zoneSettings[i].source].contains(streamToAdd.getPosition()) &&
            audioZones[zoneSettings[i].listener].contains(listeningNodeStream.getPosition())) {
            attenuationPerDoublingInDistance = zoneSettings[i].coefficient;
            break;
        }
    }

    const float ATTENUATION_BEGINS_AT_DISTANCE = 1.0f;
    if (distanceBetween >= ATTENUATION_BEGINS_AT_DISTANCE) {

        // translate the zone setting to gain per log2(distance)
        float g = 1.0f - attenuationPerDoublingInDistance;
        g = (g < EPSILON) ? EPSILON : g;
        g = (g > 1.0f) ? 1.0f : g;

        // calculate the distance coefficient using the distance to this node
        float distanceCoefficient = fastexp2(fastlog2(g) * fastlog2(distanceBetween / ATTENUATION_BEGINS_AT_DISTANCE));

        // multiply the current attenuation coefficient by the distance coefficient
        gain *= distanceCoefficient;
    }

    return gain;
}
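
// Editorial note (a restatement of the distance branch above, not new
// behavior): with per-doubling attenuation a and g = 1 - a clamped to
// [EPSILON, 1], the code computes
//
//     distanceCoefficient = exp2(log2(g) * log2(d)) = g^(log2 d) = d^(log2 g),
//     d = distanceBetween / ATTENUATION_BEGINS_AT_DISTANCE,
//
// so each doubling of distance multiplies the gain by g. For example, a = 0.5
// gives gain 1 at 1 m, 0.5 at 2 m, and 0.25 at 4 m.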

float AudioMixerSlave::azimuthForSource(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
        const glm::vec3& relativePosition) {
    glm::quat inverseOrientation = glm::inverse(listeningNodeStream.getOrientation());

    // compute sample delay for the two ears to create phase panning
    glm::vec3 rotatedSourcePosition = inverseOrientation * relativePosition;

    // project the rotated source position vector onto the XZ plane
    rotatedSourcePosition.y = 0.0f;

    static const float SOURCE_DISTANCE_THRESHOLD = 1e-30f;

    if (glm::length2(rotatedSourcePosition) > SOURCE_DISTANCE_THRESHOLD) {
        // produce an oriented angle about the y-axis
        return glm::orientedAngle(glm::vec3(0.0f, 0.0f, -1.0f), glm::normalize(rotatedSourcePosition), glm::vec3(0.0f, -1.0f, 0.0f));
    } else {
        // there is no distance between listener and source - return no azimuth
        return 0.0f;
    }
}
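
Editorial note on the azimuth above (assuming glm's oriented-angle convention): the source offset is rotated into the listener's frame, flattened onto the horizontal XZ plane, and compared against straight ahead (-z), giving a signed angle in [-pi, pi] about the vertical axis - 0 directly ahead, +/-pi directly behind. The 1e-30 length-squared threshold simply avoids normalizing a degenerate vector when source and listener coincide.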

assignment-client/src/audio/AudioMixerSlave.h (new file, 63 lines)
@@ -0,0 +1,63 @@
//
//  AudioMixerSlave.h
//  assignment-client/src/audio
//
//  Created by Zach Pomerantz on 11/22/16.
//  Copyright 2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_AudioMixerSlave_h
#define hifi_AudioMixerSlave_h

#include <AABox.h>
#include <AudioHRTF.h>
#include <AudioRingBuffer.h>
#include <ThreadedAssignment.h>
#include <UUIDHasher.h>
#include <NodeList.h>

#include "AudioMixerStats.h"

class PositionalAudioStream;
class AvatarAudioStream;
class AudioHRTF;
class AudioMixerClientData;

class AudioMixerSlave {
public:
    using ConstIter = NodeList::const_iterator;

    void configure(ConstIter begin, ConstIter end, unsigned int frame);

    // mix and broadcast non-ignored streams to the node
    void mix(const SharedNodePointer& node);

    AudioMixerStats stats;

private:
    // create the mix; returns true if the mix has audio
    bool prepareMix(const SharedNodePointer& node);
    // add a stream to the mix
    void addStreamToMix(AudioMixerClientData& listenerData, const QUuid& streamerID,
            const AvatarAudioStream& listenerStream, const PositionalAudioStream& streamer);

    float gainForSource(const AvatarAudioStream& listener, const PositionalAudioStream& streamer,
            const glm::vec3& relativePosition, bool isEcho);
    float azimuthForSource(const AvatarAudioStream& listener, const PositionalAudioStream& streamer,
            const glm::vec3& relativePosition);

    // mixing buffers
    float _mixSamples[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];
    int16_t _bufferSamples[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];

    // frame state
    ConstIter _begin;
    ConstIter _end;
    unsigned int _frame { 0 };
};

#endif // hifi_AudioMixerSlave_h

assignment-client/src/audio/AudioMixerSlavePool.cpp (new file, 187 lines)
@@ -0,0 +1,187 @@
//
//  AudioMixerSlavePool.cpp
//  assignment-client/src/audio
//
//  Created by Zach Pomerantz on 11/16/2016.
//  Copyright 2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include <assert.h>
#include <algorithm>

#include "AudioMixerSlavePool.h"

void AudioMixerSlaveThread::run() {
    while (true) {
        wait();

        // iterate over all available nodes
        SharedNodePointer node;
        while (try_pop(node)) {
            mix(node);
        }

        bool stopping = _stop;
        notify(stopping);
        if (stopping) {
            return;
        }
    }
}

void AudioMixerSlaveThread::wait() {
    {
        Lock lock(_pool._mutex);
        _pool._slaveCondition.wait(lock, [&] {
            assert(_pool._numStarted <= _pool._numThreads);
            return _pool._numStarted != _pool._numThreads;
        });
        ++_pool._numStarted;
    }
    configure(_pool._begin, _pool._end, _pool._frame);
}

void AudioMixerSlaveThread::notify(bool stopping) {
    {
        Lock lock(_pool._mutex);
        assert(_pool._numFinished < _pool._numThreads);
        ++_pool._numFinished;
        if (stopping) {
            ++_pool._numStopped;
        }
    }
    _pool._poolCondition.notify_one();
}

bool AudioMixerSlaveThread::try_pop(SharedNodePointer& node) {
    return _pool._queue.try_pop(node);
}
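
// Editorial note (not from this commit's sources): one mix frame is a
// two-phase handshake on _numStarted/_numFinished, both guarded by
// _pool._mutex -
//
//   pool mix():   _numStarted = _numFinished = 0; _slaveCondition.notify_all();
//   each slave:   wait()    - blocks until _numStarted != _numThreads, then increments it
//                 try_pop() - drains the shared node queue, mixing as it goes
//                 notify()  - increments _numFinished and wakes the pool
//   pool mix():   _poolCondition.wait(...) until _numFinished == _numThreads
//
// No slave can begin a second frame until the pool resets the counters.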

#ifdef AUDIO_SINGLE_THREADED
static AudioMixerSlave slave;
#endif

void AudioMixerSlavePool::mix(ConstIter begin, ConstIter end, unsigned int frame) {
    _begin = begin;
    _end = end;
    _frame = frame;

#ifdef AUDIO_SINGLE_THREADED
    slave.configure(_begin, _end, frame);
    std::for_each(begin, end, [&](const SharedNodePointer& node) {
        slave.mix(node);
    });
#else
    // fill the queue
    std::for_each(_begin, _end, [&](const SharedNodePointer& node) {
        _queue.emplace(node);
    });

    {
        Lock lock(_mutex);

        // mix
        _numStarted = _numFinished = 0;
        _slaveCondition.notify_all();

        // wait
        _poolCondition.wait(lock, [&] {
            assert(_numFinished <= _numThreads);
            return _numFinished == _numThreads;
        });

        assert(_numStarted == _numThreads);
    }

    assert(_queue.empty());
#endif
}

void AudioMixerSlavePool::each(std::function<void(AudioMixerSlave& slave)> functor) {
#ifdef AUDIO_SINGLE_THREADED
    functor(slave);
#else
    for (auto& slave : _slaves) {
        functor(*slave.get());
    }
#endif
}
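
// Editorial sketch of the intended call pattern (hypothetical caller, not
// part of this commit) - mix one frame across the pool, then gather the
// per-slave stats:
//
//   pool.mix(nodesBegin, nodesEnd, frame);
//   pool.each([&](AudioMixerSlave& slave) {
//       frameStats.accumulate(slave.stats);
//       slave.stats.reset();
//   });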

void AudioMixerSlavePool::setNumThreads(int numThreads) {
    // clamp to allowed size
    {
        int maxThreads = QThread::idealThreadCount();
        if (maxThreads == -1) {
            // idealThreadCount returns -1 if the number of cores cannot be detected
            static const int MAX_THREADS_IF_UNKNOWN = 4;
            maxThreads = MAX_THREADS_IF_UNKNOWN;
        }

        int clampedThreads = std::min(std::max(1, numThreads), maxThreads);
        if (clampedThreads != numThreads) {
            qWarning("%s: clamped to %d (was %d)", __FUNCTION__, clampedThreads, numThreads);
            numThreads = clampedThreads;
        }
    }

    resize(numThreads);
}

void AudioMixerSlavePool::resize(int numThreads) {
    assert(_numThreads == (int)_slaves.size());

#ifdef AUDIO_SINGLE_THREADED
    qDebug("%s: running single threaded", __FUNCTION__);
#else
    qDebug("%s: set %d threads (was %d)", __FUNCTION__, numThreads, _numThreads);

    Lock lock(_mutex);

    if (numThreads > _numThreads) {
        // start new slaves
        for (int i = 0; i < numThreads - _numThreads; ++i) {
            auto slave = new AudioMixerSlaveThread(*this);
            slave->start();
            _slaves.emplace_back(slave);
        }
    } else if (numThreads < _numThreads) {
        auto extraBegin = _slaves.begin() + numThreads;

        // mark slaves to stop...
        auto slave = extraBegin;
        while (slave != _slaves.end()) {
            (*slave)->_stop = true;
            ++slave;
        }

        // ...cycle them until they do stop...
        _numStopped = 0;
        while (_numStopped != (_numThreads - numThreads)) {
            _numStarted = _numFinished = _numStopped;
            _slaveCondition.notify_all();
            _poolCondition.wait(lock, [&] {
                assert(_numFinished <= _numThreads);
                return _numFinished == _numThreads;
            });
        }

        // ...wait for the threads to finish...
        slave = extraBegin;
        while (slave != _slaves.end()) {
            QThread* thread = static_cast<QThread*>(slave->get());
            static const int MAX_THREAD_WAIT_TIME = 10;
            thread->wait(MAX_THREAD_WAIT_TIME);
            ++slave;
        }

        // ...and erase them
        _slaves.erase(extraBegin, _slaves.end());
    }

    _numThreads = _numStarted = _numFinished = numThreads;
    assert(_numThreads == (int)_slaves.size());
#endif
}

assignment-client/src/audio/AudioMixerSlavePool.h (new file, 97 lines)
@@ -0,0 +1,97 @@
//
//  AudioMixerSlavePool.h
//  assignment-client/src/audio
//
//  Created by Zach Pomerantz on 11/16/2016.
//  Copyright 2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_AudioMixerSlavePool_h
#define hifi_AudioMixerSlavePool_h

#include <condition_variable>
#include <mutex>
#include <vector>

#include <tbb/concurrent_queue.h>

#include <QThread>

#include "AudioMixerSlave.h"

class AudioMixerSlavePool;

class AudioMixerSlaveThread : public QThread, public AudioMixerSlave {
    Q_OBJECT
    using ConstIter = NodeList::const_iterator;
    using Mutex = std::mutex;
    using Lock = std::unique_lock<Mutex>;

public:
    AudioMixerSlaveThread(AudioMixerSlavePool& pool) : _pool(pool) {}

    void run() override final;

private:
    friend class AudioMixerSlavePool;

    void wait();
    void notify(bool stopping);
    bool try_pop(SharedNodePointer& node);

    AudioMixerSlavePool& _pool;
    bool _stop { false };
};

// Slave pool for audio mixers
// AudioMixerSlavePool is not thread-safe! It should be instantiated and used from a single thread.
class AudioMixerSlavePool {
    using Queue = tbb::concurrent_queue<SharedNodePointer>;
    using Mutex = std::mutex;
    using Lock = std::unique_lock<Mutex>;
    using ConditionVariable = std::condition_variable;

public:
    using ConstIter = NodeList::const_iterator;

    AudioMixerSlavePool(int numThreads = QThread::idealThreadCount()) { setNumThreads(numThreads); }
    ~AudioMixerSlavePool() { resize(0); }

    // mix on slave threads
    void mix(ConstIter begin, ConstIter end, unsigned int frame);

    // iterate over all slaves
    void each(std::function<void(AudioMixerSlave& slave)> functor);

    void setNumThreads(int numThreads);
    int numThreads() { return _numThreads; }

private:
    void resize(int numThreads);

    std::vector<std::unique_ptr<AudioMixerSlaveThread>> _slaves;

    friend void AudioMixerSlaveThread::wait();
    friend void AudioMixerSlaveThread::notify(bool stopping);
    friend bool AudioMixerSlaveThread::try_pop(SharedNodePointer& node);

    // synchronization state
    Mutex _mutex;
    ConditionVariable _slaveCondition;
    ConditionVariable _poolCondition;
    int _numThreads { 0 };
    int _numStarted { 0 };  // guarded by _mutex
    int _numFinished { 0 }; // guarded by _mutex
    int _numStopped { 0 };  // guarded by _mutex

    // frame state
    Queue _queue;
    unsigned int _frame { 0 };
    ConstIter _begin;
    ConstIter _end;
};

#endif // hifi_AudioMixerSlavePool_h

assignment-client/src/audio/AudioMixerStats.cpp (new file, 34 lines)
@@ -0,0 +1,34 @@
//
//  AudioMixerStats.cpp
//  assignment-client/src/audio
//
//  Created by Zach Pomerantz on 11/22/16.
//  Copyright 2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#include "AudioMixerStats.h"

void AudioMixerStats::reset() {
    sumStreams = 0;
    sumListeners = 0;
    totalMixes = 0;
    hrtfRenders = 0;
    hrtfSilentRenders = 0;
    hrtfStruggleRenders = 0;
    manualStereoMixes = 0;
    manualEchoMixes = 0;
}

void AudioMixerStats::accumulate(const AudioMixerStats& otherStats) {
    sumStreams += otherStats.sumStreams;
    sumListeners += otherStats.sumListeners;
    totalMixes += otherStats.totalMixes;
    hrtfRenders += otherStats.hrtfRenders;
    hrtfSilentRenders += otherStats.hrtfSilentRenders;
    hrtfStruggleRenders += otherStats.hrtfStruggleRenders;
    manualStereoMixes += otherStats.manualStereoMixes;
    manualEchoMixes += otherStats.manualEchoMixes;
}

assignment-client/src/audio/AudioMixerStats.h (new file, 32 lines)
@@ -0,0 +1,32 @@
//
//  AudioMixerStats.h
//  assignment-client/src/audio
//
//  Created by Zach Pomerantz on 11/22/16.
//  Copyright 2016 High Fidelity, Inc.
//
//  Distributed under the Apache License, Version 2.0.
//  See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//

#ifndef hifi_AudioMixerStats_h
#define hifi_AudioMixerStats_h

struct AudioMixerStats {
    int sumStreams { 0 };
    int sumListeners { 0 };

    int totalMixes { 0 };

    int hrtfRenders { 0 };
    int hrtfSilentRenders { 0 };
    int hrtfStruggleRenders { 0 };

    int manualStereoMixes { 0 };
    int manualEchoMixes { 0 };

    void reset();
    void accumulate(const AudioMixerStats& otherStats);
};

#endif // hifi_AudioMixerStats_h

cmake/externals/wasapi/CMakeLists.txt (vendored)
@@ -6,8 +6,8 @@ if (WIN32)
   include(ExternalProject)
   ExternalProject_Add(
     ${EXTERNAL_NAME}
-    URL http://hifi-public.s3.amazonaws.com/dependencies/qtaudio_wasapi5.zip
-    URL_MD5 0530753e855ffc00232cc969bf1c84a8
+    URL http://hifi-public.s3.amazonaws.com/dependencies/qtaudio_wasapi4.zip
+    URL_MD5 2abde5340a64d387848f12b9536a7e85
     CONFIGURE_COMMAND ""
     BUILD_COMMAND ""
     INSTALL_COMMAND ""

@@ -978,6 +978,29 @@
       }
     ]
   },
+  {
+    "name": "audio_threading",
+    "label": "Audio Threading",
+    "assignment-types": [0],
+    "settings": [
+      {
+        "name": "auto_threads",
+        "label": "Automatically determine thread count",
+        "type": "checkbox",
+        "help": "Allow system to determine number of threads (recommended)",
+        "default": false,
+        "advanced": true
+      },
+      {
+        "name": "num_threads",
+        "label": "Number of Threads",
+        "help": "Threads to spin up for audio mixing (if not automatically set)",
+        "placeholder": "1",
+        "default": "1",
+        "advanced": true
+      }
+    ]
+  },
   {
     "name": "audio_env",
     "label": "Audio Environment",
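
Presumably these two settings feed the mixer's slave pool at settings-parse time. An editorial sketch only - the wiring is not shown in this excerpt, and `audioThreadingGroupObject`/`_slavePool` are assumed names; the key strings come from the JSON above:

    bool autoThreads = audioThreadingGroupObject["auto_threads"].toBool();
    if (!autoThreads) {
        // otherwise leave the pool at its QThread::idealThreadCount() default
        int numThreads = audioThreadingGroupObject["num_threads"].toString().toInt();
        _slavePool.setNumThreads(numThreads);
    }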

@@ -44,8 +44,8 @@
     { "from": "GamePad.RB", "to": "Standard.RB" },
     { "from": "GamePad.RS", "to": "Standard.RS" },

-    { "from": "GamePad.Start", "to": "Actions.CycleCamera" },
-    { "from": "GamePad.Back", "to": "Standard.Start" },
+    { "from": "GamePad.Start", "to": "Standard.Start" },
+    { "from": "GamePad.Back", "to": "Actions.CycleCamera" },

     { "from": "GamePad.DU", "to": "Standard.DU" },
     { "from": "GamePad.DD", "to": "Standard.DD" },

@@ -591,8 +591,10 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer, bo
     qCDebug(interfaceapp) << "[VERSION] We will use DEVELOPMENT global services.";
 #endif

-    bool wantsSandboxRunning = shouldRunServer();
+    static const QString NO_UPDATER_ARG = "--no-updater";
+    static const bool noUpdater = arguments().indexOf(NO_UPDATER_ARG) != -1;
+    static const bool wantsSandboxRunning = shouldRunServer();
     static bool determinedSandboxState = false;
     static bool sandboxIsRunning = false;
     SandboxUtils sandboxUtils;

@@ -602,11 +604,10 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer, bo
         qCDebug(interfaceapp) << "Home sandbox appears to be running.....";
         determinedSandboxState = true;
         sandboxIsRunning = true;
-    }, [&, wantsSandboxRunning]() {
+    }, [&]() {
         qCDebug(interfaceapp) << "Home sandbox does not appear to be running....";
         if (wantsSandboxRunning) {
             QString contentPath = getRunServerPath();
-            bool noUpdater = SteamClient::isRunning();
             SandboxUtils::runLocalSandbox(contentPath, true, RUNNING_MARKER_FILENAME, noUpdater);
             sandboxIsRunning = true;
         }

@@ -1128,7 +1129,7 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer, bo
 #endif

     // If launched from Steam, let it handle updates
-    if (!SteamClient::isRunning()) {
+    if (!noUpdater) {
         auto applicationUpdater = DependencyManager::get<AutoUpdater>();
         connect(applicationUpdater.data(), &AutoUpdater::newVersionIsAvailable, dialogsManager.data(), &DialogsManager::showUpdateDialog);
         applicationUpdater->checkForUpdate();

@@ -4750,7 +4751,7 @@ void Application::resetSensors(bool andReload) {
     DependencyManager::get<EyeTracker>()->reset();
     getActiveDisplayPlugin()->resetSensors();
     _overlayConductor.centerUI();
-    getMyAvatar()->reset(andReload);
+    getMyAvatar()->reset(true, andReload);
     QMetaObject::invokeMethod(DependencyManager::get<AudioClient>().data(), "reset", Qt::QueuedConnection);
 }

@@ -5735,6 +5736,7 @@ void Application::updateDisplayMode() {
         QObject::connect(displayPlugin.get(), &DisplayPlugin::recommendedFramebufferSizeChanged, [this](const QSize & size) {
             resizeGL();
         });
+        QObject::connect(displayPlugin.get(), &DisplayPlugin::resetSensorsRequested, this, &Application::requestReset);
         first = false;
     }

@@ -1165,6 +1165,7 @@ EntityItemProperties EntityItem::getProperties(EntityPropertyFlags desiredProper
     properties._id = getID();
     properties._idSet = true;
     properties._created = _created;
     properties._lastEdited = _lastEdited;

     properties.setClientOnly(_clientOnly);
     properties.setOwningAvatarID(_owningAvatarID);

@@ -369,6 +369,7 @@ QScriptValue EntityItemProperties::copyToScriptValue(QScriptEngine* engine, bool
         COPY_PROPERTY_TO_QSCRIPTVALUE_GETTER_NO_SKIP(ageAsText, formatSecondsElapsed(getAge())); // gettable, but not settable
     }

     properties.setProperty("lastEdited", convertScriptValue(engine, _lastEdited));
     COPY_PROPERTY_TO_QSCRIPTVALUE(PROP_LAST_EDITED_BY, lastEditedBy);
     COPY_PROPERTY_TO_QSCRIPTVALUE(PROP_POSITION, position);
     COPY_PROPERTY_TO_QSCRIPTVALUE(PROP_DIMENSIONS, dimensions);

@@ -174,7 +174,26 @@ public:
     void sendPeerQueryToIceServer(const HifiSockAddr& iceServerSockAddr, const QUuid& clientID, const QUuid& peerID);

     SharedNodePointer findNodeWithAddr(const HifiSockAddr& addr);

+    using value_type = SharedNodePointer;
+    using const_iterator = std::vector<value_type>::const_iterator;
+
+    // Cede control of iteration under a single read lock (e.g. for use by thread pools)
+    // Use this for nested loops instead of taking nested read locks!
+    // This allows multiple threads (i.e. a thread pool) to share a lock
+    // without deadlocking when a dying node attempts to acquire a write lock
+    template<typename NestedNodeLambda>
+    void nestedEach(NestedNodeLambda functor) {
+        QReadLocker readLock(&_nodeMutex);
+
+        std::vector<SharedNodePointer> nodes(_nodeHash.size());
+        std::transform(_nodeHash.cbegin(), _nodeHash.cend(), nodes.begin(), [](const NodeHash::value_type& it) {
+            return it.second;
+        });
+
+        functor(nodes.cbegin(), nodes.cend());
+    }
+
     template<typename NodeLambda>
     void eachNode(NodeLambda functor) {
         QReadLocker readLock(&_nodeMutex);

@@ -280,7 +299,7 @@
 protected slots:
     void connectedForLocalSocketTest();
     void errorTestingLocalSocket();

     void clientConnectionToSockAddrReset(const HifiSockAddr& sockAddr);

 protected:

@@ -347,7 +366,7 @@
             functor(it);
         }
     }

 private slots:
     void flagTimeForConnectionStep(ConnectionStep connectionStep, quint64 timestamp);
     void possiblyTimeoutSTUNAddressLookup();
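
The nestedEach addition above is what lets the audio mixer hand a consistent snapshot of the node list to its whole slave pool under one read lock. A hypothetical caller (an editorial sketch - `slavePool` and `frame` are assumed names, not code from this excerpt):

    nodeList->nestedEach([&](NodeList::const_iterator begin, NodeList::const_iterator end) {
        // every pool thread iterates this snapshot; no per-node re-locking
        slavePool.mix(begin, end, frame);
    });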

@@ -205,7 +205,8 @@ public:

 signals:
-    void recommendedFramebufferSizeChanged(const QSize & size);
+    void recommendedFramebufferSizeChanged(const QSize& size);
+    void resetSensorsRequested();

 protected:
     void incrementPresentCount();

@@ -24,7 +24,7 @@ struct FrustumGrid {
     mat4 eyeToWorldMat;
 };

-uniform frustumGridBuffer {
+layout(std140) uniform frustumGridBuffer {
     FrustumGrid frustumGrid;
 };

@@ -51,16 +51,20 @@ float projection_getFar(mat4 projection) {
 #define GRID_INDEX_TYPE ivec4
 #define GRID_FETCH_BUFFER(i) i / 4][i % 4
 <@else@>
-#define GRID_NUM_ELEMENTS 16384
+#define GRID_NUM_ELEMENTS 4096
+#define GRID_INDEX_TYPE ivec4
+#define GRID_FETCH_BUFFER(i) i / 4][i % 4
+
+<!#define GRID_NUM_ELEMENTS 16384
 #define GRID_INDEX_TYPE int
-#define GRID_FETCH_BUFFER(i) i
+#define GRID_FETCH_BUFFER(i) i!>
 <@endif@>

-uniform clusterGridBuffer {
+layout(std140) uniform clusterGridBuffer {
     GRID_INDEX_TYPE _clusterGridTable[GRID_NUM_ELEMENTS];
 };

-uniform clusterContentBuffer {
+layout(std140) uniform clusterContentBuffer {
     GRID_INDEX_TYPE _clusterGridContent[GRID_NUM_ELEMENTS];
 };

@@ -27,21 +27,21 @@

 enum LightClusterGridShader_MapSlot {
     DEFERRED_BUFFER_LINEAR_DEPTH_UNIT = 0,
-    DEFERRED_BUFFER_COLOR_UNIT,
-    DEFERRED_BUFFER_NORMAL_UNIT,
-    DEFERRED_BUFFER_EMISSIVE_UNIT,
-    DEFERRED_BUFFER_DEPTH_UNIT,
+    DEFERRED_BUFFER_COLOR_UNIT = 1,
+    DEFERRED_BUFFER_NORMAL_UNIT = 2,
+    DEFERRED_BUFFER_EMISSIVE_UNIT = 3,
+    DEFERRED_BUFFER_DEPTH_UNIT = 4,
 };

 enum LightClusterGridShader_BufferSlot {
     LIGHT_CLUSTER_GRID_FRUSTUM_GRID_SLOT = 0,
-    DEFERRED_FRAME_TRANSFORM_BUFFER_SLOT,
-    CAMERA_CORRECTION_BUFFER_SLOT,
+    DEFERRED_FRAME_TRANSFORM_BUFFER_SLOT = 1,
+    CAMERA_CORRECTION_BUFFER_SLOT = 2,
     LIGHT_GPU_SLOT = render::ShapePipeline::Slot::LIGHT,
-    LIGHT_INDEX_GPU_SLOT,
+    LIGHT_INDEX_GPU_SLOT = 5,

-    LIGHT_CLUSTER_GRID_CLUSTER_GRID_SLOT,
-    LIGHT_CLUSTER_GRID_CLUSTER_CONTENT_SLOT,
+    LIGHT_CLUSTER_GRID_CLUSTER_GRID_SLOT = 6,
+    LIGHT_CLUSTER_GRID_CLUSTER_CONTENT_SLOT = 7,
 };

 FrustumGrid::FrustumGrid(const FrustumGrid& source) :

@@ -34,7 +34,6 @@ in vec2 _texCoord0;
 out vec4 _fragColor;

 void main(void) {

     // Grab the fragment data from the uv
     vec2 texCoord = _texCoord0.st;

@@ -49,7 +48,7 @@ void main(void) {
     // Frag pos in world
     mat4 invViewMat = getViewInverse();
     vec4 fragPos = invViewMat * fragPosition;

     // From frag world pos find the cluster
     vec4 clusterEyePos = frustumGrid_worldToEye(fragPos);
     ivec3 clusterPos = frustumGrid_eyeToClusterPos(clusterEyePos.xyz);

@@ -84,8 +83,8 @@ void main(void) {
     vec4 fragEyeVector = invViewMat * vec4(-frag.position.xyz, 0.0);
     vec3 fragEyeDir = normalize(fragEyeVector.xyz);

-    // COmpute the rougness into gloss2 once:
-    float fragGloss2 = pow(frag.roughness + 0.001, 2.0);
+    // Compute the roughness into gloss2 once:
+    float fragGloss2 = pow(frag.roughness + 0.001, 4.0);
     bool withScattering = (frag.scattering * isScatteringEnabled() > 0.0);

     int numLightTouching = 0;

@@ -568,46 +568,20 @@ void AABox::transform(const Transform& transform) {
     translate(transform.getTranslation());
 }

+// Logic based on http://clb.demon.fi/MathGeoLib/nightly/docs/AABB.cpp_code.html#471
 void AABox::transform(const glm::mat4& matrix) {
-    auto minimum = _corner;
-    auto maximum = _corner + _scale;
+    auto halfSize = _scale * 0.5f;
+    auto center = _corner + halfSize;
+    halfSize = abs(halfSize);
+    auto newCenter = transformPoint(matrix, center);

-    glm::vec3 bottomLeftNear(minimum.x, minimum.y, minimum.z);
-    glm::vec3 bottomRightNear(maximum.x, minimum.y, minimum.z);
-    glm::vec3 bottomLeftFar(minimum.x, minimum.y, maximum.z);
-    glm::vec3 bottomRightFar(maximum.x, minimum.y, maximum.z);
-    glm::vec3 topLeftNear(minimum.x, maximum.y, minimum.z);
-    glm::vec3 topRightNear(maximum.x, maximum.y, minimum.z);
-    glm::vec3 topLeftFar(minimum.x, maximum.y, maximum.z);
-    glm::vec3 topRightFar(maximum.x, maximum.y, maximum.z);
+    auto mm = glm::transpose(glm::mat3(matrix));
+    vec3 newDir = vec3(
+        glm::dot(glm::abs(vec3(mm[0])), halfSize),
+        glm::dot(glm::abs(vec3(mm[1])), halfSize),
+        glm::dot(glm::abs(vec3(mm[2])), halfSize)
+    );

-    glm::vec3 bottomLeftNearTransformed = transformPoint(matrix, bottomLeftNear);
-    glm::vec3 bottomRightNearTransformed = transformPoint(matrix, bottomRightNear);
-    glm::vec3 bottomLeftFarTransformed = transformPoint(matrix, bottomLeftFar);
-    glm::vec3 bottomRightFarTransformed = transformPoint(matrix, bottomRightFar);
-    glm::vec3 topLeftNearTransformed = transformPoint(matrix, topLeftNear);
-    glm::vec3 topRightNearTransformed = transformPoint(matrix, topRightNear);
-    glm::vec3 topLeftFarTransformed = transformPoint(matrix, topLeftFar);
-    glm::vec3 topRightFarTransformed = transformPoint(matrix, topRightFar);

-    minimum = glm::min(bottomLeftNearTransformed,
-                glm::min(bottomRightNearTransformed,
-                glm::min(bottomLeftFarTransformed,
-                glm::min(bottomRightFarTransformed,
-                glm::min(topLeftNearTransformed,
-                glm::min(topRightNearTransformed,
-                glm::min(topLeftFarTransformed,
-                topRightFarTransformed)))))));

-    maximum = glm::max(bottomLeftNearTransformed,
-                glm::max(bottomRightNearTransformed,
-                glm::max(bottomLeftFarTransformed,
-                glm::max(bottomRightFarTransformed,
-                glm::max(topLeftNearTransformed,
-                glm::max(topRightNearTransformed,
-                glm::max(topLeftFarTransformed,
-                topRightFarTransformed)))))));

-    _corner = minimum;
-    _scale = maximum - minimum;
+    _corner = newCenter - newDir;
+    _scale = newDir * 2.0f;
 }
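
Editorial note on the new AABox::transform (M is the upper-left 3x3 of the matrix, t its translation): for a box with center c and half-extent h, the tightest axis-aligned bounds of the transformed box are

    c' = M c + t,    h'_i = sum_j |M_ij| * h_j

which is exactly what the glm::dot(glm::abs(vec3(mm[i])), halfSize) rows compute. Transforming all eight corners and min/max-ing them, as the removed code did, yields the same bounds with roughly four times the arithmetic.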

@@ -21,6 +21,15 @@ void OculusBaseDisplayPlugin::resetSensors() {
 }

 bool OculusBaseDisplayPlugin::beginFrameRender(uint32_t frameIndex) {
+    handleOVREvents();
+    if (quitRequested()) {
+        QMetaObject::invokeMethod(qApp, "quit");
+        return false;
+    }
+    if (reorientRequested()) {
+        emit resetSensorsRequested();
+    }
+
     _currentRenderFrameInfo = FrameInfo();
     _currentRenderFrameInfo.sensorSampleTime = ovr_GetTimeInSeconds();
     _currentRenderFrameInfo.predictedDisplayTime = ovr_GetPredictedDisplayTime(_session, frameIndex);

@@ -20,15 +20,15 @@
 #include <controllers/Pose.h>
 #include <NumericalConstants.h>

 using Mutex = std::mutex;
 using Lock = std::unique_lock<Mutex>;

 Q_DECLARE_LOGGING_CATEGORY(oculus)
 Q_LOGGING_CATEGORY(oculus, "hifi.plugins.oculus")

 static std::atomic<uint32_t> refCount { 0 };
 static ovrSession session { nullptr };

 static bool _quitRequested { false };
 static bool _reorientRequested { false };

 inline ovrErrorInfo getError() {
     ovrErrorInfo error;
     ovr_GetLastErrorInfo(&error);

@@ -116,6 +116,26 @@ void releaseOculusSession() {
 #endif
 }

+void handleOVREvents() {
+    if (!session) {
+        return;
+    }
+
+    ovrSessionStatus status;
+    if (!OVR_SUCCESS(ovr_GetSessionStatus(session, &status))) {
+        return;
+    }
+
+    _quitRequested = status.ShouldQuit;
+    _reorientRequested = status.ShouldRecenter;
+}
+
+bool quitRequested() {
+    return _quitRequested;
+}
+bool reorientRequested() {
+    return _reorientRequested;
+}

 controller::Pose ovrControllerPoseToHandPose(
     ovrHandType hand,

@@ -20,6 +20,10 @@ bool oculusAvailable();
 ovrSession acquireOculusSession();
 void releaseOculusSession();

+void handleOVREvents();
+bool quitRequested();
+bool reorientRequested();
+
 // Convenience method for looping over each eye with a lambda
 template <typename Function>
 inline void ovr_for_each_eye(Function function) {

@@ -50,4 +50,16 @@ describe('Entity', function() {
         var props = Entities.getEntityProperties(boxEntity);
         expect(Math.round(props.position.z)).toEqual(Math.round(newPos.z));
     });

+    it("\'s last edited property working correctly", function() {
+        var props = Entities.getEntityProperties(boxEntity);
+        expect(props.lastEdited).toBeDefined();
+        expect(props.lastEdited).not.toBe(0);
+        var prevLastEdited = props.lastEdited;
+
+        // Now we make an edit to the entity, which should update its last edited time
+        Entities.editEntity(boxEntity, {color: {red: 0, green: 255, blue: 0}});
+        props = Entities.getEntityProperties(boxEntity);
+        expect(props.lastEdited).toBeGreaterThan(prevLastEdited);
+    });
 });