Merge branch 'master' into 21089

David Rowe 2016-12-10 11:47:43 +13:00
commit a52d8d1982
29 changed files with 2399 additions and 779 deletions

.gitignore

@@ -4,6 +4,10 @@ CMakeFiles/
CMakeScripts/
cmake_install.cmake
build*/
release*/
debug*/
gprof*/
valgrind*/
ext/
Makefile
*.user

assignment-client/src/audio/AudioMixer.cpp

@@ -9,38 +9,12 @@
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <errno.h>
#include <fcntl.h>
#include <fstream>
#include <iostream>
#include <math.h>
#include <memory>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <thread>
#ifdef _WIN32
#include <math.h>
#else
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#endif //_WIN32
#include <glm/glm.hpp>
#include <glm/gtx/norm.hpp>
#include <glm/gtx/vector_angle.hpp>
#include <QtCore/QCoreApplication>
#include <QtCore/QJsonArray>
#include <QtCore/QJsonDocument>
#include <QtCore/QJsonObject>
#include <QtCore/QJsonValue>
#include <QtCore/QThread>
#include <QtNetwork/QNetworkRequest>
#include <QtNetwork/QNetworkReply>
#include <LogHandler.h>
#include <NetworkAccessManager.h>
@@ -67,22 +41,20 @@ static const float DEFAULT_NOISE_MUTING_THRESHOLD = 0.003f;
static const QString AUDIO_MIXER_LOGGING_TARGET_NAME = "audio-mixer";
static const QString AUDIO_ENV_GROUP_KEY = "audio_env";
static const QString AUDIO_BUFFER_GROUP_KEY = "audio_buffer";
static const QString AUDIO_THREADING_GROUP_KEY = "audio_threading";
int AudioMixer::_numStaticJitterFrames{ -1 };
bool AudioMixer::_enableFilter = true;
bool AudioMixer::shouldMute(float quietestFrame) {
return (quietestFrame > _noiseMutingThreshold);
}
float AudioMixer::_noiseMutingThreshold{ DEFAULT_NOISE_MUTING_THRESHOLD };
float AudioMixer::_attenuationPerDoublingInDistance{ DEFAULT_ATTENUATION_PER_DOUBLING_IN_DISTANCE };
float AudioMixer::_trailingSleepRatio{ 1.0f };
float AudioMixer::_performanceThrottlingRatio{ 0.0f };
float AudioMixer::_minAudibilityThreshold{ LOUDNESS_TO_DISTANCE_RATIO / 2.0f };
QHash<QString, AABox> AudioMixer::_audioZones;
QVector<AudioMixer::ZoneSettings> AudioMixer::_zoneSettings;
QVector<AudioMixer::ReverbSettings> AudioMixer::_zoneReverbSettings;
AudioMixer::AudioMixer(ReceivedMessage& message) :
ThreadedAssignment(message),
_trailingSleepRatio(1.0f),
_minAudibilityThreshold(LOUDNESS_TO_DISTANCE_RATIO / 2.0f),
_performanceThrottlingRatio(0.0f),
_attenuationPerDoublingInDistance(DEFAULT_ATTENUATION_PER_DOUBLING_IN_DISTANCE),
_noiseMutingThreshold(DEFAULT_NOISE_MUTING_THRESHOLD)
{
ThreadedAssignment(message) {
auto nodeList = DependencyManager::get<NodeList>();
auto& packetReceiver = nodeList->getPacketReceiver();
@@ -96,405 +68,10 @@ AudioMixer::AudioMixer(ReceivedMessage& message) :
packetReceiver.registerListener(PacketType::KillAvatar, this, "handleKillAvatarPacket");
packetReceiver.registerListener(PacketType::NodeMuteRequest, this, "handleNodeMuteRequestPacket");
packetReceiver.registerListener(PacketType::RadiusIgnoreRequest, this, "handleRadiusIgnoreRequestPacket");
connect(nodeList.data(), &NodeList::nodeKilled, this, &AudioMixer::handleNodeKilled);
}
const float ATTENUATION_BEGINS_AT_DISTANCE = 1.0f;
const int IEEE754_MANT_BITS = 23;
const int IEEE754_EXPN_BIAS = 127;
//
// for x > 0.0f, returns log2(x)
// for x <= 0.0f, returns large negative value
//
// abs |error| < 8e-3, smooth (exact for x=2^N) for NPOLY=3
// abs |error| < 2e-4, smooth (exact for x=2^N) for NPOLY=5
// rel |error| < 0.4 from precision loss very close to 1.0f
//
static inline float fastlog2(float x) {
union { float f; int32_t i; } mant, bits = { x };
// split into mantissa and exponent
mant.i = (bits.i & ((1 << IEEE754_MANT_BITS) - 1)) | (IEEE754_EXPN_BIAS << IEEE754_MANT_BITS);
int32_t expn = (bits.i >> IEEE754_MANT_BITS) - IEEE754_EXPN_BIAS;
mant.f -= 1.0f;
// polynomial for log2(1+x) over x=[0,1]
//x = (-0.346555386f * mant.f + 1.346555386f) * mant.f;
x = (((-0.0821307180f * mant.f + 0.321188984f) * mant.f - 0.677784014f) * mant.f + 1.43872575f) * mant.f;
return x + expn;
}
//
// for -126 <= x < 128, returns exp2(x)
//
// rel |error| < 3e-3, smooth (exact for x=N) for NPOLY=3
// rel |error| < 9e-6, smooth (exact for x=N) for NPOLY=5
//
static inline float fastexp2(float x) {
union { float f; int32_t i; } xi;
// bias such that x > 0
x += IEEE754_EXPN_BIAS;
//x = MAX(x, 1.0f);
//x = MIN(x, 254.9999f);
// split into integer and fraction
xi.i = (int32_t)x;
x -= xi.i;
// construct exp2(xi) as a float
xi.i <<= IEEE754_MANT_BITS;
// polynomial for exp2(x) over x=[0,1]
//x = (0.339766028f * x + 0.660233972f) * x + 1.0f;
x = (((0.0135557472f * x + 0.0520323690f) * x + 0.241379763f) * x + 0.693032121f) * x + 1.0f;
return x * xi.f;
}
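The stated error bounds are straightforward to verify. A standalone sketch (a separate test program, not part of this file, assuming the two functions above are copied in verbatim) compares the NPOLY=5 polynomials against the standard library:

#include <algorithm>
#include <cmath>
#include <cstdio>
// fastlog2() and fastexp2() copied in as defined above

int main() {
    float maxLogErr = 0.0f, maxExpErr = 0.0f;
    for (float x = 0.125f; x < 8.0f; x += 0.001f) {
        maxLogErr = std::max(maxLogErr, std::fabs(fastlog2(x) - std::log2(x)));
    }
    for (float x = -8.0f; x < 8.0f; x += 0.001f) {
        float exact = std::exp2(x);
        maxExpErr = std::max(maxExpErr, std::fabs(fastexp2(x) - exact) / exact);
    }
    // expect roughly 2e-4 and 9e-6, matching the error comments above
    std::printf("max abs log2 error: %g\nmax rel exp2 error: %g\n", maxLogErr, maxExpErr);
}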
float AudioMixer::gainForSource(const PositionalAudioStream& streamToAdd,
const AvatarAudioStream& listeningNodeStream, const glm::vec3& relativePosition, bool isEcho) {
float gain = 1.0f;
float distanceBetween = glm::length(relativePosition);
if (distanceBetween < EPSILON) {
distanceBetween = EPSILON;
}
if (streamToAdd.getType() == PositionalAudioStream::Injector) {
gain *= reinterpret_cast<const InjectedAudioStream*>(&streamToAdd)->getAttenuationRatio();
}
if (!isEcho && (streamToAdd.getType() == PositionalAudioStream::Microphone)) {
// source is another avatar, apply fixed off-axis attenuation to make them quieter as they turn away from listener
glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd.getOrientation()) * relativePosition;
float angleOfDelivery = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f),
glm::normalize(rotatedListenerPosition));
const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
const float OFF_AXIS_ATTENUATION_FORMULA_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;
float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION +
(OFF_AXIS_ATTENUATION_FORMULA_STEP * (angleOfDelivery / PI_OVER_TWO));
// multiply the current attenuation coefficient by the calculated off axis coefficient
gain *= offAxisCoefficient;
}
float attenuationPerDoublingInDistance = _attenuationPerDoublingInDistance;
for (int i = 0; i < _zonesSettings.length(); ++i) {
if (_audioZones[_zonesSettings[i].source].contains(streamToAdd.getPosition()) &&
_audioZones[_zonesSettings[i].listener].contains(listeningNodeStream.getPosition())) {
attenuationPerDoublingInDistance = _zonesSettings[i].coefficient;
break;
}
}
if (distanceBetween >= ATTENUATION_BEGINS_AT_DISTANCE) {
// translate the zone setting to gain per log2(distance)
float g = 1.0f - attenuationPerDoublingInDistance;
g = (g < EPSILON) ? EPSILON : g;
g = (g > 1.0f) ? 1.0f : g;
// calculate the distance coefficient using the distance to this node
float distanceCoefficient = fastexp2(fastlog2(g) * fastlog2(distanceBetween/ATTENUATION_BEGINS_AT_DISTANCE));
// multiply the current attenuation coefficient by the distance coefficient
gain *= distanceCoefficient;
}
return gain;
}
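Note that the distance term reduces to a power law: exp2(log2(g) * log2(d)) == pow(d, log2(g)), so every doubling of distance multiplies the gain by g = 1 - attenuationPerDoublingInDistance. A standalone sketch of the same math using the exact library functions, with an illustrative (not default) setting of 0.5:

#include <cmath>
#include <cstdio>

int main() {
    const float attenuationPerDoubling = 0.5f; // illustrative value, not a mixer default
    const float g = 1.0f - attenuationPerDoubling;
    for (float d = 1.0f; d <= 16.0f; d *= 2.0f) {
        // same expression as gainForSource, with std:: functions
        float coefficient = std::exp2(std::log2(g) * std::log2(d));
        std::printf("distance %2g -> gain x%g\n", d, coefficient); // 1, 0.5, 0.25, 0.125, ...
    }
}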
float AudioMixer::azimuthForSource(const PositionalAudioStream& streamToAdd, const AvatarAudioStream& listeningNodeStream,
const glm::vec3& relativePosition) {
glm::quat inverseOrientation = glm::inverse(listeningNodeStream.getOrientation());
// Compute sample delay for the two ears to create phase panning
glm::vec3 rotatedSourcePosition = inverseOrientation * relativePosition;
// project the rotated source position vector onto the XZ plane
rotatedSourcePosition.y = 0.0f;
static const float SOURCE_DISTANCE_THRESHOLD = 1e-30f;
if (glm::length2(rotatedSourcePosition) > SOURCE_DISTANCE_THRESHOLD) {
// produce an oriented angle about the y-axis
return glm::orientedAngle(glm::vec3(0.0f, 0.0f, -1.0f), glm::normalize(rotatedSourcePosition), glm::vec3(0.0f, -1.0f, 0.0f));
} else {
// there is no distance between listener and source - return no azimuth
return 0;
}
}
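With the reference axis (0, -1, 0), glm::orientedAngle yields positive azimuths for sources to the listener's right and negative to the left. A minimal standalone check (newer glm versions may also require GLM_ENABLE_EXPERIMENTAL to be defined for the gtx headers):

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/glm.hpp>
#include <glm/gtx/vector_angle.hpp>
#include <cstdio>

int main() {
    const glm::vec3 forward(0.0f, 0.0f, -1.0f);
    const glm::vec3 reference(0.0f, -1.0f, 0.0f);
    // a source one meter to the listener's right, then one to the left
    std::printf("right: %g\n", glm::orientedAngle(forward, glm::vec3(1.0f, 0.0f, 0.0f), reference));  // ~ +pi/2
    std::printf("left:  %g\n", glm::orientedAngle(forward, glm::vec3(-1.0f, 0.0f, 0.0f), reference)); // ~ -pi/2
}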
void AudioMixer::addStreamToMixForListeningNodeWithStream(AudioMixerClientData& listenerNodeData,
const PositionalAudioStream& streamToAdd,
const QUuid& sourceNodeID,
const AvatarAudioStream& listeningNodeStream) {
// to reduce artifacts we calculate the gain and azimuth for every source for this listener
// even if we are not going to end up mixing in this source
++_totalMixes;
// this ensures that the tail of any previously mixed audio or the first block of new audio sounds correct
// check if this is a server echo of a source back to itself
bool isEcho = (&streamToAdd == &listeningNodeStream);
glm::vec3 relativePosition = streamToAdd.getPosition() - listeningNodeStream.getPosition();
// figure out the distance between source and listener
float distance = glm::max(glm::length(relativePosition), EPSILON);
// figure out the gain for this source at the listener
float gain = gainForSource(streamToAdd, listeningNodeStream, relativePosition, isEcho);
// figure out the azimuth to this source at the listener
float azimuth = isEcho ? 0.0f : azimuthForSource(streamToAdd, listeningNodeStream, relativePosition);
float repeatedFrameFadeFactor = 1.0f;
static const int HRTF_DATASET_INDEX = 1;
if (!streamToAdd.lastPopSucceeded()) {
bool forceSilentBlock = true;
if (!streamToAdd.getLastPopOutput().isNull()) {
bool isInjector = dynamic_cast<const InjectedAudioStream*>(&streamToAdd);
// in an injector, just go silent - the injector has likely ended
// in other inputs (microphone, &c.), repeat with fade to avoid the harsh jump to silence
// we'll repeat the last block until it has a block to mix
// and we'll gradually fade that repeated block into silence.
// calculate its fade factor, which depends on how many times it's already been repeated.
repeatedFrameFadeFactor = calculateRepeatedFrameFadeFactor(streamToAdd.getConsecutiveNotMixedCount() - 1);
if (!isInjector && repeatedFrameFadeFactor > 0.0f) {
// apply the repeatedFrameFadeFactor to the gain
gain *= repeatedFrameFadeFactor;
forceSilentBlock = false;
}
}
if (forceSilentBlock) {
// we're deciding not to repeat either since we've already done it enough times or repetition with fade is disabled
// in this case we will call renderSilent with a forced silent block
// this ensures the correct tail from the previously mixed block and the correct spatialization of first block
// of any upcoming audio
if (!streamToAdd.isStereo() && !isEcho) {
// get the existing listener-source HRTF object, or create a new one
auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());
// this is not done for stereo streams since they do not go through the HRTF
static int16_t silentMonoBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL] = {};
hrtf.renderSilent(silentMonoBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
++_hrtfSilentRenders;
}
return;
}
}
// grab the stream from the ring buffer
AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd.getLastPopOutput();
if (streamToAdd.isStereo() || isEcho) {
// this is a stereo source or server echo so we do not pass it through the HRTF
// simply apply our calculated gain to each sample
if (streamToAdd.isStereo()) {
for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
_mixedSamples[i] += float(streamPopOutput[i] * gain / AudioConstants::MAX_SAMPLE_VALUE);
}
++_manualStereoMixes;
} else {
for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; i += 2) {
auto monoSample = float(streamPopOutput[i / 2] * gain / AudioConstants::MAX_SAMPLE_VALUE);
_mixedSamples[i] += monoSample;
_mixedSamples[i + 1] += monoSample;
}
++_manualEchoMixes;
}
return;
}
// get the existing listener-source HRTF object, or create a new one
auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());
static int16_t streamBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL];
streamPopOutput.readSamples(streamBlock, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
// if the frame we're about to mix is silent, simply call render silent and move on
if (streamToAdd.getLastPopOutputLoudness() == 0.0f) {
// silent frame from source
// we still need to call renderSilent via the HRTF for mono source
hrtf.renderSilent(streamBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
++_hrtfSilentRenders;
return;
}
if (_performanceThrottlingRatio > 0.0f
&& streamToAdd.getLastPopOutputTrailingLoudness() / glm::length(relativePosition) <= _minAudibilityThreshold) {
// the mixer is struggling so we're going to drop off some streams
// we call renderSilent via the HRTF with the actual frame data and a gain of 0.0
hrtf.renderSilent(streamBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, distance, 0.0f,
AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
++_hrtfStruggleRenders;
return;
}
++_hrtfRenders;
// mono stream, call the HRTF with our block and calculated azimuth and gain
hrtf.render(streamBlock, _mixedSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
}
bool AudioMixer::prepareMixForListeningNode(Node* node) {
AvatarAudioStream* nodeAudioStream = static_cast<AudioMixerClientData*>(node->getLinkedData())->getAvatarAudioStream();
AudioMixerClientData* listenerNodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
// zero out the client mix for this node
memset(_mixedSamples, 0, sizeof(_mixedSamples));
// loop through all other nodes that have sufficient audio to mix
DependencyManager::get<NodeList>()->eachNode([&](const SharedNodePointer& otherNode){
// make sure that we have audio data for this other node
// and that it isn't being ignored by our listening node
// and that it isn't ignoring our listening node
if (otherNode->getLinkedData()
&& !node->isIgnoringNodeWithID(otherNode->getUUID()) && !otherNode->isIgnoringNodeWithID(node->getUUID())) {
AudioMixerClientData* otherNodeClientData = (AudioMixerClientData*) otherNode->getLinkedData();
// check to see if we're ignoring in radius
bool insideIgnoreRadius = false;
if (node->isIgnoreRadiusEnabled() || otherNode->isIgnoreRadiusEnabled()) {
AudioMixerClientData* otherData = reinterpret_cast<AudioMixerClientData*>(otherNode->getLinkedData());
AudioMixerClientData* nodeData = reinterpret_cast<AudioMixerClientData*>(node->getLinkedData());
float ignoreRadius = glm::min(node->getIgnoreRadius(), otherNode->getIgnoreRadius());
if (glm::distance(nodeData->getPosition(), otherData->getPosition()) < ignoreRadius) {
insideIgnoreRadius = true;
}
}
if (!insideIgnoreRadius) {
// enumerate the ARBs attached to the otherNode and add all that should be added to mix
auto streamsCopy = otherNodeClientData->getAudioStreams();
for (auto& streamPair : streamsCopy) {
auto otherNodeStream = streamPair.second;
if (*otherNode != *node || otherNodeStream->shouldLoopbackForNode()) {
addStreamToMixForListeningNodeWithStream(*listenerNodeData, *otherNodeStream, otherNode->getUUID(),
*nodeAudioStream);
}
}
}
}
});
// use the per listener AudioLimiter to render the mixed data...
listenerNodeData->audioLimiter.render(_mixedSamples, _clampedSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
// check for silent audio after the peak limiter has converted the samples
bool hasAudio = false;
for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
if (_clampedSamples[i] != 0) {
hasAudio = true;
break;
}
}
return hasAudio;
}
void AudioMixer::sendAudioEnvironmentPacket(SharedNodePointer node) {
// Send stream properties
bool hasReverb = false;
float reverbTime, wetLevel;
// find reverb properties
for (int i = 0; i < _zoneReverbSettings.size(); ++i) {
AudioMixerClientData* data = static_cast<AudioMixerClientData*>(node->getLinkedData());
glm::vec3 streamPosition = data->getAvatarAudioStream()->getPosition();
AABox box = _audioZones[_zoneReverbSettings[i].zone];
if (box.contains(streamPosition)) {
hasReverb = true;
reverbTime = _zoneReverbSettings[i].reverbTime;
wetLevel = _zoneReverbSettings[i].wetLevel;
break;
}
}
AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
AvatarAudioStream* stream = nodeData->getAvatarAudioStream();
bool dataChanged = (stream->hasReverb() != hasReverb) ||
(stream->hasReverb() && (stream->getRevebTime() != reverbTime ||
stream->getWetLevel() != wetLevel));
if (dataChanged) {
// Update stream
if (hasReverb) {
stream->setReverb(reverbTime, wetLevel);
} else {
stream->clearReverb();
}
}
// Send at change or every so often
float CHANCE_OF_SEND = 0.01f;
bool sendData = dataChanged || (randFloat() < CHANCE_OF_SEND);
if (sendData) {
auto nodeList = DependencyManager::get<NodeList>();
unsigned char bitset = 0;
int packetSize = sizeof(bitset);
if (hasReverb) {
packetSize += sizeof(reverbTime) + sizeof(wetLevel);
}
auto envPacket = NLPacket::create(PacketType::AudioEnvironment, packetSize);
if (hasReverb) {
setAtBit(bitset, HAS_REVERB_BIT);
}
envPacket->writePrimitive(bitset);
if (hasReverb) {
envPacket->writePrimitive(reverbTime);
envPacket->writePrimitive(wetLevel);
}
nodeList->sendPacket(std::move(envPacket), *node);
}
}
void AudioMixer::handleNodeAudioPacket(QSharedPointer<ReceivedMessage> message, SharedNodePointer sendingNode) {
getOrCreateClientData(sendingNode.data());
DependencyManager::get<NodeList>()->updateNodeWithDataFromPacket(message, sendingNode);
@@ -668,8 +245,8 @@ void AudioMixer::removeHRTFsForFinishedInjector(const QUuid& streamID) {
}
QString AudioMixer::percentageForMixStats(int counter) {
if (_totalMixes > 0) {
float mixPercentage = (float(counter) / _totalMixes) * 100.0f;
if (_stats.totalMixes > 0) {
float mixPercentage = (float(counter) / _stats.totalMixes) * 100.0f;
return QString::number(mixPercentage, 'f', 2);
} else {
return QString("0.0");
@@ -683,34 +260,57 @@ void AudioMixer::sendStatsPacket() {
return;
}
// general stats
statsObject["useDynamicJitterBuffers"] = _numStaticJitterFrames == -1;
statsObject["trailing_sleep_percentage"] = _trailingSleepRatio * 100.0f;
statsObject["performance_throttling_ratio"] = _performanceThrottlingRatio;
statsObject["avg_streams_per_frame"] = (float)_sumStreams / (float)_numStatFrames;
statsObject["avg_listeners_per_frame"] = (float)_sumListeners / (float)_numStatFrames;
statsObject["avg_streams_per_frame"] = (float)_stats.sumStreams / (float)_numStatFrames;
statsObject["avg_listeners_per_frame"] = (float)_stats.sumListeners / (float)_numStatFrames;
// timing stats
QJsonObject timingStats;
uint64_t timing, trailing;
_sleepTiming.get(timing, trailing);
timingStats["us_per_sleep"] = (qint64)(timing / _numStatFrames);
timingStats["us_per_sleep_trailing"] = (qint64)(trailing / _numStatFrames);
_frameTiming.get(timing, trailing);
timingStats["us_per_frame"] = (qint64)(timing / _numStatFrames);
timingStats["us_per_frame_trailing"] = (qint64)(trailing / _numStatFrames);
_prepareTiming.get(timing, trailing);
timingStats["us_per_prepare"] = (qint64)(timing / _numStatFrames);
timingStats["us_per_prepare_trailing"] = (qint64)(trailing / _numStatFrames);
_mixTiming.get(timing, trailing);
timingStats["us_per_mix"] = (qint64)(timing / _numStatFrames);
timingStats["us_per_mix_trailing"] = (qint64)(trailing / _numStatFrames);
_eventsTiming.get(timing, trailing);
timingStats["us_per_events"] = (qint64)(timing / _numStatFrames);
timingStats["us_per_events_trailing"] = (qint64)(trailing / _numStatFrames);
// call it "avg_..." to keep it higher in the display, sorted alphabetically
statsObject["avg_timing_stats"] = timingStats;
// mix stats
QJsonObject mixStats;
mixStats["%_hrtf_mixes"] = percentageForMixStats(_hrtfRenders);
mixStats["%_hrtf_silent_mixes"] = percentageForMixStats(_hrtfSilentRenders);
mixStats["%_hrtf_struggle_mixes"] = percentageForMixStats(_hrtfStruggleRenders);
mixStats["%_manual_stereo_mixes"] = percentageForMixStats(_manualStereoMixes);
mixStats["%_manual_echo_mixes"] = percentageForMixStats(_manualEchoMixes);
mixStats["total_mixes"] = _totalMixes;
mixStats["avg_mixes_per_block"] = _totalMixes / _numStatFrames;
mixStats["%_hrtf_mixes"] = percentageForMixStats(_stats.hrtfRenders);
mixStats["%_hrtf_silent_mixes"] = percentageForMixStats(_stats.hrtfSilentRenders);
mixStats["%_hrtf_struggle_mixes"] = percentageForMixStats(_stats.hrtfStruggleRenders);
mixStats["%_manual_stereo_mixes"] = percentageForMixStats(_stats.manualStereoMixes);
mixStats["%_manual_echo_mixes"] = percentageForMixStats(_stats.manualEchoMixes);
mixStats["total_mixes"] = _stats.totalMixes;
mixStats["avg_mixes_per_block"] = _stats.totalMixes / _numStatFrames;
statsObject["mix_stats"] = mixStats;
_sumStreams = 0;
_sumListeners = 0;
_hrtfRenders = 0;
_hrtfSilentRenders = 0;
_hrtfStruggleRenders = 0;
_manualStereoMixes = 0;
_manualEchoMixes = 0;
_totalMixes = 0;
_numStatFrames = 0;
_stats.reset();
// add stats for each listener
auto nodeList = DependencyManager::get<NodeList>();
@@ -744,7 +344,7 @@ void AudioMixer::run() {
// wait until we have the domain-server settings, otherwise we bail
DomainHandler& domainHandler = DependencyManager::get<NodeList>()->getDomainHandler();
connect(&domainHandler, &DomainHandler::settingsReceived, this, &AudioMixer::domainSettingsRequestComplete);
connect(&domainHandler, &DomainHandler::settingsReceived, this, &AudioMixer::start);
connect(&domainHandler, &DomainHandler::settingsReceiveFail, this, &AudioMixer::domainSettingsRequestFailed);
ThreadedAssignment::commonInit(AUDIO_MIXER_LOGGING_TARGET_NAME, NodeType::AudioMixer);
@@ -762,202 +362,165 @@ AudioMixerClientData* AudioMixer::getOrCreateClientData(Node* node) {
return clientData;
}
void AudioMixer::domainSettingsRequestComplete() {
void AudioMixer::start() {
auto nodeList = DependencyManager::get<NodeList>();
// prepare the NodeList
nodeList->addNodeTypeToInterestSet(NodeType::Agent);
nodeList->linkedDataCreateCallback = [&](Node* node) { getOrCreateClientData(node); };
DomainHandler& domainHandler = nodeList->getDomainHandler();
const QJsonObject& settingsObject = domainHandler.getSettingsObject();
// parse out any AudioMixer settings
{
DomainHandler& domainHandler = nodeList->getDomainHandler();
const QJsonObject& settingsObject = domainHandler.getSettingsObject();
parseSettingsObject(settingsObject);
}
// check the settings object to see if we have anything we can parse out
parseSettingsObject(settingsObject);
// manageLoad state
auto frameTimestamp = p_high_resolution_clock::time_point::min();
unsigned int framesSinceManagement = std::numeric_limits<int>::max();
// queue up a connection to start broadcasting mixes now that we're ready to go
QMetaObject::invokeMethod(this, "broadcastMixes", Qt::QueuedConnection);
}
void AudioMixer::broadcastMixes() {
const int TRAILING_AVERAGE_FRAMES = 100;
const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;
const float STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.10f;
const float BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.20f;
const float RATIO_BACK_OFF = 0.02f;
auto nodeList = DependencyManager::get<NodeList>();
auto nextFrameTimestamp = p_high_resolution_clock::now();
auto timeToSleep = std::chrono::microseconds(0);
int currentFrame = 1;
int numFramesPerSecond = (int) ceil(AudioConstants::NETWORK_FRAMES_PER_SEC);
int framesSinceCutoffEvent = TRAILING_AVERAGE_FRAMES;
// mix state
unsigned int frame = 1;
while (!_isFinished) {
// manage mixer load
{
_trailingSleepRatio = (PREVIOUS_FRAMES_RATIO * _trailingSleepRatio) +
// ratio of frame spent sleeping / total frame time
((CURRENT_FRAME_RATIO * timeToSleep.count()) / (float) AudioConstants::NETWORK_FRAME_USECS);
bool hasRatioChanged = false;
if (framesSinceCutoffEvent >= TRAILING_AVERAGE_FRAMES) {
if (_trailingSleepRatio <= STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD) {
qDebug() << "Mixer is struggling";
// change our min required loudness to reduce some load
_performanceThrottlingRatio = _performanceThrottlingRatio + (0.5f * (1.0f - _performanceThrottlingRatio));
hasRatioChanged = true;
} else if (_trailingSleepRatio >= BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD && _performanceThrottlingRatio != 0) {
qDebug() << "Mixer is recovering";
// back off the required loudness
_performanceThrottlingRatio = std::max(0.0f, _performanceThrottlingRatio - RATIO_BACK_OFF);
hasRatioChanged = true;
}
if (hasRatioChanged) {
// set our min audibility threshold from the new ratio
_minAudibilityThreshold = LOUDNESS_TO_DISTANCE_RATIO / (2.0f * (1.0f - _performanceThrottlingRatio));
framesSinceCutoffEvent = 0;
qDebug() << "Sleeping" << _trailingSleepRatio << "of frame";
qDebug() << "Cutoff is" << _performanceThrottlingRatio;
qDebug() << "Minimum audibility to be mixed is" << _minAudibilityThreshold;
}
}
if (!hasRatioChanged) {
++framesSinceCutoffEvent;
}
auto timer = _sleepTiming.timer();
manageLoad(frameTimestamp, framesSinceManagement);
}
// mix
nodeList->eachNode([&](const SharedNodePointer& node) {
if (node->getLinkedData()) {
AudioMixerClientData* nodeData = (AudioMixerClientData*)node->getLinkedData();
auto timer = _frameTiming.timer();
// this function will attempt to pop a frame from each audio stream.
// a pointer to the popped data is stored as a member in InboundAudioStream.
// That's how the popped audio data will be read for mixing (but only if the pop was successful)
_sumStreams += nodeData->checkBuffersBeforeFrameSend();
nodeList->nestedEach([&](NodeList::const_iterator cbegin, NodeList::const_iterator cend) {
// prepare frames; pop off any new audio from their streams
{
auto timer = _prepareTiming.timer();
std::for_each(cbegin, cend, [&](const SharedNodePointer& node) {
_stats.sumStreams += prepareFrame(node, frame);
});
}
// if the stream should be muted, send mute packet
if (nodeData->getAvatarAudioStream()
&& (shouldMute(nodeData->getAvatarAudioStream()->getQuietestFrameLoudness())
|| nodeData->shouldMuteClient())) {
auto mutePacket = NLPacket::create(PacketType::NoisyMute, 0);
nodeList->sendPacket(std::move(mutePacket), *node);
// reset the flag now; sending the mute packet once should suffice
nodeData->setShouldMuteClient(false);
}
if (node->getType() == NodeType::Agent && node->getActiveSocket()
&& nodeData->getAvatarAudioStream()) {
bool mixHasAudio = prepareMixForListeningNode(node.data());
std::unique_ptr<NLPacket> mixPacket;
if (mixHasAudio || nodeData->shouldFlushEncoder()) {
int mixPacketBytes = sizeof(quint16) + AudioConstants::MAX_CODEC_NAME_LENGTH_ON_WIRE
+ AudioConstants::NETWORK_FRAME_BYTES_STEREO;
mixPacket = NLPacket::create(PacketType::MixedAudio, mixPacketBytes);
// pack sequence number
quint16 sequence = nodeData->getOutgoingSequenceNumber();
mixPacket->writePrimitive(sequence);
// write the codec
QString codecInPacket = nodeData->getCodecName();
mixPacket->writeString(codecInPacket);
QByteArray encodedBuffer;
if (mixHasAudio) {
QByteArray decodedBuffer(reinterpret_cast<char*>(_clampedSamples), AudioConstants::NETWORK_FRAME_BYTES_STEREO);
nodeData->encode(decodedBuffer, encodedBuffer);
} else {
// time to flush, which resets the shouldFlush until next time we encode something
nodeData->encodeFrameOfZeros(encodedBuffer);
}
// pack mixed audio samples
mixPacket->write(encodedBuffer.constData(), encodedBuffer.size());
} else {
int silentPacketBytes = sizeof(quint16) + sizeof(quint16) + AudioConstants::MAX_CODEC_NAME_LENGTH_ON_WIRE;
mixPacket = NLPacket::create(PacketType::SilentAudioFrame, silentPacketBytes);
// pack sequence number
quint16 sequence = nodeData->getOutgoingSequenceNumber();
mixPacket->writePrimitive(sequence);
// write the codec
QString codecInPacket = nodeData->getCodecName();
mixPacket->writeString(codecInPacket);
// pack number of silent audio samples
quint16 numSilentSamples = AudioConstants::NETWORK_FRAME_SAMPLES_STEREO;
mixPacket->writePrimitive(numSilentSamples);
}
// Send audio environment
sendAudioEnvironmentPacket(node);
// send mixed audio packet
nodeList->sendPacket(std::move(mixPacket), *node);
nodeData->incrementOutgoingMixedAudioSequenceNumber();
// send an audio stream stats packet to the client approximately every second
++currentFrame;
currentFrame %= numFramesPerSecond;
if (nodeData->shouldSendStats(currentFrame)) {
nodeData->sendAudioStreamStatsPackets(node);
}
++_sumListeners;
}
// mix across slave threads
{
auto timer = _mixTiming.timer();
_slavePool.mix(cbegin, cend, frame);
}
});
// gather stats
_slavePool.each([&](AudioMixerSlave& slave) {
_stats.accumulate(slave.stats);
slave.stats.reset();
});
++frame;
++_numStatFrames;
// play nice with qt event-looping
{
// since we're a while loop we need to help qt's event processing
auto timer = _eventsTiming.timer();
// since we're a while loop we need to yield to qt's event processing
QCoreApplication::processEvents();
if (_isFinished) {
// alert qt that this is finished
// alert qt eventing that this is finished
QCoreApplication::sendPostedEvents(this, QEvent::DeferredDelete);
break;
}
}
}
}
// sleep until the next frame, if necessary
{
nextFrameTimestamp += std::chrono::microseconds(AudioConstants::NETWORK_FRAME_USECS);
void AudioMixer::manageLoad(p_high_resolution_clock::time_point& frameTimestamp, unsigned int& framesSinceCutoffEvent) {
auto timeToSleep = std::chrono::microseconds(0);
auto now = p_high_resolution_clock::now();
timeToSleep = std::chrono::duration_cast<std::chrono::microseconds>(nextFrameTimestamp - now);
// sleep until the next frame, if necessary
{
// advance the next frame
frameTimestamp += std::chrono::microseconds(AudioConstants::NETWORK_FRAME_USECS);
auto now = p_high_resolution_clock::now();
if (timeToSleep.count() < 0) {
nextFrameTimestamp = now;
timeToSleep = std::chrono::microseconds(0);
// calculate sleep
if (frameTimestamp < now) {
frameTimestamp = now;
} else {
timeToSleep = std::chrono::duration_cast<std::chrono::microseconds>(frameTimestamp - now);
std::this_thread::sleep_for(timeToSleep);
}
}
// manage mixer load
{
const int TRAILING_AVERAGE_FRAMES = 100;
const float CURRENT_FRAME_RATIO = 1.0f / TRAILING_AVERAGE_FRAMES;
const float PREVIOUS_FRAMES_RATIO = 1.0f - CURRENT_FRAME_RATIO;
const float STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.10f;
const float BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD = 0.20f;
const float RATIO_BACK_OFF = 0.02f;
_trailingSleepRatio = (PREVIOUS_FRAMES_RATIO * _trailingSleepRatio) +
// ratio of frame spent sleeping / total frame time
((CURRENT_FRAME_RATIO * timeToSleep.count()) / (float) AudioConstants::NETWORK_FRAME_USECS);
bool hasRatioChanged = false;
if (framesSinceCutoffEvent >= TRAILING_AVERAGE_FRAMES) {
if (_trailingSleepRatio <= STRUGGLE_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD) {
qDebug() << "Mixer is struggling";
// change our min required loudness to reduce some load
_performanceThrottlingRatio = _performanceThrottlingRatio + (0.5f * (1.0f - _performanceThrottlingRatio));
hasRatioChanged = true;
} else if (_trailingSleepRatio >= BACK_OFF_TRIGGER_SLEEP_PERCENTAGE_THRESHOLD && _performanceThrottlingRatio != 0) {
qDebug() << "Mixer is recovering";
// back off the required loudness
_performanceThrottlingRatio = std::max(0.0f, _performanceThrottlingRatio - RATIO_BACK_OFF);
hasRatioChanged = true;
}
std::this_thread::sleep_for(timeToSleep);
if (hasRatioChanged) {
// set our min audibility threshold from the new ratio
_minAudibilityThreshold = LOUDNESS_TO_DISTANCE_RATIO / (2.0f * (1.0f - _performanceThrottlingRatio));
framesSinceCutoffEvent = 0;
qDebug() << "Sleeping" << _trailingSleepRatio << "of frame";
qDebug() << "Cutoff is" << _performanceThrottlingRatio;
qDebug() << "Minimum audibility to be mixed is" << _minAudibilityThreshold;
}
}
if (!hasRatioChanged) {
++framesSinceCutoffEvent;
}
}
}
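Each "struggling" event halves the remaining headroom in _performanceThrottlingRatio, which doubles the audibility cutoff, while recovery backs off linearly by RATIO_BACK_OFF. A standalone sketch of the trajectory (the real LOUDNESS_TO_DISTANCE_RATIO is defined elsewhere in the codebase; the value here is illustrative only):

#include <cstdio>

int main() {
    const float LOUDNESS_TO_DISTANCE_RATIO = 0.00001f; // illustrative, not the real constant
    float throttlingRatio = 0.0f;
    for (int event = 1; event <= 5; ++event) {
        throttlingRatio = throttlingRatio + (0.5f * (1.0f - throttlingRatio)); // one "struggling" event
        float threshold = LOUDNESS_TO_DISTANCE_RATIO / (2.0f * (1.0f - throttlingRatio));
        // ratio: 0.5, 0.75, 0.875, ... ; threshold doubles with each event
        std::printf("event %d: ratio=%g threshold=%g\n", event, throttlingRatio, threshold);
    }
}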
int AudioMixer::prepareFrame(const SharedNodePointer& node, unsigned int frame) {
AudioMixerClientData* data = (AudioMixerClientData*)node->getLinkedData();
if (data == nullptr) {
return 0;
}
return data->checkBuffersBeforeFrameSend();
}
void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
if (settingsObject.contains(AUDIO_THREADING_GROUP_KEY)) {
QJsonObject audioThreadingGroupObject = settingsObject[AUDIO_THREADING_GROUP_KEY].toObject();
const QString AUTO_THREADS = "auto_threads";
bool autoThreads = audioThreadingGroupObject[AUTO_THREADS].toBool();
if (!autoThreads) {
bool ok;
const QString NUM_THREADS = "num_threads";
int numThreads = audioThreadingGroupObject[NUM_THREADS].toString().toInt(&ok);
if (ok) {
_slavePool.setNumThreads(numThreads);
}
}
}
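// For reference, the settings fragment consumed above would look something
// like this (group and field names taken from the parser; values illustrative).
// Note that num_threads arrives as a JSON string and is parsed via toInt():
//
//   "audio_threading": {
//       "auto_threads": false,
//       "num_threads": "4"
//   }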
if (settingsObject.contains(AUDIO_BUFFER_GROUP_KEY)) {
QJsonObject audioBufferGroupObject = settingsObject[AUDIO_BUFFER_GROUP_KEY].toObject();
@@ -1051,14 +614,6 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
}
}
const QString FILTER_KEY = "enable_filter";
if (audioEnvGroupObject[FILTER_KEY].isBool()) {
_enableFilter = audioEnvGroupObject[FILTER_KEY].toBool();
}
if (_enableFilter) {
qDebug() << "Filter enabled";
}
const QString AUDIO_ZONES = "zones";
if (audioEnvGroupObject[AUDIO_ZONES].isObject()) {
const QJsonObject& zones = audioEnvGroupObject[AUDIO_ZONES].toObject();
@@ -1116,7 +671,7 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
coefficientObject.contains(LISTENER) &&
coefficientObject.contains(COEFFICIENT)) {
ZonesSettings settings;
ZoneSettings settings;
bool ok;
settings.source = coefficientObject.value(SOURCE).toString();
@@ -1126,7 +681,7 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
if (ok && settings.coefficient >= 0.0f && settings.coefficient <= 1.0f &&
_audioZones.contains(settings.source) && _audioZones.contains(settings.listener)) {
_zonesSettings.push_back(settings);
_zoneSettings.push_back(settings);
qDebug() << "Added Coefficient:" << settings.source << settings.listener << settings.coefficient;
}
}
@@ -1159,6 +714,7 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
settings.wetLevel = wetLevel;
_zoneReverbSettings.push_back(settings);
qDebug() << "Added Reverb:" << zone << reverbTime << wetLevel;
}
}
@@ -1166,3 +722,28 @@ void AudioMixer::parseSettingsObject(const QJsonObject &settingsObject) {
}
}
}
AudioMixer::Timer::Timing::Timing(uint64_t& sum) : _sum(sum) {
_timing = p_high_resolution_clock::now();
}
AudioMixer::Timer::Timing::~Timing() {
_sum += std::chrono::duration_cast<std::chrono::microseconds>(p_high_resolution_clock::now() - _timing).count();
}
void AudioMixer::Timer::get(uint64_t& timing, uint64_t& trailing) {
// update history
_index = (_index + 1) % TIMER_TRAILING_SECONDS;
uint64_t oldTiming = _history[_index];
_history[_index] = _sum;
// update trailing
_trailing -= oldTiming;
_trailing += _sum;
timing = _sum;
trailing = _trailing / TIMER_TRAILING_SECONDS;
// reset _sum;
_sum = 0;
}
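The Timing object records its start on construction and accumulates its lifetime on destruction, so a block is timed simply by holding one (as the broadcast loop does via timer()); get() then keeps a TIMER_TRAILING_SECONDS-entry ring of those per-interval sums. A minimal standalone sketch of the same RAII pattern (separate program, names hypothetical):

#include <chrono>
#include <cstdint>
#include <cstdio>
#include <thread>

struct ScopeTimer {
    explicit ScopeTimer(uint64_t& sum) : _sum(sum), _start(std::chrono::steady_clock::now()) {}
    ~ScopeTimer() {
        // accumulate this scope's duration into the shared sum, in microseconds
        _sum += std::chrono::duration_cast<std::chrono::microseconds>(
            std::chrono::steady_clock::now() - _start).count();
    }
    uint64_t& _sum;
    std::chrono::steady_clock::time_point _start;
};

int main() {
    uint64_t sleepMicros = 0;
    for (int frame = 0; frame < 3; ++frame) {
        ScopeTimer timer(sleepMicros); // times this iteration's scope
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }
    std::printf("slept ~%llu us over 3 frames\n", (unsigned long long) sleepMicros);
}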

assignment-client/src/audio/AudioMixer.h

@@ -18,31 +18,45 @@
#include <ThreadedAssignment.h>
#include <UUIDHasher.h>
#include "AudioMixerStats.h"
#include "AudioMixerSlavePool.h"
class PositionalAudioStream;
class AvatarAudioStream;
class AudioHRTF;
class AudioMixerClientData;
const int SAMPLE_PHASE_DELAY_AT_90 = 20;
const int READ_DATAGRAMS_STATS_WINDOW_SECONDS = 30;
/// Handles assignments of type AudioMixer - mixing streams of audio and re-distributing to various clients.
class AudioMixer : public ThreadedAssignment {
Q_OBJECT
public:
AudioMixer(ReceivedMessage& message);
public slots:
/// threaded run of assignment
void run() override;
void sendStatsPacket() override;
struct ZoneSettings {
QString source;
QString listener;
float coefficient;
};
struct ReverbSettings {
QString zone;
float reverbTime;
float wetLevel;
};
static int getStaticJitterFrames() { return _numStaticJitterFrames; }
static bool shouldMute(float quietestFrame) { return quietestFrame > _noiseMutingThreshold; }
static float getAttenuationPerDoublingInDistance() { return _attenuationPerDoublingInDistance; }
static float getMinimumAudibilityThreshold() { return _performanceThrottlingRatio > 0.0f ? _minAudibilityThreshold : 0.0f; }
static const QHash<QString, AABox>& getAudioZones() { return _audioZones; }
static const QVector<ZoneSettings>& getZoneSettings() { return _zoneSettings; }
static const QVector<ReverbSettings>& getReverbSettings() { return _zoneReverbSettings; }
public slots:
void run() override;
void sendStatsPacket() override;
private slots:
void broadcastMixes();
// packet handlers
void handleNodeAudioPacket(QSharedPointer<ReceivedMessage> packet, SharedNodePointer sendingNode);
void handleMuteEnvironmentPacket(QSharedPointer<ReceivedMessage> packet, SharedNodePointer sendingNode);
void handleNegotiateAudioFormat(QSharedPointer<ReceivedMessage> message, SharedNodePointer sendingNode);
@@ -52,74 +66,66 @@ private slots:
void handleKillAvatarPacket(QSharedPointer<ReceivedMessage> packet, SharedNodePointer sendingNode);
void handleNodeMuteRequestPacket(QSharedPointer<ReceivedMessage> packet, SharedNodePointer sendingNode);
void start();
void removeHRTFsForFinishedInjector(const QUuid& streamID);
private:
// mixing helpers
// check and maybe throttle mixer load by changing audibility threshold
void manageLoad(p_high_resolution_clock::time_point& frameTimestamp, unsigned int& framesSinceManagement);
// pop a frame from any streams on the node
// returns the number of available streams
int prepareFrame(const SharedNodePointer& node, unsigned int frame);
AudioMixerClientData* getOrCreateClientData(Node* node);
void domainSettingsRequestComplete();
/// adds one stream to the mix for a listening node
void addStreamToMixForListeningNodeWithStream(AudioMixerClientData& listenerNodeData,
const PositionalAudioStream& streamToAdd,
const QUuid& sourceNodeID,
const AvatarAudioStream& listeningNodeStream);
float gainForSource(const PositionalAudioStream& streamToAdd, const AvatarAudioStream& listeningNodeStream,
const glm::vec3& relativePosition, bool isEcho);
float azimuthForSource(const PositionalAudioStream& streamToAdd, const AvatarAudioStream& listeningNodeStream,
const glm::vec3& relativePosition);
/// prepares and sends a mix to one Node
bool prepareMixForListeningNode(Node* node);
/// Send Audio Environment packet for a single node
void sendAudioEnvironmentPacket(SharedNodePointer node);
void perSecondActions();
QString percentageForMixStats(int counter);
bool shouldMute(float quietestFrame);
void parseSettingsObject(const QJsonObject& settingsObject);
float _trailingSleepRatio;
float _minAudibilityThreshold;
float _performanceThrottlingRatio;
float _attenuationPerDoublingInDistance;
float _noiseMutingThreshold;
int _numStatFrames { 0 };
int _sumStreams { 0 };
int _sumListeners { 0 };
int _hrtfRenders { 0 };
int _hrtfSilentRenders { 0 };
int _hrtfStruggleRenders { 0 };
int _manualStereoMixes { 0 };
int _manualEchoMixes { 0 };
int _totalMixes { 0 };
AudioMixerStats _stats;
QString _codecPreferenceOrder;
float _mixedSamples[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];
int16_t _clampedSamples[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];
AudioMixerSlavePool _slavePool;
QHash<QString, AABox> _audioZones;
struct ZonesSettings {
QString source;
QString listener;
float coefficient;
class Timer {
public:
class Timing {
public:
Timing(uint64_t& sum);
~Timing();
private:
p_high_resolution_clock::time_point _timing;
uint64_t& _sum;
};
Timing timer() { return Timing(_sum); }
void get(uint64_t& timing, uint64_t& trailing);
private:
static const int TIMER_TRAILING_SECONDS = 10;
uint64_t _sum { 0 };
uint64_t _trailing { 0 };
uint64_t _history[TIMER_TRAILING_SECONDS] {};
int _index { 0 };
};
QVector<ZonesSettings> _zonesSettings;
struct ReverbSettings {
QString zone;
float reverbTime;
float wetLevel;
};
QVector<ReverbSettings> _zoneReverbSettings;
Timer _sleepTiming;
Timer _frameTiming;
Timer _prepareTiming;
Timer _mixTiming;
Timer _eventsTiming;
static int _numStaticJitterFrames; // -1 denotes dynamic jitter buffering
static bool _enableFilter;
static float _noiseMutingThreshold;
static float _attenuationPerDoublingInDistance;
static float _trailingSleepRatio;
static float _performanceThrottlingRatio;
static float _minAudibilityThreshold;
static QHash<QString, AABox> _audioZones;
static QVector<ZoneSettings> _zoneSettings;
static QVector<ReverbSettings> _zoneReverbSettings;
};
#endif // hifi_AudioMixer_h

assignment-client/src/audio/AudioMixerSlave.cpp

@@ -0,0 +1,519 @@
//
// AudioMixerSlave.cpp
// assignment-client/src/audio
//
// Created by Zach Pomerantz on 11/22/16.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <algorithm>
#include <glm/glm.hpp>
#include <glm/gtx/norm.hpp>
#include <glm/gtx/vector_angle.hpp>
#include <LogHandler.h>
#include <NetworkAccessManager.h>
#include <NodeList.h>
#include <Node.h>
#include <OctreeConstants.h>
#include <plugins/PluginManager.h>
#include <plugins/CodecPlugin.h>
#include <udt/PacketHeaders.h>
#include <SharedUtil.h>
#include <StDev.h>
#include <UUID.h>
#include "AudioRingBuffer.h"
#include "AudioMixer.h"
#include "AudioMixerClientData.h"
#include "AvatarAudioStream.h"
#include "InjectedAudioStream.h"
#include "AudioMixerSlave.h"
std::unique_ptr<NLPacket> createAudioPacket(PacketType type, int size, quint16 sequence, QString codec) {
auto audioPacket = NLPacket::create(type, size);
audioPacket->writePrimitive(sequence);
audioPacket->writeString(codec);
return audioPacket;
}
void sendMixPacket(const SharedNodePointer& node, AudioMixerClientData& data, QByteArray& buffer) {
static const int MIX_PACKET_SIZE =
sizeof(quint16) + AudioConstants::MAX_CODEC_NAME_LENGTH_ON_WIRE + AudioConstants::NETWORK_FRAME_BYTES_STEREO;
quint16 sequence = data.getOutgoingSequenceNumber();
QString codec = data.getCodecName();
auto mixPacket = createAudioPacket(PacketType::MixedAudio, MIX_PACKET_SIZE, sequence, codec);
// pack samples
mixPacket->write(buffer.constData(), buffer.size());
// send packet
DependencyManager::get<NodeList>()->sendPacket(std::move(mixPacket), *node);
data.incrementOutgoingMixedAudioSequenceNumber();
}
void sendSilentPacket(const SharedNodePointer& node, AudioMixerClientData& data) {
static const int SILENT_PACKET_SIZE =
sizeof(quint16) + AudioConstants::MAX_CODEC_NAME_LENGTH_ON_WIRE + sizeof(quint16);
quint16 sequence = data.getOutgoingSequenceNumber();
QString codec = data.getCodecName();
auto mixPacket = createAudioPacket(PacketType::SilentAudioFrame, SILENT_PACKET_SIZE, sequence, codec);
// pack number of samples
quint16 numSilentSamples = AudioConstants::NETWORK_FRAME_SAMPLES_STEREO;
mixPacket->writePrimitive(numSilentSamples);
// send packet
DependencyManager::get<NodeList>()->sendPacket(std::move(mixPacket), *node);
data.incrementOutgoingMixedAudioSequenceNumber();
}
void sendEnvironmentPacket(const SharedNodePointer& node, AudioMixerClientData& data) {
bool hasReverb = false;
float reverbTime, wetLevel;
auto& reverbSettings = AudioMixer::getReverbSettings();
auto& audioZones = AudioMixer::getAudioZones();
AvatarAudioStream* stream = data.getAvatarAudioStream();
glm::vec3 streamPosition = stream->getPosition();
// find reverb properties
for (int i = 0; i < reverbSettings.size(); ++i) {
AABox box = audioZones[reverbSettings[i].zone];
if (box.contains(streamPosition)) {
hasReverb = true;
reverbTime = reverbSettings[i].reverbTime;
wetLevel = reverbSettings[i].wetLevel;
break;
}
}
// check if data changed
bool dataChanged = (stream->hasReverb() != hasReverb) ||
(stream->hasReverb() && (stream->getRevebTime() != reverbTime || stream->getWetLevel() != wetLevel));
if (dataChanged) {
// update stream
if (hasReverb) {
stream->setReverb(reverbTime, wetLevel);
} else {
stream->clearReverb();
}
}
// send packet at change or every so often
float CHANCE_OF_SEND = 0.01f;
bool sendData = dataChanged || (randFloat() < CHANCE_OF_SEND);
if (sendData) {
// size the packet
unsigned char bitset = 0;
int packetSize = sizeof(bitset);
if (hasReverb) {
packetSize += sizeof(reverbTime) + sizeof(wetLevel);
}
// write the packet
auto envPacket = NLPacket::create(PacketType::AudioEnvironment, packetSize);
if (hasReverb) {
setAtBit(bitset, HAS_REVERB_BIT);
}
envPacket->writePrimitive(bitset);
if (hasReverb) {
envPacket->writePrimitive(reverbTime);
envPacket->writePrimitive(wetLevel);
}
// send the packet
DependencyManager::get<NodeList>()->sendPacket(std::move(envPacket), *node);
}
}
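On the wire the environment payload is a one-byte bitset optionally followed by two floats. A sketch of decoding it from a raw buffer (the bit position is an assumption here; HAS_REVERB_BIT is defined elsewhere, and the client's real packet API reads primitives rather than raw bytes):

#include <cstdio>
#include <cstring>

void decodeEnvironmentPayload(const unsigned char* payload, int size) {
    const unsigned char HAS_REVERB_MASK = 1 << 0; // assumed bit position for HAS_REVERB_BIT
    if (size < 1) {
        return;
    }
    unsigned char bitset = payload[0];
    if ((bitset & HAS_REVERB_MASK) && size >= (int) (1 + 2 * sizeof(float))) {
        float reverbTime, wetLevel;
        std::memcpy(&reverbTime, payload + 1, sizeof(float));
        std::memcpy(&wetLevel, payload + 1 + sizeof(float), sizeof(float));
        std::printf("reverb %gs, wet level %g\n", reverbTime, wetLevel);
    }
}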
void AudioMixerSlave::configure(ConstIter begin, ConstIter end, unsigned int frame) {
_begin = begin;
_end = end;
_frame = frame;
}
void AudioMixerSlave::mix(const SharedNodePointer& node) {
// check that the node is valid
AudioMixerClientData* data = (AudioMixerClientData*)node->getLinkedData();
if (data == nullptr) {
return;
}
auto avatarStream = data->getAvatarAudioStream();
if (avatarStream == nullptr) {
return;
}
// send mute packet, if necessary
if (AudioMixer::shouldMute(avatarStream->getQuietestFrameLoudness()) || data->shouldMuteClient()) {
auto mutePacket = NLPacket::create(PacketType::NoisyMute, 0);
DependencyManager::get<NodeList>()->sendPacket(std::move(mutePacket), *node);
// reset the flag now; sending the mute packet once should suffice
data->setShouldMuteClient(false);
}
// send audio packets, if necessary
if (node->getType() == NodeType::Agent && node->getActiveSocket()) {
++stats.sumListeners;
// mix the audio
bool mixHasAudio = prepareMix(node);
// send audio packet
if (mixHasAudio || data->shouldFlushEncoder()) {
// encode the audio
QByteArray encodedBuffer;
if (mixHasAudio) {
QByteArray decodedBuffer(reinterpret_cast<char*>(_bufferSamples), AudioConstants::NETWORK_FRAME_BYTES_STEREO);
data->encode(decodedBuffer, encodedBuffer);
} else {
// time to flush, which resets the shouldFlush until next time we encode something
data->encodeFrameOfZeros(encodedBuffer);
}
sendMixPacket(node, *data, encodedBuffer);
} else {
sendSilentPacket(node, *data);
}
// send environment packet
sendEnvironmentPacket(node, *data);
// send stats packet (about every second)
static const unsigned int NUM_FRAMES_PER_SEC = (int) ceil(AudioConstants::NETWORK_FRAMES_PER_SEC);
if (data->shouldSendStats(_frame % NUM_FRAMES_PER_SEC)) {
data->sendAudioStreamStatsPackets(node);
}
}
}
bool AudioMixerSlave::prepareMix(const SharedNodePointer& node) {
AvatarAudioStream* nodeAudioStream = static_cast<AudioMixerClientData*>(node->getLinkedData())->getAvatarAudioStream();
AudioMixerClientData* nodeData = static_cast<AudioMixerClientData*>(node->getLinkedData());
// zero out the client mix for this node
memset(_mixSamples, 0, sizeof(_mixSamples));
// loop through all other nodes that have sufficient audio to mix
std::for_each(_begin, _end, [&](const SharedNodePointer& otherNode){
// make sure that we have audio data for this other node
// and that it isn't being ignored by our listening node
// and that it isn't ignoring our listening node
AudioMixerClientData* otherData = static_cast<AudioMixerClientData*>(otherNode->getLinkedData());
if (otherData
&& !node->isIgnoringNodeWithID(otherNode->getUUID()) && !otherNode->isIgnoringNodeWithID(node->getUUID())) {
// check if distance is inside ignore radius
if (node->isIgnoreRadiusEnabled() || otherNode->isIgnoreRadiusEnabled()) {
float ignoreRadius = glm::min(node->getIgnoreRadius(), otherNode->getIgnoreRadius());
if (glm::distance(nodeData->getPosition(), otherData->getPosition()) < ignoreRadius) {
// skip, distance is inside ignore radius
return;
}
}
// enumerate the ARBs attached to the otherNode and add all that should be added to mix
auto streamsCopy = otherData->getAudioStreams();
for (auto& streamPair : streamsCopy) {
auto otherNodeStream = streamPair.second;
if (*otherNode != *node || otherNodeStream->shouldLoopbackForNode()) {
addStreamToMix(*nodeData, otherNode->getUUID(), *nodeAudioStream, *otherNodeStream);
}
}
}
});
// use the per listener AudioLimiter to render the mixed data...
nodeData->audioLimiter.render(_mixSamples, _bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
// check for silent audio after the peak limiter has converted the samples
bool hasAudio = false;
for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
if (_bufferSamples[i] != 0) {
hasAudio = true;
break;
}
}
return hasAudio;
}
void AudioMixerSlave::addStreamToMix(AudioMixerClientData& listenerNodeData, const QUuid& sourceNodeID,
const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd) {
// to reduce artifacts we calculate the gain and azimuth for every source for this listener
// even if we are not going to end up mixing in this source
++stats.totalMixes;
// this ensures that the tail of any previously mixed audio or the first block of new audio sounds correct
// check if this is a server echo of a source back to itself
bool isEcho = (&streamToAdd == &listeningNodeStream);
glm::vec3 relativePosition = streamToAdd.getPosition() - listeningNodeStream.getPosition();
// figure out the distance between source and listener
float distance = glm::max(glm::length(relativePosition), EPSILON);
// figure out the gain for this source at the listener
float gain = gainForSource(listeningNodeStream, streamToAdd, relativePosition, isEcho);
// figure out the azimuth to this source at the listener
float azimuth = isEcho ? 0.0f : azimuthForSource(listeningNodeStream, streamToAdd, relativePosition);
float repeatedFrameFadeFactor = 1.0f;
static const int HRTF_DATASET_INDEX = 1;
if (!streamToAdd.lastPopSucceeded()) {
bool forceSilentBlock = true;
if (!streamToAdd.getLastPopOutput().isNull()) {
bool isInjector = dynamic_cast<const InjectedAudioStream*>(&streamToAdd);
// in an injector, just go silent - the injector has likely ended
// in other inputs (microphone, &c.), repeat with fade to avoid the harsh jump to silence
// we'll repeat the last block until it has a block to mix
// and we'll gradually fade that repeated block into silence.
// calculate its fade factor, which depends on how many times it's already been repeated.
repeatedFrameFadeFactor = calculateRepeatedFrameFadeFactor(streamToAdd.getConsecutiveNotMixedCount() - 1);
if (!isInjector && repeatedFrameFadeFactor > 0.0f) {
// apply the repeatedFrameFadeFactor to the gain
gain *= repeatedFrameFadeFactor;
forceSilentBlock = false;
}
}
if (forceSilentBlock) {
// we're deciding not to repeat either since we've already done it enough times or repetition with fade is disabled
// in this case we will call renderSilent with a forced silent block
// this ensures the correct tail from the previously mixed block and the correct spatialization of first block
// of any upcoming audio
if (!streamToAdd.isStereo() && !isEcho) {
// get the existing listener-source HRTF object, or create a new one
auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());
// this is not done for stereo streams since they do not go through the HRTF
static int16_t silentMonoBlock[AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL] = {};
hrtf.renderSilent(silentMonoBlock, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
++stats.hrtfSilentRenders;
}
return;
}
}
// grab the stream from the ring buffer
AudioRingBuffer::ConstIterator streamPopOutput = streamToAdd.getLastPopOutput();
if (streamToAdd.isStereo() || isEcho) {
// this is a stereo source or server echo so we do not pass it through the HRTF
// simply apply our calculated gain to each sample
if (streamToAdd.isStereo()) {
for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; ++i) {
_mixSamples[i] += float(streamPopOutput[i] * gain / AudioConstants::MAX_SAMPLE_VALUE);
}
++stats.manualStereoMixes;
} else {
for (int i = 0; i < AudioConstants::NETWORK_FRAME_SAMPLES_STEREO; i += 2) {
auto monoSample = float(streamPopOutput[i / 2] * gain / AudioConstants::MAX_SAMPLE_VALUE);
_mixSamples[i] += monoSample;
_mixSamples[i + 1] += monoSample;
}
++stats.manualEchoMixes;
}
return;
}
// get the existing listener-source HRTF object, or create a new one
auto& hrtf = listenerNodeData.hrtfForStream(sourceNodeID, streamToAdd.getStreamIdentifier());
streamPopOutput.readSamples(_bufferSamples, AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
// if the frame we're about to mix is silent, simply call render silent and move on
if (streamToAdd.getLastPopOutputLoudness() == 0.0f) {
// silent frame from source
// we still need to call renderSilent via the HRTF for mono source
hrtf.renderSilent(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
++stats.hrtfSilentRenders;
return;
}
float audibilityThreshold = AudioMixer::getMinimumAudibilityThreshold();
if (audibilityThreshold > 0.0f &&
streamToAdd.getLastPopOutputTrailingLoudness() / glm::length(relativePosition) <= audibilityThreshold) {
// the mixer is struggling so we're going to drop off some streams
// we call renderSilent via the HRTF with the actual frame data and a gain of 0.0
hrtf.renderSilent(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, 0.0f,
AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
++stats.hrtfStruggleRenders;
return;
}
++stats.hrtfRenders;
// mono stream, call the HRTF with our block and calculated azimuth and gain
hrtf.render(_bufferSamples, _mixSamples, HRTF_DATASET_INDEX, azimuth, distance, gain,
AudioConstants::NETWORK_FRAME_SAMPLES_PER_CHANNEL);
}
const int IEEE754_MANT_BITS = 23;
const int IEEE754_EXPN_BIAS = 127;
//
// for x > 0.0f, returns log2(x)
// for x <= 0.0f, returns large negative value
//
// abs |error| < 8e-3, smooth (exact for x=2^N) for NPOLY=3
// abs |error| < 2e-4, smooth (exact for x=2^N) for NPOLY=5
// rel |error| < 0.4 from precision loss very close to 1.0f
//
static inline float fastlog2(float x) {
union { float f; int32_t i; } mant, bits = { x };
// split into mantissa and exponent
mant.i = (bits.i & ((1 << IEEE754_MANT_BITS) - 1)) | (IEEE754_EXPN_BIAS << IEEE754_MANT_BITS);
int32_t expn = (bits.i >> IEEE754_MANT_BITS) - IEEE754_EXPN_BIAS;
mant.f -= 1.0f;
// polynomial for log2(1+x) over x=[0,1]
//x = (-0.346555386f * mant.f + 1.346555386f) * mant.f;
x = (((-0.0821307180f * mant.f + 0.321188984f) * mant.f - 0.677784014f) * mant.f + 1.43872575f) * mant.f;
return x + expn;
}
//
// for -126 <= x < 128, returns exp2(x)
//
// rel |error| < 3e-3, smooth (exact for x=N) for NPOLY=3
// rel |error| < 9e-6, smooth (exact for x=N) for NPOLY=5
//
static inline float fastexp2(float x) {
union { float f; int32_t i; } xi;
// bias such that x > 0
x += IEEE754_EXPN_BIAS;
//x = MAX(x, 1.0f);
//x = MIN(x, 254.9999f);
// split into integer and fraction
xi.i = (int32_t)x;
x -= xi.i;
// construct exp2(xi) as a float
xi.i <<= IEEE754_MANT_BITS;
// polynomial for exp2(x) over x=[0,1]
//x = (0.339766028f * x + 0.660233972f) * x + 1.0f;
x = (((0.0135557472f * x + 0.0520323690f) * x + 0.241379763f) * x + 0.693032121f) * x + 1.0f;
return x * xi.f;
}
float AudioMixerSlave::gainForSource(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
const glm::vec3& relativePosition, bool isEcho) {
float gain = 1.0f;
float distanceBetween = glm::length(relativePosition);
if (distanceBetween < EPSILON) {
distanceBetween = EPSILON;
}
if (streamToAdd.getType() == PositionalAudioStream::Injector) {
gain *= reinterpret_cast<const InjectedAudioStream*>(&streamToAdd)->getAttenuationRatio();
}
if (!isEcho && (streamToAdd.getType() == PositionalAudioStream::Microphone)) {
// source is another avatar, apply fixed off-axis attenuation to make them quieter as they turn away from listener
glm::vec3 rotatedListenerPosition = glm::inverse(streamToAdd.getOrientation()) * relativePosition;
float angleOfDelivery = glm::angle(glm::vec3(0.0f, 0.0f, -1.0f),
glm::normalize(rotatedListenerPosition));
const float MAX_OFF_AXIS_ATTENUATION = 0.2f;
const float OFF_AXIS_ATTENUATION_FORMULA_STEP = (1 - MAX_OFF_AXIS_ATTENUATION) / 2.0f;
float offAxisCoefficient = MAX_OFF_AXIS_ATTENUATION +
(OFF_AXIS_ATTENUATION_FORMULA_STEP * (angleOfDelivery / PI_OVER_TWO));
// multiply the current attenuation coefficient by the calculated off axis coefficient
gain *= offAxisCoefficient;
}
float attenuationPerDoublingInDistance = AudioMixer::getAttenuationPerDoublingInDistance();
auto& zoneSettings = AudioMixer::getZoneSettings();
auto& audioZones = AudioMixer::getAudioZones();
for (int i = 0; i < zoneSettings.length(); ++i) {
if (audioZones[zoneSettings[i].source].contains(streamToAdd.getPosition()) &&
audioZones[zoneSettings[i].listener].contains(listeningNodeStream.getPosition())) {
attenuationPerDoublingInDistance = zoneSettings[i].coefficient;
break;
}
}
const float ATTENUATION_BEGINS_AT_DISTANCE = 1.0f;
if (distanceBetween >= ATTENUATION_BEGINS_AT_DISTANCE) {
// translate the zone setting to gain per log2(distance)
float g = 1.0f - attenuationPerDoublingInDistance;
g = (g < EPSILON) ? EPSILON : g;
g = (g > 1.0f) ? 1.0f : g;
// calculate the distance coefficient using the distance to this node
float distanceCoefficient = fastexp2(fastlog2(g) * fastlog2(distanceBetween/ATTENUATION_BEGINS_AT_DISTANCE));
// multiply the current attenuation coefficient by the distance coefficient
gain *= distanceCoefficient;
}
return gain;
}
float AudioMixerSlave::azimuthForSource(const AvatarAudioStream& listeningNodeStream, const PositionalAudioStream& streamToAdd,
const glm::vec3& relativePosition) {
glm::quat inverseOrientation = glm::inverse(listeningNodeStream.getOrientation());
// Compute sample delay for the two ears to create phase panning
glm::vec3 rotatedSourcePosition = inverseOrientation * relativePosition;
// project the rotated source position vector onto the XZ plane
rotatedSourcePosition.y = 0.0f;
static const float SOURCE_DISTANCE_THRESHOLD = 1e-30f;
if (glm::length2(rotatedSourcePosition) > SOURCE_DISTANCE_THRESHOLD) {
// produce an oriented angle about the y-axis
return glm::orientedAngle(glm::vec3(0.0f, 0.0f, -1.0f), glm::normalize(rotatedSourcePosition), glm::vec3(0.0f, -1.0f, 0.0f));
} else {
// there is no distance between listener and source - return no azimuth
return 0.0f;
}
}
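A worked example of the sign convention (values computed by hand, not from this commit): with the listener facing -Z, a source directly to the listener's right rotates to rotatedSourcePosition = (1, 0, 0); then cross((0, 0, -1), (1, 0, 0)) = (0, -1, 0), which lines up with the -Y reference axis, so glm::orientedAngle() returns +PI/2. Sources to the listener's right therefore produce positive azimuths and sources to the left negative ones.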

View file

@ -0,0 +1,63 @@
//
// AudioMixerSlave.h
// assignment-client/src/audio
//
// Created by Zach Pomerantz on 11/22/16.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AudioMixerSlave_h
#define hifi_AudioMixerSlave_h
#include <AABox.h>
#include <AudioHRTF.h>
#include <AudioRingBuffer.h>
#include <ThreadedAssignment.h>
#include <UUIDHasher.h>
#include <NodeList.h>
#include "AudioMixerStats.h"
class PositionalAudioStream;
class AvatarAudioStream;
class AudioHRTF;
class AudioMixerClientData;
class AudioMixerSlave {
public:
using ConstIter = NodeList::const_iterator;
void configure(ConstIter begin, ConstIter end, unsigned int frame);
// mix and broadcast non-ignored streams to the node
// (returns nothing; prepareMix reports whether the mix has audio)
void mix(const SharedNodePointer& node);
AudioMixerStats stats;
private:
// create mix, returns true if mix has audio
bool prepareMix(const SharedNodePointer& node);
// add a stream to the mix
void addStreamToMix(AudioMixerClientData& listenerData, const QUuid& streamerID,
const AvatarAudioStream& listenerStream, const PositionalAudioStream& streamer);
float gainForSource(const AvatarAudioStream& listener, const PositionalAudioStream& streamer,
const glm::vec3& relativePosition, bool isEcho);
float azimuthForSource(const AvatarAudioStream& listener, const PositionalAudioStream& streamer,
const glm::vec3& relativePosition);
// mixing buffers
float _mixSamples[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];
int16_t _bufferSamples[AudioConstants::NETWORK_FRAME_SAMPLES_STEREO];
// frame state
ConstIter _begin;
ConstIter _end;
unsigned int _frame { 0 };
};
#endif // hifi_AudioMixerSlave_h
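The two mixing buffers hint at the strategy: streams accumulate into _mixSamples as float so intermediate sums cannot wrap, and only the finished mix is narrowed into _bufferSamples for the wire. A hedged sketch of that final narrowing (the int16 sample scale is an assumption here; the actual conversion lives in AudioMixerSlave.cpp, which is not part of this excerpt):

#include <algorithm>
#include <cstdint>

void foldToNetworkBuffer(const float* mixSamples, int16_t* bufferSamples, int count) {
    for (int i = 0; i < count; ++i) {
        // clamp before narrowing so loud overlapping streams saturate instead of wrapping
        float s = std::min(std::max(mixSamples[i], -32768.0f), 32767.0f);
        bufferSamples[i] = (int16_t)s;
    }
}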

View file

@ -0,0 +1,187 @@
//
// AudioMixerSlavePool.cpp
// assignment-client/src/audio
//
// Created by Zach Pomerantz on 11/16/2016.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include <assert.h>
#include <algorithm>
#include "AudioMixerSlavePool.h"
void AudioMixerSlaveThread::run() {
while (true) {
wait();
// iterate over all available nodes
SharedNodePointer node;
while (try_pop(node)) {
mix(node);
}
bool stopping = _stop;
notify(stopping);
if (stopping) {
return;
}
}
}
void AudioMixerSlaveThread::wait() {
{
Lock lock(_pool._mutex);
_pool._slaveCondition.wait(lock, [&] {
assert(_pool._numStarted <= _pool._numThreads);
return _pool._numStarted != _pool._numThreads;
});
++_pool._numStarted;
}
configure(_pool._begin, _pool._end, _pool._frame);
}
void AudioMixerSlaveThread::notify(bool stopping) {
{
Lock lock(_pool._mutex);
assert(_pool._numFinished < _pool._numThreads);
++_pool._numFinished;
if (stopping) {
++_pool._numStopped;
}
}
_pool._poolCondition.notify_one();
}
bool AudioMixerSlaveThread::try_pop(SharedNodePointer& node) {
return _pool._queue.try_pop(node);
}
#ifdef AUDIO_SINGLE_THREADED
static AudioMixerSlave slave;
#endif
void AudioMixerSlavePool::mix(ConstIter begin, ConstIter end, unsigned int frame) {
_begin = begin;
_end = end;
_frame = frame;
#ifdef AUDIO_SINGLE_THREADED
slave.configure(_begin, _end, frame);
std::for_each(begin, end, [&](const SharedNodePointer& node) {
slave.mix(node);
});
#else
// fill the queue
std::for_each(_begin, _end, [&](const SharedNodePointer& node) {
_queue.emplace(node);
});
{
Lock lock(_mutex);
// mix
_numStarted = _numFinished = 0;
_slaveCondition.notify_all();
// wait
_poolCondition.wait(lock, [&] {
assert(_numFinished <= _numThreads);
return _numFinished == _numThreads;
});
assert(_numStarted == _numThreads);
}
assert(_queue.empty());
#endif
}
void AudioMixerSlavePool::each(std::function<void(AudioMixerSlave& slave)> functor) {
#ifdef AUDIO_SINGLE_THREADED
functor(slave);
#else
for (auto& slave : _slaves) {
functor(*slave.get());
}
#endif
}
void AudioMixerSlavePool::setNumThreads(int numThreads) {
// clamp to allowed size
{
int maxThreads = QThread::idealThreadCount();
if (maxThreads == -1) {
// idealThreadCount returns -1 if cores cannot be detected
static const int MAX_THREADS_IF_UNKNOWN = 4;
maxThreads = MAX_THREADS_IF_UNKNOWN;
}
int clampedThreads = std::min(std::max(1, numThreads), maxThreads);
if (clampedThreads != numThreads) {
qWarning("%s: clamped to %d (was %d)", __FUNCTION__, clampedThreads, numThreads);
numThreads = clampedThreads;
}
}
resize(numThreads);
}
void AudioMixerSlavePool::resize(int numThreads) {
assert(_numThreads == _slaves.size());
#ifdef AUDIO_SINGLE_THREADED
qDebug("%s: running single threaded", __FUNCTION__, numThreads);
#else
qDebug("%s: set %d threads (was %d)", __FUNCTION__, numThreads, _numThreads);
Lock lock(_mutex);
if (numThreads > _numThreads) {
// start new slaves
for (int i = 0; i < numThreads - _numThreads; ++i) {
auto slave = new AudioMixerSlaveThread(*this);
slave->start();
_slaves.emplace_back(slave);
}
} else if (numThreads < _numThreads) {
auto extraBegin = _slaves.begin() + numThreads;
// mark slaves to stop...
auto slave = extraBegin;
while (slave != _slaves.end()) {
(*slave)->_stop = true;
++slave;
}
// ...cycle them until they do stop...
_numStopped = 0;
while (_numStopped != (_numThreads - numThreads)) {
_numStarted = _numFinished = _numStopped;
_slaveCondition.notify_all();
_poolCondition.wait(lock, [&] {
assert(_numFinished <= _numThreads);
return _numFinished == _numThreads;
});
}
// ...wait for threads to finish...
slave = extraBegin;
while (slave != _slaves.end()) {
QThread* thread = slave->get(); // implicit upcast; reinterpret_cast is unsafe here with multiple inheritance
static const int MAX_THREAD_WAIT_TIME = 10;
thread->wait(MAX_THREAD_WAIT_TIME);
++slave;
}
// ...and erase them
_slaves.erase(extraBegin, _slaves.end());
}
_numThreads = _numStarted = _numFinished = numThreads;
assert(_numThreads == _slaves.size());
#endif
}
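For context, the pool is meant to be driven once per mix frame from the mixer's main thread; a sketch of the intended call pattern (illustrative, assembled from the APIs in this commit rather than copied from it):

// hypothetical per-frame driver, not part of this file
void mixFrame(AudioMixerSlavePool& pool, NodeList& nodeList, unsigned int frame) {
    // nestedEach hands the pool a stable snapshot of nodes under a single read lock
    nodeList.nestedEach([&](NodeList::const_iterator begin, NodeList::const_iterator end) {
        pool.mix(begin, end, frame); // blocks until every slave reports finished
    });
    // fold the per-slave stats into a frame total, then clear them
    AudioMixerStats frameStats;
    pool.each([&](AudioMixerSlave& slave) {
        frameStats.accumulate(slave.stats);
        slave.stats.reset();
    });
}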

View file

@ -0,0 +1,97 @@
//
// AudioMixerSlavePool.h
// assignment-client/src/audio
//
// Created by Zach Pomerantz on 11/16/2016.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AudioMixerSlavePool_h
#define hifi_AudioMixerSlavePool_h
#include <condition_variable>
#include <mutex>
#include <vector>
#include <tbb/concurrent_queue.h>
#include <QThread>
#include "AudioMixerSlave.h"
class AudioMixerSlavePool;
class AudioMixerSlaveThread : public QThread, public AudioMixerSlave {
Q_OBJECT
using ConstIter = NodeList::const_iterator;
using Mutex = std::mutex;
using Lock = std::unique_lock<Mutex>;
public:
AudioMixerSlaveThread(AudioMixerSlavePool& pool) : _pool(pool) {}
void run() override final;
private:
friend class AudioMixerSlavePool;
void wait();
void notify(bool stopping);
bool try_pop(SharedNodePointer& node);
AudioMixerSlavePool& _pool;
bool _stop { false };
};
// Slave pool for audio mixers
// AudioMixerSlavePool is not thread-safe! It should be instantiated and used from a single thread.
class AudioMixerSlavePool {
using Queue = tbb::concurrent_queue<SharedNodePointer>;
using Mutex = std::mutex;
using Lock = std::unique_lock<Mutex>;
using ConditionVariable = std::condition_variable;
public:
using ConstIter = NodeList::const_iterator;
AudioMixerSlavePool(int numThreads = QThread::idealThreadCount()) { setNumThreads(numThreads); }
~AudioMixerSlavePool() { resize(0); }
// mix on slave threads
void mix(ConstIter begin, ConstIter end, unsigned int frame);
// iterate over all slaves
void each(std::function<void(AudioMixerSlave& slave)> functor);
void setNumThreads(int numThreads);
int numThreads() { return _numThreads; }
private:
void resize(int numThreads);
std::vector<std::unique_ptr<AudioMixerSlaveThread>> _slaves;
friend void AudioMixerSlaveThread::wait();
friend void AudioMixerSlaveThread::notify(bool stopping);
friend bool AudioMixerSlaveThread::try_pop(SharedNodePointer& node);
// synchronization state
Mutex _mutex;
ConditionVariable _slaveCondition;
ConditionVariable _poolCondition;
int _numThreads { 0 };
int _numStarted { 0 }; // guarded by _mutex
int _numFinished { 0 }; // guarded by _mutex
int _numStopped { 0 }; // guarded by _mutex
// frame state
Queue _queue;
unsigned int _frame { 0 };
ConstIter _begin;
ConstIter _end;
};
#endif // hifi_AudioMixerSlavePool_h

View file

@ -0,0 +1,34 @@
//
// AudioMixerStats.cpp
// assignment-client/src/audio
//
// Created by Zach Pomerantz on 11/22/16.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "AudioMixerStats.h"
void AudioMixerStats::reset() {
sumStreams = 0;
sumListeners = 0;
totalMixes = 0;
hrtfRenders = 0;
hrtfSilentRenders = 0;
hrtfStruggleRenders = 0;
manualStereoMixes = 0;
manualEchoMixes = 0;
}
void AudioMixerStats::accumulate(const AudioMixerStats& otherStats) {
sumStreams += otherStats.sumStreams;
sumListeners += otherStats.sumListeners;
totalMixes += otherStats.totalMixes;
hrtfRenders += otherStats.hrtfRenders;
hrtfSilentRenders += otherStats.hrtfSilentRenders;
hrtfStruggleRenders += otherStats.hrtfStruggleRenders;
manualStereoMixes += otherStats.manualStereoMixes;
manualEchoMixes += otherStats.manualEchoMixes;
}

View file

@ -0,0 +1,32 @@
//
// AudioMixerStats.h
// assignment-client/src/audio
//
// Created by Zach Pomerantz on 11/22/16.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_AudioMixerStats_h
#define hifi_AudioMixerStats_h
struct AudioMixerStats {
int sumStreams { 0 };
int sumListeners { 0 };
int totalMixes { 0 };
int hrtfRenders { 0 };
int hrtfSilentRenders { 0 };
int hrtfStruggleRenders { 0 };
int manualStereoMixes { 0 };
int manualEchoMixes { 0 };
void reset();
void accumulate(const AudioMixerStats& otherStats);
};
#endif // hifi_AudioMixerStats_h

View file

@ -6,8 +6,8 @@ if (WIN32)
include(ExternalProject)
ExternalProject_Add(
${EXTERNAL_NAME}
URL http://hifi-public.s3.amazonaws.com/dependencies/qtaudio_wasapi5.zip
URL_MD5 0530753e855ffc00232cc969bf1c84a8
URL http://hifi-public.s3.amazonaws.com/dependencies/qtaudio_wasapi4.zip
URL_MD5 2abde5340a64d387848f12b9536a7e85
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""

View file

@ -978,6 +978,29 @@
}
]
},
{
"name": "audio_threading",
"label": "Audio Threading",
"assignment-types": [0],
"settings": [
{
"name": "auto_threads",
"label": "Automatically determine thread count",
"type": "checkbox",
"help": "Allow system to determine number of threads (recommended)",
"default": false,
"advanced": true
},
{
"name": "num_threads",
"label": "Number of Threads",
"help": "Threads to spin up for audio mixing (if not automatically set)",
"placeholder": "1",
"default": "1",
"advanced": true
}
]
},
{
"name": "audio_env",
"label": "Audio Environment",

View file

@ -64,6 +64,7 @@
#include <ErrorDialog.h>
#include <FileScriptingInterface.h>
#include <Finally.h>
#include <FingerprintUtils.h>
#include <FramebufferCache.h>
#include <gpu/Batch.h>
#include <gpu/Context.h>
@ -572,7 +573,10 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer, bo
_window->setWindowTitle("Interface");
Model::setAbstractViewStateInterface(this); // The model class will sometimes need to know view state details from us
// TODO: This is temporary, while developing
FingerprintUtils::getMachineFingerprint();
// End TODO
auto nodeList = DependencyManager::get<NodeList>();
// Set up a watchdog thread to intentionally crash the application on deadlocks
@ -591,8 +595,10 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer, bo
qCDebug(interfaceapp) << "[VERSION] We will use DEVELOPMENT global services.";
#endif
bool wantsSandboxRunning = shouldRunServer();
static const QString NO_UPDATER_ARG = "--no-updater";
static const bool noUpdater = arguments().indexOf(NO_UPDATER_ARG) != -1;
static const bool wantsSandboxRunning = shouldRunServer();
static bool determinedSandboxState = false;
static bool sandboxIsRunning = false;
SandboxUtils sandboxUtils;
@ -602,11 +608,10 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer, bo
qCDebug(interfaceapp) << "Home sandbox appears to be running.....";
determinedSandboxState = true;
sandboxIsRunning = true;
}, [&, wantsSandboxRunning]() {
}, [&]() {
qCDebug(interfaceapp) << "Home sandbox does not appear to be running....";
if (wantsSandboxRunning) {
QString contentPath = getRunServerPath();
bool noUpdater = SteamClient::isRunning();
SandboxUtils::runLocalSandbox(contentPath, true, RUNNING_MARKER_FILENAME, noUpdater);
sandboxIsRunning = true;
}
@ -1128,7 +1133,7 @@ Application::Application(int& argc, char** argv, QElapsedTimer& startupTimer, bo
#endif
// If launched from Steam, let it handle updates
if (!SteamClient::isRunning()) {
if (!noUpdater) {
auto applicationUpdater = DependencyManager::get<AutoUpdater>();
connect(applicationUpdater.data(), &AutoUpdater::newVersionIsAvailable, dialogsManager.data(), &DialogsManager::showUpdateDialog);
applicationUpdater->checkForUpdate();

View file

@ -1165,6 +1165,7 @@ EntityItemProperties EntityItem::getProperties(EntityPropertyFlags desiredProper
properties._id = getID();
properties._idSet = true;
properties._created = _created;
properties._lastEdited = _lastEdited;
properties.setClientOnly(_clientOnly);
properties.setOwningAvatarID(_owningAvatarID);

View file

@ -369,6 +369,7 @@ QScriptValue EntityItemProperties::copyToScriptValue(QScriptEngine* engine, bool
COPY_PROPERTY_TO_QSCRIPTVALUE_GETTER_NO_SKIP(ageAsText, formatSecondsElapsed(getAge())); // gettable, but not settable
}
properties.setProperty("lastEdited", convertScriptValue(engine, _lastEdited));
COPY_PROPERTY_TO_QSCRIPTVALUE(PROP_LAST_EDITED_BY, lastEditedBy);
COPY_PROPERTY_TO_QSCRIPTVALUE(PROP_POSITION, position);
COPY_PROPERTY_TO_QSCRIPTVALUE(PROP_DIMENSIONS, dimensions);

View file

@ -69,3 +69,15 @@ QThread* RENDER_THREAD = nullptr;
bool isRenderThread() {
return QThread::currentThread() == RENDER_THREAD;
}
namespace gl {
void withSavedContext(const std::function<void()>& f) {
// Save the original GL context, because creating a QML surface will create a new context
QOpenGLContext* savedContext = QOpenGLContext::currentContext();
QSurface* savedSurface = savedContext ? savedContext->surface() : nullptr;
f();
if (savedContext) {
savedContext->makeCurrent(savedSurface);
}
}
}
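A typical use, mirroring the OffscreenQmlSurface change below: wrap any work that makes a different context current, and the caller's context is restored on the way out (sketch; _canvas is whatever offscreen surface the caller owns):

gl::withSavedContext([&] {
    _canvas->makeCurrent();  // take the offscreen context temporarily
    // ... GL work against _canvas ...
    _canvas->doneCurrent();
});  // the caller's original context is current again here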

View file

@ -10,6 +10,7 @@
#ifndef hifi_GLHelpers_h
#define hifi_GLHelpers_h
#include <functional>
#include <QJsonObject>
// 16 bits of depth precision
@ -34,4 +35,8 @@ int glVersionToInteger(QString glVersion);
bool isRenderThread();
namespace gl {
void withSavedContext(const std::function<void()>& f);
}
#endif

View file

@ -467,40 +467,41 @@ void OffscreenQmlSurface::resize(const QSize& newSize_, bool forceResize) {
}
qCDebug(glLogging) << "Offscreen UI resizing to " << newSize.width() << "x" << newSize.height();
gl::withSavedContext([&] {
_canvas->makeCurrent();
_canvas->makeCurrent();
// Release hold on the textures of the old size
if (uvec2() != _size) {
// If the most recent texture was unused, we can directly recycle it
if (_latestTextureAndFence.first) {
offscreenTextures.releaseTexture(_latestTextureAndFence);
_latestTextureAndFence = { 0, 0 };
// Release hold on the textures of the old size
if (uvec2() != _size) {
// If the most recent texture was unused, we can directly recycle it
if (_latestTextureAndFence.first) {
offscreenTextures.releaseTexture(_latestTextureAndFence);
_latestTextureAndFence = { 0, 0 };
}
offscreenTextures.releaseSize(_size);
}
offscreenTextures.releaseSize(_size);
}
_size = newOffscreenSize;
_size = newOffscreenSize;
// Acquire the new texture size
if (uvec2() != _size) {
offscreenTextures.acquireSize(_size);
if (_depthStencil) {
glDeleteRenderbuffers(1, &_depthStencil);
_depthStencil = 0;
// Acquire the new texture size
if (uvec2() != _size) {
offscreenTextures.acquireSize(_size);
if (_depthStencil) {
glDeleteRenderbuffers(1, &_depthStencil);
_depthStencil = 0;
}
glGenRenderbuffers(1, &_depthStencil);
glBindRenderbuffer(GL_RENDERBUFFER, _depthStencil);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, _size.x, _size.y);
if (!_fbo) {
glGenFramebuffers(1, &_fbo);
}
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, _fbo);
glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, _depthStencil);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
}
glGenRenderbuffers(1, &_depthStencil);
glBindRenderbuffer(GL_RENDERBUFFER, _depthStencil);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, _size.x, _size.y);
if (!_fbo) {
glGenFramebuffers(1, &_fbo);
}
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, _fbo);
glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, _depthStencil);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
}
_canvas->doneCurrent();
_canvas->doneCurrent();
});
}
QQuickItem* OffscreenQmlSurface::getRootItem() {

View file

@ -15,6 +15,10 @@ add_dependency_external_projects(tbb)
find_package(OpenSSL REQUIRED)
find_package(TBB REQUIRED)
if (APPLE)
find_library(FRAMEWORK_IOKIT IOKit)
endif ()
if (APPLE AND ${OPENSSL_INCLUDE_DIR} STREQUAL "/usr/include")
# this is a user on OS X using system OpenSSL, which is going to throw warnings since they're deprecating for their common crypto
message(WARNING "The found version of OpenSSL is the OS X system version. This will produce deprecation warnings."
@ -26,6 +30,11 @@ include_directories(SYSTEM "${OPENSSL_INCLUDE_DIR}")
# append OpenSSL to our list of libraries to link
target_link_libraries(${TARGET_NAME} ${OPENSSL_LIBRARIES} ${TBB_LIBRARIES})
# IOKit is needed for getting machine fingerprint
if (APPLE)
target_link_libraries(${TARGET_NAME} ${FRAMEWORK_IOKIT})
endif (APPLE)
# libcrypto uses dlopen in libdl
if (UNIX)
target_link_libraries(${TARGET_NAME} ${CMAKE_DL_LIBS})

View file

@ -0,0 +1,180 @@
//
// FingerprintUtils.h
// libraries/networking/src
//
// Created by David Kelly on 2016-12-02.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#include "FingerprintUtils.h"
#include <QDebug>
#include <SettingHandle.h>
#ifdef Q_OS_WIN
#include <comdef.h>
#include <Wbemidl.h>
#endif //Q_OS_WIN
#ifdef Q_OS_MAC
#include <IOKit/IOBSD.h>
#include <IOKit/IOKitLib.h>
#include <IOKit/storage/IOMedia.h>
#endif //Q_OS_MAC
static const QString FALLBACK_FINGERPRINT_KEY = "fallbackFingerprint";
QString FingerprintUtils::getMachineFingerprintString() {
QString uuidString;
#ifdef Q_OS_LINUX
// sadly, we need to be root to get the SMBIOS GUID on Linux,
// so for now do nothing.
#endif //Q_OS_LINUX
#ifdef Q_OS_MAC
io_registry_entry_t ioRegistryRoot = IORegistryEntryFromPath(kIOMasterPortDefault, "IOService:/");
CFStringRef uuidCf = (CFStringRef) IORegistryEntryCreateCFProperty(ioRegistryRoot, CFSTR(kIOPlatformUUIDKey), kCFAllocatorDefault, 0);
IOObjectRelease(ioRegistryRoot);
uuidString = QString::fromCFString(uuidCf);
CFRelease(uuidCf);
qDebug() << "Mac serial number: " << uuidString;
#endif //Q_OS_MAC
#ifdef Q_OS_WIN
HRESULT hres;
IWbemLocator *pLoc = NULL;
// obtain the initial locator to WMI (assumes COM has already been initialized)
hres = CoCreateInstance(
CLSID_WbemLocator,
0,
CLSCTX_INPROC_SERVER,
IID_IWbemLocator, (LPVOID *) &pLoc);
if (FAILED(hres)) {
qDebug() << "Failed to initialize WbemLocator";
return uuidString;
}
// Connect to WMI through the IWbemLocator::ConnectServer method
IWbemServices *pSvc = NULL;
// Connect to the root\cimv2 namespace with
// the current user and obtain pointer pSvc
// to make IWbemServices calls.
hres = pLoc->ConnectServer(
_bstr_t(L"ROOT\\CIMV2"), // Object path of WMI namespace
NULL, // User name. NULL = current user
NULL, // User password. NULL = current
0, // Locale. NULL indicates current
NULL, // Security flags.
0, // Authority (for example, Kerberos)
0, // Context object
&pSvc // pointer to IWbemServices proxy
);
if (FAILED(hres)) {
pLoc->Release();
qDebug() << "Failed to connect to WMI";
return uuidString;
}
// Set security levels on the proxy
hres = CoSetProxyBlanket(
pSvc, // Indicates the proxy to set
RPC_C_AUTHN_WINNT, // RPC_C_AUTHN_xxx
RPC_C_AUTHZ_NONE, // RPC_C_AUTHZ_xxx
NULL, // Server principal name
RPC_C_AUTHN_LEVEL_CALL, // RPC_C_AUTHN_LEVEL_xxx
RPC_C_IMP_LEVEL_IMPERSONATE, // RPC_C_IMP_LEVEL_xxx
NULL, // client identity
EOAC_NONE // proxy capabilities
);
if (FAILED(hres)) {
pSvc->Release();
pLoc->Release();
qDebug() << "Failed to set security on proxy blanket";
return uuidString;
}
// Use the IWbemServices pointer to query Win32_ComputerSystemProduct
IEnumWbemClassObject* pEnumerator = NULL;
hres = pSvc->ExecQuery(
bstr_t("WQL"),
bstr_t("SELECT * FROM Win32_ComputerSystemProduct"),
WBEM_FLAG_FORWARD_ONLY | WBEM_FLAG_RETURN_IMMEDIATELY,
NULL,
&pEnumerator);
if (FAILED(hres)) {
pSvc->Release();
pLoc->Release();
qDebug() << "query to get Win32_ComputerSystemProduct info";
return uuidString;
}
// Get the UUID from the Win32_ComputerSystemProduct data
IWbemClassObject *pclsObj;
ULONG uReturn = 0;
SHORT sRetStatus = -100;
while (pEnumerator) {
HRESULT hr = pEnumerator->Next(WBEM_INFINITE, 1, &pclsObj, &uReturn);
if (0 == uReturn) {
break;
}
VARIANT vtProp;
// Get the value of the UUID property
hr = pclsObj->Get(L"UUID", 0, &vtProp, 0, 0);
if (!FAILED(hr)) {
switch (vtProp.vt) {
case VT_BSTR:
uuidString = QString::fromWCharArray(vtProp.bstrVal);
break;
}
}
VariantClear(&vtProp);
pclsObj->Release();
}
pEnumerator->Release();
// Cleanup
pSvc->Release();
pLoc->Release();
qDebug() << "Windows BIOS UUID: " << uuidString;
#endif //Q_OS_WIN
return uuidString;
}
QUuid FingerprintUtils::getMachineFingerprint() {
QString uuidString = getMachineFingerprintString();
// now, turn into uuid. A malformed string will
// return QUuid() ("{00000...}"), which handles
// any errors in getting the string
QUuid uuid(uuidString);
if (uuid == QUuid()) {
// read fallback key (if any)
Settings settings;
uuid = QUuid(settings.value(FALLBACK_FINGERPRINT_KEY).toString());
qDebug() << "read fallback maching fingerprint: " << uuid.toString();
if (uuid == QUuid()) {
// no fallback yet, set one
uuid = QUuid::createUuid();
settings.setValue(FALLBACK_FINGERPRINT_KEY, uuid.toString());
qDebug() << "no fallback machine fingerprint, setting it to: " << uuid.toString();
}
}
return uuid;
}
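Callers only see the QUuid entry point; when the platform ID is missing or malformed, the stored random fallback makes the result stable per machine anyway. Minimal illustrative usage:

QUuid fingerprint = FingerprintUtils::getMachineFingerprint();
qDebug() << "machine fingerprint:" << fingerprint.toString();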

View file

@ -0,0 +1,27 @@
//
// FingerprintUtils.h
// libraries/networking/src
//
// Created by David Kelly on 2016-12-02.
// Copyright 2016 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_FingerprintUtils_h
#define hifi_FingerprintUtils_h
#include <QString>
#include <QUuid>
class FingerprintUtils {
public:
static QUuid getMachineFingerprint();
private:
static QString getMachineFingerprintString();
};
#endif // hifi_FingerprintUtils_h

View file

@ -174,7 +174,26 @@ public:
void sendPeerQueryToIceServer(const HifiSockAddr& iceServerSockAddr, const QUuid& clientID, const QUuid& peerID);
SharedNodePointer findNodeWithAddr(const HifiSockAddr& addr);
using value_type = SharedNodePointer;
using const_iterator = std::vector<value_type>::const_iterator;
// Cede control of iteration under a single read lock (e.g. for use by thread pools)
// Use this for nested loops instead of taking nested read locks!
// This allows multiple threads (i.e. a thread pool) to share a lock
// without deadlocking when a dying node attempts to acquire a write lock
template<typename NestedNodeLambda>
void nestedEach(NestedNodeLambda functor) {
QReadLocker readLock(&_nodeMutex);
std::vector<SharedNodePointer> nodes(_nodeHash.size());
std::transform(_nodeHash.cbegin(), _nodeHash.cend(), nodes.begin(), [](const NodeHash::value_type& it) {
return it.second;
});
functor(nodes.cbegin(), nodes.cend());
}
template<typename NodeLambda>
void eachNode(NodeLambda functor) {
QReadLocker readLock(&_nodeMutex);
@ -280,7 +299,7 @@ signals:
protected slots:
void connectedForLocalSocketTest();
void errorTestingLocalSocket();
void clientConnectionToSockAddrReset(const HifiSockAddr& sockAddr);
protected:
@ -347,7 +366,7 @@ protected:
functor(it);
}
}
private slots:
void flagTimeForConnectionStep(ConnectionStep connectionStep, quint64 timestamp);
void possiblyTimeoutSTUNAddressLookup();
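To make the hazard that nestedEach() above avoids concrete, a hedged sketch of the two patterns (illustrative only): QReadWriteLock blocks new readers once a writer is queued, so nested eachNode() calls across a thread pool can deadlock against a dying node's write lock, while nestedEach() iterates a snapshot with no lock held:

// hazard: a second read lock on the same QReadWriteLock can block behind a queued writer
nodeList->eachNode([&](const SharedNodePointer& outer) {
    nodeList->eachNode([&](const SharedNodePointer& inner) { /* ... */ });
});

// safe: one read lock to snapshot, then lock-free iteration over the copy
nodeList->nestedEach([&](NodeList::const_iterator begin, NodeList::const_iterator end) {
    std::for_each(begin, end, [&](const SharedNodePointer& node) { /* ... */ });
});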

View file

@ -24,7 +24,7 @@ struct FrustumGrid {
mat4 eyeToWorldMat;
};
uniform frustumGridBuffer {
layout(std140) uniform frustumGridBuffer {
FrustumGrid frustumGrid;
};
@ -51,16 +51,20 @@ float projection_getFar(mat4 projection) {
#define GRID_INDEX_TYPE ivec4
#define GRID_FETCH_BUFFER(i) i / 4][i % 4
<@else@>
#define GRID_NUM_ELEMENTS 16384
#define GRID_NUM_ELEMENTS 4096
#define GRID_INDEX_TYPE ivec4
#define GRID_FETCH_BUFFER(i) i / 4][i % 4
<!#define GRID_NUM_ELEMENTS 16384
#define GRID_INDEX_TYPE int
#define GRID_FETCH_BUFFER(i) i
#define GRID_FETCH_BUFFER(i) i!>
<@endif@>
uniform clusterGridBuffer {
layout(std140) uniform clusterGridBuffer {
GRID_INDEX_TYPE _clusterGridTable[GRID_NUM_ELEMENTS];
};
uniform clusterContentBuffer {
layout(std140) uniform clusterContentBuffer {
GRID_INDEX_TYPE _clusterGridContent[GRID_NUM_ELEMENTS];
};

View file

@ -27,21 +27,21 @@
enum LightClusterGridShader_MapSlot {
DEFERRED_BUFFER_LINEAR_DEPTH_UNIT = 0,
DEFERRED_BUFFER_COLOR_UNIT,
DEFERRED_BUFFER_NORMAL_UNIT,
DEFERRED_BUFFER_EMISSIVE_UNIT,
DEFERRED_BUFFER_DEPTH_UNIT,
DEFERRED_BUFFER_COLOR_UNIT = 1,
DEFERRED_BUFFER_NORMAL_UNIT = 2,
DEFERRED_BUFFER_EMISSIVE_UNIT = 3,
DEFERRED_BUFFER_DEPTH_UNIT = 4,
};
enum LightClusterGridShader_BufferSlot {
LIGHT_CLUSTER_GRID_FRUSTUM_GRID_SLOT = 0,
DEFERRED_FRAME_TRANSFORM_BUFFER_SLOT,
CAMERA_CORRECTION_BUFFER_SLOT,
DEFERRED_FRAME_TRANSFORM_BUFFER_SLOT = 1,
CAMERA_CORRECTION_BUFFER_SLOT = 2,
LIGHT_GPU_SLOT = render::ShapePipeline::Slot::LIGHT,
LIGHT_INDEX_GPU_SLOT,
LIGHT_INDEX_GPU_SLOT = 5,
LIGHT_CLUSTER_GRID_CLUSTER_GRID_SLOT,
LIGHT_CLUSTER_GRID_CLUSTER_CONTENT_SLOT,
LIGHT_CLUSTER_GRID_CLUSTER_GRID_SLOT = 6,
LIGHT_CLUSTER_GRID_CLUSTER_CONTENT_SLOT = 7,
};
FrustumGrid::FrustumGrid(const FrustumGrid& source) :

View file

@ -34,7 +34,6 @@ in vec2 _texCoord0;
out vec4 _fragColor;
void main(void) {
// Grab the fragment data from the uv
vec2 texCoord = _texCoord0.st;
@ -49,7 +48,7 @@ void main(void) {
// Frag pos in world
mat4 invViewMat = getViewInverse();
vec4 fragPos = invViewMat * fragPosition;
// From frag world pos find the cluster
vec4 clusterEyePos = frustumGrid_worldToEye(fragPos);
ivec3 clusterPos = frustumGrid_eyeToClusterPos(clusterEyePos.xyz);
@ -84,8 +83,8 @@ void main(void) {
vec4 fragEyeVector = invViewMat * vec4(-frag.position.xyz, 0.0);
vec3 fragEyeDir = normalize(fragEyeVector.xyz);
// COmpute the rougness into gloss2 once:
float fragGloss2 = pow(frag.roughness + 0.001, 2.0);
// Compute the roughness into gloss2 once:
float fragGloss2 = pow(frag.roughness + 0.001, 4.0);
bool withScattering = (frag.scattering * isScatteringEnabled() > 0.0);
int numLightTouching = 0;

View file

@ -568,46 +568,20 @@ void AABox::transform(const Transform& transform) {
translate(transform.getTranslation());
}
// Logic based on http://clb.demon.fi/MathGeoLib/nightly/docs/AABB.cpp_code.html#471
void AABox::transform(const glm::mat4& matrix) {
auto minimum = _corner;
auto maximum = _corner + _scale;
auto halfSize = _scale * 0.5f;
auto center = _corner + halfSize;
halfSize = abs(halfSize);
auto newCenter = transformPoint(matrix, center);
glm::vec3 bottomLeftNear(minimum.x, minimum.y, minimum.z);
glm::vec3 bottomRightNear(maximum.x, minimum.y, minimum.z);
glm::vec3 bottomLeftFar(minimum.x, minimum.y, maximum.z);
glm::vec3 bottomRightFar(maximum.x, minimum.y, maximum.z);
glm::vec3 topLeftNear(minimum.x, maximum.y, minimum.z);
glm::vec3 topRightNear(maximum.x, maximum.y, minimum.z);
glm::vec3 topLeftFar(minimum.x, maximum.y, maximum.z);
glm::vec3 topRightFar(maximum.x, maximum.y, maximum.z);
auto mm = glm::transpose(glm::mat3(matrix));
vec3 newDir = vec3(
glm::dot(glm::abs(vec3(mm[0])), halfSize),
glm::dot(glm::abs(vec3(mm[1])), halfSize),
glm::dot(glm::abs(vec3(mm[2])), halfSize)
);
glm::vec3 bottomLeftNearTransformed = transformPoint(matrix, bottomLeftNear);
glm::vec3 bottomRightNearTransformed = transformPoint(matrix, bottomRightNear);
glm::vec3 bottomLeftFarTransformed = transformPoint(matrix, bottomLeftFar);
glm::vec3 bottomRightFarTransformed = transformPoint(matrix, bottomRightFar);
glm::vec3 topLeftNearTransformed = transformPoint(matrix, topLeftNear);
glm::vec3 topRightNearTransformed = transformPoint(matrix, topRightNear);
glm::vec3 topLeftFarTransformed = transformPoint(matrix, topLeftFar);
glm::vec3 topRightFarTransformed = transformPoint(matrix, topRightFar);
minimum = glm::min(bottomLeftNearTransformed,
glm::min(bottomRightNearTransformed,
glm::min(bottomLeftFarTransformed,
glm::min(bottomRightFarTransformed,
glm::min(topLeftNearTransformed,
glm::min(topRightNearTransformed,
glm::min(topLeftFarTransformed,
topRightFarTransformed)))))));
maximum = glm::max(bottomLeftNearTransformed,
glm::max(bottomRightNearTransformed,
glm::max(bottomLeftFarTransformed,
glm::max(bottomRightFarTransformed,
glm::max(topLeftNearTransformed,
glm::max(topRightNearTransformed,
glm::max(topLeftFarTransformed,
topRightFarTransformed)))))));
_corner = minimum;
_scale = maximum - minimum;
_corner = newCenter - newDir;
_scale = newDir * 2.0f;
}
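The replacement relies on a standard identity: for an affine transform with linear part M, a box centered at c with half-extent h maps to a bounding box centered at the transformed c whose half-extent along output axis i is dot(abs(row_i(M)), h), because each axis extent is maximized independently over the eight corners. That is exactly what the transposed mat3 and the three dot products above compute; a self-contained version of the half-extent step (illustrative):

#include <glm/glm.hpp>

// half-extent of the axis-aligned box that bounds the transformed box
glm::vec3 transformedHalfExtent(const glm::mat4& matrix, const glm::vec3& halfSize) {
    // transpose so mm[i] is row i of the upper-left 3x3 of matrix
    glm::mat3 mm = glm::transpose(glm::mat3(matrix));
    return glm::vec3(glm::dot(glm::abs(mm[0]), halfSize),
                     glm::dot(glm::abs(mm[1]), halfSize),
                     glm::dot(glm::abs(mm[2]), halfSize));
}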

View file

@ -50,4 +50,16 @@ describe('Entity', function() {
var props = Entities.getEntityProperties(boxEntity);
expect(Math.round(props.position.z)).toEqual(Math.round(newPos.z));
});
it("\'s last edited property working correctly", function() {
var props = Entities.getEntityProperties(boxEntity);
expect(props.lastEdited).toBeDefined();
expect(props.lastEdited).not.toBe(0);
var prevLastEdited = props.lastEdited;
// Now we make an edit to the entity, which should update its last edited time
Entities.editEntity(boxEntity, {color: {red: 0, green: 255, blue: 0}});
props = Entities.getEntityProperties(boxEntity);
expect(props.lastEdited).toBeGreaterThan(prevLastEdited);
});
});

View file

@ -0,0 +1,167 @@
<html>
<head>
<title>Photo Booth</title>
<meta http-equiv="Content-Type" content="text/html;charset=utf-8">
<link rel="stylesheet" type="text/css" href="../../../../../system/html/css/edit-style.css">
<script type="text/javascript" src="qrc:///qtwebchannel/qwebchannel.js"></script>
<script>
var EventBridge;
var openEventBridge = function (callback) {
var WebChannel = new QWebChannel(qt.webChannelTransport, function (channel) {
EventBridge = WebChannel.objects.eventBridgeWrapper.eventBridge;
callback();
});
};
var emit = function (eventType, data) {
data = data || {};
data.type = eventType;
EventBridge.emitWebEvent(JSON.stringify(data));
};
function loaded () {
openEventBridge(function () {
emit("onLoad", {value: "faye"});
var elModelURL = document.getElementById("model-url");
var elReloadModelButton = document.getElementById("reload-model-button");
var elCamera = document.getElementById("property-camera");
//var elLightingPreset = document.getElementById("property-lighting-preset");
var elPictureButton = document.getElementById("picture-button");
elReloadModelButton.addEventListener('click', function() {
emit("onClickReloadModelButton", {value: elModelURL.value});
});
elCamera.addEventListener('change', function() {
emit("onSelectCamera", {value: this.value});
});
// elLightingPreset.addEventListener('change', function() {
// emit("onSelectLightingPreset", {value: "faye"});
// });
elPictureButton.addEventListener('click', function() {
emit("onClickPictureButton", {value: "faye"});
});
});
// Drop downs
function setDropdownText(dropdown) {
var lis = dropdown.parentNode.getElementsByTagName("li");
var text = "";
for (var i = 0; i < lis.length; i++) {
if (lis[i].getAttribute("value") === dropdown.value) {
text = lis[i].textContent;
}
}
dropdown.firstChild.textContent = text;
}
function toggleDropdown(event) {
var element = event.target;
if (element.nodeName !== "DT") {
element = element.parentNode;
}
element = element.parentNode;
var isDropped = element.getAttribute("dropped");
element.setAttribute("dropped", isDropped !== "true" ? "true" : "false");
}
function setDropdownValue(event) {
var dt = event.target.parentNode.parentNode.previousSibling;
dt.value = event.target.getAttribute("value");
dt.firstChild.textContent = event.target.textContent;
dt.parentNode.setAttribute("dropped", "false");
var evt = document.createEvent("HTMLEvents");
evt.initEvent("change", true, true);
dt.dispatchEvent(evt);
}
var elDropdowns = document.getElementsByTagName("select");
for (var i = 0; i < elDropdowns.length; i++) {
var options = elDropdowns[i].getElementsByTagName("option");
var selectedOption = 0;
for (var j = 0; j < options.length; j++) {
if (options[j].getAttribute("selected") === "selected") {
selectedOption = j;
}
}
var div = elDropdowns[i].parentNode;
var dl = document.createElement("dl");
div.appendChild(dl);
var dt = document.createElement("dt");
dt.name = elDropdowns[i].name;
dt.id = elDropdowns[i].id;
dt.addEventListener("click", toggleDropdown, true);
dl.appendChild(dt);
var span = document.createElement("span");
span.setAttribute("value", options[selectedOption].value);
span.textContent = options[selectedOption].firstChild.textContent;
dt.appendChild(span);
var span = document.createElement("span");
span.textContent = "5"; // caratDn
dt.appendChild(span);
var dd = document.createElement("dd");
dl.appendChild(dd);
var ul = document.createElement("ul");
dd.appendChild(ul);
for (var j = 0; j < options.length; j++) {
var li = document.createElement("li");
li.setAttribute("value", options[j].value);
li.textContent = options[j].firstChild.textContent;
li.addEventListener("click", setDropdownValue);
ul.appendChild(li);
}
}
elDropdowns = document.getElementsByTagName("select");
while (elDropdowns.length > 0) {
var el = elDropdowns[0];
el.parentNode.removeChild(el);
elDropdowns = document.getElementsByTagName("select");
}
}
</script>
<style>
</style>
</head>
<body onload="loaded()">
<div id="properties-list">
<div class="property url refresh">
<label>Model URL</label>
<input type="text" id="model-url"></input>
<input type="button" id="reload-model-button" class="glyph" value="F">
</div>
<!--
<div class="property dropdown">
<label>Lighting Preset</label>
<select id="property-lighting-preset">
<option>Default Lighting</option>
<option>Sam's Cool Light</option>
<option>Alan's Light Magic</option>
</select>
</div>
-->
<div class="property dropdown">
<label>Camera</label>
<select id="property-camera">
<option>First Person Camera</option>
<option>Main Camera</option>
<option>Left Camera</option>
<option>Right Camera</option>
</select>
</div>
<div class="property">
<input id="picture-button" type="button" class="blue" value="Take Picture">
</div>
</div>
</body>
</html>

View file

@ -0,0 +1,176 @@
(function () {
var SNAPSHOT_DELAY = 500; // 500ms
var PHOTOBOOTH_WINDOW_HTML_URL = Script.resolvePath("./html/photobooth.html");
var PHOTOBOOTH_SETUP_JSON_URL = Script.resolvePath("./photoboothSetup.json");
var toolbar = Toolbars.getToolbar("com.highfidelity.interface.toolbar.system");
var MODEL_BOUNDING_BOX_DIMENSIONS = {x: 1.0174,y: 1.1925,z: 1.0165};
var PhotoBooth = {};
PhotoBooth.init = function () {
var success = Clipboard.importEntities(PHOTOBOOTH_SETUP_JSON_URL);
var frontFactor = 10;
var frontUnitVec = Vec3.normalize(Quat.getFront(MyAvatar.orientation));
var frontOffset = Vec3.multiply(frontUnitVec, frontFactor);
var rightFactor = 3;
var rightUnitVec = Vec3.normalize(Quat.getRight(MyAvatar.orientation));
var rightOffset = Vec3.multiply(rightUnitVec, rightFactor);
var spawnLocation = Vec3.sum(Vec3.sum(MyAvatar.position, frontOffset), rightOffset);
if (success) {
this.pastedEntityIDs = Clipboard.pasteEntities(spawnLocation);
this.processPastedEntities();
}
};
PhotoBooth.processPastedEntities = function () {
var cameraResults = {};
var modelResult;
var modelPos;
this.pastedEntityIDs.forEach(function(id) {
var props = Entities.getEntityProperties(id);
var parts = props["name"].split(":");
if (parts[0] === "Photo Booth Camera") {
cameraResults[parts[1]] = id;
}
if (parts[0] === "Photo Booth Model") {
modelResult = id;
modelPos = props.position;
}
});
print(JSON.stringify(cameraResults));
print(JSON.stringify(modelResult));
this.cameraEntities = cameraResults;
this.modelEntityID = modelResult;
this.centrePos = modelPos;
};
// replace the model in the scene with a new model
PhotoBooth.changeModel = function (newModelURL) {
// deletes old model
Entities.deleteEntity(this.modelEntityID);
// create new model at centre of the photobooth
var newProps = {
type: "Model",
modelURL: newModelURL,
position: this.centrePos
};
var newModelEntityID = Entities.addEntity(newProps);
// scale model dimensions to fit in bounding box
var scaleModel = function () {
newProps = Entities.getEntityProperties(newModelEntityID);
var myDimensions = newProps.dimensions;
print("myDimensions: " + JSON.stringify(myDimensions));
var k;
if (myDimensions.x > MODEL_BOUNDING_BOX_DIMENSIONS.x) {
k = MODEL_BOUNDING_BOX_DIMENSIONS.x / myDimensions.x;
myDimensions = Vec3.multiply(k, myDimensions);
}
if (myDimensions.y > MODEL_BOUNDING_BOX_DIMENSIONS.y) {
k = MODEL_BOUNDING_BOX_DIMENSIONS.y / myDimensions.y;
myDimensions = Vec3.multiply(k, myDimensions);
}
if (myDimensions.z > MODEL_BOUNDING_BOX_DIMENSIONS.z) {
k = MODEL_BOUNDING_BOX_DIMENSIONS.z / myDimensions.z;
myDimensions = Vec3.multiply(k, myDimensions);
}
// position the new model on the table
var y_offset = (MODEL_BOUNDING_BOX_DIMENSIONS.y - myDimensions.y) / 2;
var myPosition = Vec3.sum(newProps.position, {x:0, y:-y_offset, z:0});
Entities.editEntity(newModelEntityID,{position: myPosition, dimensions: myDimensions});
};
// add a delay before scaling to make sure the entity server has gotten the right model dimensions
Script.setTimeout(function () {
scaleModel();
}, 400);
this.modelEntityID = newModelEntityID;
};
PhotoBooth.destroy = function () {
this.pastedEntityIDs.forEach(function(id) {
Entities.deleteEntity(id);
});
Entities.deleteEntity(this.modelEntityID);
};
var main = function () {
PhotoBooth.init();
var photoboothWindowListener = {};
photoboothWindowListener.onLoad = function (event) {
print("loaded" + event.value);
if (!event.hasOwnProperty("value")){
return;
}
};
photoboothWindowListener.onSelectCamera = function (event) {
print("selected camera " + event.value);
if (!event.hasOwnProperty("value")){
return;
}
if (event.value === "First Person Camera") {
Camera.mode = "first person";
} else {
Camera.mode = "entity";
var cameraID = PhotoBooth.cameraEntities[event.value];
Camera.setCameraEntity(cameraID);
}
};
photoboothWindowListener.onSelectLightingPreset = function (event) {
print("selected lighting preset" + event.value);
if (!event.hasOwnProperty("value")){
return;
}
};
photoboothWindowListener.onClickPictureButton = function (event) {
print("clicked picture button");
// hide HUD tool bar
toolbar.writeProperty("visible", false);
// hide Overlays (such as Running Scripts or other Dialog UI)
Menu.setIsOptionChecked("Overlays", false);
// hide mouse cursor
Reticle.visible = false;
// delay the snapshot so there is time to hide the toolbar and other UIs
// void WindowScriptingInterface::takeSnapshot(bool notify, bool includeAnimated, float aspectRatio)
Script.setTimeout(function () {
Window.takeSnapshot(false, false, 1.91);
// show hidden items after snapshot is taken
toolbar.writeProperty("visible", true);
Menu.setIsOptionChecked("Overlays", true);
// unknown issue: somehow we don't need to reset cursor to visible in script and the mouse still returns after snapshot
// Reticle.visible = true;
}, SNAPSHOT_DELAY);
};
photoboothWindowListener.onClickReloadModelButton = function (event) {
print("clicked reload model button " + event.value);
PhotoBooth.changeModel(event.value);
};
var photoboothWindow = new OverlayWebWindow({
title: 'Photo Booth',
source: PHOTOBOOTH_WINDOW_HTML_URL,
width: 450,
height: 450,
visible: true
});
photoboothWindow.webEventReceived.connect(function (data) {
var event = JSON.parse(data);
if (photoboothWindowListener[event.type]) {
photoboothWindowListener[event.type](event);
}
});
};
main();
function cleanup() {
Camera.mode = "first person";
PhotoBooth.destroy();
}
Script.scriptEnding.connect(cleanup);
}());

View file

@ -0,0 +1,482 @@
{
"Entities": [
{
"clientOnly": 0,
"collisionless": 1,
"color": {
"blue": 149,
"green": 245,
"red": 245
},
"created": "2016-11-29T23:20:47Z",
"dimensions": {
"x": 0.05000000074505806,
"y": 0.05000000074505806,
"z": 0.0099999997764825821
},
"id": "{4a7b6258-ccc5-472e-ba41-dfd224115bee}",
"ignoreForCollisions": 1,
"lastEditedBy": "{d74cd0af-624e-4d3d-a930-f6cb7e47667d}",
"name": "Photo Booth Camera:Right Camera",
"owningAvatarID": "{00000000-0000-0000-0000-000000000000}",
"parentID": "{838ac5ff-5e06-4768-9389-9796577c5bc5}",
"position": {
"x": -0.022934332489967346,
"y": -0.25898283720016479,
"z": 0.17889007925987244
},
"queryAACube": {
"scale": 0.071414284408092499,
"x": 15.183169364929199,
"y": -192.90565490722656,
"z": 25.429607391357422
},
"rotation": {
"w": -7.62939453125e-05,
"x": -1.52587890625e-05,
"y": 1,
"z": -4.57763671875e-05
},
"shape": "Cube",
"type": "Box",
"visible": 0
},
{
"clientOnly": 0,
"collisionless": 1,
"color": {
"blue": 149,
"green": 245,
"red": 245
},
"created": "2016-11-29T23:20:47Z",
"dimensions": {
"x": 0.05000000074505806,
"y": 0.05000000074505806,
"z": 0.0099999997764825821
},
"id": "{81ae005c-4738-4359-8860-98d00c8dd3a4}",
"ignoreForCollisions": 1,
"lastEditedBy": "{d74cd0af-624e-4d3d-a930-f6cb7e47667d}",
"name": "Photo Booth Camera:Main Camera",
"owningAvatarID": "{00000000-0000-0000-0000-000000000000}",
"parentID": "{d5d16926-6f05-4411-931a-3ff8c897d728}",
"position": {
"x": -0.021826114505529404,
"y": -0.25215747952461243,
"z": 0.17469465732574463
},
"queryAACube": {
"scale": 0.071414284408092499,
"x": 16.758693695068359,
"y": -193.97714233398438,
"z": 24.816326141357422
},
"rotation": {
"w": -1.52587890625e-05,
"x": -1.52587890625e-05,
"y": 1,
"z": -1.52587890625e-05
},
"shape": "Cube",
"type": "Box",
"visible": 0
},
{
"clientOnly": 0,
"collisionless": 1,
"color": {
"blue": 149,
"green": 245,
"red": 245
},
"created": "2016-11-29T23:20:47Z",
"dimensions": {
"x": 0.05000000074505806,
"y": 0.05000000074505806,
"z": 0.0099999997764825821
},
"id": "{77817ac3-0862-46b6-8648-fdb8b855e4cb}",
"ignoreForCollisions": 1,
"lastEditedBy": "{d74cd0af-624e-4d3d-a930-f6cb7e47667d}",
"name": "Photo Booth Camera:Left Camera",
"owningAvatarID": "{00000000-0000-0000-0000-000000000000}",
"parentID": "{2fc83747-6652-4fd1-bf21-c3d44ad610ea}",
"position": {
"x": -0.021829158067703247,
"y": -0.25214886665344238,
"z": 0.17469853162765503
},
"queryAACube": {
"scale": 0.071414284408092499,
"x": 18.187423706054688,
"y": -193.3980712890625,
"z": 25.408010482788086
},
"rotation": {
"w": -1.52587890625e-05,
"x": -7.62939453125e-05,
"y": 1,
"z": -1.52587890625e-05
},
"shape": "Cube",
"type": "Box",
"visible": 0
},
{
"clientOnly": 0,
"collisionless": 1,
"created": "2016-11-29T23:20:47Z",
"dimensions": {
"x": 0.43360000848770142,
"y": 0.65679997205734253,
"z": 0.42160001397132874
},
"gravity": {
"x": 0,
"y": -9,
"z": 0
},
"id": "{d5d16926-6f05-4411-931a-3ff8c897d728}",
"ignoreForCollisions": 1,
"lastEditedBy": "{d74cd0af-624e-4d3d-a930-f6cb7e47667d}",
"modelURL": "http://hifi-content.s3.amazonaws.com/caitlyn/production/lazybonesToybox/cameras/35mm%20camera.fbx?232222",
"name": "35 MM SLR by Lazybones",
"owningAvatarID": "{00000000-0000-0000-0000-000000000000}",
"position": {
"x": 2.5208168029785156,
"y": 2.11322021484375,
"z": 1.1888446807861328
},
"queryAACube": {
"scale": 0.89282792806625366,
"x": 2.0744028091430664,
"y": 1.6668062210083008,
"z": 0.74243068695068359
},
"rotation": {
"w": 1,
"x": -1.52587890625e-05,
"y": -1.52587890625e-05,
"z": -1.52587890625e-05
},
"scriptTimestamp": 1479859505510,
"shapeType": "simple-hull",
"type": "Model",
"userData": "{\"grabbableKey\":{\"grabbable\":true},\"wearable\":{\"joints\":{\"LeftHand\":[{\"x\":-0.23937,\"y\":0.334177,\"z\":0.150116},{\"x\":-0.31183,\"y\":0.535888,\"z\":-0.37311,\"w\":-0.69021}],\"RightHand\":[{\"x\":0.11031082272529602,\"y\":0.19449540972709656,\"z\":0.0405043363571167},{\"x\":0.2807741165161133,\"y\":0.6332069635391235,\"z\":0.2997693121433258,\"w\":-0.6557632088661194}]}}}"
},
{
"clientOnly": 0,
"collisionless": 1,
"created": "2016-11-29T23:20:47Z",
"dimensions": {
"x": 0.43360000848770142,
"y": 0.65679997205734253,
"z": 0.42155000567436218
},
"gravity": {
"x": 0,
"y": -9,
"z": 0
},
"id": "{838ac5ff-5e06-4768-9389-9796577c5bc5}",
"ignoreForCollisions": 1,
"lastEditedBy": "{d74cd0af-624e-4d3d-a930-f6cb7e47667d}",
"modelURL": "http://hifi-content.s3.amazonaws.com/caitlyn/production/lazybonesToybox/cameras/35mm%20camera.fbx?232222",
"name": "35 MM SLR by Lazybones",
"owningAvatarID": "{00000000-0000-0000-0000-000000000000}",
"position": {
"x": 0.86136627197265625,
"y": 3.2271270751953125,
"z": 1.8821067810058594
},
"queryAACube": {
"scale": 0.89280432462692261,
"x": 0.41496410965919495,
"y": 2.7807250022888184,
"z": 1.4357045888900757
},
"rotation": {
"w": 0.91699087619781494,
"x": 0.11256575584411621,
"y": 0.37981235980987549,
"z": -0.046890974044799805
},
"scriptTimestamp": 1479859456707,
"shapeType": "simple-hull",
"type": "Model",
"userData": "{\"grabbableKey\":{\"grabbable\":true},\"wearable\":{\"joints\":{\"LeftHand\":[{\"x\":-0.23937,\"y\":0.334177,\"z\":0.150116},{\"x\":-0.31183,\"y\":0.535888,\"z\":-0.37311,\"w\":-0.69021}],\"RightHand\":[{\"x\":0.11031082272529602,\"y\":0.19449540972709656,\"z\":0.0405043363571167},{\"x\":0.2807741165161133,\"y\":0.6332069635391235,\"z\":0.2997693121433258,\"w\":-0.6557632088661194}]}}}"
},
{
"clientOnly": 0,
"collisionless": 1,
"created": "2016-11-29T23:20:47Z",
"dimensions": {
"x": 0.43360000848770142,
"y": 0.65679997205734253,
"z": 0.42155000567436218
},
"gravity": {
"x": 0,
"y": -9,
"z": 0
},
"id": "{2fc83747-6652-4fd1-bf21-c3d44ad610ea}",
"ignoreForCollisions": 1,
"lastEditedBy": "{d74cd0af-624e-4d3d-a930-f6cb7e47667d}",
"modelURL": "http://hifi-content.s3.amazonaws.com/caitlyn/production/lazybonesToybox/cameras/35mm%20camera.fbx?232222",
"name": "35 MM SLR by Lazybones",
"owningAvatarID": "{00000000-0000-0000-0000-000000000000}",
"position": {
"x": 4.0442371368408203,
"y": 2.7116241455078125,
"z": 1.869842529296875
},
"queryAACube": {
"scale": 0.89280432462692261,
"x": 3.5978350639343262,
"y": 2.2652220726013184,
"z": 1.4234403371810913
},
"rotation": {
"w": 0.92196536064147949,
"x": 0.056198954582214355,
"y": -0.38243687152862549,
"z": 0.023208975791931152
},
"scriptTimestamp": 1479859451129,
"shapeType": "simple-hull",
"type": "Model",
"userData": "{\"grabbableKey\":{\"grabbable\":true},\"wearable\":{\"joints\":{\"LeftHand\":[{\"x\":-0.23937,\"y\":0.334177,\"z\":0.150116},{\"x\":-0.31183,\"y\":0.535888,\"z\":-0.37311,\"w\":-0.69021}],\"RightHand\":[{\"x\":0.11031082272529602,\"y\":0.19449540972709656,\"z\":0.0405043363571167},{\"x\":0.2807741165161133,\"y\":0.6332069635391235,\"z\":0.2997693121433258,\"w\":-0.6557632088661194}]}}}"
},
{
"clientOnly": 0,
"created": "2016-11-29T23:20:47Z",
"dimensions": {
"x": 1.0173832178115845,
"y": 1.1924686431884766,
"z": 1.0164898633956909
},
"id": "{541efd7c-7e5f-40d5-b6ed-8e195afe9197}",
"lastEditedBy": "{d74cd0af-624e-4d3d-a930-f6cb7e47667d}",
"modelURL": "http://hifi-content.s3.amazonaws.com/alan/dev/Test-Object-7-metal.fbx",
"name": "Photo Booth Model:Default",
"owningAvatarID": "{00000000-0000-0000-0000-000000000000}",
"position": {
"x": 2.5627613067626953,
"y": 1.8016510009765625,
"z": 3.6444053649902344
},
"queryAACube": {
"scale": 1.8682348728179932,
"x": 1.6286438703536987,
"y": 0.86753356456756592,
"z": 2.7102880477905273
},
"rotation": {
"w": 1,
"x": -1.52587890625e-05,
"y": -1.52587890625e-05,
"z": -1.52587890625e-05
},
"shapeType": "static-mesh",
"type": "Model"
},
{
"clientOnly": 0,
"created": "2016-11-29T23:44:49Z",
"dimensions": {
"x": 1.1263399124145508,
"y": 0.55930328369140625,
"z": 1.0736434459686279
},
"id": "{5a286dd0-d6d8-4ed9-a579-1c0587b63fbc}",
"lastEditedBy": "{d74cd0af-624e-4d3d-a930-f6cb7e47667d}",
"modelURL": "atp:/jimi/tutorialroom/table3.fbx",
"name": "Photo Booth Stool",
"owningAvatarID": "{00000000-0000-0000-0000-000000000000}",
"position": {
"x": 2.5654888153076172,
"y": 0.9199371337890625,
"z": 3.7379035949707031
},
"queryAACube": {
"scale": 1.6535331010818481,
"x": 1.7387223243713379,
"y": 0.093170583248138428,
"z": 2.9111371040344238
},
"shapeType": "static-mesh",
"type": "Model"
},
{
"clientOnly": 0,
"created": "2016-11-29T23:20:47Z",
"dimensions": {
"x": 1.3296165466308594,
"y": 3.0967316627502441,
"z": 2.4247901439666748
},
"id": "{a0cd3304-e7e3-4522-8fbb-c4cf8d234eca}",
"lastEditedBy": "{d74cd0af-624e-4d3d-a930-f6cb7e47667d}",
"modelURL": "http://hifi-content.s3.amazonaws.com/Examples%20Content/production/basketball/hoop.fbx",
"name": "Photo Booth Stand Right",
"owningAvatarID": "{00000000-0000-0000-0000-000000000000}",
"position": {
"x": 0,
"y": 2.207672119140625,
"z": 0.97442245483398438
},
"queryAACube": {
"scale": 4.1517748832702637,
"x": -2.0758874416351318,
"y": 0.13178467750549316,
"z": -1.1014649868011475
},
"rotation": {
"w": -0.37505149841308594,
"x": -1.52587890625e-05,
"y": 0.92700088024139404,
"z": 1.52587890625e-05
},
"shapeType": "static-mesh",
"type": "Model"
},
{
"clientOnly": 0,
"created": "2016-11-29T23:20:47Z",
"dimensions": {
"x": 0.6701958179473877,
"y": 3.0894412994384766,
"z": 6.0362682342529297
},
"id": "{f3c937d3-4493-41a1-8928-0cae4c4ce19f}",
"lastEditedBy": "{d74cd0af-624e-4d3d-a930-f6cb7e47667d}",
"modelURL": "http://mpassets.highfidelity.com/af67a13f-7610-49b4-9723-b284fb8ff37a-v1/Dungeon-Wall-6X3.fbx",
"name": "Photo Booth Backdrop",
"owningAvatarID": "{00000000-0000-0000-0000-000000000000}",
"position": {
"x": 2.4534530639648438,
"y": 2.0988006591796875,
"z": 5.0006065368652344
},
"queryAACube": {
"scale": 6.8139815330505371,
"x": -0.9535377025604248,
"y": -1.3081901073455811,
"z": 1.5936157703399658
},
"rotation": {
"w": -0.70128941535949707,
"x": 1.52587890625e-05,
"y": 0.71288621425628662,
"z": -1.52587890625e-05
},
"shapeType": "box",
"type": "Model"
},
{
"clientOnly": 0,
"color": {
"blue": 201,
"green": 252,
"red": 255
},
"created": "2016-11-29T23:20:47Z",
"dimensions": {
"x": 8.0457677841186523,
"y": 1.3176910877227783,
"z": 8.4534158706665039
},
"id": "{7e4f4bed-a47b-449f-acb2-cb0410847e84}",
"lastEditedBy": "{d74cd0af-624e-4d3d-a930-f6cb7e47667d}",
"owningAvatarID": "{00000000-0000-0000-0000-000000000000}",
"position": {
"x": 2.4956779479980469,
"y": 0,
"z": 2.2972354888916016
},
"queryAACube": {
"scale": 11.744400024414062,
"x": -3.3765220642089844,
"y": -5.8722000122070312,
"z": -3.5749645233154297
},
"rotation": {
"w": 1,
"x": -1.52587890625e-05,
"y": -1.52587890625e-05,
"z": -1.52587890625e-05
},
"shape": "Cube",
"type": "Box"
},
{
"clientOnly": 0,
"created": "2016-11-29T23:20:47Z",
"dimensions": {
"x": 1.3296165466308594,
"y": 1.5271610021591187,
"z": 2.4247901439666748
},
"id": "{891fb90c-ca13-46fb-b21e-0da6063ad07b}",
"lastEditedBy": "{d74cd0af-624e-4d3d-a930-f6cb7e47667d}",
"modelURL": "http://hifi-content.s3.amazonaws.com/Examples%20Content/production/basketball/hoop.fbx",
"name": "Photo Booth Stand Middle",
"owningAvatarID": "{00000000-0000-0000-0000-000000000000}",
"position": {
"x": 2.4825477600097656,
"y": 1.4576416015625,
"z": 0
},
"queryAACube": {
"scale": 3.1590676307678223,
"x": 0.90301394462585449,
"y": -0.12189221382141113,
"z": -1.5795338153839111
},
"rotation": {
"w": -1.52587890625e-05,
"x": -4.57763671875e-05,
"y": 1,
"z": -1.52587890625e-05
},
"shapeType": "static-mesh",
"type": "Model"
},
{
"clientOnly": 0,
"created": "2016-11-29T23:20:47Z",
"dimensions": {
"x": 1.3296165466308594,
"y": 2.3939504623413086,
"z": 2.4247901439666748
},
"id": "{3773db2f-5bd8-4d23-a3cc-53ffcc7c30e9}",
"lastEditedBy": "{d74cd0af-624e-4d3d-a930-f6cb7e47667d}",
"modelURL": "http://hifi-content.s3.amazonaws.com/Examples%20Content/production/basketball/hoop.fbx",
"name": "Photo Booth Stand - Left",
"owningAvatarID": "{00000000-0000-0000-0000-000000000000}",
"position": {
"x": 4.8861484527587891,
"y": 1.8541412353515625,
"z": 0.99666976928710938
},
"queryAACube": {
"scale": 3.6576614379882812,
"x": 3.0573177337646484,
"y": 0.025310516357421875,
"z": -0.83216094970703125
},
"rotation": {
"w": 0.38268101215362549,
"x": -4.57763671875e-05,
"y": 0.92385745048522949,
"z": -1.52587890625e-05
},
"shapeType": "static-mesh",
"type": "Model"
}
],
"Version": 65
}